diff --git a/MAINTAINERS b/MAINTAINERS index 5a3cab8bbe284fb7d845bc922b29f9aa414e35e4..f6c91f5b2ad2ecfc130e813b2b9a78a64b86d813 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23863,6 +23863,12 @@ S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* +YUNSILICON XSC DRIVERS +M: Weihonggang +S: Maintained +F: drivers/infiniband/hw/xsc +F: drivers/net/ethernet/yunsilicon/xsc + Z3FOLD COMPRESSED PAGE ALLOCATOR M: Vitaly Wool R: Miaohe Lin diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC new file mode 100644 index 0000000000000000000000000000000000000000..734ca6c9dfe0942cb749ff8ed8cd823aca34a51e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC @@ -0,0 +1 @@ +CONFIG_INFINIBAND_XSC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON new file mode 100644 index 0000000000000000000000000000000000000000..f6aca2a290f7ffebb4f6c127a967acca60ded17d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_YUNSILICON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH new file mode 100644 index 0000000000000000000000000000000000000000..343284c7c0de1b02f4b8edc32c91ee4abf953064 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH @@ -0,0 +1 @@ +CONFIG_YUNSILICON_XSC_ETH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI new file mode 100644 index 0000000000000000000000000000000000000000..3a3fbc36325a9d9f48e10e158b197dc936d0924e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI @@ -0,0 +1 @@ +CONFIG_YUNSILICON_XSC_PCI=m diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index a5827d11e9346a890c55804052c9bfa21076dde1..9d6a7cbab0ae84d0433702263e38f7ad0a28bc06 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -95,6 +95,7 @@ source "drivers/infiniband/hw/qedr/Kconfig" source "drivers/infiniband/hw/qib/Kconfig" source "drivers/infiniband/hw/usnic/Kconfig" source "drivers/infiniband/hw/vmw_pvrdma/Kconfig" +source "drivers/infiniband/hw/xsc/Kconfig" source "drivers/infiniband/sw/rdmavt/Kconfig" endif # !UML source "drivers/infiniband/sw/rxe/Kconfig" diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 1211f4317a9f4fdab32278e2000c1b2b392e64d5..b8fc3871dd1862bd8fb8ef80b7823d2a8bb18ed3 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_INFINIBAND_HNS) += hns/ obj-$(CONFIG_INFINIBAND_QEDR) += qedr/ obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/ obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/ +obj-$(CONFIG_INFINIBAND_XSC) += xsc/ diff --git a/drivers/infiniband/hw/xsc/Kconfig b/drivers/infiniband/hw/xsc/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..6c3d4b7b330e196903f74770843287e5b3338c10 --- /dev/null +++ b/drivers/infiniband/hw/xsc/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. 
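+#
+# Note: the xsc_ib RDMA driver builds on top of the Yunsilicon xsc PCI core
+# and Ethernet drivers under drivers/net/ethernet/yunsilicon/xsc (see the
+# YUNSILICON_XSC_PCI and YUNSILICON_XSC_ETH dependencies below).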
+ +config INFINIBAND_XSC + tristate "Yunsilicon XSC RDMA driver" + default n + depends on NETDEVICES && ETHERNET && PCI && INET + depends on YUNSILICON_XSC_PCI && YUNSILICON_XSC_ETH + help + This driver provides RDMA support for + Yunsilicon XSC devices. + + To compile this driver as a module, choose M here. The module + will be called xsc_ib. diff --git a/drivers/infiniband/hw/xsc/Makefile b/drivers/infiniband/hw/xsc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b4fa5748bbad46598a3538218e8d39bba030dece --- /dev/null +++ b/drivers/infiniband/hw/xsc/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y := -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc +ccflags-y += -Wno-implicit-fallthrough +ifeq ($(USE_INTERNAL_IB_CORE), 1) + ccflags-y += -include /usr/src/ofa_kernel/include/rdma/ib_umem.h +endif + +obj-$(CONFIG_INFINIBAND_XSC) += xsc_ib.o + +xsc_ib-y := main.o xsc_rdma_ctrl.o cq.o qp.o mem.o mr.o ah.o \ + counters.o devx.o private_dev.o ib_umem_ex.o\ + rtt.o xsc_ib_sysfs.o + +xsc_ib-$(CONFIG_XSC_PEER_SUPPORT) += peer_mem.o diff --git a/drivers/infiniband/hw/xsc/ah.c b/drivers/infiniband/hw/xsc/ah.c new file mode 100644 index 0000000000000000000000000000000000000000..39da2861897d7da4da31fe5693785504e101ee85 --- /dev/null +++ b/drivers/infiniband/hw/xsc/ah.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "xsc_ib.h" +#include "user.h" + +static u32 xsc_calc_roce_udp_flow_label(void) +{ + u32 factor = 0; + u32 hash = 0; + u32 flow_label = 0; + + /*This function will generate a 20 bit flow_label*/ + factor = (IB_GRH_FLOWLABEL_MASK - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + 1); + hash = get_random_u32() % factor; + flow_label = hash & IB_GRH_FLOWLABEL_MASK; + + return flow_label; +} + +static u16 xsc_ah_get_udp_sport(const struct xsc_ib_dev *dev, + struct rdma_ah_attr *ah_attr) +{ + enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type; + u16 sport = 0; + u32 fl = 0; + + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && + (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) && + (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK)) { + fl = ah_attr->grh.flow_label; + } else { + /*generate a 20bit flow_label and output to user layer*/ + fl = xsc_calc_roce_udp_flow_label(); + ah_attr->grh.flow_label = fl; + } + + sport = xsc_flow_label_to_udp_sport(fl); + xsc_ib_dbg(dev, "fl=0x%x,sport=0x%x\n", fl, sport); + return sport; +} + +static struct ib_ah *create_ib_ah(struct xsc_ib_dev *dev, + struct xsc_ib_ah *ah, + struct rdma_ah_attr *ah_attr) +{ + enum ib_gid_type gid_type; + + if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { + const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); + + memcpy(ah->av.rgid, &grh->dgid, 16); + ah->av.grh_gid_fl = cpu_to_be32(grh->flow_label | + (1 << 30) | + grh->sgid_index << 20); + ah->av.hop_limit = grh->hop_limit; + ah->av.tclass = grh->traffic_class; + } + + ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4); + + if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { + gid_type = ah_attr->grh.sgid_attr->gid_type; + + memcpy(ah->av.rmac, ah_attr->roce.dmac, + sizeof(ah_attr->roce.dmac)); + + ah->av.udp_sport = xsc_ah_get_udp_sport(dev, ah_attr); + ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1; + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) +#define 
XSC_ECN_ENABLED BIT(1) + ah->av.tclass |= XSC_ECN_ENABLED; + } else { + ah->av.rlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr)); + ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f; + ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf); + } + + return &ah->ibah; +} + +xsc_ib_create_ah_def() +{ + struct xsc_ib_ah *ah = to_mah(ibah); + struct xsc_ib_dev *dev = to_mdev(ibah->device); + struct rdma_ah_attr *ah_attr = init_attr->ah_attr; + enum rdma_ah_attr_type ah_type = ah_attr->type; + + if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && + !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) + return RET_VALUE(-EINVAL); + + if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) { + int err; + struct xsc_ib_create_ah_resp resp = {}; + u32 min_resp_len = offsetof(typeof(resp), dmac) + + sizeof(resp.dmac); + + if (udata->outlen < min_resp_len) + return RET_VALUE(-EINVAL); + + resp.response_length = min_resp_len; + memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN); + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) + return RET_VALUE(err); + } + + create_ib_ah(dev, ah, ah_attr); /* never fails */ + return 0; +} + +int xsc_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +{ + return 0; +} + +xsc_ib_destroy_ah_def() +{ + return 0; +} diff --git a/drivers/infiniband/hw/xsc/counters.c b/drivers/infiniband/hw/xsc/counters.c new file mode 100644 index 0000000000000000000000000000000000000000..971ecf4ff1af0355a96892d0b19ce6152c151e60 --- /dev/null +++ b/drivers/infiniband/hw/xsc/counters.c @@ -0,0 +1,538 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_lag.h" +#include "common/xsc_cmd.h" +#include "counters.h" + +#define COUNTERS_FILE_NAME "counters" +#define COUNTERS_NAMES_FILE_NAME "counters_names" +#define COUNTERS_VALUE_FILE_NAME "counters_value" +#define COUNTERS_ATTER_GROUP_NAME "counters" +#define GLOBAL_COUNTERS_GROUP_NAME "global_counters" +#define GLOBAL_COUNTERS_FILE_NAME "counters" + +static const struct counter_desc hw_rdma_stats_pf_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, np_cnp_sent) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rp_cnp_handled) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, np_ecn_marked_roce_packets) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rp_cnp_ignored) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, read_rsp_out_of_seq) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, implied_nak_seq_err) }, + /*by function*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, out_of_sequence) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, packet_seq_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, out_of_buffer) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rnr_nak_retry_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, local_ack_timeout_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rx_read_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rx_write_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, duplicate_requests) }, + { 
XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_payload_bytes) }, + /*global*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_loopback_bytes) }, +}; + +static const struct counter_desc hw_rdma_stats_vf_desc[] = { + /*by function*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_tx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_tx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_rx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_rx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, out_of_sequence) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, packet_seq_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, out_of_buffer) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rnr_nak_retry_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, local_ack_timeout_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rx_read_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rx_write_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, duplicate_requests) }, +}; + +static const struct counter_desc hw_global_rdma_stats_desc[] = { + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rdma_loopback_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rx_icrc_encapsulated) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, req_cqe_error) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, resp_cqe_error) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, cqe_msg_code_error) }, +}; + +static int get_hw_stats_rdma(struct xsc_core_device *dev, struct xsc_hw_stats_rdma *stats_rdma) +{ + int i = 0; + int ret; + int inlen; + struct xsc_lag *lag; + struct xsc_hw_stats_mbox_in *in; + struct xsc_hw_stats_rdma_mbox_out out; + struct xsc_core_device *xdev_tmp; + + memset(stats_rdma, 0, sizeof(*stats_rdma)); + + if (!dev) + return -1; + + inlen = sizeof(struct xsc_hw_stats_mbox_in) + XSC_MAX_PORTS; + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + xsc_board_lag_lock(dev); + if (xsc_lag_is_roce(dev)) { + lag = xsc_get_lag(dev); + in->lag_member_num = lag->xsc_member_cnt; + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) + in->member_port[i++] = xdev_tmp->mac_port; + in->is_lag = 1; + } else { + in->is_lag = 0; + in->mac_port = dev->mac_port; + } + xsc_board_lag_unlock(dev); + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_STATS_RDMA); + memset(&out, 0, sizeof(out)); + ret = xsc_cmd_exec(dev, (void *)in, inlen, (void *)&out, sizeof(out)); + if (ret || out.hdr.status) { + kfree(in); + return -1; + } + + memcpy(stats_rdma, &out.hw_stats, sizeof(*stats_rdma)); + kfree(in); + return 0; +} + +static ssize_t counters_names_show(struct kobject *kobjs, + struct attribute *attr, char *buf) +{ + int i; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_counters_attribute *xsc_counters_name_attr; + + xsc_counters_name_attr = container_of(attr, + struct xsc_counters_attribute, + attr); + + if (is_support_hw_pf_stats(xsc_counters_name_attr->dev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = 
ARRAY_SIZE(hw_rdma_stats_pf_desc); + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + } + + for (i = 0; i < desc_size; ++i) + count += sprintf(&buf[count], "%s\n", desc[i].format); + + return count; +} + +static ssize_t counters_show(struct kobject *kobjs, + struct attribute *attr, char *buf) +{ + int i; + int ret; + u8 *stats; + u64 value; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_hw_stats_rdma stats_rdma; + struct xsc_counters_attribute *xsc_counters_attr; + + xsc_counters_attr = container_of(attr, + struct xsc_counters_attribute, + attr); + + ret = get_hw_stats_rdma(xsc_counters_attr->dev, &stats_rdma); + if (ret || is_support_hw_pf_stats(xsc_counters_attr->dev) != stats_rdma.is_pf) + return 0; + + if (is_support_hw_pf_stats(xsc_counters_attr->dev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + stats = (u8 *)&stats_rdma.stats.pf_stats; + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + stats = (u8 *)&stats_rdma.stats.vf_stats; + } + + for (i = 0 ; i < desc_size; i++) { + value = *(u64 *)(stats + desc[i].offset); + value = be64_to_cpu(value); + count += sprintf(&buf[count], "%-26s %-20llu\n", + desc[i].format, value); + } + + return count; +} + +static ssize_t counters_value_read(struct file *file, + struct kobject *kob, + struct bin_attribute *bin_attr, + char *buf, loff_t loff, size_t size) +{ + int i; + int ret; + u8 *stats; + int bin_size; + int desc_size; + u64 *tmp_value; + struct xsc_core_device *xdev; + const struct counter_desc *desc; + struct xsc_hw_stats_rdma stats_rdma; + struct xsc_counters_bin_attribute *xsc_counters_bin_attr; + + xsc_counters_bin_attr = container_of(&bin_attr->attr, + struct xsc_counters_bin_attribute, + attr); + + if (xsc_counters_bin_attr->size > size || xsc_counters_bin_attr->size == 0) + return 0; + + xdev = (struct xsc_core_device *)xsc_counters_bin_attr->private; + ret = get_hw_stats_rdma(xdev, &stats_rdma); + if (ret || is_support_hw_pf_stats(xdev) != stats_rdma.is_pf) + return 0; + + if (is_support_hw_pf_stats(xdev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + stats = (u8 *)&stats_rdma.stats.pf_stats; + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + stats = (u8 *)&stats_rdma.stats.vf_stats; + } + + bin_size = desc_size * sizeof(u64); + if (xsc_counters_bin_attr->size < bin_size) + return 0; + + tmp_value = kzalloc(xsc_counters_bin_attr->size, GFP_KERNEL); + if (!tmp_value) + return 0; + + for (i = 0; i < desc_size; i++) { + tmp_value[i] = *(u64 *)(stats + desc[i].offset); + tmp_value[i] = be64_to_cpu(tmp_value[i]); + } + + memcpy(buf, tmp_value, xsc_counters_bin_attr->size); + + kfree(tmp_value); + return xsc_counters_bin_attr->size; +} + +static int counters_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_counters_attribute *xsc_counters_name, *xsc_counters; + struct xsc_counters_bin_attribute *xsc_counters_bin; + struct attribute_group *counters_attr_g; + struct bin_attribute **counters_bin_attrs; + struct attribute **counters_attrs; + int ret = -ENOMEM; + + xsc_counters_name = kzalloc(sizeof(*xsc_counters_name), GFP_KERNEL); + if (!xsc_counters_name) + return -ENOMEM; + + xsc_counters = kzalloc(sizeof(*xsc_counters), GFP_KERNEL); + if (!xsc_counters) + goto err_xsc_counters; + + xsc_counters_bin = kzalloc(sizeof(*xsc_counters_bin), 
GFP_KERNEL); + if (!xsc_counters_bin) + goto err_xsc_counters_bin; + + counters_bin_attrs = kzalloc(sizeof(*counters_bin_attrs) * 2, GFP_KERNEL); + if (!counters_bin_attrs) + goto err_counters_bin_attrs; + + counters_attrs = kzalloc(sizeof(*counters_attrs) * 3, GFP_KERNEL); + if (!counters_attrs) + goto err_counters_attrs; + + counters_attr_g = kzalloc(sizeof(*counters_attr_g), GFP_KERNEL); + if (!counters_attr_g) + goto err_counters_attr_g; + + sysfs_attr_init(&xsc_counters_name->attr); + xsc_counters_name->attr.name = COUNTERS_NAMES_FILE_NAME; + xsc_counters_name->attr.mode = 0444; + xsc_counters_name->show = counters_names_show; + xsc_counters_name->dev = dev; + + sysfs_attr_init(&xsc_counters->attr); + xsc_counters->attr.name = COUNTERS_FILE_NAME; + xsc_counters->attr.mode = 0444; + xsc_counters->show = counters_show; + xsc_counters->dev = dev; + + sysfs_attr_init(&xsc_counters_bin->attr); + xsc_counters_bin->attr.name = COUNTERS_VALUE_FILE_NAME; + xsc_counters_bin->attr.mode = 0444; + xsc_counters_bin->read = counters_value_read; + xsc_counters_bin->private = dev; + xsc_counters_bin->size = sizeof(struct xsc_hw_stats_rdma); + + counters_bin_attrs[0] = (struct bin_attribute *)xsc_counters_bin; + counters_attrs[0] = (struct attribute *)xsc_counters_name; + counters_attrs[1] = (struct attribute *)xsc_counters; + + counters_attr_g->name = COUNTERS_ATTER_GROUP_NAME; + counters_attr_g->attrs = counters_attrs; + counters_attr_g->bin_attrs = counters_bin_attrs; + + dev->counters_priv = counters_attr_g; + + ret = sysfs_create_group(&ib_dev->dev.kobj, counters_attr_g); + if (ret) + goto err_counters_create_group; + + return 0; + +err_counters_create_group: + kfree(counters_attr_g); + counters_attr_g = NULL; + +err_counters_attr_g: + kfree(counters_attrs); + counters_attrs = NULL; + +err_counters_attrs: + kfree(counters_bin_attrs); + counters_bin_attrs = NULL; + +err_counters_bin_attrs: + kfree(xsc_counters_bin); + xsc_counters_bin = NULL; + +err_xsc_counters_bin: + kfree(xsc_counters); + xsc_counters = NULL; + +err_xsc_counters: + kfree(xsc_counters_name); + xsc_counters_name = NULL; + + return ret; +} + +static void counters_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_counters_attribute *xsc_counters_name, *xsc_counters; + struct xsc_counters_bin_attribute *xsc_counters_bin; + struct bin_attribute **counters_bin_attrs; + struct attribute **counters_attrs; + struct attribute_group *counters_attr_g; + + counters_attr_g = dev->counters_priv; + counters_attrs = counters_attr_g->attrs; + counters_bin_attrs = counters_attr_g->bin_attrs; + + xsc_counters_bin = (struct xsc_counters_bin_attribute *)counters_bin_attrs[0]; + xsc_counters_name = (struct xsc_counters_attribute *)counters_attrs[0]; + xsc_counters = (struct xsc_counters_attribute *)counters_attrs[1]; + + if (counters_attr_g) { + sysfs_remove_group(&ib_dev->dev.kobj, counters_attr_g); + kfree(counters_attr_g); + counters_attr_g = NULL; + } + + kfree(counters_attrs); + counters_attrs = NULL; + + kfree(counters_bin_attrs); + counters_bin_attrs = NULL; + + kfree(xsc_counters_bin); + xsc_counters_bin = NULL; + + kfree(xsc_counters_name); + xsc_counters_name = NULL; + + kfree(xsc_counters); + xsc_counters = NULL; +} + +static ssize_t global_cnt_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct xsc_global_cnt_attributes *a = + container_of(attr, struct xsc_global_cnt_attributes, attr); + struct xsc_global_cnt_interface *g = + container_of(kobj, struct xsc_global_cnt_interface, 
kobj); + + if (!a->show) + return -EIO; + + return a->show(g, a, buf); +} + +static ssize_t global_cnt_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct xsc_global_cnt_attributes *a = + container_of(attr, struct xsc_global_cnt_attributes, attr); + struct xsc_global_cnt_interface *g = + container_of(kobj, struct xsc_global_cnt_interface, kobj); + + if (!a->store) + return -EIO; + + return a->store(g, a, buf, size); +} + +static ssize_t global_counters_show(struct xsc_global_cnt_interface *g, + struct xsc_global_cnt_attributes *a, char *buf) +{ + int i; + int ret; + u8 *stats; + u64 value; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_hw_global_stats_mbox_in in; + struct xsc_hw_global_stats_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_GLOBAL_STATS); + ret = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(in), + (void *)&out, sizeof(out)); + if (ret || out.hdr.status) + return 0; + + desc = &hw_global_rdma_stats_desc[0]; + desc_size = ARRAY_SIZE(hw_global_rdma_stats_desc); + stats = (u8 *)&out.hw_stats; + + for (i = 0 ; i < desc_size; i++) { + value = *(u64 *)(stats + desc[i].offset); + value = be64_to_cpu(value); + count += sprintf(&buf[count], "%-26s %-20llu\n", + desc[i].format, value); + } + + return count; +} + +static ssize_t global_counters_store(struct xsc_global_cnt_interface *g, + struct xsc_global_cnt_attributes *a, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +#define GLOBAL_CNT_ATTR(_name) struct xsc_global_cnt_attributes xsc_global_cnt_attr_##_name = \ + __ATTR(_name, 0444, global_##_name##_show, global_##_name##_store) + +GLOBAL_CNT_ATTR(counters); + +static const struct sysfs_ops global_cnt_sysfs_ops = { + .show = global_cnt_attr_show, + .store = global_cnt_attr_store, +}; + +static struct attribute *global_cnt_attrs[] = { + &xsc_global_cnt_attr_counters.attr, + NULL +}; + +ATTRIBUTE_GROUPS(global_cnt); + +static const struct kobj_type global_cnt_ktype = { + .sysfs_ops = &global_cnt_sysfs_ops, + .default_groups = global_cnt_groups, +}; + +static struct xsc_global_cnt_interface *g_global_cnt_interface; + +static int global_cnt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + struct xsc_global_cnt_interface *tmp; + int err; + + if (!xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) + return 0; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + err = kobject_init_and_add(&tmp->kobj, &global_cnt_ktype, + &ib_dev->dev.kobj, GLOBAL_COUNTERS_GROUP_NAME); + if (err) + goto error_return; + + g_global_cnt_interface = tmp; + tmp->xdev = xdev; + return 0; + +error_return: + kobject_put(&tmp->kobj); + kfree(tmp); + return err; +} + +static void global_cnt_sysfs_fini(struct xsc_core_device *xdev) +{ + if (!g_global_cnt_interface || !xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) + return; + + kobject_put(&g_global_cnt_interface->kobj); + kfree(g_global_cnt_interface); + g_global_cnt_interface = NULL; +} + +int xsc_counters_init(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + int ret; + + ret = counters_sysfs_init(ib_dev, dev); + if (ret) + goto error_return; + + ret = global_cnt_sysfs_init(ib_dev, dev); + if (ret) + goto error_global_cnt; + + return 0; + +error_global_cnt: + counters_sysfs_fini(ib_dev, dev); +error_return: + return ret; +} + +void xsc_counters_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + 
counters_sysfs_fini(ib_dev, dev); + global_cnt_sysfs_fini(dev); +} diff --git a/drivers/infiniband/hw/xsc/counters.h b/drivers/infiniband/hw/xsc/counters.h new file mode 100644 index 0000000000000000000000000000000000000000..001a57b8372d0704c0e0cc976713afb5c4fc3f3f --- /dev/null +++ b/drivers/infiniband/hw/xsc/counters.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __COUNTERS_H__ +#define __COUNTERS_H__ + +#define STRING_LEN 32 +#define XSC_DECLARE_STAT(type, fld) ""#fld, offsetof(type, fld) + +struct counter_desc { + char format[STRING_LEN]; + size_t offset; /* Byte offset */ +}; + +struct xsc_counters_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count); + struct xsc_core_device *dev; +}; + +struct xsc_counters_bin_attribute { + struct attribute attr; + size_t size; + void *private; + ssize_t (*read)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + char *buf, loff_t l, size_t s); + ssize_t (*write)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + char *buf, loff_t l, size_t s); + int (*mmap)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + struct vm_area_struct *vma); +}; + +struct xsc_global_cnt_interface { + struct xsc_core_device *xdev; + struct kobject kobj; +}; + +struct xsc_global_cnt_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_global_cnt_interface *g, struct xsc_global_cnt_attributes *a, + char *buf); + ssize_t (*store)(struct xsc_global_cnt_interface *g, struct xsc_global_cnt_attributes *a, + const char *buf, size_t count); +}; + +#endif diff --git a/drivers/infiniband/hw/xsc/cq.c b/drivers/infiniband/hw/xsc/cq.c new file mode 100644 index 0000000000000000000000000000000000000000..102902410b86509c8c17c0524dc10614bd4f7876 --- /dev/null +++ b/drivers/infiniband/hw/xsc/cq.c @@ -0,0 +1,690 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "xsc_ib.h" +#include "user.h" +#include "common/xsc_hsi.h" +#include + +enum { + CQ_OK = 0, + CQ_EMPTY = -1, + CQ_POLL_ERR = -2 +}; + +enum { + XSC_CQE_APP_TAG_MATCHING = 1, +}; + +enum { + XSC_CQE_APP_OP_TM_CONSUMED = 0x1, + XSC_CQE_APP_OP_TM_EXPECTED = 0x2, + XSC_CQE_APP_OP_TM_UNEXPECTED = 0x3, + XSC_CQE_APP_OP_TM_NO_TAG = 0x4, + XSC_CQE_APP_OP_TM_APPEND = 0x5, + XSC_CQE_APP_OP_TM_REMOVE = 0x6, + XSC_CQE_APP_OP_TM_NOOP = 0x7, + XSC_CQE_APP_OP_TM_CONSUMED_SW_RDNV = 0x9, + XSC_CQE_APP_OP_TM_CONSUMED_MSG = 0xA, + XSC_CQE_APP_OP_TM_CONSUMED_MSG_SW_RDNV = 0xB, + XSC_CQE_APP_OP_TM_MSG_COMPLETION_CANCELED = 0xC, +}; + +static const u32 xsc_msg_opcode[][2][2] = { + [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND, + [XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND_IMMDT, + [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV, + [XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV_IMMDT, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE_IMMDT, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_WRITE_IMMDT, + [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_READ, + [XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR, + [XSC_MSG_OPCODE_MAD][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_REQ_SEND, + [XSC_MSG_OPCODE_MAD][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +static const u32 xsc_cqe_opcode[] = { + [XSC_OPCODE_RDMA_REQ_SEND] = IB_WC_SEND, + [XSC_OPCODE_RDMA_REQ_SEND_IMMDT] = IB_WC_SEND, + [XSC_OPCODE_RDMA_RSP_RECV] = IB_WC_RECV, + [XSC_OPCODE_RDMA_RSP_RECV_IMMDT] = IB_WC_RECV, + [XSC_OPCODE_RDMA_REQ_WRITE] = IB_WC_RDMA_WRITE, + [XSC_OPCODE_RDMA_REQ_WRITE_IMMDT] = IB_WC_RDMA_WRITE, + [XSC_OPCODE_RDMA_RSP_WRITE_IMMDT] = IB_WC_RECV_RDMA_WITH_IMM, + [XSC_OPCODE_RDMA_REQ_READ] = IB_WC_RDMA_READ, + [XSC_OPCODE_RDMA_MAD_REQ_SEND] = IB_WC_SEND, + [XSC_OPCODE_RDMA_MAD_RSP_RECV] = IB_WC_RECV, +}; + +int xsc_stall_num_loop = 60; +int xsc_stall_cq_poll_min = 60; +int xsc_stall_cq_poll_max = 100000; +int xsc_stall_cq_inc_step = 100; +int xsc_stall_cq_dec_step = 10; + +static inline u8 xsc_get_cqe_opcode(struct xsc_cqe *cqe) +{ + if (cqe->is_error) + return cqe->type ? 
XSC_OPCODE_RDMA_RSP_ERROR : XSC_OPCODE_RDMA_REQ_ERROR; + if (cqe->msg_opcode > XSC_MSG_OPCODE_MAD) + return XSC_OPCODE_RDMA_CQE_ERROR; + return xsc_msg_opcode[cqe->msg_opcode][cqe->type][cqe->with_immdt]; +} + +static void xsc_ib_cq_comp(struct xsc_core_cq *cq) +{ + struct ib_cq *ibcq = &to_xibcq(cq)->ibcq; + + ibcq->comp_handler(ibcq, ibcq->cq_context); +} + +static void xsc_ib_cq_event(struct xsc_core_cq *xcq, enum xsc_event type) +{ + struct xsc_ib_cq *cq = container_of(xcq, struct xsc_ib_cq, xcq); + struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); + struct ib_cq *ibcq = &cq->ibcq; + struct ib_event event; + + if (type != XSC_EVENT_TYPE_CQ_ERROR) { + xsc_ib_err(dev, "Unexpected event type %d on CQ %06x\n", + type, xcq->cqn); + return; + } + + if (ibcq->event_handler) { + event.device = &dev->ib_dev; + event.event = IB_EVENT_CQ_ERR; + event.element.cq = ibcq; + ibcq->event_handler(&event, ibcq->cq_context); + } +} + +static void *get_cqe_from_buf(struct xsc_ib_cq_buf *buf, int n, int size) +{ + return xsc_buf_offset(&buf->buf, n * size); +} + +static void *get_cqe(struct xsc_ib_cq *cq, int n) +{ + return get_cqe_from_buf(&cq->buf, n, cq->xcq.cqe_sz); +} + +static void *get_sw_cqe(struct xsc_ib_cq *cq, int n) +{ + struct xsc_cqe *cqe; + + cqe = (struct xsc_cqe *)get_cqe(cq, n & (cq->ibcq.cqe - 1)); + + return ((cqe->owner & XSC_CQE_OWNER_MASK) ^ + !!(n & cq->ibcq.cqe)) ? NULL : cqe; +} + +static inline void handle_good_req(struct ib_wc *wc, + struct xsc_cqe *cqe, + u8 opcode) +{ + wc->opcode = xsc_cqe_opcode[opcode]; + if (opcode == XSC_OPCODE_RDMA_REQ_READ) + wc->byte_len = RD_LE_32(cqe->msg_len); + wc->status = IB_WC_SUCCESS; +} + +static void handle_responder(struct ib_wc *wc, struct xsc_cqe *cqe, + struct xsc_ib_qp *qp, u8 opcode) +{ + struct xsc_ib_wq *wq = &qp->rq; + u16 idx; + + wc->byte_len = RD_LE_32(cqe->msg_len); + wc->opcode = xsc_cqe_opcode[opcode]; + wc->status = IB_WC_SUCCESS; + + idx = wq->tail & (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; + ++wq->tail; +} + +static void *get_wqe(struct xsc_ib_qp *qp, int offset) +{ + return xsc_buf_offset(&qp->buf, offset); +} + +static void *get_recv_wqe(struct xsc_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); +} + +static void *get_seg_wqe(void *first, int n) +{ + return first + (n << XSC_BASE_WQE_SHIFT); +} + +static void xsc_handle_rdma_mad_resp_recv(struct xsc_ib_cq *cq, + struct xsc_ib_qp **cur_qp, + struct ib_wc *wc, + struct xsc_cqe *cqe, + u8 opcode) +{ + struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); + void *recv; + struct xsc_wqe_data_seg *data_seg; + struct iphdr *ip4h = NULL; + struct ipv6hdr *ip6h; + struct udphdr *udph; + struct ib_unpacked_eth *eth; + struct ib_unpacked_vlan *vlan; + struct ib_grh *grh; + struct ib_mad *mad; + struct rxe_bth *bth; + struct rxe_deth *deth; + unsigned int pading_sz = 0; + struct xsc_ib_wq *wq; + int idx; + u16 eth_type; + void *l3_start; + + wq = &(*cur_qp)->rq; + idx = wq->tail & (wq->wqe_cnt - 1); + + handle_responder(wc, cqe, *cur_qp, opcode); + + data_seg = get_seg_wqe(get_recv_wqe(*cur_qp, idx), 0); + recv = xsc_ib_recv_mad_sg_virt_addr(&dev->ib_dev, wc, data_seg->va); + + eth = (struct ib_unpacked_eth *)recv; + grh = (struct ib_grh *)recv; + if (eth->type == htons(ETH_P_8021Q)) { + vlan = (struct ib_unpacked_vlan *)(eth + 1); + eth_type = ntohs(vlan->type); + l3_start = vlan + 1; + + wc->vlan_id = ntohs(vlan->tag) & 0x0fff; + wc->sl = (ntohs(vlan->tag) >> 13) & 0x7; + wc->wc_flags |= IB_WC_WITH_VLAN; + } else { + eth_type = 
ntohs(eth->type); + l3_start = eth + 1; + } + + if (eth_type == ETH_P_IP) { + ip4h = (struct iphdr *)l3_start; + udph = (struct udphdr *)(ip4h + 1); + } else { + ip6h = (struct ipv6hdr *)l3_start; + udph = (struct udphdr *)(ip6h + 1); + } + bth = (struct rxe_bth *)(udph + 1); + deth = (struct rxe_deth *)(bth + 1); + mad = (struct ib_mad *)(deth + 1); + + if (eth_type == ETH_P_IP) { + pading_sz = sizeof(*grh) - sizeof(*ip4h); + memmove((u8 *)(grh + 1) - sizeof(*ip4h), ip4h, sizeof(*ip4h)); + memset(grh, 0, pading_sz); + } else { + memmove(grh, ip6h, sizeof(*ip6h)); + } + memmove(grh + 1, mad, sizeof(*mad)); + + wc->wc_flags |= IB_WC_GRH; + + xsc_ib_dbg(dev, "recv cqe idx:%u, len:%u\n", wq->tail, wc->byte_len); + xsc_ib_info(dev, "qp[%d] recv MAD packet, msg_len=%d\n", (*cur_qp)->xqp.qpn, wc->byte_len); + wc->status = IB_WC_SUCCESS; +} + +static int xsc_poll_one(struct xsc_ib_cq *cq, + struct xsc_ib_qp **cur_qp, + struct ib_wc *wc) +{ + struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device); + struct xsc_core_qp *xqp; + struct xsc_ib_wq *wq; + u8 opcode; + u32 qpn; + int idx; + struct xsc_cqe *cqe; + u32 *p = NULL; + + cqe = get_sw_cqe(cq, cq->xcq.cons_index); + if (!cqe) + return -EAGAIN; + + ++cq->xcq.cons_index; + + /* Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + p = (u32 *)cqe; + + qpn = cqe->qp_id; + qpn = le32_to_cpu(qpn); + if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + /* We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + xqp = __xsc_qp_lookup(dev->xdev, qpn); + if (unlikely(!xqp)) { + xsc_ib_warn(dev, "CQE@CQ %d for unknown QPN %d\n", + cq->xcq.cqn, qpn); + return -EINVAL; + } + + *cur_qp = to_xibqp(xqp); + } + + memset(wc, 0, sizeof(*wc)); + wc->qp = &(*cur_qp)->ibqp; + opcode = xsc_get_cqe_opcode(cqe); + switch (opcode) { + case XSC_OPCODE_RDMA_REQ_SEND: + case XSC_OPCODE_RDMA_REQ_WRITE: + case XSC_OPCODE_RDMA_REQ_READ: + case XSC_OPCODE_RDMA_MAD_REQ_SEND: + wq = &(*cur_qp)->sq; + idx = cqe->wqe_id >> (wq->wqe_shift - XSC_BASE_WQE_SHIFT); + idx &= (wq->wqe_cnt - 1); + handle_good_req(wc, cqe, opcode); + wc->wr_id = wq->wrid[idx]; + wq->tail = wq->wqe_head[idx] + 1; + xsc_ib_dbg(dev, "wqeid:%u, wq tail:%u qpn:%u\n", idx, wq->tail, qpn); + wc->status = IB_WC_SUCCESS; + break; + case XSC_OPCODE_RDMA_RSP_RECV: + wq = &(*cur_qp)->rq; + handle_responder(wc, cqe, *cur_qp, opcode); + xsc_ib_dbg(dev, "recv cqe idx:%u, len:%u, qpn:%u\n", wq->tail, wc->byte_len, qpn); + wc->status = IB_WC_SUCCESS; + break; + + case XSC_OPCODE_RDMA_MAD_RSP_RECV: + xsc_ib_dbg(dev, "recv MAD, qpn:%u\n", qpn); + xsc_handle_rdma_mad_resp_recv(cq, cur_qp, wc, cqe, opcode); + break; + + default: + xsc_ib_err(dev, "completion error\n%08x %08x %08x %08x %08x %08x\n", + p[0], p[1], p[2], p[3], p[5], p[6]); + wc->status = IB_WC_GENERAL_ERR; + wc->wr_id = 0; + break; + } + + return 0; +} + +int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct xsc_ib_cq *cq = to_xcq(ibcq); + struct xsc_core_cq *xcq = &cq->xcq; + struct xsc_ib_qp *cur_qp = NULL; + int npolled = 0; + int err = 0; + unsigned long flags; + u32 next_cid; + + spin_lock_irqsave(&cq->lock, flags); + next_cid = xcq->cons_index; + + for (npolled = 0; npolled < num_entries; npolled++) { + err = xsc_poll_one(cq, &cur_qp, wc + npolled); + if (err) + break; + } + + /* make sure cqe read out before update ci */ + rmb(); + + if (next_cid != xcq->cons_index) + xsc_cq_set_ci(xcq); + + 
spin_unlock_irqrestore(&cq->lock, flags); + + return npolled; +} + +int xsc_cqe_is_empty(struct xsc_ib_cq *cq) +{ + struct xsc_cqe *cqe = get_sw_cqe(cq, cq->xcq.cons_index); + + if (!cqe) + return 1; + + return 0; +} + +int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ +#ifdef MSIX_SUPPORT + union xsc_cq_doorbell db; + struct xsc_ib_cq *xcq = to_xcq(ibcq); + struct xsc_core_cq *cq = &xcq->xcq; + int ret = 0; + unsigned long irq_flags; + + spin_lock_irqsave(&xcq->lock, irq_flags); + db.val = 0; + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + if (flags & IB_CQ_NEXT_COMP) + db.arm = 0; + else if (flags & IB_CQ_SOLICITED) + db.arm = 1;/* arm next:0 arm solicited:1 */ + + if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && (!xsc_cqe_is_empty(xcq))) { + ret = 1; + goto out; + } + + /* make sure val write to memory done */ + wmb(); + writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); +out: + spin_unlock_irqrestore(&xcq->lock, irq_flags); + return ret; +#else + if ((flags & IB_CQ_REPORT_MISSED_EVENTS)) + return 1; + return 0; +#endif +} + +static int alloc_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf, + int nent, int cqe_size) +{ + int err; + + err = xsc_buf_alloc(dev->xdev, nent * cqe_size, + PAGE_SIZE, &buf->buf); + if (err) + return err; + + buf->cqe_size = cqe_size; + + return 0; +} + +static void free_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf) +{ + xsc_buf_free(dev->xdev, &buf->buf); +} + +static int create_cq_user(struct xsc_ib_dev *dev, struct ib_udata *udata, + struct ib_ucontext *context, struct xsc_ib_cq *cq, + int entries, struct xsc_create_cq_mbox_in **cqb, + int *cqe_size, int *index, int *inlen) +{ + struct xsc_ib_create_cq ucmd; + int page_shift; + int npages; + int ncont; + int err; + int log_cq_sz; + int hw_npages; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + *cqe_size = ucmd.cqe_size; + + cq->buf.umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, + entries * ucmd.cqe_size, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(cq->buf.umem)) { + err = PTR_ERR(cq->buf.umem); + return err; + } + + xsc_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + if (ncont != npages) { + xsc_ib_dbg(dev, "bad page_shift:%d, ncont:%d\n", page_shift, ncont); + /* amber doesn't support compound pages */ + page_shift = PAGE_SHIFT; + ncont = npages; + xsc_ib_dbg(dev, "overwrite to page_shift:%d, ncont:%d\n", page_shift, ncont); + } + log_cq_sz = ilog2(entries); + hw_npages = DIV_ROUND_UP((1 << log_cq_sz) * sizeof(struct xsc_cqe), PAGE_SIZE_4K); + xsc_ib_info(dev, "addr 0x%llx, entries %d, size %u, npages %d, page_shift %d, ncont %d, hw_npages %d\n", + ucmd.buf_addr, entries, ucmd.cqe_size, npages, page_shift, ncont, hw_npages); + + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * hw_npages; + *cqb = xsc_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_umem; + } + xsc_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, hw_npages, true); + (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + + return 0; + +err_umem: + ib_umem_release(cq->buf.umem); + return err; +} + +static void destroy_cq_user(struct xsc_ib_cq *cq, struct ib_udata *udata) +{ + ib_umem_release(cq->buf.umem); +} + +static int create_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq, + int entries, int cqe_size, + struct xsc_create_cq_mbox_in **cqb, + int *index, int *inlen) +{ + int err; + int i = 0; + struct xsc_cqe *cqe = NULL; + int hw_npages; + + cq->xcq.cqe_sz = cqe_size; + + err = alloc_cq_buf(dev, &cq->buf, entries, 
cqe_size); + if (err) + return err; + + for (i = 0; i < entries; i++) { + cqe = (struct xsc_cqe *)get_cqe(cq, i); + cqe->owner = 1; + } + + hw_npages = DIV_ROUND_UP(entries * cqe_size, PAGE_SIZE_4K); + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * hw_npages; + *cqb = xsc_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_buf; + } + xsc_fill_page_array(&cq->buf.buf, (*cqb)->pas, hw_npages); + (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + + return 0; + +err_buf: + free_cq_buf(dev, &cq->buf); + return err; +} + +static void destroy_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq) +{ + free_cq_buf(dev, &cq->buf); +} + +xsc_ib_create_cq_def() +{ + struct ib_device *ibdev = ibcq->device; + int entries = attr->cqe; + int vector = attr->comp_vector; + struct xsc_create_cq_mbox_in *cqb = NULL; + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_ib_cq *cq; + int index; + int inlen; + int cqe_size; + int irqn; + int err; + unsigned int eqn; + + entries = roundup_pow_of_two(entries); + + xsc_ib_info(dev, "entries:%d, vector:%d, max_cqes:%d\n", entries, vector, + dev->xdev->caps.max_cqes); + + if (entries > dev->xdev->caps.max_cqes) + entries = dev->xdev->caps.max_cqes; + cq = to_xcq(ibcq); + cq->ibcq.cqe = entries; + mutex_init(&cq->resize_mutex); + spin_lock_init(&cq->lock); + cq->resize_buf = NULL; + cq->resize_umem = NULL; + + if (udata) { + err = create_cq_user(dev, udata, NULL, cq, entries, + &cqb, &cqe_size, &index, &inlen); + if (err) + goto err_create; + } else { + cqe_size = sizeof(struct xsc_cqe); + err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, &index, &inlen); + if (err) + goto err_create; + } + + cq->cqe_size = cqe_size; + cqb->ctx.log_cq_sz = ilog2(entries); + cqb->ctx.glb_func_id = cpu_to_be16(dev->xdev->glb_func_id); + + err = xsc_vector2eqn(dev->xdev, vector, &eqn, &irqn); + if (err) + goto err_cqb; + + cqb->ctx.eqn = eqn; + cqb->ctx.eqn = cpu_to_be16(cqb->ctx.eqn); + + err = xsc_core_create_cq(dev->xdev, &cq->xcq, cqb, inlen); + if (err) + goto err_cqb; + + xsc_ib_info(dev, "succeeded to create cqn %d, vector=%d, cq_sz=%d, eqn=%d\n", + cq->xcq.cqn, vector, entries, eqn); + cq->xcq.irqn = irqn; + cq->xcq.comp = xsc_ib_cq_comp; + cq->xcq.event = xsc_ib_cq_event; + + if (udata) { + if (ib_copy_to_udata(udata, &cq->xcq.cqn, sizeof(__u32))) { + err = -EFAULT; + goto err_cmd; + } + } + + xsc_vfree(cqb); + + return 0; + +err_cmd: + xsc_core_destroy_cq(dev->xdev, &cq->xcq); + +err_cqb: + xsc_vfree(cqb); + if (udata) + destroy_cq_user(cq, udata); + else + destroy_cq_kernel(dev, cq); + +err_create: + return RET_VALUE(err); +} + +xsc_ib_destroy_cq_def() +{ + struct xsc_ib_dev *dev = to_mdev(cq->device); + struct xsc_ib_cq *xcq = to_xcq(cq); + + xsc_core_destroy_cq(dev->xdev, &xcq->xcq); + if (udata) + destroy_cq_user(xcq, udata); + else + destroy_cq_kernel(dev, xcq); + + return 0; +} + +static int is_equal_rsn(struct xsc_cqe *cqe, u32 rsn) +{ + u32 qpn = le32_to_cpu(cqe->qp_id); + return rsn == qpn; +} + +void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 rsn) +{ + struct xsc_cqe *cqe, *dest; + u32 prod_index; + int nfreed = 0; + u8 owner_bit; + + if (!cq) + return; + + /* First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. 
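+ *
+ * The sweep below then walks backwards from that producer index
+ * towards the consumer index: CQEs that belong to the QP being
+ * cleaned are counted in nfreed, while every other CQE is copied
+ * forward by nfreed slots, preserving the owner bit of the slot it
+ * lands in. Once done, the consumer index is advanced past the
+ * freed entries and the CI doorbell is updated.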
+ */ + for (prod_index = cq->xcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) + if (prod_index == cq->xcq.cons_index + cq->ibcq.cqe) + break; + + /* Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. + */ + while ((int)(--prod_index) - (int)cq->xcq.cons_index >= 0) { + cqe = (struct xsc_cqe *)get_cqe(cq, prod_index & (cq->ibcq.cqe - 1)); + if (is_equal_rsn(cqe, rsn)) { + ++nfreed; + } else if (nfreed) { + dest = (struct xsc_cqe *)get_cqe(cq, (prod_index + nfreed) & + (cq->ibcq.cqe - 1)); + owner_bit = dest->owner & XSC_CQE_OWNER_MASK; + memcpy(dest, cqe, cq->xcq.cqe_sz); + dest->owner = owner_bit | + (dest->owner & ~XSC_CQE_OWNER_MASK); + } + } + + if (nfreed) { + cq->xcq.cons_index += nfreed; + /* Make sure update of buffer contents is done before + * updating consumer index. + */ + wmb(); + xsc_cq_set_ci(&cq->xcq); + } +} + +void xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn) +{ + if (!cq) + return; + + spin_lock_irq(&cq->lock); + __xsc_ib_cq_clean(cq, qpn); + spin_unlock_irq(&cq->lock); +} diff --git a/drivers/infiniband/hw/xsc/devx.c b/drivers/infiniband/hw/xsc/devx.c new file mode 100644 index 0000000000000000000000000000000000000000..fca43076bae1838296062a04a56b83072718ec0d --- /dev/null +++ b/drivers/infiniband/hw/xsc/devx.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include +#include +#include +#include "common/driver.h" +#include "xsc_ib.h" +#define UVERBS_MODULE_NAME xsc_ib +#include +#include "user.h" + +static struct xsc_ib_ucontext *devx_uattrs2uctx(struct uverbs_attr_bundle *attrs) +{ + return to_xucontext(ib_uverbs_get_ucontext(attrs)); +} + +static bool devx_is_general_cmd(void *in) +{ + struct xsc_inbox_hdr *hdr = + (struct xsc_inbox_hdr *)in; + u16 opcode = be16_to_cpu(hdr->opcode); + + switch (opcode) { + case XSC_CMD_OP_QUERY_HCA_CAP: + return true; + default: + return false; + } +} + +static int UVERBS_HANDLER(XSC_IB_METHOD_DEVX_OTHER)(struct uverbs_attr_bundle *attrs) +{ + struct xsc_ib_ucontext *c; + struct xsc_ib_dev *dev; + void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN); + int cmd_out_len = uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT); + void *cmd_out; + int err; + + c = devx_uattrs2uctx(attrs); + if (IS_ERR(c)) + return PTR_ERR(c); + dev = to_mdev(c->ibucontext.device); + + if (!devx_is_general_cmd(cmd_in)) + return -EINVAL; + + cmd_out = uverbs_zalloc(attrs, cmd_out_len); + if (IS_ERR(cmd_out)) + return PTR_ERR(cmd_out); + + err = xsc_cmd_exec(dev->xdev, cmd_in, + uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN), + cmd_out, cmd_out_len); + if (err) + return err; + + return uverbs_copy_to(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len); +} + +DECLARE_UVERBS_NAMED_METHOD(XSC_IB_METHOD_DEVX_OTHER, + UVERBS_ATTR_PTR_IN(XSC_IB_ATTR_DEVX_OTHER_CMD_IN, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_inbox_hdr)), + UA_MANDATORY, + UA_ALLOC_AND_COPY), + UVERBS_ATTR_PTR_OUT(XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_outbox_hdr)), + UA_MANDATORY)); + +DECLARE_UVERBS_GLOBAL_METHODS(XSC_IB_OBJECT_DEVX, + &UVERBS_METHOD(XSC_IB_METHOD_DEVX_OTHER)); + +const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void) +{ + return NULL; +} diff --git a/drivers/infiniband/hw/xsc/ib_peer_mem.h b/drivers/infiniband/hw/xsc/ib_peer_mem.h new file mode 100644 index 
0000000000000000000000000000000000000000..b955ac53bfde27503d6d6df3843bc2b78ef3152f --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_peer_mem.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#if !defined(IB_PEER_MEM_H) +#define IB_PEER_MEM_H + +#include "peer_mem.h" + +struct ib_peer_memory_statistics { + atomic64_t num_alloc_mrs; + atomic64_t num_dealloc_mrs; + atomic64_t num_reg_pages; + atomic64_t num_dereg_pages; + atomic64_t num_reg_bytes; + atomic64_t num_dereg_bytes; + unsigned long num_free_callbacks; +}; + +struct ib_ucontext; +struct ib_umem_ex; +struct invalidation_ctx; + +struct ib_peer_memory_client { + const struct peer_memory_client *peer_mem; + struct list_head core_peer_list; + int invalidation_required; + struct kref ref; + struct completion unload_comp; + /* lock is used via the invalidation flow */ + struct mutex lock; + struct list_head core_ticket_list; + u64 last_ticket; + struct ib_peer_memory_statistics stats; +}; + +enum ib_peer_mem_flags { + IB_PEER_MEM_ALLOW = 1, + IB_PEER_MEM_INVAL_SUPP = (1 << 1), +}; + +struct core_ticket { + unsigned long key; + void *context; + struct list_head ticket_list; +}; + +struct ib_peer_memory_client *ib_get_peer_client(struct ib_ucontext *context, unsigned long addr, + size_t size, unsigned long peer_mem_flags, + void **peer_client_context); + +void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client, + void *peer_client_context); + +int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem, + struct invalidation_ctx **invalidation_ctx); + +void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct invalidation_ctx *invalidation_ctx); + +int ib_get_peer_private_data(struct ib_ucontext *context, __u64 peer_id, + char *peer_name); +void ib_put_peer_private_data(struct ib_ucontext *context); + +#endif diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.c b/drivers/infiniband/hw/xsc/ib_umem_ex.c new file mode 100644 index 0000000000000000000000000000000000000000..b2d57a885b65d41bf300a0b4855c60f89a07bacc --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +#include "ib_peer_mem.h" +#endif + +#include +#include "ib_umem_ex.h" + +#if defined(IB_CORE_UMEM_EX_V1) +#define get_mm(umem_ctx) ((umem_ctx)->mm) +#elif defined(IB_CORE_UMEM_EX_V2) +#define get_mm(umem_ctx) ((umem_ctx)->owning_mm) +#endif + +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) +static struct ib_umem_ex *peer_umem_get(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem_ex, unsigned long addr, + int dmasync, unsigned long peer_mem_flags) +{ + int ret; + const struct peer_memory_client *peer_mem = ib_peer_mem->peer_mem; + struct invalidation_ctx *invalidation_ctx = NULL; + struct ib_umem *umem = (struct ib_umem *)umem_ex; + + umem_ex->ib_peer_mem = ib_peer_mem; + if (peer_mem_flags & IB_PEER_MEM_INVAL_SUPP) { + ret = ib_peer_create_invalidation_ctx(ib_peer_mem, umem_ex, &invalidation_ctx); + if (ret) + goto end; + } + + /* + * We always request write permissions to the pages, to force breaking of any CoW + * during the registration of the MR. 
For read-only MRs we use the "force" flag to + * indicate that CoW breaking is required but the registration should not fail if + * referencing read-only areas. + */ + ret = peer_mem->get_pages(addr, umem->length, + 1, !umem->writable, + &umem->sg_head, + umem_ex->peer_mem_client_context, + invalidation_ctx ? + invalidation_ctx->context_ticket : 0); + if (ret) + goto out; + + ret = peer_mem->dma_map(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device, + dmasync, + &umem->nmap); + if (ret) + goto put_pages; + + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_reg_pages); + atomic64_add(umem->nmap * BIT(PAGE_SHIFT), &ib_peer_mem->stats.num_reg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_alloc_mrs); + return umem_ex; + +put_pages: + peer_mem->put_pages(&umem->sg_head, umem_ex->peer_mem_client_context); +out: + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); +end: + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + // renamed in different kernel + mmdrop(get_mm(umem)); + kfree(umem_ex); + return ERR_PTR(ret); +} +#endif + +struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem) +{ + struct ib_umem_ex *ret_umem; + + if (!umem) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + ret_umem = (struct ib_umem_ex *)umem; +#else + ret_umem = kzalloc(sizeof(*ret_umem), GFP_KERNEL); + if (!ret_umem) + return ERR_PTR(-ENOMEM); + + ret_umem->umem = *umem; + kfree(umem); +#endif + return ret_umem; +} + +struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, + unsigned long addr, + size_t size, int access, + int dmasync, u8 *peer_exists) +{ +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *peer_mem_client; + struct ib_umem_ex *umem_ex; + struct ib_umem *umem; + + /* + * If the combination of the addr and size requested for this memory + * region causes an integer overflow, return error. 
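+ * Both the raw wrap-around of addr + size and the page-aligned
+ * length used later are checked, since rounding addr + size up to a
+ * page boundary can itself overflow.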
+ */ + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) + return ERR_PTR(-EINVAL); + + if (!can_do_mlock()) + return ERR_PTR(-EPERM); + + umem_ex = kzalloc(sizeof(*umem_ex), GFP_KERNEL); + if (!umem_ex) + return ERR_PTR(-ENOMEM); + umem = &umem_ex->umem; + + umem->context = context; + umem->length = size; + umem->address = addr; + umem->writable = ib_access_writable(access); + get_mm(umem) = current->mm; + +#if defined(IB_CORE_UMEM_EX_V1) + umem->odp_data = NULL; +#endif + + mmgrab(get_mm(umem)); + + peer_mem_client = ib_get_peer_client(context, addr, size, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP, + &umem_ex->peer_mem_client_context); + if (peer_mem_client) { + *peer_exists = 1; + umem->hugetlb = 0; + return peer_umem_get(peer_mem_client, umem_ex, addr, dmasync, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP); + } + + return ERR_PTR(-ENOMEM); +#else + return NULL; +#endif +} + +void ib_umem_ex_release(struct ib_umem_ex *umem_ex) +{ + struct ib_umem *umem = (struct ib_umem *)umem_ex; +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *ib_peer_mem = umem_ex->ib_peer_mem; + const struct peer_memory_client *peer_mem; + struct invalidation_ctx *invalidation_ctx; + + if (ib_peer_mem) { + peer_mem = ib_peer_mem->peer_mem; + invalidation_ctx = umem_ex->invalidation_ctx; + + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); + + peer_mem->dma_unmap(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device); + peer_mem->put_pages(&umem->sg_head, + umem_ex->peer_mem_client_context); + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_dereg_pages); + atomic64_add(umem->nmap * BIT(PAGE_SHIFT), + &ib_peer_mem->stats.num_dereg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_dealloc_mrs); + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + kfree(umem_ex); + } else { + // kernel ib umem release + ib_umem_release(umem); + } +#else + ib_umem_release(umem); +#endif +} + +int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, + umem_invalidate_func_t func, + void *cookie) +{ +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct invalidation_ctx *invalidation_ctx = umem_ex->invalidation_ctx; + int ret = 0; + + mutex_lock(&umem_ex->ib_peer_mem->lock); + if (invalidation_ctx->peer_invalidated) { + pr_err("ib_umem_activate_invalidation_notifier: pages were invalidated by peer\n"); + ret = -EINVAL; + goto end; + } + invalidation_ctx->func = func; + invalidation_ctx->cookie = cookie; + /* from that point any pending invalidations can be called */ +end: + mutex_unlock(&umem_ex->ib_peer_mem->lock); + return ret; +#else + return 0; +#endif +} diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.h b/drivers/infiniband/hw/xsc/ib_umem_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..034d1c55e5aa647a8f50a4e06b032d074e451143 --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_IB_UMEM_EX_H +#define XSC_IB_UMEM_EX_H + +#include + +struct ib_umem_ex; +struct invalidation_ctx; + +// ib umem ex ib_umem add peer memory support +struct ib_umem_ex { + struct ib_umem umem; +#ifndef CONFIG_INFINIBAND_PEER_MEMORY + struct ib_peer_memory_client *ib_peer_mem; + struct invalidation_ctx *invalidation_ctx; + void *peer_mem_client_context; +#endif +}; + +// expand ib_umem to ib_umem_ex by reallocate +struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem); + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +typedef void (*umem_invalidate_func_t)(void *invalidation_cookie, + struct ib_umem_ex *umem_ex, unsigned long addr, size_t size); + +struct invalidation_ctx { + struct ib_umem_ex *umem_ex; + u64 context_ticket; + umem_invalidate_func_t func; + void *cookie; + int peer_callback; + int inflight_invalidation; + int peer_invalidated; + struct completion comp; +}; +#endif + +struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, + unsigned long addr, size_t size, int access, + int dmasync, u8 *peer_exists); + +void ib_umem_ex_release(struct ib_umem_ex *umem_ex); + +int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, + umem_invalidate_func_t func, + void *cookie); +#endif diff --git a/drivers/infiniband/hw/xsc/main.c b/drivers/infiniband/hw/xsc/main.c new file mode 100644 index 0000000000000000000000000000000000000000..9381b9fc426649e52039621c21b3d789035cf912 --- /dev/null +++ b/drivers/infiniband/hw/xsc/main.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifdef HAVE_GENERIC_KMAP_TYPE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" +#include "common/driver.h" +#include "common/xsc_lag.h" + +#include +#include +#include + +#include "user.h" +#include "xsc_ib.h" +#include "xsc_rdma_ctrl.h" + +#define DRIVER_NAME "xsc_ib" +#define DRIVER_VERSION "1.0" +#define DRIVER_RELDATE "Jan 2022" + +MODULE_DESCRIPTION("Yunsilicon Amber HCA IB driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +static char xsc_version[] = + DRIVER_NAME ": Yunsilicon Infiniband driver" + DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; + +static int xsc_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + int max_rq_sg; + int max_sq_sg; + u64 flags; + struct xsc_ib_query_device_resp resp; + size_t resp_len; + u64 max_tso; + int err = -ENOMEM; + union xsc_ib_fw_ver fw_ver; + + memset(&resp, 0, sizeof(resp)); + memset(props, 0, sizeof(*props)); + + resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); + /*check param*/ + if (udata->outlen && udata->outlen < resp_len) + return -EINVAL; + + if (udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) + return -EINVAL; + + resp.response_length = resp_len; + + fw_ver.data = 0; + fw_ver.s.ver_major = dev->xdev->fw_version_major; + fw_ver.s.ver_minor = dev->xdev->fw_version_minor; + fw_ver.s.ver_patch = dev->xdev->fw_version_patch; + fw_ver.s.ver_tweak = dev->xdev->fw_version_tweak; + props->fw_ver = fw_ver.data; + + props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN; + props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; + 
props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY; + + flags = dev->xdev->caps.flags; + if (flags & XSC_DEV_CAP_FLAG_BAD_PKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + if (flags & XSC_DEV_CAP_FLAG_BAD_QKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + if (flags & XSC_DEV_CAP_FLAG_APM) + props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + if (flags & XSC_DEV_CAP_FLAG_XRC) + props->device_cap_flags |= IB_DEVICE_XRC; + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + + props->page_size_cap = dev->xdev->caps.min_page_sz; + props->max_mr_size = (1 << dev->xdev->caps.log_max_mtt) * PAGE_SIZE; + props->max_qp = 1 << dev->xdev->caps.log_max_qp; + props->max_qp_wr = (32 * 1024); /* hack for GPFS */ + max_rq_sg = dev->xdev->caps.max_rq_desc_sz / sizeof(struct xsc_wqe_data_seg); + max_sq_sg = (dev->xdev->caps.max_sq_desc_sz - sizeof(struct xsc_wqe_ctrl_seg_2)) / + sizeof(struct xsc_wqe_data_seg_2); + + props->max_send_sge = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - + XSC_RADDR_SEG_NUM; + props->max_recv_sge = dev->xdev->caps.recv_ds_num; + props->max_sge_rd = 1;/*max sge per read wqe*/ + props->max_cq = 1 << dev->xdev->caps.log_max_cq; + props->max_cqe = dev->xdev->caps.max_cqes - 1; + props->max_mr = 1 << dev->xdev->caps.log_max_mkey; + props->max_pd = 1 << dev->xdev->caps.log_max_pd; + props->max_qp_rd_atom = dev->xdev->caps.max_ra_req_qp; + props->max_qp_init_rd_atom = dev->xdev->caps.max_ra_res_qp; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_srq = + dev->xdev->caps.log_max_srq ? (1 << dev->xdev->caps.log_max_srq) : 0; + props->max_srq_wr = dev->xdev->caps.max_srq_wqes - 1; + props->max_srq_sge = dev->xdev->caps.log_max_srq ? (max_rq_sg - 1) : 0; + props->max_fast_reg_page_list_len = (unsigned int)-1; + props->local_ca_ack_delay = dev->xdev->caps.local_ca_ack_delay; + props->atomic_cap = dev->xdev->caps.flags & XSC_DEV_CAP_FLAG_ATOMIC ? + IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->masked_atomic_cap = IB_ATOMIC_HCA; + props->max_mcast_grp = + dev->xdev->caps.log_max_mcg ? (1 << dev->xdev->caps.log_max_mcg) : 0; + props->max_mcast_qp_attach = dev->xdev->caps.max_qp_mcg; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + + props->sys_image_guid = dev->xdev->board_info->guid; + props->vendor_id = dev->xdev->pdev->vendor; + props->vendor_part_id = dev->xdev->pdev->device; + props->hw_ver = ((dev->xdev->chip_ver_l & 0xffff) << 16) | + (dev->xdev->hotfix_num & 0xffff); + props->max_pkeys = 0x80; + props->max_wq_type_rq = 1 << dev->xdev->caps.log_max_qp; + + props->hca_core_clock = dev->xdev->caps.hca_core_clock * 1000;//KHz + props->rss_caps.max_rwq_indirection_tables = + dev->xdev->caps.max_rwq_indirection_tables; + props->rss_caps.max_rwq_indirection_table_size = + dev->xdev->caps.max_rwq_indirection_table_size; + props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; + + /*response tso_caps extend param*/ + if (field_avail(typeof(resp), tso_caps, udata->outlen)) { + max_tso = dev->xdev->caps.log_max_tso ? 
(1 << dev->xdev->caps.log_max_tso) : 0; + if (max_tso) { + resp.tso_caps.max_tso = max_tso; + resp.tso_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; + resp.response_length += sizeof(resp.tso_caps); + } + } + + /*response rss_caps extend param*/ + if (field_avail(typeof(resp), rss_caps, udata->outlen)) { + resp.rss_caps.rx_hash_function = XSC_RX_HASH_FUNC_TOEPLITZ; + resp.rss_caps.rx_hash_fields_mask = + XSC_RX_HASH_SRC_IPV4 | + XSC_RX_HASH_DST_IPV4 | + XSC_RX_HASH_SRC_IPV6 | + XSC_RX_HASH_DST_IPV6 | + XSC_RX_HASH_SRC_PORT_TCP | + XSC_RX_HASH_DST_PORT_TCP | + XSC_RX_HASH_SRC_PORT_UDP | + XSC_RX_HASH_DST_PORT_UDP | + XSC_RX_HASH_INNER; + resp.response_length += sizeof(resp.rss_caps); + } + + /*response packet pacing caps*/ + if (field_avail(typeof(resp), packet_pacing_caps, udata->outlen)) { + resp.packet_pacing_caps.qp_rate_limit_max = + dev->xdev->caps.qp_rate_limit_max; + resp.packet_pacing_caps.qp_rate_limit_min = + dev->xdev->caps.qp_rate_limit_min; + resp.packet_pacing_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; + + resp.response_length += sizeof(resp.packet_pacing_caps); + } + + /*copy response data to user*/ + if (udata->outlen) { + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) { + xsc_ib_err(dev, "copy response info to udata fail,err=%d\n", err); + return err; + } + } + + return 0; +} + +void xsc_calc_link_info(struct xsc_core_device *xdev, + struct ib_port_attr *props) +{ + switch (xsc_get_link_speed(xdev)) { + case MODULE_SPEED_10G: + props->active_speed = XSC_RDMA_LINK_SPEED_10GB; + props->active_width = 1; + break; + case MODULE_SPEED_25G: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 1; + break; + case MODULE_SPEED_40G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_10GB; + props->active_width = 2; + break; + case MODULE_SPEED_50G_R: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 1; + break; + case MODULE_SPEED_50G_R2: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 1; + break; + case MODULE_SPEED_100G_R2: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 2; + break; + case MODULE_SPEED_100G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 2; + break; + case MODULE_SPEED_200G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 2; + break; + case MODULE_SPEED_200G_R8: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 4; + break; + case MODULE_SPEED_400G_R8: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 4; + break; + default: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 1; + break; + } +} + +static enum rdma_link_layer xsc_ib_port_link_layer(struct ib_device *ibdev, u32 port) +{ + return IB_LINK_LAYER_ETHERNET; +} + +int xsc_ib_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct net_device *ndev = dev->netdev; + struct xsc_core_device *xdev = dev->xdev; + + if (port < 1 || port > xdev->caps.num_ports) { + xsc_ib_warn(dev, "invalid port number %d\n", port); + return -EINVAL; + } + + memset(props, 0, sizeof(*props)); + + props->state = IB_PORT_ACTIVE; + props->max_mtu = IB_MTU_4096; + props->active_mtu = min(props->max_mtu, xsc_net_to_ib_mtu(ndev->mtu)); + props->gid_tbl_len = 256; + props->port_cap_flags = 0x4010000; + props->max_msg_sz = 0x40000000; + props->bad_pkey_cntr = 0; + props->qkey_viol_cntr = 0; + props->pkey_tbl_len = 1; + 
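+	/* RoCE only: the subnet-manager related attributes (LID, SM LID, LMC,
+	 * VL count) have no meaning on an Ethernet port and are reported as
+	 * zero; active speed and width are derived from the module speed below.
+	 */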
props->lid = 0; + props->sm_lid = 0; + props->lmc = 0; + props->max_vl_num = 0; + props->sm_sl = 0; + props->subnet_timeout = 0; + props->init_type_reply = 0; + if (!is_support_rdma(xdev)) { + props->active_width = 1; + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + } else { + xsc_calc_link_info(xdev, props); + } + + props->phys_state = netif_carrier_ok(ndev) ? XSC_RDMA_PHY_STATE_LINK_UP : + XSC_RDMA_PHY_STATE_DISABLED; + return 0; +} + +const struct xsc_gid xsc_gid_zero; + +static int xsc_ib_query_gid(struct ib_device *ibdev, u32 port_num, + int index, union ib_gid *gid) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; + + /* Ignore port_num */ + memset(gid, 0, sizeof(*gid)); + if (index >= sgid_tbl->max) + return -EINVAL; + + memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid)); + + return 0; +} + +static int xsc_ib_del_gid(const struct ib_gid_attr *attr, void **context) +{ + int index = 0; + struct xsc_ib_dev *dev = to_mdev(attr->device); + struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid; + struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; + + if (!sgid_tbl) + return -EINVAL; + + if (!sgid_tbl->count) + return -ENOMEM; + + for (index = 0; index < sgid_tbl->max; index++) { + if (!memcmp(&sgid_tbl->tbl[index], gid_raw, sizeof(*gid_raw))) + break; + } + + if (index == sgid_tbl->max) + return 0; + + memcpy(&sgid_tbl->tbl[index], &xsc_gid_zero, sizeof(xsc_gid_zero)); + sgid_tbl->count--; + xsc_ib_info(dev, "Del gid from index:%u, count:%u\n", index, sgid_tbl->count); + + return 0; +} + +int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) +{ + int i = 0; + u32 free_idx = 0; + struct xsc_ib_dev *dev = to_mdev(attr->device); + struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid; + struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; + + if (!sgid_tbl) + return -EINVAL; + + if (sgid_tbl->count == sgid_tbl->max) + return -ENOMEM; + + free_idx = sgid_tbl->max; + for (i = 0; i < sgid_tbl->max; i++) { + if (!memcmp(&sgid_tbl->tbl[i], gid_raw, sizeof(*gid_raw))) { + return 0; + } else if (!memcmp(&sgid_tbl->tbl[i], &xsc_gid_zero, sizeof(xsc_gid_zero)) && + free_idx == sgid_tbl->max) { + free_idx = i; + } + } + + if (free_idx == sgid_tbl->max) + return -ENOMEM; + + memcpy(&sgid_tbl->tbl[free_idx], gid_raw, sizeof(*gid_raw)); + sgid_tbl->count++; + xsc_ib_info(dev, "Add gid to index:%u, count:%u, max:%u\n", free_idx, sgid_tbl->count, + sgid_tbl->max); + + return 0; +} + +static int xsc_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, + u16 *pkey) +{ + *pkey = 0xffff; + return 0; +} + +struct xsc_reg_node_desc { + u8 desc[64]; +}; + +static int xsc_ib_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *props) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_reg_node_desc in; + struct xsc_reg_node_desc out; + int err; + + return 0; + + if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) + return -EOPNOTSUPP; + + if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) + return 0; + + /* + * If possible, pass node desc to FW, so it can generate + * a 144 trap. If cmd fails, just ignore. 
+ */ + memcpy(&in, props->node_desc, 64); + err = xsc_core_access_reg(dev->xdev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_NODE_DESC, 0, 1); + if (err) + return err; + + memcpy(ibdev->node_desc, props->node_desc, 64); + + return err; +} + +static int xsc_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct ib_port_attr attr; + u32 tmp; + int err; + + return 0; + + mutex_lock(&dev->cap_mask_mutex); + + err = xsc_ib_query_port(ibdev, port, &attr); + if (err) + goto out; + + tmp = (attr.port_cap_flags | props->set_port_cap_mask) & + ~props->clr_port_cap_mask; + + err = xsc_set_port_caps(dev->xdev, port, tmp); + +out: + mutex_unlock(&dev->cap_mask_mutex); + return err; +} + +xsc_ib_alloc_ucontext_def() +{ + struct ib_device *ibdev = uctx->device; + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_ib_alloc_ucontext_req req; + struct xsc_ib_alloc_ucontext_resp resp; + struct xsc_ib_ucontext *context; + int err; + + if (!dev->ib_active) + return RET_VALUE(-EAGAIN); + + err = ib_copy_from_udata(&req, udata, sizeof(req)); + if (err) + return RET_VALUE(err); + + resp.qp_tab_size = 1 << dev->xdev->caps.log_max_qp; + resp.cache_line_size = L1_CACHE_BYTES; + resp.max_sq_desc_sz = dev->xdev->caps.max_sq_desc_sz; + resp.max_rq_desc_sz = dev->xdev->caps.max_rq_desc_sz; + resp.max_send_wqebb = dev->xdev->caps.max_wqes; + resp.max_recv_wr = dev->xdev->caps.max_wqes; + resp.qpm_tx_db = dev->xdev->regs.tx_db; + resp.qpm_rx_db = dev->xdev->regs.rx_db; + resp.cqm_next_cid_reg = dev->xdev->regs.complete_reg; + resp.cqm_armdb = dev->xdev->regs.complete_db; + resp.send_ds_num = dev->xdev->caps.send_ds_num; + resp.recv_ds_num = dev->xdev->caps.recv_ds_num; + resp.cmds_supp_uhw |= XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE; + + context = to_xucontext(uctx); + + INIT_LIST_HEAD(&context->db_page_list); + mutex_init(&context->db_page_mutex); + + resp.num_ports = dev->xdev->caps.num_ports; + err = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (err) + goto out_ctx; + + return 0; + +out_ctx: + return RET_VALUE(err); +} + +xsc_ib_dealloc_ucontext_def() +{ +} + +static int xsc_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) +{ + struct xsc_ib_dev *dev = to_mdev(ibcontext->device); + struct xsc_core_device *xdev = dev->xdev; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + resource_size_t reg_base; + resource_size_t reg_size = vma->vm_end - vma->vm_start; + + xsc_core_dbg(xdev, "offset:0x%lx", offset); + + if (offset == (xdev->regs.tx_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.tx_db & PAGE_MASK); + else if (offset == (xdev->regs.rx_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.rx_db & PAGE_MASK); + else if (offset == (xdev->regs.complete_reg & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.complete_reg & PAGE_MASK); + else if (offset == (xdev->regs.complete_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.complete_db & PAGE_MASK); + else + return -EINVAL; + + xsc_core_dbg(xdev, "regbase:0x%llx", reg_base); + + reg_base = xsc_core_is_pf(xdev) ? 
reg_base - 0xA0000000 : reg_base; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return remap_pfn_range(vma, vma->vm_start, reg_base >> PAGE_SHIFT, + reg_size, vma->vm_page_prot); + + return 0; +} + +xsc_ib_alloc_pd_def() +{ + struct ib_device *ibdev = ibpd->device; + struct xsc_ib_alloc_pd_resp resp; + struct xsc_ib_pd *pd; + int err; + + pd = to_mpd(ibpd); + + err = xsc_core_alloc_pd(to_mdev(ibdev)->xdev, &pd->pdn); + if (err) { + kfree(pd); + return RET_VALUE(err); + } + + if (udata) { + resp.pdn = pd->pdn; + if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { + xsc_core_dealloc_pd(to_mdev(ibdev)->xdev, pd->pdn); + + return RET_VALUE(-EFAULT); + } + } else { + pd->pa_lkey = 0; + } + + return 0; +} + +xsc_ib_dealloc_pd_def() +{ + struct xsc_ib_dev *mdev = to_mdev(pd->device); + struct xsc_ib_pd *mpd = to_mpd(pd); + + xsc_core_dealloc_pd(mdev->xdev, mpd->pdn); + + return 0; +} + +static int xsc_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | + RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + immutable->max_mad_size = IB_MGMT_MAD_SIZE * 2; + + return 0; +} + +static void _xsc_get_netdev(struct xsc_ib_dev *dev) +{ + struct net_device *netdev = (struct net_device *)(dev->xdev->netdev); + + dev->netdev = netdev; +} + +static struct net_device *xsc_get_netdev(struct ib_device *ibdev, u32 port_num) +{ + struct xsc_ib_dev *xsc_ib_dev = to_mdev(ibdev); + struct net_device *dev = xsc_ib_dev->netdev; + struct xsc_core_device *xdev = xsc_ib_dev->xdev; + + if (dev) { + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + struct net_device *upper = NULL; + + rcu_read_lock(); + upper = netdev_master_upper_dev_get_rcu(dev); + if (upper) { + struct net_device *active; + + active = bond_option_active_slave_get_rcu(netdev_priv(upper)); + if (active) + dev = active; + } + rcu_read_unlock(); + } + dev_hold(dev); + xsc_board_lag_unlock(xdev); + } + + return dev; +} + +void xsc_get_guid(const u8 *dev_addr, u8 *guid) +{ + u8 mac[ETH_ALEN]; + + /* MAC-48 to EUI-64 mapping */ + memcpy(mac, dev_addr, ETH_ALEN); + guid[0] = mac[0] ^ 2; + guid[1] = mac[1]; + guid[2] = mac[2]; + guid[3] = 0xff; + guid[4] = 0xfe; + guid[5] = mac[3]; + guid[6] = mac[4]; + guid[7] = mac[5]; +} + +static int init_node_data(struct xsc_ib_dev *dev) +{ + int err = -ENOMEM; + + strscpy(dev->ib_dev.node_desc, "xsc_node_desc", sizeof(dev->ib_dev.node_desc)); + + if (unlikely(!dev->netdev->dev_addr)) + _xsc_get_netdev(dev); + xsc_get_guid(dev->netdev->dev_addr, (u8 *)&dev->ib_dev.node_guid); + err = 0; + return err; +} + +void xsc_core_event(struct xsc_core_device *xdev, enum xsc_dev_event event, + unsigned long param) +{ + struct xsc_priv *priv = &xdev->priv; + struct xsc_device_context *dev_ctx; + unsigned long flags; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + /* After xsc_detach_device, the dev_ctx->intf is still set and dev_ctx is + * still in priv->ctx_list. In this case, only notify the dev_ctx if its + * ADDED or ATTACHED bit are set. 
+ */ + list_for_each_entry(dev_ctx, &priv->ctx_list, list) { + if (dev_ctx->intf->event) + dev_ctx->intf->event(xdev, dev_ctx->context, 0, param); + } + spin_unlock_irqrestore(&priv->ctx_lock, flags); +} + +static void xsc_ib_event(struct xsc_core_device *dev, void *context, + enum xsc_dev_event event, unsigned long data) +{ + struct xsc_ib_dev *ibdev = (struct xsc_ib_dev *)context; + struct ib_event ibev; + u8 port = 0; + + switch (event) { + case XSC_DEV_EVENT_SYS_ERROR: + ibdev->ib_active = false; + ibev.event = IB_EVENT_DEVICE_FATAL; + break; + + case XSC_DEV_EVENT_PORT_UP: + ibev.event = IB_EVENT_PORT_ACTIVE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_PORT_DOWN: + ibev.event = IB_EVENT_PORT_ERR; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_PORT_INITIALIZED: + /* not used by ULPs */ + return; + + case XSC_DEV_EVENT_LID_CHANGE: + ibev.event = IB_EVENT_LID_CHANGE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_PKEY_CHANGE: + ibev.event = IB_EVENT_PKEY_CHANGE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_GUID_CHANGE: + ibev.event = IB_EVENT_GID_CHANGE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_CLIENT_REREG: + ibev.event = IB_EVENT_CLIENT_REREGISTER; + port = *(u8 *)data; + break; + } + + ibev.device = &ibdev->ib_dev; + ibev.element.port_num = port; + + if (ibdev->ib_active) + ib_dispatch_event(&ibev); +} + +static int get_port_caps(struct xsc_ib_dev *dev) +{ + struct ib_device_attr *dprops = NULL; + struct ib_port_attr *pprops = NULL; + int err = -ENOMEM; + u32 port; + /*used to prevent coredump when insmod xsc*/ + struct ib_udata uhw = {.inlen = 0, .outlen = 0}; + + pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); + if (!pprops) + goto out; + + dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); + if (!dprops) + goto out; + + err = xsc_ib_query_device(&dev->ib_dev, dprops, &uhw); + if (err) { + xsc_ib_warn(dev, "query_device failed %d\n", err); + goto out; + } + + for (port = 1; port <= dev->xdev->caps.num_ports; port++) { + err = xsc_ib_query_port(&dev->ib_dev, port, pprops); + if (err) { + xsc_ib_warn(dev, "query_port %d failed %d\n", port, err); + break; + } + dev->xdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys; + dev->xdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; + xsc_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", + dprops->max_pkeys, pprops->gid_tbl_len); + } + +out: + kfree(pprops); + kfree(dprops); + + return err; +} + +static int xsc_create_dev_res(struct xsc_ib_res *ib_res) +{ + struct xsc_ib_dev *dev; + + dev = container_of(ib_res, struct xsc_ib_dev, ib_res); + ib_res->sgid_tbl.max = dev->xdev->caps.port[0].gid_table_len; + + ib_res->sgid_tbl.tbl = kcalloc(ib_res->sgid_tbl.max, sizeof(struct xsc_gid), + GFP_KERNEL); + + if (!ib_res->sgid_tbl.tbl) + return -ENOMEM; + + return 0; +} + +static void xsc_destroy_dev_res(struct xsc_ib_res *ib_res) +{ + kfree(ib_res->sgid_tbl.tbl); +} + +static int populate_specs_root(struct xsc_ib_dev *dev) +{ + const struct uverbs_object_tree_def **trees = + (const struct uverbs_object_tree_def **)dev->driver_trees; + size_t num_trees = 0; + + trees[num_trees++] = xsc_ib_get_devx_tree(); + WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees)); + trees[num_trees] = NULL; + + return 0; +} + +static void crc_table_init(struct xsc_ib_dev *dev) +{ + u32 c, i, j; + + for (i = 0; i < 256; i++) { + c = i; + for (j = 0; j < 8; j++) { + if (c & 1) + c = 0xedb88320L ^ (c >> 1); + else + c = c >> 1; + } + dev->crc_32_table[i] = c; + } +} + +static void xsc_ib_get_dev_fw_str(struct 
ib_device *ibdev, char *str) +{ + struct xsc_core_device *dev = to_mdev(ibdev)->xdev; + u8 ver_major = dev->fw_version_major; + u8 ver_minor = dev->fw_version_minor; + u16 ver_patch = dev->fw_version_patch; + u32 ver_tweak = dev->fw_version_tweak; + + if (ver_tweak == 0) { + snprintf(str, IB_FW_VERSION_NAME_MAX, "v%u.%u.%u", + ver_major, ver_minor, ver_patch); + } else { + snprintf(str, IB_FW_VERSION_NAME_MAX, "v%u.%u.%u+%u", + ver_major, ver_minor, ver_patch, ver_tweak); + } +} + +static void xsc_ib_dev_setting(struct xsc_ib_dev *dev) +{ + dev->ib_dev.ops.owner = THIS_MODULE; + dev->ib_dev.ops.uverbs_abi_ver = XSC_IB_UVERBS_ABI_VERSION; + dev->ib_dev.ops.driver_id = (enum rdma_driver_id)RDMA_DRIVER_XSC5; + dev->ib_dev.ops.uverbs_no_driver_id_binding = 1; + dev->ib_dev.ops.query_device = xsc_ib_query_device; + dev->ib_dev.ops.query_port = xsc_ib_query_port; + dev->ib_dev.ops.query_gid = xsc_ib_query_gid; + dev->ib_dev.ops.add_gid = xsc_ib_add_gid; + dev->ib_dev.ops.del_gid = xsc_ib_del_gid; + dev->ib_dev.ops.query_pkey = xsc_ib_query_pkey; + + dev->ib_dev.ops.modify_device = xsc_ib_modify_device; + dev->ib_dev.ops.modify_port = xsc_ib_modify_port; + dev->ib_dev.ops.alloc_ucontext = xsc_ib_alloc_ucontext; + dev->ib_dev.ops.dealloc_ucontext = xsc_ib_dealloc_ucontext; + dev->ib_dev.ops.mmap = xsc_ib_mmap; + + dev->ib_dev.ops.alloc_pd = xsc_ib_alloc_pd; + dev->ib_dev.ops.dealloc_pd = xsc_ib_dealloc_pd; + dev->ib_dev.ops.create_ah = xsc_ib_create_ah; + dev->ib_dev.ops.query_ah = xsc_ib_query_ah; + dev->ib_dev.ops.destroy_ah = xsc_ib_destroy_ah; + + dev->ib_dev.ops.get_link_layer = xsc_ib_port_link_layer; + dev->ib_dev.ops.get_netdev = xsc_get_netdev; + + dev->ib_dev.ops.create_qp = xsc_ib_create_qp; + dev->ib_dev.ops.modify_qp = xsc_ib_modify_qp; + dev->ib_dev.ops.query_qp = xsc_ib_query_qp; + dev->ib_dev.ops.destroy_qp = xsc_ib_destroy_qp; + dev->ib_dev.ops.post_send = xsc_ib_post_send; + dev->ib_dev.ops.post_recv = xsc_ib_post_recv; + dev->ib_dev.ops.create_cq = xsc_ib_create_cq; + dev->ib_dev.ops.destroy_cq = xsc_ib_destroy_cq; + dev->ib_dev.ops.poll_cq = xsc_ib_poll_cq; + dev->ib_dev.ops.req_notify_cq = xsc_ib_arm_cq; + dev->ib_dev.ops.get_dma_mr = xsc_ib_get_dma_mr; + dev->ib_dev.ops.reg_user_mr = xsc_ib_reg_user_mr;//optional + dev->ib_dev.ops.dereg_mr = xsc_ib_dereg_mr; + dev->ib_dev.ops.alloc_mr = xsc_ib_alloc_mr; + dev->ib_dev.ops.map_mr_sg = xsc_ib_map_mr_sg; + + dev->ib_dev.ops.get_port_immutable = xsc_port_immutable; + + dev->ib_dev.ops.drain_sq = xsc_ib_drain_sq; + dev->ib_dev.ops.drain_rq = xsc_ib_drain_rq; + dev->ib_dev.ops.get_dev_fw_str = xsc_ib_get_dev_fw_str; + + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_ah, xsc_ib_ah, ibah); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_cq, xsc_ib_cq, ibcq); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_pd, xsc_ib_pd, ibpd); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_ucontext, xsc_ib_ucontext, ibucontext); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_qp, xsc_ib_qp, ibqp); +} + +static void xsc_get_port_state(struct net_device *ndev, enum xsc_dev_event *ev) +{ + *ev = XSC_DEV_EVENT_PORT_DOWN; + if (netif_running(ndev) && netif_carrier_ok(ndev)) + *ev = XSC_DEV_EVENT_PORT_UP; +} + +static int xsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct xsc_ib_dev *ibdev = container_of(this, struct xsc_ib_dev, nb); + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + enum xsc_dev_event ev; + u8 port = 1; + + if (ndev != ibdev->netdev) + goto done; + + xsc_ib_info(ibdev, "netdev notfiy event:%ld\n", event); + switch (event) { + 
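+	/* Carrier and administrative changes are collapsed into a single port
+	 * state: each of these events re-reads the netif state and forwards it
+	 * to the IB core as PORT_ACTIVE or PORT_ERR through xsc_ib_event().
+	 */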
case NETDEV_CHANGE: + case NETDEV_UP: + case NETDEV_DOWN: + xsc_get_port_state(ibdev->netdev, &ev); + xsc_ib_event(ibdev->xdev, ibdev, ev, (unsigned long)&port); + break; + default: + break; + } +done: + return NOTIFY_DONE; +} + +static int xsc_register_netdev_notifier(struct xsc_ib_dev *ibdev) +{ + ibdev->nb.notifier_call = xsc_netdev_event; + return register_netdevice_notifier(&ibdev->nb); +} + +static int xsc_unregister_netdev_notifier(struct xsc_ib_dev *ibdev) +{ + return unregister_netdevice_notifier(&ibdev->nb); +} + +static int init_one(struct xsc_core_device *xdev, + struct xsc_ib_dev **m_ibdev) +{ + struct xsc_ib_dev *dev; + int err; + + pr_info_once("%s", xsc_version); + + dev = (struct xsc_ib_dev *)ib_alloc_device(xsc_ib_dev, ib_dev); + if (!dev) + return -ENOMEM; + + dev->xdev = xdev; + xdev->event = xsc_core_event; + _xsc_get_netdev(dev); + err = get_port_caps(dev); + if (err) + goto err_free; + if (!xdev->caps.msix_enable) + dev->num_comp_vectors = 1; + else + dev->num_comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; + + if (xsc_lag_is_roce(xdev)) + strscpy(dev->ib_dev.name, "xscale_bond_%d", IB_DEVICE_NAME_MAX); + else + strscpy(dev->ib_dev.name, "xscale_%d", IB_DEVICE_NAME_MAX); + + dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = 0xFF; + dev->num_ports = xdev->caps.num_ports; + dev->ib_dev.phys_port_cnt = dev->num_ports; + dev->ib_dev.num_comp_vectors = dev->num_comp_vectors; + dev->ib_dev.dev.parent = &xdev->pdev->dev; + xsc_ib_dev_setting(dev); + dev->cm_dscp = DSCP_PCP_UNSET; + dev->cm_pcp = DSCP_PCP_UNSET; + dev->force_pcp = DSCP_PCP_UNSET; + dev->force_dscp = DSCP_PCP_UNSET; + + dev->ib_dev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_CREATE_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_REREG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | + (1ull << IB_USER_VERBS_CMD_OPEN_QP); + + init_node_data(dev); + + mutex_init(&dev->cap_mask_mutex); + spin_lock_init(&dev->mr_lock); + + err = xsc_create_dev_res(&dev->ib_res); + if (err) + goto err_free; + + crc_table_init(dev); + + populate_specs_root(dev); + + xsc_reg_local_dma_mr(xdev); + + if (ib_register_device(&dev->ib_dev, dev->ib_dev.name, dev->xdev->device)) + goto err_rsrc; + + rdma_roce_rescan_device(&dev->ib_dev); + dev->ib_active = true; + *m_ibdev = dev; + + xdev->xsc_ib_dev = dev; + + xsc_register_netdev_notifier(dev); + + xsc_counters_init(&dev->ib_dev, xdev); + + xsc_priv_dev_init(&dev->ib_dev, xdev); + + xsc_rtt_sysfs_init(&dev->ib_dev, xdev); + + xsc_ib_sysfs_init(&dev->ib_dev, xdev); + + return 0; + +err_rsrc: + xsc_destroy_dev_res(&dev->ib_res); + 
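+	/* Fall through: err_free also releases the ib_device allocation. */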
+err_free: + ib_dealloc_device((struct ib_device *)dev); + + return err; +} + +static void remove_one(struct xsc_core_device *xdev, void *intf_ctx) +{ + struct xsc_ib_dev *dev = (struct xsc_ib_dev *)intf_ctx; + + xsc_rtt_sysfs_fini(xdev); + xsc_ib_sysfs_fini(&dev->ib_dev, xdev); + xsc_priv_dev_fini(&dev->ib_dev, xdev); + xsc_counters_fini(&dev->ib_dev, xdev); + xsc_unregister_netdev_notifier(dev); + ib_unregister_device(&dev->ib_dev); + ib_dealloc_device(&dev->ib_dev); +} + +static void init_iommu_state(struct xsc_ib_dev *xdev) +{ + if (xdev) { + struct iommu_domain *domain; + + xdev->iommu_state = XSC_IB_IOMMU_MAP_DISABLE; + domain = iommu_get_domain_for_dev(xdev->ib_dev.dma_device); + if (domain) { + if (domain->type & __IOMMU_DOMAIN_DMA_API) + xdev->iommu_state = XSC_IB_IOMMU_MAP_NORMAL; + } else { + /* try to allocate dma memory, if dma address is not equal to phys address, + * the iommu map is enabled, but iommu domain is unknown. + */ + dma_addr_t dma_addr; + + void *tmp = dma_alloc_coherent(xdev->ib_dev.dma_device, PAGE_SIZE, + &dma_addr, GFP_KERNEL); + if (tmp) { + if (virt_to_phys(tmp) != dma_addr) + xdev->iommu_state = XSC_IB_IOMMU_MAP_UNKNOWN_DOMAIN; + dma_free_coherent(xdev->ib_dev.dma_device, PAGE_SIZE, + tmp, dma_addr); + } + } + + if (xdev->iommu_state) + xsc_ib_dbg(xdev, "ibdev supports iommu dma map, state=%d\n", + xdev->iommu_state); + else + xsc_ib_dbg(xdev, "ibdev does not support iommu dma map\n"); + } +} + +static bool xsc_need_create_ib_device(struct xsc_core_device *dev) +{ + if (xsc_get_roce_lag_xdev(dev) == dev) + return true; + + return false; +} + +static void *xsc_add(struct xsc_core_device *xpdev) +{ + struct xsc_ib_dev *m_ibdev = NULL; + int ret = -1; + + if (!xsc_need_create_ib_device(xpdev)) + return NULL; + + pr_info("add rdma driver\n"); + + ret = init_one(xpdev, &m_ibdev); + if (ret) { + pr_err("xsc ib dev add fail, ret = %d\n", ret); + return NULL; + } + + init_iommu_state(m_ibdev); + + return m_ibdev; +} + +static void xsc_remove(struct xsc_core_device *xpdev, void *context) +{ + pr_info("remove rdma driver\n"); + remove_one(xpdev, context); +} + +static struct xsc_interface xsc_interface = { + .add = xsc_add, + .remove = xsc_remove, + .event = xsc_ib_event, + .protocol = XSC_INTERFACE_PROTOCOL_IB, +}; + +int xsc_ib_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc ib driver recv %lu event\n", action); + + if (exist_incomplete_qp_flush()) { + xsc_set_exit_flag(); + return NOTIFY_OK; + } + + xsc_remove_rdma_driver(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_ib_nb = { + .notifier_call = xsc_ib_reboot_event_handler, + .next = NULL, + .priority = 2, +}; + +void xsc_remove_rdma_driver(void) +{ + xsc_rdma_ctrl_fini(); + xsc_unregister_interface(&xsc_interface); + xsc_priv_unregister_chrdev_region(); +} + +static int __init xsc_ib_init(void) +{ + int ret; + + ret = xsc_priv_alloc_chrdev_region(); + if (ret) + goto out; + + ret = xsc_register_interface(&xsc_interface); + if (ret) { + xsc_priv_unregister_chrdev_region(); + goto out; + } + + ret = xsc_rdma_ctrl_init(); + if (ret != 0) { + pr_err("failed to register port control node\n"); + xsc_unregister_interface(&xsc_interface); + xsc_priv_unregister_chrdev_region(); + goto out; + } + + register_reboot_notifier(&xsc_ib_nb); + + return 0; +out: + return ret; +} + +static void __exit xsc_ib_cleanup(void) +{ + unregister_reboot_notifier(&xsc_ib_nb); + xsc_remove_rdma_driver(); +} + +module_init(xsc_ib_init); +module_exit(xsc_ib_cleanup); diff --git 
a/drivers/infiniband/hw/xsc/mem.c b/drivers/infiniband/hw/xsc/mem.c new file mode 100644 index 0000000000000000000000000000000000000000..cf258aa8ea51a46989d93f8f637cb3d33b206912 --- /dev/null +++ b/drivers/infiniband/hw/xsc/mem.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "xsc_ib.h" + +static inline int xsc_count_trailing_zeros(unsigned long x) +{ +#define COUNT_TRAILING_ZEROS_0 (-1) + + if (sizeof(x) == 4) + return ffs(x); + else + return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0; +} + +int xsc_find_chunk_cont_0(struct xsc_pa_chunk *chunk, + int is_first, + int is_last) +{ + static const int max_count = sizeof(int) << 3; + dma_addr_t pa, end_pa; + u64 va, end_va; + size_t length; + int start_count, end_count; + int va_start_count, va_end_count; + + pa = chunk->pa; + va = chunk->va; + length = chunk->length; + end_pa = pa + length; + end_va = va + length; + start_count = max_count; + end_count = max_count; + + if (!is_first) { + start_count = xsc_count_trailing_zeros((unsigned long)pa); + va_start_count = xsc_count_trailing_zeros(va); + start_count = min_t(int, start_count, va_start_count); + } + + if (!is_last) { + end_count = xsc_count_trailing_zeros((unsigned long)end_pa); + va_end_count = xsc_count_trailing_zeros(end_va); + end_count = min_t(int, end_count, va_end_count); + } + + return start_count > end_count ? end_count : start_count; +} + +int xsc_find_best_pgsz(struct ib_umem *umem, + unsigned long pgsz_bitmap, + unsigned long virt, + int *npages, + int *shift, + u64 **pas) +{ + struct scatterlist *sg; + unsigned long va; + dma_addr_t pa; + struct xsc_pa_chunk *chunk, *tmp; + struct list_head chunk_list; + int i; + int chunk_cnt; + int min_count_0 = sizeof(int) << 3; + int count_0; + int is_first = 0, is_end = 0; + size_t pgsz; + u64 mask; + int err = 0; + int pa_index; + u64 chunk_pa; + int chunk_npages; + unsigned long page_shift = PAGE_SHIFT; + + pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, 0); + + va = (virt >> page_shift) << page_shift; + + INIT_LIST_HEAD(&chunk_list); + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + err = -ENOMEM; + goto err_alloc; + } + list_add_tail(&chunk->list, &chunk_list); + + chunk_cnt = 1; + for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { + pa = sg_dma_address(sg); + if (i == 0) { + chunk->va = va; + chunk->pa = pa; + chunk->length = sg_dma_len(sg); + va += chunk->length; + continue; + } + + if (pa == chunk->pa + chunk->length) { + chunk->length += sg_dma_len(sg); + va += sg_dma_len(sg); + } else { + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + err = -ENOMEM; + goto err_alloc; + } + chunk->va = va; + chunk->pa = pa; + chunk->length = sg_dma_len(sg); + va += chunk->length; + list_add_tail(&chunk->list, &chunk_list); + chunk_cnt++; + } + } + + i = 0; + list_for_each_entry(chunk, &chunk_list, list) { + is_first = (i == 0 ? 1 : 0); + is_end = (i == chunk_cnt - 1 ? 
1 : 0); + count_0 = xsc_find_chunk_cont_0(chunk, is_first, is_end); + if (count_0 < min_count_0) + min_count_0 = count_0; + i++; + } + + pgsz_bitmap &= GENMASK(min_count_0, 0); + pgsz = rounddown_pow_of_two(pgsz_bitmap); + *shift = ilog2(pgsz); + *npages = 0; + + if (chunk_cnt == 1) { + list_for_each_entry(chunk, &chunk_list, list) { + mask = GENMASK(*shift - 1, min_t(int, page_shift, *shift)); + *npages += DIV_ROUND_UP(chunk->length + (virt & mask), pgsz); + *pas = vmalloc(*npages * sizeof(u64)); + if (!*pas) { + err = -ENOMEM; + goto err_alloc; + } + + chunk_pa = chunk->pa - (virt & mask); + for (i = 0; i < *npages; i++) + (*pas)[i] = chunk_pa + i * pgsz; + } + } else { + list_for_each_entry(chunk, &chunk_list, list) { + *npages += DIV_ROUND_UP(chunk->length, pgsz); + } + + *pas = vmalloc(*npages * sizeof(u64)); + if (!*pas) { + err = -ENOMEM; + goto err_alloc; + } + + pa_index = 0; + list_for_each_entry(chunk, &chunk_list, list) { + chunk_npages = DIV_ROUND_UP(chunk->length, pgsz); + chunk_pa = chunk->pa; + for (i = 0; i < chunk_npages; i++) { + if (pa_index == 0) { + mask = GENMASK(*shift - 1, + min_t(int, page_shift, *shift)); + chunk_pa -= (virt & mask); + } + (*pas)[pa_index] = chunk_pa + i * pgsz; + + pa_index++; + } + } + } + +err_alloc: + list_for_each_entry_safe(chunk, tmp, &chunk_list, list) { + list_del(&chunk->list); + kfree(chunk); + } + return err; +} + +/* @umem: umem object to scan + * @addr: ib virtual address requested by the user + * @count: number of PAGE_SIZE pages covered by umem + * @shift: page shift for the compound pages found in the region + * @ncont: number of compund pages + * @order: log2 of the number of compound pages + */ +void __xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, + unsigned long max_page_shift, + int *count, int *shift, + int *ncont, int *order) +{ + unsigned long tmp; + unsigned long m; + u64 base = ~0, p = 0; + u64 len, pfn; + int i = 0; + struct scatterlist *sg; + int entry; + unsigned long page_shift = PAGE_SHIFT; + + addr = addr >> page_shift; + tmp = (unsigned long)addr; + m = find_first_bit(&tmp, BITS_PER_LONG); + if (max_page_shift) + m = min_t(unsigned long, max_page_shift - page_shift, m); + for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, entry) { + len = sg_dma_len(sg) >> page_shift; + pfn = sg_dma_address(sg) >> page_shift; + if (base + p != pfn) { + /* If either the offset or the new + * base are unaligned update m + */ + tmp = (unsigned long)(pfn | p); + if (!IS_ALIGNED(tmp, 1 << m)) + m = find_first_bit(&tmp, BITS_PER_LONG); + + base = pfn; + p = 0; + } + + p += len; + i += len; + } + + if (i) { + m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m); + + if (order) + *order = ilog2(roundup_pow_of_two(i) >> m); + + *ncont = DIV_ROUND_UP(i, (1 << m)); + } else { + m = 0; + + if (order) + *order = 0; + + *ncont = 0; + } + *shift = page_shift + m; + *count = i; +} + +void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, + int *count, int *shift, + int *ncont, int *order) +{ + // no limit for page_shift + __xsc_ib_cont_pages(umem, addr, 0, count, shift, ncont, order); +} + +void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, + int page_shift, size_t offset, size_t num_pages, + __be64 *pas, int access_flags, bool need_to_devide) +{ + unsigned long umem_page_shift = PAGE_SHIFT; + int shift = page_shift - umem_page_shift; + int mask = (1 << shift) - 1; + int i = 0; + int k, idx; + u64 cur = 0; + u64 base; + int len; + struct scatterlist *sg; + int entry; + + 
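+	/* Walk the DMA-mapped scatterlist in units of small pages (4K when
+	 * need_to_devide is set, PAGE_SIZE otherwise) and emit one PAS entry
+	 * per compound page of 1 << page_shift bytes: e.g. with page_shift == 16
+	 * and a 4K PAGE_SIZE, shift is 4, so every 16th small page starts a new
+	 * entry.  Pages below 'offset' are skipped and the walk stops once
+	 * 'num_pages' entries have been produced.
+	 */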
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, entry) { + len = sg_dma_len(sg) >> umem_page_shift; + if (need_to_devide) + len = sg_dma_len(sg) >> PAGE_SHIFT_4K; + else + len = sg_dma_len(sg) >> umem_page_shift; + base = sg_dma_address(sg); + + /* Skip elements below offset */ + if (i + len < offset << shift) { + i += len; + continue; + } + + /* Skip pages below offset */ + if (i < offset << shift) { + k = (offset << shift) - i; + i = offset << shift; + } else { + k = 0; + } + + for (; k < len; k++) { + if (!(i & mask)) { + if (need_to_devide) + cur = base + (k << PAGE_SHIFT_4K); + else + cur = base + (k << umem_page_shift); + cur |= access_flags; + idx = (i >> shift) - offset; + + pas[idx] = cpu_to_be64(cur); + xsc_ib_dbg(dev, "pas[%d] 0x%llx\n", + i >> shift, be64_to_cpu(pas[idx])); + } + i++; + + /* Stop after num_pages reached */ + if (i >> shift >= offset + num_pages) + return; + } + } +} + +void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int npages, bool need_to_devide) +{ + return __xsc_ib_populate_pas(dev, umem, page_shift, 0, + npages, pas, 0, need_to_devide); +} + +int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) +{ + u64 page_size; + u64 page_mask; + u64 off_size; + u64 off_mask; + u64 buf_off; + + page_size = 1 << page_shift; + page_mask = page_size - 1; + buf_off = addr & page_mask; + off_size = page_size >> 6; + off_mask = off_size - 1; + + if (buf_off & off_mask) + return -EINVAL; + + *offset = buf_off >> ilog2(off_size); + return 0; +} diff --git a/drivers/infiniband/hw/xsc/mr.c b/drivers/infiniband/hw/xsc/mr.c new file mode 100644 index 0000000000000000000000000000000000000000..2dddd3b6f7166f82f633b4197295a0a19ca7e3c8 --- /dev/null +++ b/drivers/infiniband/hw/xsc/mr.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "common/xsc_cmd.h" +#include +#include "ib_umem_ex.h" +#include "xsc_ib.h" + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +static void xsc_invalidate_umem(void *invalidation_cookie, + struct ib_umem_ex *umem, + unsigned long addr, size_t size); +#endif + +enum { + DEF_CACHE_SIZE = 10, +}; + +struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_register_mr_mbox_in *in; + struct xsc_register_mr_request *req; + struct xsc_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + req = &in->req; + req->acc = convert_access(acc); + req->va_base = 0; + req->map_en = !(XSC_MPT_MAP_EN); + + err = xsc_core_create_mkey(xdev, &mr->mmr); + if (err) + goto err_in; + req->mkey = cpu_to_be32(mr->mmr.key); + err = xsc_core_register_mr(xdev, &mr->mmr, in, sizeof(*in)); + if (err) + goto err_reg_mr; + kfree(in); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; +err_reg_mr: + xsc_core_destroy_mkey(xdev, &mr->mmr); +err_in: + kfree(in); + +err_free: + kfree(mr); + + return ERR_PTR(err); +} + +void xsc_fill_pas(int npages, u64 *pas, __be64 *req_pas) +{ + int i; + + for (i = 0; i < npages; i++) + req_pas[i] = cpu_to_be64(pas[i]); +} + +static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, + u64 length, struct ib_umem *umem, + int npages, u64 *pas, int page_shift, + int access_flags) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_register_mr_mbox_in *in; + struct xsc_ib_mr *mr; + int inlen; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + err = -ENOMEM; + goto err_0; + } + + inlen = sizeof(*in) + sizeof(*in->req.pas) * npages; + in = xsc_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_1; + } + err = xsc_core_create_mkey(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "create mkey failed\n"); + goto err_2; + } + + xsc_fill_pas(npages, pas, in->req.pas); + + in->req.acc = convert_access(access_flags); + in->req.pa_num = cpu_to_be32(npages); + in->req.pdn = cpu_to_be32(to_mpd(pd)->pdn); + in->req.va_base = cpu_to_be64(virt_addr); + in->req.map_en = XSC_MPT_MAP_EN; + in->req.len = cpu_to_be32((u32)length); + in->req.page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : + (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : + (page_shift == XSC_PAGE_SHIFT_2M ? 
XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G))); + in->req.mkey = cpu_to_be32(mr->mmr.key); + err = xsc_core_register_mr(dev->xdev, &mr->mmr, in, inlen); + if (err) { + xsc_ib_warn(dev, "register mr failed, err = %d\n", err); + goto err_reg_mr; + } + mr->umem = umem; + xsc_vfree(in); + vfree(pas); + + xsc_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); + + return mr; +err_reg_mr: + xsc_core_destroy_mkey(dev->xdev, &mr->mmr); +err_2: + xsc_vfree(in); +err_1: + kfree(mr); +err_0: + vfree(pas); + + return ERR_PTR(err); +} + +struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_ib_mr *mr = NULL; + struct ib_umem_ex *umem_ex; + struct ib_umem *umem; + int page_shift; + int npages; + u64 *pas; + int err; + struct ib_peer_memory_client *ib_peer_mem = NULL; + struct xsc_ib_peer_id *xsc_ib_peer_id = NULL; + + xsc_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", + start, virt_addr, length); +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + umem = ib_umem_get_peer(&dev->ib_dev, start, length, + access_flags, IB_PEER_MEM_INVAL_SUPP); +#else + umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); +#endif + if (IS_ERR(umem)) { + // check client peer memory +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + xsc_ib_warn(dev, "umem get failed\n"); + return (void *)umem; +#else + u8 peer_exists = 0; + + umem_ex = ib_client_umem_get(pd->uobject->context, + start, length, access_flags, 0, &peer_exists); + if (!peer_exists) { + xsc_ib_dbg(dev, "umem get failed\n"); + return (void *)umem; + } + ib_peer_mem = umem_ex->ib_peer_mem; + xsc_ib_peer_id = kzalloc(sizeof(*xsc_ib_peer_id), GFP_KERNEL); + if (!xsc_ib_peer_id) { + err = -ENOMEM; + goto error; + } + init_completion(&xsc_ib_peer_id->comp); + err = ib_client_umem_activate_invalidation_notifier(umem_ex, + xsc_invalidate_umem, + xsc_ib_peer_id); + if (err) + goto error; +#endif + } else { + umem_ex = ib_umem_ex(umem); + if (IS_ERR(umem_ex)) { + err = -ENOMEM; + goto error; + } + } + umem = &umem_ex->umem; + + err = xsc_find_best_pgsz(umem, 0x40211000, start, &npages, &page_shift, &pas); + if (err) { + vfree(pas); + pas = NULL; + xsc_ib_warn(dev, "find best page size failed\n"); + goto error; + } + if (!npages) { + xsc_ib_warn(dev, "avoid zero region\n"); + err = -EINVAL; + goto error; + } + + xsc_ib_dbg(dev, "npages %d, page_shift %d\n", npages, page_shift); + + mr = reg_create(pd, virt_addr, length, umem, npages, pas, page_shift, access_flags); + if (IS_ERR(mr)) { + err = PTR_ERR(mr); + goto error; + } + + xsc_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key); + + mr->umem = umem; + mr->npages = npages; + spin_lock(&dev->mr_lock); + dev->xdev->dev_res->reg_pages += npages; + spin_unlock(&dev->mr_lock); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->ibmr.length = length; + atomic_set(&mr->invalidated, 0); + if (ib_peer_mem) { + init_completion(&mr->invalidation_comp); + xsc_ib_peer_id->mr = mr; + mr->peer_id = xsc_ib_peer_id; + complete(&xsc_ib_peer_id->comp); + } + + return &mr->ibmr; + +error: + if (xsc_ib_peer_id) { + complete(&xsc_ib_peer_id->comp); + kfree(xsc_ib_peer_id); + xsc_ib_peer_id = NULL; + } + + ib_umem_ex_release(umem_ex); + return ERR_PTR(err); +} + +xsc_ib_dereg_mr_def() +{ + struct xsc_ib_dev *dev = to_mdev(ibmr->device); + struct xsc_ib_mr *mr = to_mmr(ibmr); + struct ib_umem *umem = mr->umem; + struct ib_umem_ex *umem_ex = (struct ib_umem_ex *)umem; + int npages = mr->npages; + int err; + + 
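+	/* mr->invalidated is shared with the peer-memory invalidation callback:
+	 * whichever path bumps it first tears down the MKEY.  If the callback
+	 * got there first, only wait for invalidation_comp and free the MR.
+	 */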
xsc_ib_dbg(dev, "dereg mkey = 0x%x\n", mr->mmr.key); + + if (atomic_inc_return(&mr->invalidated) > 1) { + /* In case there is inflight invalidation call pending for its termination */ + wait_for_completion(&mr->invalidation_comp); + kfree(mr); + return 0; + } + + if (mr->npages) { + err = xsc_core_dereg_mr(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "failed to dereg mr 0x%x (%d)\n", + mr->mmr.key, err); + atomic_set(&mr->invalidated, 0); + return err; + } + } + err = xsc_core_destroy_mkey(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", + mr->mmr.key, err); + atomic_set(&mr->invalidated, 0); + return err; + } + + if (umem_ex) { + ib_umem_ex_release(umem_ex); + spin_lock(&dev->mr_lock); + dev->xdev->dev_res->reg_pages -= npages; + spin_unlock(&dev->mr_lock); + } + + kfree(mr->pas); + kfree(mr); + + return 0; +} + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +static void xsc_invalidate_umem(void *invalidation_cookie, + struct ib_umem_ex *umem, + unsigned long addr, + size_t size) +{ + struct xsc_ib_mr *mr; + struct xsc_ib_dev *dev; + struct xsc_ib_peer_id *peer_id = (struct xsc_ib_peer_id *)invalidation_cookie; + + wait_for_completion(&peer_id->comp); + if (!peer_id->mr) + return; + + mr = peer_id->mr; + /* This function is called under client peer lock so its resources are race protected */ + if (atomic_inc_return(&mr->invalidated) > 1) { + umem->invalidation_ctx->inflight_invalidation = 1; + return; + } + + umem->invalidation_ctx->peer_callback = 1; + dev = to_mdev(mr->ibmr.device); + xsc_core_destroy_mkey(dev->xdev, &mr->mmr); + xsc_core_dereg_mr(dev->xdev, &mr->mmr); + complete(&mr->invalidation_comp); +} +#endif + +xsc_ib_alloc_mr_def() +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->npages = 0; + mr->mmr.pd = to_mpd(pd)->pdn; + mr->pas = kcalloc(max_num_sg, sizeof(__be64), GFP_KERNEL); + if (!mr->pas) { + err = -ENOMEM; + goto err_alloc; + } + + err = xsc_core_create_mkey(dev->xdev, &mr->mmr); + if (err) + goto err_create_mkey; + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->ibmr.device = &dev->ib_dev; + + return &mr->ibmr; +err_create_mkey: + kfree(mr->pas); +err_alloc: + kfree(mr); + return ERR_PTR(err); +} + +static int xsc_set_page(struct ib_mr *ibmr, u64 pa) +{ + struct xsc_ib_mr *mmr = to_mmr(ibmr); + + mmr->pas[mmr->npages] = pa; + mmr->npages++; + return 0; +} + +u8 xsc_get_mr_page_mode(struct xsc_core_device *xdev, u32 page_shift) +{ + u8 page_mode = 0; + + page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : + (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : + (page_shift == XSC_PAGE_SHIFT_2M ? 
XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G))); + + return page_mode; +} + +int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset) +{ + struct xsc_ib_mr *mmr = to_mmr(ibmr); + + mmr->npages = 0; + return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, xsc_set_page); +} + +int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr) +{ + const struct ib_reg_wr *reg_wr = container_of(wr, struct ib_reg_wr, wr); + struct ib_mr *ibmr = reg_wr->mr; + struct xsc_ib_mr *mmr = to_mmr(ibmr); + struct xsc_register_mr_mbox_in *in; + int inlen; + int i; + int err; + __be64 *pas; + + inlen = sizeof(*in) + sizeof(__be64) * mmr->npages; + in = kzalloc(inlen, GFP_ATOMIC); + if (!in) + return -ENOMEM; + + in->req.pdn = cpu_to_be32(mmr->mmr.pd); + in->req.mkey = cpu_to_be32(ibmr->rkey); + in->req.acc = convert_access(reg_wr->access); + in->req.page_mode = 0; + in->req.map_en = XSC_MPT_MAP_EN; + + if (xsc_ib_iommu_dma_map(ibmr->device)) { + static u32 support_page_shift[] = {12, 16, 21, 30}; + u64 va_base; + u64 pa_base; + int len; + int i; + u32 page_shift; + + for (i = 0; i < ARRAY_SIZE(support_page_shift); i++) { + page_shift = support_page_shift[i]; + va_base = ALIGN_DOWN(ibmr->iova, 1 << page_shift); + len = ibmr->iova + ibmr->length - va_base; + if (len <= (1 << page_shift)) { + in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, page_shift); + pa_base = ALIGN_DOWN(mmr->pas[0], (1 << page_shift)); + in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, page_shift); + in->req.pa_num = cpu_to_be32(1); + in->req.len = cpu_to_be32(len); + in->req.va_base = cpu_to_be64(va_base); + in->req.pas[0] = cpu_to_be64(pa_base); + goto out; + } + } + + xsc_ib_warn(dev, "Not found suitable page mode for iommu dma map, using 4k mode"); + } + + in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, PAGE_SHIFT_4K); + in->req.va_base = cpu_to_be64(ibmr->iova); + in->req.pa_num = cpu_to_be32(mmr->npages); + in->req.len = cpu_to_be32(ibmr->length); + pas = in->req.pas; + for (i = 0; i < mmr->npages; i++) + pas[i] = cpu_to_be64(mmr->pas[i]); + +out: + xsc_ib_dbg(dev, "iova=%llx, pas=%llx, req.page_mode=%u, req.va_base=%llx, req.pas=%llx, req.len=%d, req.pa_num=%d\n", + ibmr->iova, + mmr->pas[0], + in->req.page_mode, + be64_to_cpu(in->req.va_base), + be64_to_cpu(in->req.pas[0]), + be32_to_cpu(in->req.len), + be32_to_cpu(in->req.pa_num)); + + err = xsc_core_register_mr(dev->xdev, &mmr->mmr, in, sizeof(*in)); + + kfree(in); + return err; +} + +int xsc_wr_invalidate_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr) +{ + struct xsc_core_mr mr; + int err = 0; + + if (!wr) + return -1; + mr.key = wr->ex.invalidate_rkey; + err = xsc_core_dereg_mr(dev->xdev, &mr); + return err; +} + +void xsc_reg_local_dma_mr(struct xsc_core_device *dev) +{ + struct xsc_register_mr_mbox_in in; + int err = 0; + + in.req.pdn = 0; + in.req.pa_num = 0; + in.req.len = 0; + in.req.mkey = cpu_to_be32(0xFF); + in.req.acc = XSC_PERM_LOCAL_WRITE | XSC_PERM_LOCAL_READ; + in.req.page_mode = 0; + in.req.map_en = !(XSC_MPT_MAP_EN); + in.req.va_base = 0; + + err = xsc_core_register_mr(dev, NULL, &in, sizeof(in)); + if (err) + xsc_core_err(dev, "\n"); +} diff --git a/drivers/infiniband/hw/xsc/peer_mem.c b/drivers/infiniband/hw/xsc/peer_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..eba572973b397dbd5ebe1ba26e126b4b9b7ab362 --- /dev/null +++ b/drivers/infiniband/hw/xsc/peer_mem.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon 
Technology Co., Ltd. + * All rights reserved. + */ + +#include "ib_peer_mem.h" +#include +#include "ib_umem_ex.h" + +static DEFINE_MUTEX(peer_memory_mutex); +static LIST_HEAD(peer_memory_list); + +static void complete_peer(struct kref *kref); + +/* Caller should be holding the peer client lock, ib_peer_client->lock */ +static struct core_ticket *ib_peer_search_context(struct ib_peer_memory_client *ib_peer_client, + u64 key) +{ + struct core_ticket *core_ticket; + + list_for_each_entry(core_ticket, &ib_peer_client->core_ticket_list, + ticket_list) { + if (core_ticket->key == key) + return core_ticket; + } + + return NULL; +} + +static int ib_invalidate_peer_memory(void *reg_handle, u64 core_context) +{ + struct ib_peer_memory_client *ib_peer_client = reg_handle; + struct invalidation_ctx *invalidation_ctx; + struct core_ticket *core_ticket; + int need_unlock = 1; + + mutex_lock(&ib_peer_client->lock); + ib_peer_client->stats.num_free_callbacks += 1; + core_ticket = ib_peer_search_context(ib_peer_client, core_context); + if (!core_ticket) + goto out; + + invalidation_ctx = (struct invalidation_ctx *)core_ticket->context; + /* If context is not ready yet, mark it to be invalidated */ + if (!invalidation_ctx->func) { + invalidation_ctx->peer_invalidated = 1; + goto out; + } + invalidation_ctx->func(invalidation_ctx->cookie, + invalidation_ctx->umem_ex, 0, 0); + if (invalidation_ctx->inflight_invalidation) { + /* init the completion to wait on before letting other thread to run */ + init_completion(&invalidation_ctx->comp); + mutex_unlock(&ib_peer_client->lock); + need_unlock = 0; + wait_for_completion(&invalidation_ctx->comp); + } + + kfree(invalidation_ctx); +out: + if (need_unlock) + mutex_unlock(&ib_peer_client->lock); + + return 0; +} + +static int ib_peer_insert_context(struct ib_peer_memory_client *ib_peer_client, + void *context, + u64 *context_ticket) +{ + struct core_ticket *core_ticket = kzalloc(sizeof(*core_ticket), GFP_KERNEL); + + if (!core_ticket) + return -ENOMEM; + + mutex_lock(&ib_peer_client->lock); + core_ticket->key = ib_peer_client->last_ticket++; + core_ticket->context = context; + list_add_tail(&core_ticket->ticket_list, + &ib_peer_client->core_ticket_list); + *context_ticket = core_ticket->key; + mutex_unlock(&ib_peer_client->lock); + + return 0; +} + +/* + * Caller should be holding the peer client lock, specifically, + * the caller should hold ib_peer_client->lock + */ +static int ib_peer_remove_context(struct ib_peer_memory_client *ib_peer_client, + u64 key) +{ + struct core_ticket *core_ticket; + + list_for_each_entry(core_ticket, &ib_peer_client->core_ticket_list, + ticket_list) { + if (core_ticket->key == key) { + list_del(&core_ticket->ticket_list); + kfree(core_ticket); + return 0; + } + } + + return 1; +} + +/* + * ib_peer_create_invalidation_ctx - creates invalidation context for a given umem + * @ib_peer_mem: peer client to be used + * @umem: umem struct belongs to that context + * @invalidation_ctx: output context + */ +int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem_ex, + struct invalidation_ctx **invalidation_ctx) +{ + int ret; + struct invalidation_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = ib_peer_insert_context(ib_peer_mem, ctx, + &ctx->context_ticket); + if (ret) { + kfree(ctx); + return ret; + } + + ctx->umem_ex = umem_ex; + umem_ex->invalidation_ctx = ctx; + *invalidation_ctx = ctx; + + return 0; +} + +/** + * ** 
ib_peer_destroy_invalidation_ctx - destroy a given invalidation context + * ** @ib_peer_mem: peer client to be used + * ** @invalidation_ctx: context to be invalidated + * **/ +void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct invalidation_ctx *invalidation_ctx) +{ + int peer_callback; + int inflight_invalidation; + + /* If we are under peer callback lock was already taken.*/ + if (!invalidation_ctx->peer_callback) + mutex_lock(&ib_peer_mem->lock); + ib_peer_remove_context(ib_peer_mem, invalidation_ctx->context_ticket); + /* make sure to check inflight flag after took the lock and remove from tree. + * in addition, from that point using local variables for peer_callback and + * inflight_invalidation as after the complete invalidation_ctx can't be accessed + * any more as it may be freed by the callback. + */ + peer_callback = invalidation_ctx->peer_callback; + inflight_invalidation = invalidation_ctx->inflight_invalidation; + if (inflight_invalidation) + complete(&invalidation_ctx->comp); + + /* On peer callback lock is handled externally */ + if (!peer_callback) + mutex_unlock(&ib_peer_mem->lock); + + /* in case under callback context or callback is pending + * let it free the invalidation context + */ + if (!peer_callback && !inflight_invalidation) + kfree(invalidation_ctx); +} + +static int ib_memory_peer_check_mandatory(const struct peer_memory_client + *peer_client) +{ +#define PEER_MEM_MANDATORY_FUNC(x) { offsetof(struct peer_memory_client, x), #x } + static const struct { + size_t offset; + char *name; + } mandatory_table[] = { + PEER_MEM_MANDATORY_FUNC(acquire), + PEER_MEM_MANDATORY_FUNC(get_pages), + PEER_MEM_MANDATORY_FUNC(put_pages), + PEER_MEM_MANDATORY_FUNC(get_page_size), + PEER_MEM_MANDATORY_FUNC(dma_map), + PEER_MEM_MANDATORY_FUNC(dma_unmap) + }; + int i; + + for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { + if (!*(void **)((void *)peer_client + mandatory_table[i].offset)) { + pr_err("Peer memory %s is missing mandatory function %s\n", + peer_client->name, mandatory_table[i].name); + return -EINVAL; + } + } + + return 0; +} + +static void complete_peer(struct kref *kref) +{ + struct ib_peer_memory_client *ib_peer_client = + container_of(kref, struct ib_peer_memory_client, ref); + + complete(&ib_peer_client->unload_comp); +} + +void *ib_register_peer_memory_client(const struct peer_memory_client *peer_client, + invalidate_peer_memory *invalidate_callback) +{ + struct ib_peer_memory_client *ib_peer_client; + + if (ib_memory_peer_check_mandatory(peer_client)) + return NULL; + + ib_peer_client = kzalloc(sizeof(*ib_peer_client), GFP_KERNEL); + if (!ib_peer_client) + return NULL; + + INIT_LIST_HEAD(&ib_peer_client->core_ticket_list); + mutex_init(&ib_peer_client->lock); + init_completion(&ib_peer_client->unload_comp); + kref_init(&ib_peer_client->ref); + ib_peer_client->peer_mem = peer_client; + + /* Once peer supplied a non NULL callback it's an indication that + * invalidation support is required for any memory owning. 
+ */ + if (invalidate_callback) { + *invalidate_callback = ib_invalidate_peer_memory; + ib_peer_client->invalidation_required = 1; + } + ib_peer_client->last_ticket = 1; + + mutex_lock(&peer_memory_mutex); + list_add_tail(&ib_peer_client->core_peer_list, &peer_memory_list); + + mutex_unlock(&peer_memory_mutex); + return ib_peer_client; +} +EXPORT_SYMBOL(ib_register_peer_memory_client); + +void ib_unregister_peer_memory_client(void *reg_handle) +{ + struct ib_peer_memory_client *ib_peer_client = reg_handle; + + mutex_lock(&peer_memory_mutex); + list_del(&ib_peer_client->core_peer_list); + mutex_unlock(&peer_memory_mutex); + + kref_put(&ib_peer_client->ref, complete_peer); + wait_for_completion(&ib_peer_client->unload_comp); + kfree(ib_peer_client); +} +EXPORT_SYMBOL(ib_unregister_peer_memory_client); + +struct ib_peer_memory_client *ib_get_peer_client(struct ib_ucontext *context, unsigned long addr, + size_t size, unsigned long peer_mem_flags, + void **peer_client_context) +{ + struct ib_peer_memory_client *ib_peer_client = NULL; + + int ret = 0; + + mutex_lock(&peer_memory_mutex); + list_for_each_entry(ib_peer_client, &peer_memory_list, core_peer_list) { + /* In case peer requires invalidation it can't own + * memory which doesn't support it + */ + if ((ib_peer_client->invalidation_required && + (!(peer_mem_flags & IB_PEER_MEM_INVAL_SUPP)))) + continue; + ret = ib_peer_client->peer_mem->acquire(addr, size, NULL, NULL, + peer_client_context); + if (ret > 0) + goto found; + } + + ib_peer_client = NULL; + +found: + if (ib_peer_client) + kref_get(&ib_peer_client->ref); + + mutex_unlock(&peer_memory_mutex); + + return ib_peer_client; +} +EXPORT_SYMBOL(ib_get_peer_client); + +void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client, + void *peer_client_context) +{ + if (ib_peer_client->peer_mem->release) + ib_peer_client->peer_mem->release(peer_client_context); + + kref_put(&ib_peer_client->ref, complete_peer); +} +EXPORT_SYMBOL(ib_put_peer_client); + +int ib_get_peer_private_data(struct ib_ucontext *context, u64 peer_id, + char *peer_name) +{ + pr_warn("predefine peer mem is not supported by now"); + return -1; +} +EXPORT_SYMBOL(ib_get_peer_private_data); + +void ib_put_peer_private_data(struct ib_ucontext *context) +{ + pr_warn("predefine peer mem is not supported by now"); +} +EXPORT_SYMBOL(ib_put_peer_private_data); diff --git a/drivers/infiniband/hw/xsc/peer_mem.h b/drivers/infiniband/hw/xsc/peer_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..7e3f803ac246315558efab858a2fca505ccb26b4 --- /dev/null +++ b/drivers/infiniband/hw/xsc/peer_mem.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#if !defined(PEER_MEM_H) +#define PEER_MEM_H + +#include +#include +#include +#include +#include +#include + +#define IB_PEER_MEMORY_NAME_MAX 64 +#define IB_PEER_MEMORY_VER_MAX 16 +#define PEER_MEM_U64_CORE_CONTEXT + +/** + * struct peer_memory_client - registration information for peer client. + * @name: peer client name + * @version: peer client version + * @acquire: callback function to be used by IB core to detect whether a + * virtual address in under the responsibility of a specific peer client. + * @get_pages: callback function to be used by IB core asking the peer client to pin + * the physical pages of the given address range and returns that information. + * It equivalents to the kernel API of get_user_pages(), but targets peer memory. 
+ * @dma_map: callback function to be used by IB core asking the peer client to fill + * the dma address mapping for a given address range. + * @dma_unmap: callback function to be used by IB core asking the peer client to take + * relevant actions to unmap the memory. + * @put_pages: callback function to be used by IB core asking the peer client to remove the + * pinning from the given memory. + * It's the peer-direct equivalent of the kernel API put_page. + * @get_page_size: callback function to be used by IB core to query the peer client for + * the page size for the given allocation. + * @release: callback function to be used by IB core asking peer client to release all + * resources associated with previous acquire call. The call will be performed + * only for contexts that have been successfully acquired (i.e. acquire returned a + * non-zero value). + * Additionally, IB core guarentees that there will be no pages pinned through this + * context when the callback is called. + * + * The subsections in this description contain detailed description + * of the callback arguments and expected return values for the + * callbacks defined in this struct. + * + * acquire: + * + * Callback function to be used by IB core to detect + * whether a virtual address in under the responsibility + * of a specific peer client. + * + * addr [IN] - virtual address to be checked whether belongs to peer. + * + * size [IN] - size of memory area starting at addr. + * + * peer_mem_private_data [IN] - The contents of ib_ucontext-> peer_mem_private_data. + * This parameter allows usage of the peer-direct + * API in implementations where it is impossible + * to detect if the memory belongs to the device + * based upon the virtual address alone. In such + * cases, the peer device can create a special + * ib_ucontext, which will be associated with the + * relevant peer memory. + * + * peer_mem_name [IN] - The contents of ib_ucontext-> peer_mem_name. + * Used to identify the peer memory client that + * initialized the ib_ucontext. + * This parameter is normally used along with + * peer_mem_private_data. + * client_context [OUT] - peer opaque data which holds a peer context for + * the acquired address range, will be provided + * back to the peer memory in subsequent + * calls for that given memory. + * + * If peer takes responsibility on the given address range further calls for memory + * management will be directed to the callbacks of this peer client. + * + * Return - 1 in case peer client takes responsibility on that range otherwise 0. + * Any peer internal error should resulted in a zero answer, in case address + * range really belongs to the peer, no owner will be found and application + * will get an error + * from IB Core as expected. + * + * get_pages: + * + * Callback function to be used by IB core asking the + * peer client to pin the physical pages of the given + * address range and returns that information. It + * equivalents to the kernel API of get_user_pages(), but + * targets peer memory. + * + * addr [IN] - start virtual address of that given allocation. + * + * size [IN] - size of memory area starting at addr. + * + * write [IN] - indicates whether the pages will be written to by the caller. + * Same meaning as of kernel API get_user_pages, can be + * ignored if not relevant. + * + * force [IN] - indicates whether to force write access even if user + * mapping is read only. Same meaning as of kernel API + * get_user_pages, can be ignored if not relevant. 
+ * + * sg_head [IN/OUT] - pointer to head of struct sg_table. + * The peer client should allocate a table big + * enough to store all of the required entries. This + * function should fill the table with physical addresses + * and sizes of the memory segments composing this + * memory mapping. + * The table allocation can be done using sg_alloc_table. + * Filling in the physical memory addresses and size can + * be done using sg_set_page. + * + * client_context [IN] - peer context for the given allocation, as received from + * the acquire call. + * + * core_context [IN] - IB core context. If the peer client wishes to + * invalidate any of the pages pinned through this API, + * it must provide this context as an argument to the + * invalidate callback. + * + * Return - 0 success, otherwise errno error code. + * + * dma_map: + * + * Callback function to be used by IB core asking the peer client to fill + * the dma address mapping for a given address range. + * + * sg_head [IN/OUT] - pointer to head of struct sg_table. The peer memory + * should fill the dma_address & dma_length for + * each scatter gather entry in the table. + * + * client_context [IN] - peer context for the allocation mapped. + * + * dma_device [IN] - the RDMA capable device which requires access to the + * peer memory. + * + * dmasync [IN] - flush in-flight DMA when the memory region is written. + * Same meaning as with host memory mapping, can be ignored if + * not relevant. + * + * nmap [OUT] - number of mapped/set entries. + * + * Return - 0 success, otherwise errno error code. + * + * dma_unmap: + * + * Callback function to be used by IB core asking the peer client to take + * relevant actions to unmap the memory. + * + * sg_head [IN] - pointer to head of struct sg_table. The peer memory + * should fill the dma_address & dma_length for + * each scatter gather entry in the table. + * + * client_context [IN] - peer context for the allocation mapped. + * + * dma_device [IN] - the RDMA capable device which requires access to the + * peer memory. + * + * Return - 0 success, otherwise errno error code. + * + * put_pages: + * + * Callback function to be used by IB core asking the peer client to remove the + * pinning from the given memory. + * It's the peer-direct equivalent of the kernel API put_page. + * + * sg_head [IN] - pointer to head of struct sg_table. + * + * client_context [IN] - peer context for that given allocation. + * + * get_page_size: + * + * Callback function to be used by IB core to query the + * peer client for the page size for the given + * allocation. + * + * sg_head [IN] - pointer to head of struct sg_table. + * + * client_context [IN] - peer context for that given allocation. + * + * Return - Page size in bytes + * + * release: + * + * Callback function to be used by IB core asking peer + * client to release all resources associated with + * previous acquire call. The call will be performed only + * for contexts that have been successfully acquired + * (i.e. acquire returned a non-zero value). + * Additionally, IB core guarentees that there will be no + * pages pinned through this context when the callback is + * called. + * + * client_context [IN] - peer context for the given allocation. 
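+ *
+ * A minimal registration sketch is shown below. It is illustrative only:
+ * the example_peer_* names and the module wiring are hypothetical, and the
+ * callback bodies (with the prototypes documented above) are assumed to be
+ * defined elsewhere by the peer driver.
+ *
+ *    static invalidate_peer_memory example_invalidate_cb;
+ *    static void *example_reg_handle;
+ *
+ *    static const struct peer_memory_client example_peer_client = {
+ *        .name          = "example_peer",
+ *        .version       = "1.0",
+ *        .acquire       = example_peer_acquire,
+ *        .get_pages     = example_peer_get_pages,
+ *        .dma_map       = example_peer_dma_map,
+ *        .dma_unmap     = example_peer_dma_unmap,
+ *        .put_pages     = example_peer_put_pages,
+ *        .get_page_size = example_peer_get_page_size,
+ *        .release       = example_peer_release,
+ *    };
+ *
+ *    static int __init example_peer_init(void)
+ *    {
+ *        example_reg_handle =
+ *            ib_register_peer_memory_client(&example_peer_client,
+ *                                           &example_invalidate_cb);
+ *        return example_reg_handle ? 0 : -EINVAL;
+ *    }
+ *
+ *    static void __exit example_peer_exit(void)
+ *    {
+ *        ib_unregister_peer_memory_client(example_reg_handle);
+ *    }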
+ * + **/ +struct peer_memory_client { + char name[IB_PEER_MEMORY_NAME_MAX]; + char version[IB_PEER_MEMORY_VER_MAX]; + int (*acquire)(unsigned long addr, size_t size, void *peer_mem_private_data, + char *peer_mem_name, void **client_context); + int (*get_pages)(unsigned long addr, + size_t size, int write, int force, + struct sg_table *sg_head, + void *client_context, u64 core_context); + int (*dma_map)(struct sg_table *sg_head, void *client_context, + struct device *dma_device, int dmasync, int *nmap); + int (*dma_unmap)(struct sg_table *sg_head, void *client_context, + struct device *dma_device); + void (*put_pages)(struct sg_table *sg_head, void *client_context); + unsigned long (*get_page_size)(void *client_context); + void (*release)(void *client_context); + void* (*get_context_private_data)(u64 peer_id); + void (*put_context_private_data)(void *context); +}; + +typedef int (*invalidate_peer_memory)(void *reg_handle, u64 core_context); + +void *ib_register_peer_memory_client(const struct peer_memory_client *peer_client, + invalidate_peer_memory *invalidate_callback); +void ib_unregister_peer_memory_client(void *reg_handle); + +#endif diff --git a/drivers/infiniband/hw/xsc/private_dev.c b/drivers/infiniband/hw/xsc/private_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..29fe98fd6b0c838937b06c234a5d3afd1e91281c --- /dev/null +++ b/drivers/infiniband/hw/xsc/private_dev.c @@ -0,0 +1,1031 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include "common/res_obj.h" +#include "xsc_ib.h" + +#define FEATURE_ONCHIP_FT_MASK BIT(4) +#define FEATURE_DMA_RW_TBL_MASK BIT(8) +#define FEATURE_PCT_EXP_MASK BIT(9) + +static int xsc_priv_dev_open(struct inode *inode, struct file *file) +{ + struct xsc_priv_device *priv_dev = + container_of(inode->i_cdev, struct xsc_priv_device, cdev); + struct xsc_core_device *xdev = + container_of(priv_dev, struct xsc_core_device, priv_device); + struct xsc_bdf_file *bdf_file; + + bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL); + if (!file) + return -ENOMEM; + + INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC); + spin_lock_init(&bdf_file->obj_lock); + + bdf_file->xdev = xdev; + bdf_file->key = bdf_to_key(pci_domain_nr(xdev->pdev->bus), + xdev->pdev->bus->number, xdev->pdev->devfn); + bdf_file->restore_nic_fn = NULL; + + radix_tree_preload(GFP_KERNEL); + spin_lock(&priv_dev->bdf_lock); + radix_tree_insert(&priv_dev->bdf_tree, bdf_file->key, bdf_file); + spin_unlock(&priv_dev->bdf_lock); + radix_tree_preload_end(); + file->private_data = bdf_file; + + return 0; +} + +static int xsc_priv_dev_release(struct inode *inode, struct file *filp) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + + xsc_close_bdf_file(bdf_file); + + if (bdf_file->restore_nic_fn) { + xsc_set_user_mode(xdev, false); + bdf_file->restore_nic_fn(xdev); + } + + spin_lock(&xdev->priv_device.bdf_lock); + radix_tree_delete(&xdev->priv_device.bdf_tree, bdf_file->key); + spin_unlock(&xdev->priv_device.bdf_lock); + + kfree(bdf_file); + + return 0; +} + +static long xsc_ioctl_mem_free(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mem_info *minfo; + struct 
xsc_ioctl_data_tl *tl; + struct xsc_ioctl_mbox_in *in; + struct xsc_mem_entry *m_ent; + char tname[TASK_COMM_LEN]; + int in_size; + int err = 0; + u8 lfound = 0; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->len = hdr->attr.length; + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + if (in->len > sizeof(struct xsc_ioctl_data_tl)) { + tl = (struct xsc_ioctl_data_tl *)(in->data); + if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { + kvfree(in); + return -EFAULT; + } + minfo = (struct xsc_ioctl_mem_info *)(tl + 1); + if (minfo->vir_addr && minfo->phy_addr) { + memset(tname, 0, sizeof(tname)); + get_task_comm(tname, current); + + spin_lock_irq(&priv_dev->mem_lock); + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num && + m_ent->mem_info.size == minfo->size) { + if (m_ent->mem_info.phy_addr == minfo->phy_addr && + m_ent->mem_info.vir_addr == minfo->vir_addr) { + lfound = 1; + list_del(&m_ent->list); + } else { + err = -ENOMEM; + } + break; + } + } + spin_unlock_irq(&priv_dev->mem_lock); + + if (lfound) { + dma_free_coherent(&xdev->pdev->dev, + minfo->size, + (void *)minfo->vir_addr, + minfo->phy_addr); + } + } else { + kvfree(in); + return -EFAULT; + } + } + + hdr->attr.error = err; + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, in->data, in->len)) + err = -EFAULT; + + kvfree(in); + return err; +} + +static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, + struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mem_info *minfo; + struct xsc_ioctl_data_tl *tl; + struct xsc_ioctl_mbox_in *in; + struct xsc_mem_entry *m_ent; + char tname[TASK_COMM_LEN]; + u64 vaddr = 0; + u64 paddr = 0; + int in_size; + int err = 0; + u8 lfound = 0; + u8 needfree = 0; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->len = hdr->attr.length; + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + if (in->len > sizeof(struct xsc_ioctl_data_tl)) { + tl = (struct xsc_ioctl_data_tl *)(in->data); + if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { + kvfree(in); + return -EFAULT; + } + minfo = (struct xsc_ioctl_mem_info *)(tl + 1); + memset(tname, 0, sizeof(tname)); + get_task_comm(tname, current); + + spin_lock_irq(&priv_dev->mem_lock); + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num) { + if (m_ent->mem_info.size == minfo->size) { + minfo->phy_addr = m_ent->mem_info.phy_addr; + minfo->vir_addr = m_ent->mem_info.vir_addr; + lfound = 1; + } else { + needfree = 1; + list_del(&m_ent->list); + } + break; + } + } + spin_unlock_irq(&priv_dev->mem_lock); + + if (needfree) { + dma_free_coherent(&xdev->pdev->dev, + m_ent->mem_info.size, + (void *)m_ent->mem_info.vir_addr, + m_ent->mem_info.phy_addr); + } + + if (!lfound) { + vaddr = (u64)dma_alloc_coherent(&xdev->pdev->dev, + minfo->size, + (dma_addr_t *)&paddr, + GFP_KERNEL); + if (vaddr) { + memset((void *)vaddr, 0, minfo->size); + minfo->phy_addr = paddr; + minfo->vir_addr = vaddr; + m_ent = 
kzalloc(sizeof(*m_ent), GFP_KERNEL); + if (!m_ent) { + kvfree(in); + return -ENOMEM; + } + strscpy(m_ent->task_name, tname, sizeof(m_ent->task_name)); + m_ent->mem_info.mem_num = minfo->mem_num; + m_ent->mem_info.size = minfo->size; + m_ent->mem_info.phy_addr = paddr; + m_ent->mem_info.vir_addr = vaddr; + spin_lock_irq(&priv_dev->mem_lock); + list_add(&m_ent->list, &priv_dev->mem_list); + spin_unlock_irq(&priv_dev->mem_lock); + } else { + kvfree(in); + return -ENOMEM; + } + } + } + + hdr->attr.error = err; + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, in->data, in->len)) + err = -EFAULT; + + kvfree(in); + return err; +} + +static long xsc_priv_dev_ioctl_mem(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_priv_device *priv_dev = &xdev->priv_device; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_IOCTL_MEM_ALLOC: + return xsc_ioctl_mem_alloc(priv_dev, xdev, user_hdr, &hdr); + case XSC_IOCTL_MEM_FREE: + return xsc_ioctl_mem_free(priv_dev, xdev, user_hdr, &hdr); + default: + return -EINVAL; + } +} + +static int xsc_priv_modify_qp(struct xsc_core_device *xdev, void *in, void *out) +{ + int ret = 0, i = 0; + struct xsc_ioctl_qp_range *resp; + struct xsc_ioctl_data_tl *tl; + int insize; + struct xsc_modify_qp_mbox_in *mailin; + struct xsc_modify_qp_mbox_out mailout; + u32 qpn; + + tl = (struct xsc_ioctl_data_tl *)out; + resp = (struct xsc_ioctl_qp_range *)(tl + 1); + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", + resp->qpn, resp->num, resp->opcode); + if (resp->num == 0) { + xsc_core_err(xdev, "xsc_ioctl_qp_range: resp->num == 0\n"); + return 0; + } + qpn = resp->qpn; + insize = sizeof(struct xsc_modify_qp_mbox_in); + mailin = kvzalloc(insize, GFP_KERNEL); + if (!mailin) + return -ENOMEM; + for (i = 0; i < resp->num; i++) { + mailin->hdr.opcode = cpu_to_be16(resp->opcode); + mailin->qpn = cpu_to_be32(qpn + i); + ret = xsc_cmd_exec(xdev, mailin, insize, &mailout, sizeof(mailout)); + xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); + } + kvfree(mailin); + + return ret; +} + +static int xsc_priv_dev_ioctl_get_phy(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_ioctl_get_phy_info_res *resp; + u16 lag_id = xsc_get_lag_id(xdev); + + switch (tl->opmod) { + case XSC_IOCTL_OP_GET_LOCAL: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + resp->pcie_no = xdev->pcie_no; + resp->func_id = xdev->glb_func_id; + resp->pcie_host = xdev->caps.pcie_host; + resp->mac_phy_port = xdev->mac_port; + resp->funcid_to_logic_port_off = xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = xdev->caps.send_ds_num; + resp->recv_seg_num = xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = xdev->caps.raw_tpe_qp_num; + resp->chip_version = xdev->chip_ver_l; + 
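+		/*
+		 * The three *_vld fields below decode single capability bits from
+		 * xdev->feature_flag: FEATURE_ONCHIP_FT_MASK (bit 4) for the
+		 * on-chip flow table, FEATURE_DMA_RW_TBL_MASK (bit 8) for the DMA
+		 * read/write table, and FEATURE_PCT_EXP_MASK (bit 9) for PCT
+		 * compression support.
+		 */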
resp->on_chip_tbl_vld = + (xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = xdev->caps.pcie1_pf_funcid_top; + resp->hca_core_clock = xdev->caps.hca_core_clock; + resp->mac_bit = xdev->caps.mac_bit; + if (xsc_core_is_pf(xdev)) { + mutex_lock(&esw->mode_lock); + resp->esw_mode = esw->mode; + mutex_unlock(&esw->mode_lock); + } else { + resp->esw_mode = 0; + } + resp->board_id = xdev->board_info->board_id; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int xsc_priv_dev_ioctl_get_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->force_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->force_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *req = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_dscp = req->dscp; + return 0; +} + +int xsc_priv_dev_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + int opcode, ret = 0; + struct xsc_ioctl_attr *hdr; + + hdr = (struct xsc_ioctl_attr *)in; + opcode = hdr->opcode; + switch (opcode) { + case XSC_IOCTL_GET_PHY_INFO: + ret = xsc_priv_dev_ioctl_get_phy(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_PCP: + xsc_core_dbg(xdev, "getting global pcp\n"); + ret = xsc_priv_dev_ioctl_get_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_DSCP: + ret = xsc_priv_dev_ioctl_get_force_dscp(xdev, in, out); + break; + case XSC_IOCTL_SET_QP_STATUS: + xsc_core_dbg(xdev, "case XSC_IOCTL_SET_QP_STATUS:\n"); + ret = xsc_priv_modify_qp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_PCP: + xsc_core_dbg(xdev, 
"setting global pcp\n"); + ret = xsc_priv_dev_ioctl_set_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_DSCP: + xsc_core_dbg(xdev, "setting global dscp\n"); + ret = xsc_priv_dev_ioctl_set_force_dscp(xdev, in, out); + break; + default: + ret = -EINVAL; + break; + } + + xsc_core_dbg(xdev, "xsc_priv_dev exec_ioctl.ret=%u\n", ret); + + return ret; +} + +static long xsc_priv_dev_ioctl_getinfo(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_hdr *in; + int in_size; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + switch (hdr.attr.opcode) { + case XSC_IOCTL_GET_PHY_INFO: + case XSC_IOCTL_GET_FORCE_PCP: + case XSC_IOCTL_GET_FORCE_DSCP: + case XSC_IOCTL_SET_QP_STATUS: + case XSC_IOCTL_SET_FORCE_PCP: + case XSC_IOCTL_SET_FORCE_DSCP: + case XSC_IOCTL_GET_CONTEXT: + break; + default: + return -EINVAL; + } + in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + in->attr.opcode = hdr.attr.opcode; + in->attr.length = hdr.attr.length; + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + err = xsc_priv_dev_exec_ioctl(xdev, &in->attr, + (in_size - offsetof(struct xsc_ioctl_hdr, attr)), + in->attr.data, + hdr.attr.length); + in->attr.error = err; + if (copy_to_user((void *)arg, in, in_size)) + err = -EFAULT; + kvfree(in); + return err; +} + +static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, + char *data, unsigned int datalen) +{ + int err = 0; + struct xsc_flow_pct_v4_add *pct_v4; + struct xsc_flow_pct_v6_add *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); + break; + default: + break; + } + + return err; +} + +static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) +{ + struct xsc_flow_pct_v4_del *pct_v4; + struct xsc_flow_pct_v6_del *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v4->priority); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v6->priority); + break; + default: + break; + } +} + +static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, + char *data, unsigned int datalen) +{ + struct xsc_ioctl_data_tl *tl; + int err = 0; + + tl = (struct xsc_ioctl_data_tl *)data; + + switch (tl->opmod) { + case XSC_IOCTL_OP_ADD: + err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); + break; + case XSC_IOCTL_OP_DEL: + xsc_ioctl_flow_destroy_obj(file, tl); + break; + default: + break; + } + + return err; +} + +static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct 
xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + int in_size; + int out_size; + int err; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->len = __cpu_to_be16(hdr->attr.length); + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + err = xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + kvfree(in); + return -ENOMEM; + } + memcpy(out->data, in->data, hdr->attr.length); + out->len = in->len; + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out->data, hdr->attr.length)) + err = -EFAULT; + + kvfree(in); + kvfree(out); + return err; +} + +static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, + struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_modify_raw_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_out *out; + int err; + + if (hdr->attr.length != sizeof(struct xsc_modify_raw_qp_request)) + return -EINVAL; + + in = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_in), GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_out), GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->req, user_hdr->attr.data, + sizeof(struct xsc_modify_raw_qp_request)); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->pcie_no = xdev->pcie_no; + + err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static void xsc_handle_multiqp_create(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out) +{ + u16 qp_num = 0; + int i = 0; + struct xsc_create_qp_request *req = NULL; + void *ptr = NULL; + int len = 0; + u32 qpn_base = be32_to_cpu(((struct xsc_create_multiqp_mbox_out *)out)->qpn_base); + + qp_num = be16_to_cpu(((struct xsc_create_multiqp_mbox_in *)in)->qp_num); + ptr = ((struct xsc_create_multiqp_mbox_in *)in)->data; + for (i = 0; i < qp_num; i++) { + req = (struct xsc_create_qp_request *)ptr; + len = sizeof(struct xsc_create_qp_request) + + be16_to_cpu(req->pa_num) * sizeof(u64); + xsc_alloc_qp_obj(file, qpn_base + i, (char *)req, len); + ptr += len; + } +} + +static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, + void *in, unsigned int inlen, void *out, int opcode) +{ + unsigned int idx; + + switch (opcode) { + case XSC_CMD_OP_ALLOC_PD: + idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); + xsc_alloc_pd_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DEALLOC_PD: + idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); + xsc_destroy_pd_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MKEY: + idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); + xsc_alloc_mr_obj(file, idx, 
in, inlen); + break; + case XSC_CMD_OP_DESTROY_MKEY: + idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey); + xsc_destroy_mr_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_CQ: + idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn); + xsc_alloc_cq_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_CQ: + idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn); + xsc_destroy_cq_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_QP: + idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn); + xsc_alloc_qp_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_QP: + idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_destroy_qp_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MULTI_QP: + xsc_handle_multiqp_create(file, in, inlen, out); + break; + default: + break; + } +} + +static long xsc_priv_dev_ioctl_cmdq(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_priv_device *priv_dev = &bdf_file->xdev->priv_device; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_FLOW: + return xsc_ioctl_flow_cmdq(bdf_file, user_hdr, &hdr); + case XSC_CMD_OP_MODIFY_RAW_QP: + return xsc_ioctl_modify_raw_qp(priv_dev, xdev, user_hdr, &hdr); + default: + return -EINVAL; + } +} + +static long xsc_priv_dev_ioctl_cmdq_raw(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + u16 out_len; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out_len = min_t(u16, hdr.attr.length, (u16)MAX_MBOX_OUT_LEN); + out = kvzalloc(out_len, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + goto err_exit; + } + + xsc_cmd_exec(xdev, in, hdr.attr.length, out, out_len); + xsc_pci_ctrl_cmdq_handle_res_obj(bdf_file, in, hdr.attr.length, out, hdr.attr.opcode); + + if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out, out_len)) + err = -EFAULT; +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int xsc_ioctl_user_mode(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *dev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_user_mode_attr *attr; + u8 *buf; + int err = 0; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(dev, "fail to copy from user user_hdr\n"); + return -EFAULT; + } + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(dev, "invalid check filed %u\n", hdr.check_filed); + return -EINVAL; + } + 
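+	/*
+	 * hdr.attr.length bytes of opcode-specific payload follow the user
+	 * header; for XSC_IOCTL_OPCODE_ENABLE_USER_MODE it is interpreted
+	 * below as a struct xsc_ioctl_user_mode_attr.
+	 */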
+ buf = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + err = copy_from_user(buf, user_hdr->attr.data, hdr.attr.length); + if (err) { + xsc_core_err(dev, "failed to copy ioctl user data.\n"); + kvfree(buf); + return -EFAULT; + } + + switch (hdr.attr.opcode) { + case XSC_IOCTL_OPCODE_ENABLE_USER_MODE: + attr = (struct xsc_ioctl_user_mode_attr *)buf; + xsc_set_user_mode(dev, (attr->enable ? true : false)); + if (attr->enable) + bdf_file->restore_nic_fn = xsc_eth_restore_nic_hca; + else + bdf_file->restore_nic_fn = NULL; + + break; + default: + err = -EOPNOTSUPP; + break; + } + + kvfree(buf); + return err; +} + +static long xsc_priv_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = xsc_priv_dev_ioctl_cmdq(filp, arg); + break; + case XSC_IOCTL_DRV_GET: + case XSC_IOCTL_DRV_SET: + // TODO refactor to split driver get and set + err = xsc_priv_dev_ioctl_getinfo(filp, arg); + break; + case XSC_IOCTL_MEM: + err = xsc_priv_dev_ioctl_mem(filp, arg); + break; + case XSC_IOCTL_CMDQ_RAW: + err = xsc_priv_dev_ioctl_cmdq_raw(filp, arg); + break; + case XSC_IOCTL_USER_MODE: + err = xsc_ioctl_user_mode(filp, arg); + break; + default: + err = -EFAULT; + break; + } + return err; +} + +static const struct file_operations dev_fops = { + .owner = THIS_MODULE, + .open = xsc_priv_dev_open, + .unlocked_ioctl = xsc_priv_dev_ioctl, + .compat_ioctl = xsc_priv_dev_ioctl, + .release = xsc_priv_dev_release, +}; + +#define XSC_MAX_CDEV_NUM 1024 +static dev_t g_priv_cdev_no; +static int g_priv_cdev_cnt; +static char *g_priv_class_name = "xscale"; +static struct class *g_priv_class; +DECLARE_BITMAP(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM); + +int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + int ret; + int dev_id = 0; + struct xsc_priv_device *priv_dev = &dev->priv_device; + + if (g_priv_cdev_cnt >= XSC_MAX_CDEV_NUM) { + xsc_core_err(dev, "too many xscale cdevice\n"); + priv_dev->devno = U32_MAX; + return -EBUSY; + } + + sprintf(priv_dev->device_name, "%s", ib_dev->name); + + xsc_core_dbg(dev, "device_name %s\n", priv_dev->device_name); + + cdev_init(&priv_dev->cdev, &dev_fops); + priv_dev->cdev.owner = THIS_MODULE; + dev_id = find_first_zero_bit(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM); + priv_dev->devno = g_priv_cdev_no + dev_id; + + ret = cdev_add(&priv_dev->cdev, priv_dev->devno, 1); + if (ret) { + xsc_core_err(dev, "%s cdev_add error ret:%d major:%d\n", + priv_dev->device_name, ret, MAJOR(priv_dev->devno)); + return ret; + } + + device_create(g_priv_class, NULL, priv_dev->devno, + NULL, "%s!%s", g_priv_class_name, priv_dev->device_name); + g_priv_cdev_cnt++; + set_bit(dev_id, g_bitmap_cdev_id); + + INIT_LIST_HEAD(&priv_dev->mem_list); + spin_lock_init(&priv_dev->mem_lock); + + INIT_RADIX_TREE(&priv_dev->bdf_tree, GFP_ATOMIC); + spin_lock_init(&priv_dev->bdf_lock); + + xsc_core_dbg(dev, "init success\n"); + + return 0; +} + +void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_priv_device *priv_dev; + struct cdev *char_dev; + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; + int dev_id = 0; + + if (!dev || !ib_dev) { + pr_err("[%s:%d] device is null pointer\n", __func__, __LINE__); + return; + } + + priv_dev = &dev->priv_device; + if (priv_dev->devno == U32_MAX) + return; + + char_dev = &priv_dev->cdev; + + dev_id = MINOR(priv_dev->devno); + spin_lock(&priv_dev->bdf_lock); + radix_tree_for_each_slot(slot, &priv_dev->bdf_tree, 
&iter, 0) { + bdf_file = (struct xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&priv_dev->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&priv_dev->bdf_lock); + device_destroy(g_priv_class, priv_dev->devno); + cdev_del(&priv_dev->cdev); + + clear_bit(dev_id, g_bitmap_cdev_id); + g_priv_cdev_cnt--; + xsc_core_dbg(dev, "fini success\n"); +} + +int xsc_priv_alloc_chrdev_region(void) +{ + int ret = 0; + char *device_name = "xscale"; + + ret = alloc_chrdev_region(&g_priv_cdev_no, 0, XSC_MAX_CDEV_NUM, device_name); + if (ret) { + pr_err("%s cant't get major %d\n", device_name, MAJOR(g_priv_cdev_no)); + return ret; + } + g_priv_class = class_create(g_priv_class_name); + g_priv_cdev_cnt = 0; + + return 0; +} + +void xsc_priv_unregister_chrdev_region(void) +{ + class_destroy(g_priv_class); + unregister_chrdev_region(g_priv_cdev_no, XSC_MAX_CDEV_NUM); +} diff --git a/drivers/infiniband/hw/xsc/qp.c b/drivers/infiniband/hw/xsc/qp.c new file mode 100644 index 0000000000000000000000000000000000000000..6df90c841af4c9e95b68ce909538069915399482 --- /dev/null +++ b/drivers/infiniband/hw/xsc/qp.c @@ -0,0 +1,1939 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "xsc_ib.h" +#include "user.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include +#include +#include + +/* not supported currently */ +static int wq_signature; + +#define MAD_QUEUE_DEPTH 128 + +enum { + XSC_IB_CACHE_LINE_SIZE = 64, +}; + +#define MAC_INVALID 0xff + +#define LAG_PORT_NUM_MASK_EN 0x80000000 +#define LAG_PORT_NUM_MASK_EN_OFFSET 31 +#define LAG_PORT_NUM_MASK 0x30000 +#define LAG_PORT_NUM_OFFSET 16 + +#define UDP_SPORT_MASK_EN 0x40000000 +#define UDP_SPORT_MASK_EN_OFFSET 30 +#define UDP_SPORT_MASK 0xffff +#define UDP_SPORT_OFFSET 0 + +static const u32 xsc_ib_opcode[] = { + [IB_WR_SEND] = XSC_MSG_OPCODE_SEND, + [IB_WR_SEND_WITH_IMM] = XSC_MSG_OPCODE_SEND, + [IB_WR_RDMA_WRITE] = XSC_MSG_OPCODE_RDMA_WRITE, + [IB_WR_RDMA_WRITE_WITH_IMM] = XSC_MSG_OPCODE_RDMA_WRITE, + [IB_WR_RDMA_READ] = XSC_MSG_OPCODE_RDMA_READ, + [IB_WR_LOCAL_INV] = XSC_MSG_OPCODE_SEND, + [IB_WR_REG_MR] = XSC_MSG_OPCODE_SEND, + [IB_WR_SEND_WITH_INV] = XSC_MSG_OPCODE_SEND, +}; + +static int is_qp0(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_SMI; +} + +static int is_qp1(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_GSI; +} + +static int is_sqp(enum ib_qp_type qp_type) +{ + return is_qp0(qp_type) || is_qp1(qp_type); +} + +static void *get_wqe(struct xsc_ib_qp *qp, int offset) +{ + return xsc_buf_offset(&qp->buf, offset); +} + +static void *get_recv_wqe(struct xsc_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); +} + +static void *get_seg_wqe(void *first, int n) +{ + return first + (n << XSC_BASE_WQE_SHIFT); +} + +void *xsc_get_send_wqe(struct xsc_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); +} + +static int iboe_tos_to_sl(struct net_device *ndev, int tos) +{ + int prio; + struct net_device *dev; + + prio = rt_tos2priority(tos); + dev = is_vlan_dev(ndev) ? 
vlan_dev_real_dev(ndev) : ndev; + if (dev->num_tc) + return netdev_get_prio_tc_map(dev, prio); + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (is_vlan_dev(ndev)) + return (vlan_dev_get_egress_qos_mask(ndev, prio) & + VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +#endif + return 0; +} + +static inline void set_remote_addr_seg(struct xsc_wqe_data_seg *remote_seg, + u32 msg_len, u64 remote_addr, u32 rkey) +{ + remote_seg->in_line = 0; + WR_LE_32(remote_seg->seg_len, msg_len); + WR_LE_32(remote_seg->mkey, rkey); + WR_LE_64(remote_seg->va, remote_addr); +} + +static void set_local_data_seg(struct xsc_wqe_data_seg *data_seg, struct ib_sge *sg) +{ + data_seg->in_line = 0; + WR_LE_32(data_seg->seg_len, sg->length); + WR_LE_32(data_seg->mkey, sg->lkey); + WR_LE_64(data_seg->va, sg->addr); +} + +static int set_data_inl_seg(struct xsc_ib_qp *qp, const struct ib_send_wr *wr, void *ctrl) +{ + struct xsc_wqe_data_seg *data_seg; + unsigned int seg_index; + void *addr; + int len; + int i; + + for (i = 0, seg_index = 1; i < wr->num_sge; ++i, ++seg_index) { + if (likely(wr->sg_list[i].length)) { + addr = (void *)wr->sg_list[i].addr; + len = wr->sg_list[i].length; + + if (unlikely(len > qp->max_inline_data)) + return -ENOMEM; + + data_seg = get_seg_wqe(ctrl, seg_index); + data_seg->in_line = 1; + data_seg->len = len; + memcpy(data_seg->in_line_data, addr, len); + } + } + + return 0; +} + +static __be32 send_ieth(const struct ib_send_wr *wr) +{ + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + case IB_WR_RDMA_WRITE_WITH_IMM: + return wr->ex.imm_data; + default: + return 0; + } +} + +static void xsc_ib_qp_event(struct xsc_core_qp *qp, int type) +{ + struct ib_qp *ibqp = &to_xibqp(qp)->ibqp; + struct ib_event event; + + if (ibqp->event_handler) { + event.device = ibqp->device; + event.element.qp = ibqp; + switch (type) { + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + default: + pr_warn("xsc_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); + return; + } + + ibqp->event_handler(&event, ibqp->qp_context); + } +} + +static int set_rq_size(struct xsc_ib_dev *dev, struct ib_qp_cap *cap, + int has_rq, struct xsc_ib_qp *qp, struct xsc_ib_create_qp *ucmd) +{ + u32 wqe_cnt = roundup_pow_of_two(cap->max_recv_wr); + + /* Sanity check RQ size before proceeding */ + if (wqe_cnt > dev->xdev->caps.max_wqes) { + xsc_ib_warn(dev, "max_recv_wr:%d exceed max rq depth\n", cap->max_recv_wr); + wqe_cnt = dev->xdev->caps.max_wqes; + } + + if (!has_rq) { + qp->rq.max_gs = 0; + qp->rq.wqe_cnt = 0; + qp->rq.wqe_shift = 0; + } else { + if (ucmd) { + qp->rq.wqe_cnt = ucmd->rq_wqe_count; + qp->rq.wqe_shift = ucmd->rq_wqe_shift; + qp->rq.max_gs = 1; + qp->rq.max_post = qp->rq.wqe_cnt; + } else { + qp->rq.wqe_cnt = wqe_cnt; + qp->rq.wqe_shift = dev->xdev->caps.recv_wqe_shift; + qp->rq.max_gs = dev->xdev->caps.recv_ds_num; + qp->rq.max_post = qp->rq.wqe_cnt; + } + } + + return 0; +} + +static int calc_sq_size(struct xsc_ib_dev *dev, struct ib_qp_init_attr *attr, + struct xsc_ib_qp *qp) +{ + int wqe_size; + int wq_size; + + if (!attr->cap.max_send_wr) { + xsc_ib_err(dev, "invalid max_send_wr:%d\n", attr->cap.max_send_wr); + return -1; + } + + wqe_size = 1 << dev->xdev->caps.send_wqe_shift; + qp->max_inline_data = (dev->xdev->caps.send_ds_num - 2) * sizeof(struct xsc_wqe_data_seg); + attr->cap.max_inline_data = qp->max_inline_data; + + 
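+	/*
+	 * Worked example of the sizing below (illustrative numbers only; the
+	 * real values come from xdev->caps and XSC_BASE_WQE_SHIFT): with
+	 * send_wqe_shift == 7 (128-byte WQEs), 16-byte basic segments
+	 * (XSC_BASE_WQE_SHIFT == 4) and max_send_wr == 1000, wqe_cnt rounds
+	 * up to 1024, ds_cnt becomes 1024 << (7 - 4) = 8192 basic segments,
+	 * and wq_size is 1024 * 128 bytes = 128 KiB.
+	 */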
qp->sq.wqe_cnt = roundup_pow_of_two(attr->cap.max_send_wr); + qp->sq.wqe_cnt = min_t(int, qp->sq.wqe_cnt, (int)dev->xdev->caps.max_wqes); + qp->sq.ds_cnt = qp->sq.wqe_cnt << (dev->xdev->caps.send_wqe_shift - XSC_BASE_WQE_SHIFT); + wq_size = qp->sq.wqe_cnt * wqe_size; + qp->sq.wqe_shift = ilog2(wqe_size); + qp->sq.max_gs = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - XSC_RADDR_SEG_NUM; + qp->sq.max_post = qp->sq.wqe_cnt; + + return wq_size; +} + +static int qp_has_rq(struct ib_qp_init_attr *attr) +{ + if (attr->qp_type == IB_QPT_XRC_INI || + attr->qp_type == IB_QPT_XRC_TGT || attr->srq || + !attr->cap.max_recv_wr) + return 0; + + return 1; +} + +static enum xsc_qp_state to_xsc_state(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return XSC_QP_STATE_RST; + case IB_QPS_INIT: return XSC_QP_STATE_INIT; + case IB_QPS_RTR: return XSC_QP_STATE_RTR; + case IB_QPS_RTS: return XSC_QP_STATE_RTS; + case IB_QPS_SQD: return XSC_QP_STATE_SQD; + case IB_QPS_SQE: return XSC_QP_STATE_SQER; + case IB_QPS_ERR: return XSC_QP_STATE_ERR; + default: return -1; + } +} + +static char *qp_state_to_str(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return "RST"; + case IB_QPS_INIT: return "INIT"; + case IB_QPS_RTR: return "RTR"; + case IB_QPS_RTS: return "RTS"; + case IB_QPS_SQD: return "SQD"; + case IB_QPS_SQE: return "SQE"; + case IB_QPS_ERR: return "ERR"; + default: return "UNKNOWN"; + } +} + +static int create_user_qp(struct xsc_ib_dev *dev, struct ib_pd *pd, + struct xsc_ib_qp *qp, struct ib_udata *udata, + struct xsc_create_qp_mbox_in **in, + struct xsc_ib_create_qp_resp *resp, int *inlen) +{ + struct xsc_ib_ucontext *context; + struct xsc_ib_create_qp ucmd; + int page_shift; + int npages; + u32 offset; + int ncont; + int err; + int hw_npages; + + err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); + if (err) { + xsc_ib_err(dev, "failed to copy from udata, err=%d\n", err); + return err; + } + xsc_ib_info(dev, "buf_addr:0x%lx db_addr:0x%lx sq cnt:%u, rq cnt:%u, rq shift:%u\n", + (uintptr_t)ucmd.buf_addr, (uintptr_t)ucmd.db_addr, + ucmd.sq_wqe_count, ucmd.rq_wqe_count, ucmd.rq_wqe_shift); + + context = to_xucontext(pd->uobject->context); + + qp->sq.ds_cnt = ucmd.sq_wqe_count; + qp->sq.wqe_cnt = ucmd.sq_wqe_count; + qp->sq.wqe_shift = XSC_BASE_WQE_SHIFT; + qp->rq.ds_cnt = ucmd.rq_wqe_count; + qp->rq.wqe_cnt = ucmd.rq_wqe_count; + qp->rq.wqe_shift = XSC_BASE_WQE_SHIFT; + + qp->buf_size = (qp->sq.wqe_cnt << qp->sq.wqe_shift) + (qp->rq.wqe_cnt << qp->rq.wqe_shift); + qp->umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, qp->buf_size, 0); + if (IS_ERR(qp->umem)) { + xsc_ib_err(dev, "umem_get failed\n"); + err = PTR_ERR(qp->umem); + goto err_uuar; + } + + xsc_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + if (ncont != npages) { + page_shift = PAGE_SHIFT; + ncont = npages; + } + + hw_npages = DIV_ROUND_UP(qp->buf_size, PAGE_SIZE_4K); + err = xsc_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); + if (err) { + xsc_ib_err(dev, "bad offset:%d\n", offset); + goto err_umem; + } + xsc_ib_info(dev, "npage:%d, page_shift:%d, ncont:%d, offset:%d, hw_npages %d\n", + npages, page_shift, ncont, offset, hw_npages); + + *inlen = sizeof(**in) + sizeof(*((*in)->req.pas)) * hw_npages; + *in = xsc_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_umem; + } + xsc_ib_populate_pas(dev, qp->umem, page_shift, (*in)->req.pas, hw_npages, true); + (*in)->req.pa_num = cpu_to_be16(hw_npages); + + err = ib_copy_to_udata(udata, resp, sizeof(*resp)); + if (err) 
{ + xsc_ib_err(dev, "failed to copy to udata, err=%d\n", err); + goto err_umem; + } + qp->create_type = XSC_QP_USER; + + return 0; + +err_umem: + ib_umem_release(qp->umem); + +err_uuar: + return err; +} + +static void destroy_qp_user(struct ib_pd *pd, struct xsc_ib_qp *qp) +{ + struct xsc_ib_ucontext *context; + + context = to_xucontext(pd->uobject->context); + ib_umem_release(qp->umem); +} + +#define MAX_QP1_SQ_HDR_SIZE_V2 512 +#define MAX_QP1_SQ_HDR_SIZE 86 + /* Ethernet header = 14 */ + /* ib_grh = 40 (provided by MAD) */ + /* ib_bth + ib_deth = 20 */ + /* MAD = 256 (provided by MAD) */ + /* iCRC = 4 */ +#define MAX_QP1_RQ_HDR_SIZE_V2 512 + +static int create_kernel_qp(struct xsc_ib_dev *dev, + struct ib_qp_init_attr *init_attr, + struct xsc_ib_qp *qp, + struct xsc_create_qp_mbox_in **in, int *inlen) +{ + int err; + int sq_size; + int hw_npages; + + sq_size = calc_sq_size(dev, init_attr, qp); + if (sq_size < 0) { + err = -ENOMEM; + xsc_ib_err(dev, "err %d\n", err); + return err; + } + + qp->rq.ds_cnt = qp->rq.wqe_cnt << (qp->rq.wqe_shift - XSC_BASE_WQE_SHIFT); + qp->rq.offset = 0; + qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; + qp->buf_size = qp->sq.offset + sq_size; + qp->send_psn = 0; + + err = xsc_buf_alloc(dev->xdev, qp->buf_size, PAGE_SIZE, &qp->buf); + if (err) { + xsc_ib_err(dev, "failed to alloc qp buffer,err=%d\n", err); + return err; + } + + qp->sq.qend = qp->buf.direct.buf + qp->sq.offset + sq_size; + hw_npages = DIV_ROUND_UP(qp->buf_size, PAGE_SIZE_4K); + *inlen = sizeof(**in) + sizeof(*(*in)->req.pas) * hw_npages; + *in = xsc_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_buf; + } + + xsc_fill_page_array(&qp->buf, (*in)->req.pas, hw_npages); + (*in)->req.pa_num = cpu_to_be16(hw_npages); + + qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wrid), GFP_KERNEL); + qp->sq.wr_data = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wr_data), GFP_KERNEL); + qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(*qp->rq.wrid), GFP_KERNEL); + qp->sq.w_list = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.w_list), GFP_KERNEL); + qp->sq.wqe_head = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wqe_head), GFP_KERNEL); + + if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || + !qp->sq.w_list || !qp->sq.wqe_head) { + err = -ENOMEM; + goto err_wrid; + } + qp->create_type = XSC_QP_KERNEL; + + if (init_attr->qp_type == IB_QPT_GSI) { + qp->sq.mad_index = 0; + qp->sq.mad_queue_depth = MAD_QUEUE_DEPTH; + qp->sq.hdr_size = MAX_QP1_SQ_HDR_SIZE_V2 * MAD_QUEUE_DEPTH; + qp->sq.hdr_buf = dma_alloc_coherent(dev->ib_dev.dma_device, + qp->sq.hdr_size, + &qp->sq.hdr_dma, + GFP_KERNEL); + if (!qp->sq.hdr_buf) { + err = -ENOMEM; + xsc_ib_err(dev, "Failed to create sq_hdr_buf"); + goto err_wrid; + } + } + + return 0; + +err_wrid: + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + +err_buf: + xsc_buf_free(dev->xdev, &qp->buf); + return err; +} + +static void destroy_qp_kernel(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp) +{ + if (qp->sq.hdr_buf) + dma_free_coherent(dev->ib_dev.dma_device, qp->sq.hdr_size, + qp->sq.hdr_buf, qp->sq.hdr_dma); + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + xsc_buf_free(dev->xdev, &qp->buf); +} + +static u8 ib_to_xsc_qp_type(enum ib_qp_type qp_type, __u32 flags) +{ + if (qp_type == IB_QPT_RC) { + return XSC_QUEUE_TYPE_RDMA_RC; + } else if ((qp_type == IB_QPT_GSI) || (qp_type == IB_QPT_SMI)) { + return 
XSC_QUEUE_TYPE_RDMA_MAD; + } else if (qp_type == IB_QPT_RAW_PACKET) { + if (flags & XSC_QP_FLAG_RAWPACKET_TSO) + return XSC_QUEUE_TYPE_RAW_TSO; + else if (flags & XSC_QP_FLAG_RAWPACKET_TX) + return XSC_QUEUE_TYPE_RAW_TX; + else + return XSC_QUEUE_TYPE_RAW; + } else { + return XSC_QUEUE_TYPE_INVALID; + } +} + +static int create_qp_common(struct xsc_ib_dev *dev, struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, struct xsc_ib_qp *qp) +{ + struct xsc_ib_resources *devr = &dev->devr; + struct xsc_ib_create_qp_resp resp; + struct xsc_create_qp_mbox_in *in = NULL; + struct xsc_ib_create_qp ucmd; + int inlen = sizeof(*in); + int err; + char buf[256]; + char *ptr = buf; + int ret = 0; + + mutex_init(&qp->mutex); + spin_lock_init(&qp->sq.lock); + spin_lock_init(&qp->rq.lock); + spin_lock_init(&qp->lock); + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + qp->sq_signal_bits = XSC_WQE_CTRL_CQ_UPDATE; + + if (pd && pd->uobject) { + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + xsc_ib_err(dev, "failed to copy from udata\n"); + return -EFAULT; + } + + qp->wq_sig = !!(ucmd.flags & XSC_QP_FLAG_SIGNATURE); + qp->scat_cqe = !!(ucmd.flags & XSC_QP_FLAG_SCATTER_CQE); + } else { + qp->wq_sig = !!wq_signature; + } + + qp->has_rq = qp_has_rq(init_attr); + + err = set_rq_size(dev, &init_attr->cap, qp->has_rq, + qp, (pd && pd->uobject) ? &ucmd : NULL); + if (err) { + xsc_ib_err(dev, "failed to set rq size %d\n", err); + return err; + } + + if (pd) { + if (pd->uobject) { + err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); + if (err) + xsc_ib_err(dev, "failed to create user qp, err = %d\n", err); + } else { + err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); + if (err) + xsc_ib_err(dev, "failed to create kernel qp, err = %d\n", err); + else + qp->pa_lkey = to_mpd(pd)->pa_lkey; + } + + if (err) + return err; + } else { + in = xsc_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + qp->create_type = XSC_QP_EMPTY; + } + + if (is_sqp(init_attr->qp_type)) + qp->port = init_attr->port_num; + + in->req.qp_type = init_attr->qp_type; + if (is_qp1(init_attr->qp_type)) + in->req.input_qpn = cpu_to_be16(1); + + if (init_attr->qp_type != XSC_IB_QPT_REG_UMR) + in->req.pdn = cpu_to_be32(to_mpd(pd ? 
pd : devr->p0)->pdn); + + if (qp->rq.ds_cnt) + in->req.log_rq_sz = ilog2(qp->rq.ds_cnt); + + if (qp->sq.ds_cnt) + in->req.log_sq_sz = ilog2(qp->sq.ds_cnt); + else + in->req.log_sq_sz = ilog2(0x80); + + if (init_attr->send_cq) { + qp->send_cq = init_attr->send_cq; + in->req.cqn_send = to_xcq(init_attr->send_cq)->xcq.cqn; + in->req.cqn_send = cpu_to_be16(in->req.cqn_send); +#ifndef MSIX_SUPPORT + init_attr->send_cq->comp_handler(init_attr->send_cq, + init_attr->send_cq->cq_context); +#endif + } + + if (init_attr->recv_cq) { + qp->recv_cq = init_attr->recv_cq; + in->req.cqn_recv = to_xcq(init_attr->recv_cq)->xcq.cqn; + in->req.cqn_recv = cpu_to_be16(in->req.cqn_recv); + } + + in->req.qp_type = ib_to_xsc_qp_type(init_attr->qp_type, ucmd.flags); + + if (in->req.qp_type == XSC_QUEUE_TYPE_INVALID) { + xsc_ib_err(dev, "invalid qp type:%d\n", init_attr->qp_type); + goto err_create; + } + in->req.glb_funcid = cpu_to_be16(dev->xdev->glb_func_id); + + qp->xqp.qp_type_internal = in->req.qp_type; + + err = xsc_core_create_qp(dev->xdev, &qp->xqp, in, inlen); + if (err) { + xsc_ib_err(dev, "create qp failed, err=%d\n", err); + goto err_create; + } + + qp->doorbell_qpn = qp->xqp.qpn; + + qp->xqp.event = xsc_ib_qp_event; + qp->xqp.qp_type = init_attr->qp_type; + ret += snprintf(ptr + ret, 256 - ret, "pdn=%d,", to_mpd(pd ? pd : devr->p0)->pdn); + ret += snprintf(ptr + ret, 256 - ret, "log_rq_sz=%d,", in->req.log_rq_sz); + ret += snprintf(ptr + ret, 256 - ret, "log_sq_sz=%d,", in->req.log_sq_sz); + ret += snprintf(ptr + ret, 256 - ret, "scqn=%d,", to_xcq(init_attr->send_cq)->xcq.cqn); + ret += snprintf(ptr + ret, 256 - ret, "rcqn=%d", to_xcq(init_attr->recv_cq)->xcq.cqn); + + xsc_ib_info(dev, "succeeded to create qp:%d, %s\n", qp->xqp.qpn, buf); + + xsc_vfree(in); + + return 0; + +err_create: + if (qp->create_type == XSC_QP_USER) + destroy_qp_user(pd, qp); + else if (qp->create_type == XSC_QP_KERNEL) + destroy_qp_kernel(dev, qp); + + xsc_vfree(in); + return err; +} + +static void xsc_ib_lock_cqs(struct xsc_ib_cq *send_cq, struct xsc_ib_cq *recv_cq) + __acquires(&send_cq->lock) __acquires(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->xcq.cqn < recv_cq->xcq.cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, + SINGLE_DEPTH_NESTING); + } else if (send_cq->xcq.cqn == recv_cq->xcq.cqn) { + spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, + SINGLE_DEPTH_NESTING); + } + } else { + spin_lock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_lock_irq(&recv_cq->lock); + } +} + +static void xsc_ib_unlock_cqs(struct xsc_ib_cq *send_cq, struct xsc_ib_cq *recv_cq) + __releases(&send_cq->lock) __releases(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->xcq.cqn < recv_cq->xcq.cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else if (send_cq->xcq.cqn == recv_cq->xcq.cqn) { + __release(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } + } else { + spin_unlock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_unlock_irq(&recv_cq->lock); + } +} + +static struct xsc_ib_pd *get_pd(struct xsc_ib_qp *qp) +{ + return to_mpd(qp->ibqp.pd); +} + +static void get_cqs(struct xsc_ib_qp *qp, + struct xsc_ib_cq **send_cq, struct xsc_ib_cq **recv_cq) +{ + switch (qp->ibqp.qp_type) { + case IB_QPT_XRC_TGT: + *send_cq = NULL; + *recv_cq = NULL; + break; + case 
XSC_IB_QPT_REG_UMR: + case IB_QPT_XRC_INI: + *send_cq = to_xcq(qp->ibqp.send_cq); + *recv_cq = NULL; + break; + + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_RC: + case IB_QPT_UC: + case IB_QPT_UD: + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + *send_cq = to_xcq(qp->ibqp.send_cq); + *recv_cq = to_xcq(qp->ibqp.recv_cq); + break; + + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: + *send_cq = NULL; + *recv_cq = NULL; + break; + } +} + +static void destroy_qp_common(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp) +{ + struct xsc_ib_cq *send_cq, *recv_cq; + struct xsc_modify_qp_mbox_in *in; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return; + + if (qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW || + qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW_TSO || + qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW_TX || + qp->state != IB_QPS_RESET) + if (xsc_core_qp_modify(dev->xdev, to_xsc_state(qp->state), + XSC_QP_STATE_RST, in, sizeof(*in), &qp->xqp)) + xsc_ib_warn(dev, "modify QP %06x to RESET failed\n", qp->xqp.qpn); + + get_cqs(qp, &send_cq, &recv_cq); + + if (qp->create_type == XSC_QP_KERNEL) { + xsc_ib_lock_cqs(send_cq, recv_cq); + __xsc_ib_cq_clean(recv_cq, qp->xqp.qpn); + if (send_cq != recv_cq) + __xsc_ib_cq_clean(send_cq, qp->xqp.qpn); + xsc_ib_unlock_cqs(send_cq, recv_cq); + } + + err = xsc_core_destroy_qp(dev->xdev, &qp->xqp); + if (err) + xsc_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->xqp.qpn); + kfree(in); + + if (qp->create_type == XSC_QP_KERNEL) + destroy_qp_kernel(dev, qp); + else if (qp->create_type == XSC_QP_USER) + destroy_qp_user(&get_pd(qp)->ibpd, qp); +} + +static const char *ib_qp_type_str(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_SMI: + return "IB_QPT_SMI"; + case IB_QPT_GSI: + return "IB_QPT_GSI"; + case IB_QPT_RC: + return "IB_QPT_RC"; + case IB_QPT_UC: + return "IB_QPT_UC"; + case IB_QPT_UD: + return "IB_QPT_UD"; + case IB_QPT_RAW_IPV6: + return "IB_QPT_RAW_IPV6"; + case IB_QPT_RAW_ETHERTYPE: + return "IB_QPT_RAW_ETHERTYPE"; + case IB_QPT_XRC_INI: + return "IB_QPT_XRC_INI"; + case IB_QPT_XRC_TGT: + return "IB_QPT_XRC_TGT"; + case IB_QPT_RAW_PACKET: + return "IB_QPT_RAW_PACKET"; + case XSC_IB_QPT_REG_UMR: + return "XSC_IB_QPT_REG_UMR"; + case IB_QPT_MAX: + default: + return "Invalid QP type"; + } +} + +int xsc_ib_create_qp(struct ib_qp *ibqp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev; + struct xsc_ib_qp *qp; + struct ib_pd *pd = ibqp->pd; + int err; + + if (pd) { + dev = to_mdev(pd->device); + } else { + /* being cautious here */ + if (init_attr->qp_type != IB_QPT_XRC_TGT && + init_attr->qp_type != XSC_IB_QPT_REG_UMR) { + pr_warn("%s: no PD for transport %s\n", __func__, + ib_qp_type_str(init_attr->qp_type)); + return RET_VALUE(-EINVAL); + } + dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); + } + + if (init_attr->qp_type != IB_QPT_RAW_PACKET) { + if (!is_support_rdma(dev->xdev) || + (is_qp1(init_attr->qp_type) && !is_support_rdma_cm(dev->xdev))) { + return RET_VALUE(-EPROTONOSUPPORT); + } + } + + qp = to_xqp(ibqp); + + qp->xqp.mac_id = MAC_INVALID; + + switch (init_attr->qp_type) { + case IB_QPT_RC: + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_RAW_PACKET: + err = create_qp_common(dev, pd, init_attr, udata, qp); + if (err) { + xsc_ib_err(dev, "create_qp_common failed\n"); + return RET_VALUE(err); + } + + if (is_qp0(init_attr->qp_type)) { + qp->ibqp.qp_num = 0; + } else if (is_qp1(init_attr->qp_type)) { + qp->ibqp.qp_num = 1; + dev->xdev->gsi_qpn = 
qp->xqp.qpn; + } else { + qp->ibqp.qp_num = qp->xqp.qpn; + } + + break; + + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + case IB_QPT_MAX: + default: + xsc_ib_err(dev, "unsupported qp type %d\n", + init_attr->qp_type); + /* Don't support raw QPs */ + return RET_VALUE(-EINVAL); + } + + return 0; +} + +xsc_ib_destroy_qp_def() +{ + struct xsc_ib_dev *dev = to_mdev(qp->device); + struct xsc_ib_qp *xqp = to_xqp(qp); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_lag *lag; + + destroy_qp_common(dev, xqp); + + xsc_board_lag_lock(xdev); + if (xqp->xqp.mac_id != MAC_INVALID && xsc_lag_is_roce(xdev)) { + lag = xsc_get_lag(xdev); + atomic_dec(&lag->qp_cnt[xqp->xqp.mac_id]); + } + xsc_board_lag_unlock(xdev); + + return 0; +} + +static inline u16 xsc_calc_udp_sport(u32 lqpn, u32 rqpn) +{ + unsigned char *p; + u8 ports[2]; + u16 sport; + u64 tqpn; + + tqpn = ((u64)(lqpn & 0xffffff)) * ((u64)(rqpn & 0xffffff)); + p = (unsigned char *)&tqpn; + ports[0] = p[0] ^ p[2] ^ p[4]; + ports[1] = p[1] ^ p[3] ^ p[5]; + sport = *((u16 *)ports) | 0xC000; + + return sport; +} + +static inline void xsc_path_set_udp_sport(struct xsc_qp_path *path, + const struct rdma_ah_attr *ah, + u32 lqpn, u32 rqpn) +{ + if ((ah->grh.flow_label & UDP_SPORT_MASK) != 0) { + if ((ah->grh.flow_label & UDP_SPORT_MASK_EN) == 0) + path->sport = cpu_to_be16(xsc_flow_label_to_udp_sport(ah->grh.flow_label)); + else + path->sport = cpu_to_be16((ah->grh.flow_label & UDP_SPORT_MASK) >> + UDP_SPORT_OFFSET); + } else { + path->sport = cpu_to_be16(xsc_calc_udp_sport(lqpn, rqpn)); + } +} + +static int xsc_set_path(struct xsc_ib_dev *dev, const struct rdma_ah_attr *ah, + struct xsc_qp_path *path, u8 port, int attr_mask, + u32 path_flags, const struct ib_qp_attr *attr, struct xsc_ib_qp *qp) +{ + struct ib_global_route *grh = rdma_ah_retrieve_grh((struct rdma_ah_attr *)ah); + union ib_gid *dgid = &grh->dgid; + const struct ib_gid_attr *sgid_attr = grh->sgid_attr; + union ib_gid *sgid = &((struct ib_gid_attr *)sgid_attr)->gid; + union { + struct sockaddr _sockaddr; + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; + } sgid_addr, dgid_addr; + int force_pcp, force_dscp; + char buf[256] = {0}; + char *ptr = buf; + int ret = 0; + + if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { + if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH)) + return -EINVAL; + + if (qp->ibqp.qp_type == IB_QPT_RC || + qp->ibqp.qp_type == IB_QPT_UC || + qp->ibqp.qp_type == IB_QPT_XRC_INI || + qp->ibqp.qp_type == IB_QPT_XRC_TGT) + xsc_path_set_udp_sport(path, ah, qp->ibqp.qp_num, attr->dest_qp_num); + + if (sgid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) { + xsc_ib_err(dev, "gid type not ROCEv2\n"); + return -EINVAL; + } + + force_dscp = dev->force_dscp; + if (force_dscp == DSCP_PCP_UNSET) + path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f; + else + path->ecn_dscp = force_dscp; + path->hop_limit = grh->hop_limit; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid); + + if (sgid_addr._sockaddr.sa_family == AF_INET && + dgid_addr._sockaddr.sa_family == AF_INET) { + memcpy(path->sip, &sgid_addr._sockaddr_in.sin_addr.s_addr, + sizeof(struct in_addr)); + memcpy(path->dip, &dgid_addr._sockaddr_in.sin_addr.s_addr, + sizeof(struct in_addr)); + path->af_type = AF_INET; + ret += snprintf(ptr + ret, 256 - ret, "sip=%#x,", + be32_to_cpu(path->sip[0])); + ret += snprintf(ptr + ret, 256 - ret, "dip=%#x,", + be32_to_cpu(path->dip[0])); + } else if (sgid_addr._sockaddr.sa_family == AF_INET6 && + 
dgid_addr._sockaddr.sa_family == AF_INET6) { + memcpy(path->sip, &sgid_addr._sockaddr_in6.sin6_addr.s6_addr, + sizeof(path->sip)); + memcpy(path->dip, &dgid_addr._sockaddr_in6.sin6_addr.s6_addr, + sizeof(path->dip)); + path->af_type = AF_INET6; + ret += snprintf(ptr + ret, 256 - ret, "sip=%08x%08x%08x%08x,", + be32_to_cpu(path->sip[0]), be32_to_cpu(path->sip[1]), + be32_to_cpu(path->sip[2]), be32_to_cpu(path->sip[3])); + ret += snprintf(ptr + ret, 256 - ret, "dip=%08x%08x%08x%08x,", + be32_to_cpu(path->dip[0]), be32_to_cpu(path->dip[1]), + be32_to_cpu(path->dip[2]), be32_to_cpu(path->dip[3])); + } else { + return -EINVAL; + } + + ether_addr_copy(path->smac, dev->netdev->dev_addr); + + memcpy(path->dmac, ah->roce.dmac, sizeof(ah->roce.dmac)); + ret += snprintf(ptr + ret, 256 - ret, "smac=%02x%02x%02x%02x%02x%02x,", + path->smac[0], path->smac[1], path->smac[2], + path->smac[3], path->smac[4], path->smac[5]); + ret += snprintf(ptr + ret, 256 - ret, "dmac=%02x%02x%02x%02x%02x%02x", + path->dmac[0], path->dmac[1], path->dmac[2], + path->dmac[3], path->dmac[4], path->dmac[5]); + xsc_ib_info(dev, "ib path info:%s\n", buf); + + if (is_vlan_dev(sgid_attr->ndev)) { + path->vlan_valid = 1; + path->vlan_id = cpu_to_be16(vlan_dev_vlan_id(sgid_attr->ndev)); + + force_pcp = dev->force_pcp; + if (force_pcp == DSCP_PCP_UNSET) + path->dci_cfi_prio_sl = (ah->sl & 0x7); + else + path->dci_cfi_prio_sl = force_pcp; + } else { + path->vlan_valid = 0; + } + } + xsc_ib_info(dev, "path dscp %d pcp %d\n", path->ecn_dscp, path->dci_cfi_prio_sl); + return 0; +} + +static inline u8 __xsc_get_min_qp_cnt_mac(struct xsc_lag *lag) +{ + int array_size = lag->xsc_member_cnt; + int min = atomic_read(&lag->qp_cnt[0]); + u8 mac_index = 0, i; + + for (i = 0; i < array_size; i++) { + if (atomic_read(&lag->qp_cnt[i]) < min) { + min = atomic_read(&lag->qp_cnt[i]); + mac_index = i; + } + } + + return mac_index; +} +static int __xsc_ib_modify_qp(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state cur_state, enum ib_qp_state new_state) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_cq *send_cq, *recv_cq; + struct xsc_qp_context *context; + struct xsc_modify_qp_mbox_in *in; + struct xsc_qp_path path; + int sqd_event; + int err; + struct xsc_lag *lag; + u8 lag_port_num; + char buf[256] = {0}; + char *ptr = buf; + int ret = 0; + struct xsc_core_device *xdev = dev->xdev; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + context = &qp->ctx; + + if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu != IB_MTU_1024 && + attr->path_mtu != IB_MTU_4096) { + xsc_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); + } + + context->mtu_mode = (attr->path_mtu < IB_MTU_4096) ? 0 : 1; + ret = snprintf(ptr, 256, "path_mtu=%d,", attr->path_mtu); + } + + if (attr_mask & IB_QP_DEST_QPN) { + context->remote_qpn = cpu_to_be32(attr->dest_qp_num); + ret += snprintf(ptr + ret, 256 - ret, "dest_qp_num=%d,", attr->dest_qp_num); + } + + if (attr_mask & IB_QP_AV) { + err = xsc_set_path(dev, &attr->ah_attr, &path, + attr_mask & IB_QP_PORT ? attr->port_num : qp->port, + attr_mask, 0, attr, qp); + if (err) + goto out; + + context->src_udp_port = path.sport; + context->dscp = path.ecn_dscp; + context->hop_limit = path.hop_limit; + context->ip_type = (path.af_type == AF_INET ? 
0 : 1); + context->ip_type = cpu_to_be16(context->ip_type); + memcpy(context->dip, path.dip, sizeof(context->dip)); + memcpy(context->sip, path.sip, sizeof(context->sip)); + memcpy(context->dmac, path.dmac, sizeof(path.dmac)); + memcpy(context->smac, path.smac, sizeof(path.smac)); + + context->vlan_valid = path.vlan_valid; + context->dci_cfi_prio_sl = path.dci_cfi_prio_sl; + context->vlan_id = path.vlan_id; + + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + lag = xsc_get_lag(xdev); + context->lag_id = cpu_to_be16(lag->lag_id); + context->lag_sel_en = 1; + lag_port_num = lag->xsc_member_cnt; + if ((attr->ah_attr.grh.flow_label & LAG_PORT_NUM_MASK_EN) != 0) { + context->lag_sel = ((attr->ah_attr.grh.flow_label & + LAG_PORT_NUM_MASK) >> + LAG_PORT_NUM_OFFSET) % + lag_port_num; + } else { + context->lag_sel = __xsc_get_min_qp_cnt_mac(lag); + } + + if (qp->xqp.mac_id != MAC_INVALID && + context->lag_sel != qp->xqp.mac_id) + atomic_dec(&lag->qp_cnt[qp->xqp.mac_id]); + + qp->xqp.mac_id = context->lag_sel; + atomic_inc(&lag->qp_cnt[qp->xqp.mac_id]); + } + xsc_board_lag_unlock(xdev); + } + + if (attr_mask & IB_QP_RNR_RETRY) { + context->rnr_retry = attr->rnr_retry; + ret += snprintf(ptr + ret, 256 - ret, "rnr_retry=%d,", attr->rnr_retry); + } + + if (attr_mask & IB_QP_RETRY_CNT) { + context->retry_cnt = attr->retry_cnt; + ret += snprintf(ptr + ret, 256 - ret, "retry_cnt=%d,", attr->retry_cnt); + } + + if (attr_mask & IB_QP_SQ_PSN) { + context->next_send_psn = cpu_to_be32(attr->sq_psn); + ret += snprintf(ptr + ret, 256 - ret, "sq_psn=%#x,", attr->sq_psn); + } + + if (attr_mask & IB_QP_RQ_PSN) { + context->next_recv_psn = cpu_to_be32(attr->rq_psn); + ret += snprintf(ptr + ret, 256 - ret, "rq_psn=%#x,", attr->rq_psn); + } + + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && + attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) + sqd_event = 1; + else + sqd_event = 0; + + memcpy(&in->ctx, context, sizeof(*context)); + err = xsc_core_qp_modify(xdev, to_xsc_state(cur_state), + to_xsc_state(new_state), in, sqd_event, + &qp->xqp); + if (err) { + xsc_ib_err(dev, "failed to modify qp[%d] from %s to %s\n", + qp->xqp.qpn, qp_state_to_str(cur_state), qp_state_to_str(new_state)); + goto out; + } + + qp->state = new_state; + xsc_ib_info(dev, "succeeded to modify qp[%d] from %s to %s with attr_mask=%#x, %s\n", + qp->xqp.qpn, qp_state_to_str(cur_state), qp_state_to_str(new_state), + attr_mask, buf); + + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->atomic_rd_en = attr->qp_access_flags; + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->resp_depth = attr->max_dest_rd_atomic; + if (attr_mask & IB_QP_PORT) + qp->port = attr->port_num; + if (attr_mask & IB_QP_ALT_PATH) + qp->alt_port = attr->alt_port_num; + + /* + * If we moved a kernel QP to RESET, clean up all old CQ + * entries and reinitialize the QP. 
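+	 * get_cqs() below resolves the CQs, stale CQEs for this QPN are
+	 * flushed from both of them, and the SQ/RQ producer/consumer
+	 * indices are rewound so the rings can be reused.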
+ */ + if (new_state == IB_QPS_RESET && !ibqp->uobject) { + get_cqs(qp, &send_cq, &recv_cq); + xsc_ib_cq_clean(recv_cq, qp->xqp.qpn); + if (send_cq != recv_cq) + xsc_ib_cq_clean(send_cq, qp->xqp.qpn); + + qp->rq.head = 0; + qp->rq.tail = 0; + qp->sq.head = 0; + qp->sq.tail = 0; + qp->sq.cur_post = 0; + qp->sq.last_poll = 0; + } + +out: + kfree(in); + return err; +} + +int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + enum ib_qp_state cur_state, new_state; + int err = -EINVAL; + + mutex_lock(&qp->mutex); + + cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; + new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; + + if ((attr_mask & IB_QP_PORT) && + (attr->port_num == 0 || attr->port_num > dev->xdev->caps.num_ports)) { + xsc_ib_err(dev, "error port num\n"); + goto out; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > dev->xdev->caps.max_ra_res_qp) { + xsc_ib_err(dev, "rd atomic:%u exeeded", attr->max_rd_atomic); + goto out; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > dev->xdev->caps.max_ra_req_qp) { + xsc_ib_err(dev, "dest rd atomic:%u exeeded", attr->max_dest_rd_atomic); + goto out; + } + + if (cur_state == new_state && cur_state == IB_QPS_RESET) { + err = 0; + goto out; + } + + err = __xsc_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); +out: + mutex_unlock(&qp->mutex); + return err; +} + +static int xsc_wq_overflow(struct xsc_ib_wq *wq, int nreq, struct xsc_ib_cq *cq) +{ + unsigned int cur; + + cur = wq->head - wq->tail; + if (likely(cur + nreq < wq->max_post)) + return 0; + + spin_lock(&cq->lock); + cur = wq->head - wq->tail; + spin_unlock(&cq->lock); + + return cur + nreq >= wq->max_post; +} + +static inline void xsc_post_send_db(struct xsc_ib_qp *qp, + struct xsc_core_device *xdev, + int nreq) +{ + u16 next_pid; + union xsc_db_data db; + + if (unlikely(!nreq)) + return; + + qp->sq.head += nreq; + + next_pid = qp->sq.head << (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT); + db.sq_next_pid = next_pid; + db.sqn = qp->doorbell_qpn; + /* + * Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + writel(db.raw_data, REG_ADDR(xdev, xdev->regs.tx_db)); +} + +static inline u32 xsc_crc32(struct xsc_ib_dev *dev, u32 crc, u8 *buf, size_t len) +{ + u32 i; + + for (i = 0; i < len; i++) + crc = dev->crc_32_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8); + + return crc; +} + +#define BTH_QPN_MASK (0x00ffffff) +#define BTH_PSN_MASK (0x00ffffff) + +/* Compute a partial ICRC for all the IB transport headers. 
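+ * The headers are copied into a local pseudo-header buffer, the fields
+ * that are excluded from the ICRC (IPv4 ttl/check/tos, IPv6
+ * flow_lbl/priority/hop_limit, the UDP checksum and BTH resv8a) are
+ * forced to all-ones, and a CRC32 seeded as if a masked LRH preceded
+ * the packet is accumulated over that buffer.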
*/ +u32 xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc) +{ + struct iphdr *ip4h = NULL; + struct ipv6hdr *ip6h = NULL; + struct udphdr *udph; + struct ib_unpacked_eth *eth; + struct rxe_bth *bth; + struct ib_unpacked_deth *deth; + struct ib_unpacked_vlan *vlan; + int crc; + int crc_field_len; + __be16 l3_type; + u8 *l3_start; + + int hdr_size; + + /* pseudo header buffer size is calculate using ipv6 header size since + * it is bigger than ipv4 + */ + u8 pshdr[sizeof(struct udphdr) + + sizeof(struct ipv6hdr) + + sizeof(*bth) + sizeof(*deth)]; + + eth = pkt; + + if (eth->type == htons(ETH_P_8021Q)) { + vlan = (struct ib_unpacked_vlan *)(eth + 1); + l3_type = vlan->type; + l3_start = (u8 *)(vlan + 1); + size -= 4; + } else { + l3_type = eth->type; + l3_start = (u8 *)(eth + 1); + } + + hdr_size = sizeof(struct udphdr) + + (l3_type == htons(ETH_P_IP) ? + sizeof(struct iphdr) : sizeof(struct ipv6hdr)); + + crc_field_len = hdr_size + sizeof(*bth) + sizeof(*deth); + + if (crc_field_len != size) { + xsc_ib_err(dev, "Unmatched hdr: expect %d actual %d\n", + crc_field_len, size); + return -EINVAL; + } + + ip4h = (struct iphdr *)(l3_start); + ip6h = (struct ipv6hdr *)(l3_start); + udph = (struct udphdr *)(ip4h + 1); + bth = (struct rxe_bth *)(udph + 1); + + memcpy(pshdr, l3_start, crc_field_len); + + /* This seed is the result of computing a CRC with a seed of + * 0xfffffff and 8 bytes of 0xff representing a masked LRH. + */ + crc = 0xdebb20e3; + + if (l3_type == htons(ETH_P_IP)) { /* IPv4 */ + memcpy(pshdr, ip4h, hdr_size); + ip4h = (struct iphdr *)pshdr; + udph = (struct udphdr *)(ip4h + 1); + + ip4h->ttl = 0xff; + ip4h->check = CSUM_MANGLED_0; + ip4h->tos = 0xff; + } else { /* IPv6 */ + memcpy(pshdr, ip6h, hdr_size); + ip6h = (struct ipv6hdr *)pshdr; + udph = (struct udphdr *)(ip6h + 1); + + memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl)); + ip6h->priority = 0xf; + ip6h->hop_limit = 0xff; + } + udph->check = CSUM_MANGLED_0; + + bth = (struct rxe_bth *)(udph + 1); + /* exclude bth.resv8a */ + bth->qpn |= cpu_to_be32(~BTH_QPN_MASK); + + *icrc = xsc_crc32(dev, crc, pshdr, crc_field_len); + + return 0; +} + +/* Routine for sending QP1 packets for RoCE V1 an V2 + */ + // TO BE DONE: sq hdr buf should be create dynamically for mult entry +int build_qp1_send_v2(struct xsc_ib_dev *dev, + struct xsc_ib_qp *qp, + const struct ib_send_wr *wr, + struct ib_sge *sge, + int payload_size, u32 *crc) +{ + struct xsc_ib_ah *ah = container_of(ud_wr((struct ib_send_wr *)wr)->ah, struct xsc_ib_ah, + ibah); + const struct ib_gid_attr *sgid_attr = ah->ibah.sgid_attr; + u16 ether_type; + union ib_gid dgid; + bool is_eth = false; + bool is_vlan = false; + bool is_grh = false; + bool is_udp = false; + u8 ip_version = 0; + u16 vlan_id = 0xFFFF; + int rc = 0; + int cm_pcp = 0; + void *hdr_buf; + + memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); + + if (!qp->sq.hdr_buf) { + xsc_ib_err(dev, "QP1 buffer is empty!"); + return -ENOMEM; + } + hdr_buf = (u8 *)qp->sq.hdr_buf + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index; + + if (!sgid_attr || !sgid_attr->ndev) { + xsc_ib_err(dev, "sgid_addr or ndev is null\n"); + return -ENXIO; + } + + if (is_vlan_dev(sgid_attr->ndev)) + vlan_id = vlan_dev_vlan_id(sgid_attr->ndev); + + is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; + memcpy(&dgid.raw, &ah->av.rgid, 16); + if (is_udp) { + if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) { + ip_version = 4; + ether_type = ETH_P_IP; + } else { + ip_version = 6; + ether_type = ETH_P_IPV6; + } + is_grh = false; 
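+		/* RoCE v2: no GRH on the wire, the IP/UDP headers built below take its place */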
+ } else { + ether_type = ETH_P_IBOE; + is_grh = true; + } + + is_eth = true; + is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false; + + ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh, + ip_version, is_udp, 0, &qp->qp1_hdr); + + /* ETH */ + ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->av.rmac); + ether_addr_copy(qp->qp1_hdr.eth.smac_h, dev->netdev->dev_addr); + + /* For vlan, check the sgid for vlan existence */ + if (!is_vlan) { + qp->qp1_hdr.eth.type = cpu_to_be16(ether_type); + } else { + if (dev->cm_pcp != DSCP_PCP_UNSET) + cm_pcp = dev->cm_pcp << 13; + else + cm_pcp = (iboe_tos_to_sl(sgid_attr->ndev, ah->av.tclass) << 13); + qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); + qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id | cm_pcp); + } + +#define ECN_CAPABLE_TRANSPORT 0x2 + if (is_grh || ip_version == 6) { + memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw, + sizeof(sgid_attr->gid)); + memcpy(qp->qp1_hdr.grh.destination_gid.raw, ah->av.rgid, + sizeof(ah->av.rgid)); + qp->qp1_hdr.grh.hop_limit = ah->av.hop_limit; + + if (dev->cm_dscp != DSCP_PCP_UNSET) + qp->qp1_hdr.grh.traffic_class = (dev->cm_dscp << 2) | ECN_CAPABLE_TRANSPORT; + else + qp->qp1_hdr.grh.traffic_class = ECN_CAPABLE_TRANSPORT; + } + + if (ip_version == 4) { + if (dev->cm_dscp != DSCP_PCP_UNSET) + qp->qp1_hdr.ip4.tos = (dev->cm_dscp << 2) | ECN_CAPABLE_TRANSPORT; + else + qp->qp1_hdr.ip4.tos = ECN_CAPABLE_TRANSPORT; + qp->qp1_hdr.ip4.id = 0; + qp->qp1_hdr.ip4.frag_off = htons(IP_DF); + qp->qp1_hdr.ip4.ttl = ah->av.hop_limit; + + memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4); + memcpy(&qp->qp1_hdr.ip4.daddr, ah->av.rgid + 12, 4); + qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr); + } + + if (is_udp) { + qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT); + qp->qp1_hdr.udp.sport = htons(ah->av.udp_sport); + qp->qp1_hdr.udp.csum = 0; + xsc_ib_dbg(dev, "CM packet used udp_sport=%d\n", ah->av.udp_sport); + } + + /* BTH */ + if (wr->opcode == IB_WR_SEND_WITH_IMM) { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + qp->qp1_hdr.immediate_present = 1; + } else { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; + } + if (wr->send_flags & IB_SEND_SOLICITED) + qp->qp1_hdr.bth.solicited_event = 1; + /* pad_count */ + qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; + + /* P_key for QP1 is for all members */ + qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); + qp->qp1_hdr.bth.destination_qpn = IB_QP1; + qp->qp1_hdr.bth.ack_req = 0; + qp->send_psn++; + qp->send_psn &= BTH_PSN_MASK; + qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); + /* DETH */ + /* Use the priviledged Q_Key for QP1 */ + qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); + qp->qp1_hdr.deth.source_qpn = IB_QP1; + + /* Pack the QP1 to the transmit buffer */ + sge->addr = (dma_addr_t)(qp->sq.hdr_dma + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index); + sge->lkey = 0xFFFFFFFF; + sge->length = MAX_QP1_SQ_HDR_SIZE; + + ib_ud_header_pack(&qp->qp1_hdr, hdr_buf); + /* + * Max Header buf size for IPV6 RoCE V2 is 86, + * which is same as the QP1 SQ header buffer. + * Header buf size for IPV4 RoCE V2 can be 66. + * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20). + * Subtract 20 bytes from QP1 SQ header buf size + */ + if (is_udp && ip_version == 4) + sge->length -= 20; + /* + * Max Header buf size for RoCE V1 is 78. + * ETH(14) + VLAN(4) + GRH(40) + BTH(20). 
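+	 * (14 + 4 + 40 + 20 = 78, i.e. 8 bytes less than the 86-byte buffer.)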
+ * Subtract 8 bytes from QP1 SQ header buf size + */ + if (!is_udp) + sge->length -= 8; + + /* Subtract 4 bytes for non vlan packets */ + if (!is_vlan) + sge->length -= 4; + + rc = xsc_icrc_hdr(dev, hdr_buf, sge->length - sizeof(struct ib_unpacked_eth), crc); + if (rc) { + xsc_ib_err(dev, "CRC error: hdr size %ld\n", + sge->length - sizeof(struct ib_unpacked_eth)); + } + return rc; +} + +static void zero_send_ds(struct xsc_ib_qp *qp, int idx) +{ + void *seg; + int i; + int ds_num; + u64 *p; + + ds_num = XSC_SEND_SEG_NUM << (qp->sq.wqe_shift - XSC_SEND_WQE_SHIFT); + seg = (void *)xsc_get_send_wqe(qp, idx); + for (i = 1; i < ds_num; i++) { + p = get_seg_wqe(seg, i); + p[0] = 0; + p[1] = 0; + } +} + +int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad_wr) +{ + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + void *seg; + struct xsc_send_wqe_ctrl_seg *ctrl; + struct xsc_wqe_data_seg *data_seg; + u32 crc; + int nreq; + int err = 0; + int i; + unsigned int idx; + unsigned long irqflag = 0; + struct ib_sge sg; + u8 *cur_p = NULL; + u8 *mad_send_base = NULL; + struct ib_wc wc; + void *vaddr; + int sig = 0; + + if (wr->opcode == IB_WR_LOCAL_INV) { + wc.status = IB_WC_SUCCESS; + wc.wr_cqe = wr->wr_cqe; + wc.qp = ibqp; + sig = qp->sq_signal_bits == XSC_WQE_CTRL_CQ_UPDATE ? + 1 : wr->send_flags & IB_SEND_SIGNALED; + if (xsc_wr_invalidate_mr(dev, wr)) + wc.status = IB_WC_GENERAL_ERR; + + if (wr->wr_cqe && wr->wr_cqe->done && sig) + wr->wr_cqe->done(qp->send_cq, &wc); + wr = wr->next; + if (!wr) + return 0; + } + + if (wr->opcode == IB_WR_REG_MR) { + wc.status = IB_WC_SUCCESS; + wc.qp = ibqp; + sig = qp->sq_signal_bits == XSC_WQE_CTRL_CQ_UPDATE ? + 1 : wr->send_flags & IB_SEND_SIGNALED; + if (xsc_wr_reg_mr(dev, wr)) + wc.status = IB_WC_GENERAL_ERR; + if (wr->wr_cqe && wr->wr_cqe->done && sig) + wr->wr_cqe->done(qp->send_cq, &wc); + } + + spin_lock_irqsave(&qp->sq.lock, irqflag); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + unsigned int seg_index = 1; + unsigned int msg_len = 0; + struct ib_sge *sgl = &wr->sg_list[0]; + int sg_n = wr->num_sge; + + if (unlikely(wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(xsc_ib_opcode))) { + xsc_ib_err(dev, "bad opcode %d\n", wr->opcode); + err = EINVAL; + *bad_wr = wr; + goto out; + } + + if (unlikely(xsc_wq_overflow(&qp->sq, nreq, + to_xcq(qp->ibqp.send_cq)))) { + xsc_ib_err(dev, "send work queue overflow\n"); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->sq.max_gs)) { + xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", + wr->num_sge, qp->sq.max_gs); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->opcode == IB_WR_RDMA_READ && wr->num_sge > 1)) { + xsc_ib_err(dev, "rdma read, max gs exceeded %d (max = 1)\n", + wr->num_sge); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); + zero_send_ds(qp, idx); + seg = xsc_get_send_wqe(qp, idx); + ctrl = seg; + ctrl->wqe_id = cpu_to_le16(qp->sq.cur_post << + (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT)); + ctrl->ds_data_num = 0; + ctrl->se = wr->send_flags & IB_SEND_SOLICITED ? 1 : 0; + ctrl->ce = wr->send_flags & IB_SEND_SIGNALED ? 
1 : 0; + for (i = 0; i < wr->num_sge; ++i) { + if (likely(wr->sg_list[i].length)) + msg_len += wr->sg_list[i].length; + } + ctrl->msg_len = msg_len; + ctrl->with_immdt = 0; + + if (unlikely(wr->opcode == IB_WR_RDMA_READ && msg_len == 0)) { + xsc_ib_err(dev, "rdma read, msg len should not be 0\n"); + /* workaround, return success for posting zero-length read */ + err = 0; + goto out; + } + switch (ibqp->qp_type) { + case IB_QPT_RC: + ctrl->ds_data_num = wr->num_sge; + switch (wr->opcode) { + case IB_WR_SEND_WITH_INV: + case IB_WR_SEND: + break; + case IB_WR_SEND_WITH_IMM: + ctrl->with_immdt = 1; + ctrl->opcode_data = send_ieth(wr); + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + ctrl->with_immdt = 1; + ctrl->opcode_data = send_ieth(wr); + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + ctrl->with_immdt = 0; + ctrl->ds_data_num++; + data_seg = get_seg_wqe(ctrl, seg_index); + set_remote_addr_seg(data_seg, + msg_len, + rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); + seg_index++; + break; + case IB_WR_REG_MR: + break; + default: + xsc_ib_err(dev, "debug: opcode:%u NOT supported\n", wr->opcode); + err = EPERM; + *bad_wr = wr; + goto out; + } + ctrl->msg_opcode = xsc_ib_opcode[wr->opcode]; + break; + case IB_QPT_UD: + case IB_QPT_GSI: + ctrl->msg_opcode = XSC_MSG_OPCODE_MAD; + ctrl->ds_data_num++; + data_seg = get_seg_wqe(ctrl, seg_index); + mad_send_base = (u8 *)qp->sq.hdr_buf + + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index; + + err = build_qp1_send_v2(dev, qp, wr, &sg, msg_len, &crc); + if (err) { + *bad_wr = wr; + goto out; + } + + cur_p = mad_send_base + sg.length; + for (i = 0; i < wr->num_sge; ++i) { + if (likely(wr->sg_list[i].length)) { + vaddr = xsc_ib_send_mad_sg_virt_addr(&dev->ib_dev, wr, i); + memcpy(cur_p, vaddr, wr->sg_list[i].length); + } + cur_p += wr->sg_list[i].length; + } + crc = xsc_crc32(dev, crc, mad_send_base + sg.length, ctrl->msg_len); + ctrl->msg_len += sg.length; + seg_index++; + + *(u32 *)&mad_send_base[ctrl->msg_len] = ~crc; + ctrl->msg_len += sizeof(crc); + sg.length = ctrl->msg_len; + set_local_data_seg(data_seg, &sg); + xsc_ib_info(dev, "qp[%d] send MAD packet, msg_len:%d\n", + qp->xqp.qpn, ctrl->msg_len); + qp->sq.mad_index = (qp->sq.mad_index + 1) % MAD_QUEUE_DEPTH; + + sg_n = 0; + break; + default: + xsc_ib_err(dev, "qp type:%u NOT supported\n", ibqp->qp_type); + err = EPERM; + *bad_wr = wr; + goto out; + } + + if (wr->opcode == IB_WR_REG_MR) { + nreq--; + continue; + } + + if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { + err = set_data_inl_seg(qp, wr, ctrl); + if (unlikely(err)) { + *bad_wr = wr; + xsc_ib_err(dev, "inline layout failed, err %d\n", err); + goto out; + } + } else { + for (i = 0; i < sg_n; ++i, ++seg_index) { + if (likely(sgl[i].length)) { + data_seg = get_seg_wqe(ctrl, seg_index); + set_local_data_seg(data_seg, &sgl[i]); + } + } + } + qp->sq.wrid[idx] = wr->wr_id; + qp->sq.wqe_head[idx] = qp->sq.head + nreq; + qp->sq.cur_post += 1; + } +out: + xsc_ib_dbg(dev, "nreq:%d\n", nreq); + xsc_post_send_db(qp, dev->xdev, nreq); + spin_unlock_irqrestore(&qp->sq.lock, irqflag); + + return err; +} + +int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr) +{ + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_wqe_data_seg *recv_head; + struct xsc_wqe_data_seg *data_seg; + unsigned long flags; + int err = 0; + u16 next_pid = 0; + union xsc_db_data db; + int nreq; + u16 idx; + int i; + + 
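+	/*
+	 * Build one receive WQE per posted WR under rq.lock; the RX
+	 * doorbell is rung once for the whole chain after the loop.
+	 */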
spin_lock_irqsave(&qp->rq.lock, flags); + + idx = qp->rq.head & (qp->rq.wqe_cnt - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (unlikely(xsc_wq_overflow(&qp->rq, nreq, to_xcq(qp->ibqp.recv_cq)))) { + xsc_ib_err(dev, "recv work queue overflow\n"); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", + wr->num_sge, qp->rq.max_gs); + err = EINVAL; + *bad_wr = wr; + goto out; + } + + recv_head = get_recv_wqe(qp, idx); + + for (i = 0; i < wr->num_sge; ++i) { + if (unlikely(!wr->sg_list[i].length)) + continue; + data_seg = get_seg_wqe(recv_head, i); + data_seg->in_line = 0; + WR_LE_64(data_seg->va, wr->sg_list[i].addr); + WR_LE_32(data_seg->mkey, wr->sg_list[i].lkey); + if (is_qp1(qp->xqp.qp_type)) + WR_LE_32(data_seg->seg_len, xdev->caps.rx_pkt_len_max); + else + WR_LE_32(data_seg->seg_len, wr->sg_list[i].length); + } + + qp->rq.wrid[idx] = wr->wr_id; + + idx = (idx + 1) & (qp->rq.wqe_cnt - 1); + } + +out: + if (likely(nreq)) { + qp->rq.head += nreq; + next_pid = qp->rq.head << (qp->rq.wqe_shift - XSC_BASE_WQE_SHIFT); + db.rq_next_pid = next_pid; + db.rqn = qp->doorbell_qpn; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + writel(db.raw_data, REG_ADDR(xdev, xdev->regs.rx_db)); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return err; +} + +static inline enum ib_qp_state to_ib_qp_state(enum xsc_qp_state xsc_state) +{ + switch (xsc_state) { + case XSC_QP_STATE_RST: return IB_QPS_RESET; + case XSC_QP_STATE_INIT: return IB_QPS_INIT; + case XSC_QP_STATE_RTR: return IB_QPS_RTR; + case XSC_QP_STATE_RTS: return IB_QPS_RTS; + case XSC_QP_STATE_SQ_DRAINING: + case XSC_QP_STATE_SQD: return IB_QPS_SQD; + case XSC_QP_STATE_SQER: return IB_QPS_SQE; + case XSC_QP_STATE_ERR: return IB_QPS_ERR; + default: return -1; + } +} + +static inline enum ib_mig_state to_ib_mig_state(int xsc_mig_state) +{ + switch (xsc_mig_state) { + case XSC_QP_PM_ARMED: return IB_MIG_ARMED; + case XSC_QP_PM_REARM: return IB_MIG_REARM; + case XSC_QP_PM_MIGRATED: return IB_MIG_MIGRATED; + default: return -1; + } +} + +int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_query_qp_mbox_out *outb; + struct xsc_qp_context *context; + int xsc_state; + int err = 0; + + mutex_lock(&qp->mutex); + outb = kzalloc(sizeof(*outb), GFP_KERNEL); + if (!outb) { + err = -ENOMEM; + goto out; + } + context = &outb->ctx; + err = xsc_core_qp_query(dev->xdev, &qp->xqp, outb, sizeof(*outb)); + if (err) + goto out_free; + + qp_attr->qp_state = qp->state; + qp_attr->path_mtu = context->mtu_mode ? 
IB_MTU_4096 : IB_MTU_1024; + qp_attr->rq_psn = be32_to_cpu(context->next_recv_psn) & 0xffffff; + qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; + qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; + qp_attr->sq_draining = xsc_state == XSC_QP_STATE_SQ_DRAINING; + qp_attr->retry_cnt = context->retry_cnt; + qp_attr->rnr_retry = context->rnr_retry; + qp_attr->cur_qp_state = qp_attr->qp_state; + qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; + qp_attr->cap.max_recv_sge = qp->rq.max_gs; + + if (!ibqp->uobject) { + qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; + qp_attr->cap.max_send_sge = qp->sq.max_gs; + } else { + qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; + qp_attr->cap.max_send_sge = qp->sq.max_gs; + } + + /* We don't support inline sends for kernel QPs (yet), and we + * don't know what userspace's value should be. + */ + qp_attr->cap.max_inline_data = 0; + + qp_init_attr->cap = qp_attr->cap; + + qp_init_attr->create_flags = 0; + if (qp->flags & XSC_IB_QP_BLOCK_MULTICAST_LOOPBACK) + qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; + + qp_init_attr->sq_sig_type = qp->sq_signal_bits & XSC_WQE_CTRL_CQ_UPDATE ? + IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; + +out_free: + kfree(outb); + +out: + mutex_unlock(&qp->mutex); + return err; +} + +void xsc_ib_drain_rq(struct ib_qp *qp __maybe_unused) +{ +} + +void xsc_ib_drain_sq(struct ib_qp *qp __maybe_unused) +{ +} diff --git a/drivers/infiniband/hw/xsc/rtt.c b/drivers/infiniband/hw/xsc/rtt.c new file mode 100644 index 0000000000000000000000000000000000000000..e7a68f1ab41a7f830564f62d4057818e90a64f09 --- /dev/null +++ b/drivers/infiniband/hw/xsc/rtt.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_cmd.h" + +struct xsc_rtt_interface { + struct xsc_core_device *xdev; + struct kobject kobj; +}; + +struct xsc_rtt_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf); + ssize_t (*store)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count); +}; + +static ssize_t enable_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err; + struct xsc_inbox_hdr in; + struct xsc_rtt_en_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_EN); + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", out.en); +} + +static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + int err; + u16 rtt_enable; + struct xsc_rtt_en_mbox_in in; + struct xsc_rtt_en_mbox_out out; + + err = kstrtou16(buf, 0, &rtt_enable); + if (err != 0) + return -EINVAL; + + if (rtt_enable > 1) { + xsc_core_err(g->xdev, "Failed to set rtt en, rtt_enable(%u) out of range[0,1]\n", + rtt_enable); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_EN); + in.en = rtt_enable; + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to set rtt en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static ssize_t qpn_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err, i; + u32 count = 0; + struct xsc_inbox_hdr in; + struct xsc_get_rtt_qpn_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_QPN); + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_get_rtt_qpn_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt qpn, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_RTT_CFG_QPN_MAX - 1); i++) + count += sprintf(&buf[count], "%hu,", __be32_to_cpu(out.qpn[i])); + + count += sprintf(&buf[count], "%hu\n", __be32_to_cpu(out.qpn[i])); + + return count; +} + +#define RTT_CFG_QPN_FORMAT "%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u," \ +"%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u" + +static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + int err, i, num; + struct xsc_rtt_qpn_mbox_in in; + struct xsc_rtt_qpn_mbox_out out; + u32 *ptr = in.qpn; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + num = sscanf(buf, RTT_CFG_QPN_FORMAT, &ptr[0], &ptr[1], &ptr[2], &ptr[3], &ptr[4], + &ptr[5], &ptr[6], &ptr[7], &ptr[8], &ptr[9], &ptr[10], &ptr[11], &ptr[12], + &ptr[13], &ptr[14], &ptr[15], &ptr[16], &ptr[17], &ptr[18], &ptr[19], + &ptr[20], &ptr[21], &ptr[22], &ptr[23], &ptr[24], &ptr[25], &ptr[26], + 
&ptr[27], &ptr[28], &ptr[29], &ptr[30], &ptr[31]); + if (num == 0) + return -EINVAL; + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_QPN); + + for (i = 0 ; i < XSC_RTT_CFG_QPN_MAX; i++) + in.qpn[i] = __cpu_to_be32(ptr[i]); + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_qpn_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_qpn_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to set rtt qpn, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static ssize_t period_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err; + struct xsc_inbox_hdr in; + struct xsc_rtt_period_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_PERIOD); + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt period, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", __be32_to_cpu(out.period)); +} + +#define RTT_CFG_PERIOD_MAX 10000 //ms, 10s +#define RTT_CFG_PERIOD_MIN 1000 //ms, 1s +static ssize_t period_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + int err; + u32 rtt_period; + struct xsc_rtt_period_mbox_in in; + struct xsc_rtt_period_mbox_out out; + + err = kstrtouint(buf, 0, &rtt_period); + if (err != 0) + return -EINVAL; + + if (rtt_period > RTT_CFG_PERIOD_MAX || rtt_period < RTT_CFG_PERIOD_MIN) + return -EINVAL; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_PERIOD); + in.period = __cpu_to_be32(rtt_period); + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_period_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to set rtt period, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static ssize_t result_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int i, err; + u32 count = 0; + struct xsc_inbox_hdr in; + struct xsc_rtt_result_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_RESULT); + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_rtt_result_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt result, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_RTT_CFG_QPN_MAX - 1); i++) + count += sprintf(&buf[count], "%lld,", __be64_to_cpu(out.result[i])); + + count += sprintf(&buf[count], "%lld\n", __be64_to_cpu(out.result[i])); + + return count; +} + +static ssize_t result_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t stats_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err; + u32 count = 0; + struct xsc_inbox_hdr in; + struct xsc_rtt_stats_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_STATS); + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct 
xsc_rtt_stats_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt stats, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + count += sprintf(&buf[count], "rtt_succ_snd_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_succ_snd_req_cnt)); + count += sprintf(&buf[count], "rtt_succ_snd_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_succ_snd_rsp_cnt)); + count += sprintf(&buf[count], "rtt_fail_snd_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_fail_snd_req_cnt)); + count += sprintf(&buf[count], "rtt_fail_snd_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_fail_snd_rsp_cnt)); + count += sprintf(&buf[count], "rtt_rcv_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_req_cnt)); + count += sprintf(&buf[count], "rtt_rcv_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_rsp_cnt)); + count += sprintf(&buf[count], "rtt_rcv_unk_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_unk_cnt)); + count += sprintf(&buf[count], "rtt_grp_invalid_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_grp_invalid_cnt)); + + return count; +} + +static ssize_t stats_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +#define RTT_ATTR(_name) struct xsc_rtt_attributes xsc_rtt_attr_##_name = \ + __ATTR(rtt_probe_##_name, 0644, _name##_show, _name##_store) + +RTT_ATTR(enable); +RTT_ATTR(qpn); +RTT_ATTR(period); +RTT_ATTR(result); +RTT_ATTR(stats); + +static ssize_t rtt_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct xsc_rtt_attributes *ga = + container_of(attr, struct xsc_rtt_attributes, attr); + struct xsc_rtt_interface *g = container_of(kobj, struct xsc_rtt_interface, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t rtt_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct xsc_rtt_attributes *ga = + container_of(attr, struct xsc_rtt_attributes, attr); + struct xsc_rtt_interface *g = container_of(kobj, struct xsc_rtt_interface, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +static const struct sysfs_ops rtt_sysfs_ops = { + .show = rtt_attr_show, + .store = rtt_attr_store, +}; + +static struct attribute *rtt_attrs[] = { + &xsc_rtt_attr_enable.attr, + &xsc_rtt_attr_qpn.attr, + &xsc_rtt_attr_period.attr, + &xsc_rtt_attr_result.attr, + &xsc_rtt_attr_stats.attr, + NULL +}; + +ATTRIBUTE_GROUPS(rtt); + +static const struct kobj_type rtt_ktype = { + .sysfs_ops = &rtt_sysfs_ops, + .default_groups = rtt_groups, +}; + +int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + struct xsc_rtt_interface *tmp; + int err; + + if (!xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) + return -EACCES; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + err = kobject_init_and_add(&tmp->kobj, &rtt_ktype, + &ib_dev->dev.kobj, "rtt"); + if (err) + goto rtt_attr_err; + + xdev->rtt_priv = tmp; + tmp->xdev = xdev; + return 0; + +rtt_attr_err: + kobject_put(&tmp->kobj); + kfree(tmp); + return err; +} + +void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev) +{ + int err; + struct xsc_rtt_en_mbox_in in; + struct xsc_rtt_en_mbox_out out; + struct xsc_rtt_period_mbox_in period_in; + struct xsc_rtt_period_mbox_out period_out; + struct xsc_rtt_interface *rtt; + + if (!xdev || !xdev->rtt_priv) + return; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = 
__cpu_to_be16(XSC_CMD_OP_SET_RTT_EN); + in.en = 0; + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + if (err || out.hdr.status) + xsc_core_err(xdev, "Failed to set rtt disable, err(%u), status(%u)\n", + err, out.hdr.status); + + memset(&period_in, 0, sizeof(period_in)); + memset(&period_out, 0, sizeof(period_out)); + + period_in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_PERIOD); + period_in.period = __cpu_to_be32(RTT_CFG_PERIOD_MAX); + + err = xsc_cmd_exec(xdev, (void *)&period_in, sizeof(struct xsc_rtt_period_mbox_in), + (void *)&period_out, sizeof(struct xsc_rtt_period_mbox_out)); + if (err || period_out.hdr.status) + xsc_core_err(xdev, "Failed to set rtt period default, err(%u), status(%u)\n", + err, out.hdr.status); + + rtt = xdev->rtt_priv; + kobject_put(&rtt->kobj); + kfree(rtt); + xdev->rtt_priv = NULL; +} diff --git a/drivers/infiniband/hw/xsc/user.h b/drivers/infiniband/hw/xsc/user.h new file mode 100644 index 0000000000000000000000000000000000000000..6e2b6ff542ae8de163a5190f651db3d1d4754450 --- /dev/null +++ b/drivers/infiniband/hw/xsc/user.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_IB_USER_H +#define XSC_IB_USER_H + +#include +#include /* For ETH_ALEN. */ +#include + +enum xsc_ib_devx_methods { + XSC_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT), + XSC_IB_METHOD_DEVX_QUERY_UAR, + XSC_IB_METHOD_DEVX_QUERY_EQN, +}; + +enum xsc_ib_devx_other_attrs { + XSC_IB_ATTR_DEVX_OTHER_CMD_IN = (1U << UVERBS_ID_NS_SHIFT), + XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, +}; + +enum xsc_ib_objects { + XSC_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT), + XSC_IB_OBJECT_DEVX_OBJ, + XSC_IB_OBJECT_DEVX_UMEM, + XSC_IB_OBJECT_FLOW_MATCHER, +}; + +/* Increment this value if any changes that break userspace ABI + * compatibility are made. + */ +#define XSC_IB_UVERBS_ABI_VERSION 1 + +/* Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in __u64 + * instead. 
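+ * The explicit 'reserved' members below double as padding so the
+ * struct layouts do not depend on compiler alignment choices.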
+ */ + +enum { + XSC_QP_FLAG_SIGNATURE = 1 << 0, + XSC_QP_FLAG_SCATTER_CQE = 1 << 1, + XSC_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2, + XSC_QP_FLAG_BFREG_INDEX = 1 << 3, + XSC_QP_FLAG_TYPE_DCT = 1 << 4, + XSC_QP_FLAG_TYPE_DCI = 1 << 5, + XSC_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6, + XSC_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, + XSC_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, + XSC_QP_FLAG_RAWPACKET_TSO = 1 << 9, + XSC_QP_FLAG_RAWPACKET_TX = 1 << 10, +}; + +struct xsc_ib_alloc_ucontext_req { + __u32 rsvd0; + __u32 rsvd1; +}; + +enum xsc_user_cmds_supp_uhw { + XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, + XSC_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, +}; + +struct xsc_ib_alloc_ucontext_resp { + __u32 qp_tab_size; + __u32 cache_line_size; + __u16 max_sq_desc_sz; + __u16 max_rq_desc_sz; + __u32 max_send_wqebb; + __u32 max_recv_wr; + __u16 num_ports; + __u16 reserved; + __u64 qpm_tx_db; + __u64 qpm_rx_db; + __u64 cqm_next_cid_reg; + __u64 cqm_armdb; + __u32 send_ds_num; + __u32 recv_ds_num; + __u32 cmds_supp_uhw; +}; + +struct xsc_ib_create_qp { + __u64 buf_addr; + __u64 db_addr; + __u32 sq_wqe_count; + __u32 rq_wqe_count; + __u32 rq_wqe_shift; + __u32 flags; +}; + +struct xsc_ib_create_qp_resp { + __u32 uuar_index; + __u32 reserved; +}; + +struct xsc_ib_create_cq { + __u64 buf_addr; + __u64 db_addr; + __u32 cqe_size; +}; + +struct xsc_ib_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct xsc_ib_create_ah_resp { + __u32 response_length; + __u8 dmac[ETH_ALEN]; + __u8 reserved[6]; +}; + +struct xsc_ib_alloc_pd_resp { + __u32 pdn; +}; + +struct xsc_ib_tso_caps { + __u32 max_tso; /* Maximum tso payload size in bytes */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_UD + */ + __u32 supported_qpts; +}; + +/* RX Hash function flags */ +enum xsc_rx_hash_function_flags { + XSC_RX_HASH_FUNC_TOEPLITZ = 1 << 0, +}; + +enum xsc_rdma_link_speed { + XSC_RDMA_LINK_SPEED_2_5GB = 1 << 0, + XSC_RDMA_LINK_SPEED_5GB = 1 << 1, + XSC_RDMA_LINK_SPEED_10GB = 1 << 3, + XSC_RDMA_LINK_SPEED_14GB = 1 << 4, + XSC_RDMA_LINK_SPEED_25GB = 1 << 5, + XSC_RDMA_LINK_SPEED_50GB = 1 << 6, + XSC_RDMA_LINK_SPEED_100GB = 1 << 7, +}; + +enum xsc_rdma_phys_state { + XSC_RDMA_PHY_STATE_SLEEP = 1, + XSC_RDMA_PHY_STATE_POLLING, + XSC_RDMA_PHY_STATE_DISABLED, + XSC_RDMA_PHY_STATE_PORT_CONFIGURATION_TRAINNING, + XSC_RDMA_PHY_STATE_LINK_UP, + XSC_RDMA_PHY_STATE_LINK_ERROR_RECOVERY, + XSC_RDMA_PHY_STATE_PHY_TEST, +}; + +/* + * RX Hash flags, these flags allows to set which incoming packet's field should + * participates in RX Hash. Each flag represent certain packet's field, + * when the flag is set the field that is represented by the flag will + * participate in RX Hash calculation. + * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP + * and *TCP and *UDP flags can't be enabled together on the same QP. 
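+ * A valid combination therefore looks like, for example:
+ * XSC_RX_HASH_SRC_IPV4 | XSC_RX_HASH_DST_IPV4 |
+ * XSC_RX_HASH_SRC_PORT_TCP | XSC_RX_HASH_DST_PORT_TCP.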
+ */ +enum xsc_rx_hash_fields { + XSC_RX_HASH_SRC_IPV4 = 1 << 0, + XSC_RX_HASH_DST_IPV4 = 1 << 1, + XSC_RX_HASH_SRC_IPV6 = 1 << 2, + XSC_RX_HASH_DST_IPV6 = 1 << 3, + XSC_RX_HASH_SRC_PORT_TCP = 1 << 4, + XSC_RX_HASH_DST_PORT_TCP = 1 << 5, + XSC_RX_HASH_SRC_PORT_UDP = 1 << 6, + XSC_RX_HASH_DST_PORT_UDP = 1 << 7, + XSC_RX_HASH_IPSEC_SPI = 1 << 8, + /* Save bits for future fields */ + XSC_RX_HASH_INNER = (1UL << 31), +}; + +struct xsc_ib_rss_caps { + __aligned_u64 rx_hash_fields_mask; /* enum xsc_rx_hash_fields */ + __u8 rx_hash_function; /* enum xsc_rx_hash_function_flags */ + __u8 reserved[7]; +}; + +enum xsc_ib_cqe_comp_res_format { + XSC_IB_CQE_RES_FORMAT_HASH = 1 << 0, + XSC_IB_CQE_RES_FORMAT_CSUM = 1 << 1, + XSC_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2, +}; + +struct xsc_ib_cqe_comp_caps { + __u32 max_num; + __u32 supported_format; /* enum xsc_ib_cqe_comp_res_format */ +}; + +enum xsc_ib_packet_pacing_cap_flags { + XSC_IB_PP_SUPPORT_BURST = 1 << 0, +}; + +struct xsc_packet_pacing_caps { + __u32 qp_rate_limit_min; + __u32 qp_rate_limit_max; /* In kpbs */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; + __u8 cap_flags; /* enum xsc_ib_packet_pacing_cap_flags */ + __u8 reserved[3]; +}; + +enum xsc_ib_mpw_caps { + MPW_RESERVED = 1 << 0, + XSC_IB_ALLOW_MPW = 1 << 1, + XSC_IB_SUPPORT_EMPW = 1 << 2, +}; + +enum xsc_ib_sw_parsing_offloads { + XSC_IB_SW_PARSING = 1 << 0, + XSC_IB_SW_PARSING_CSUM = 1 << 1, + XSC_IB_SW_PARSING_LSO = 1 << 2, +}; + +struct xsc_ib_sw_parsing_caps { + __u32 sw_parsing_offloads; /* enum xsc_ib_sw_parsing_offloads */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; +}; + +struct xsc_ib_striding_rq_caps { + __u32 min_single_stride_log_num_of_bytes; + __u32 max_single_stride_log_num_of_bytes; + __u32 min_single_wqe_log_num_of_strides; + __u32 max_single_wqe_log_num_of_strides; + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. 
+ * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; + __u32 reserved; +}; + +enum xsc_ib_query_dev_resp_flags { + /* Support 128B CQE compression */ + XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, + XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1, +}; + +enum xsc_ib_tunnel_offloads { + XSC_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0, + XSC_IB_TUNNELED_OFFLOADS_GRE = 1 << 1, + XSC_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2, + XSC_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3, + XSC_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4, +}; + +struct xsc_ib_query_device_resp { + __u32 comp_mask; + __u32 response_length; + struct xsc_ib_tso_caps tso_caps; + struct xsc_ib_rss_caps rss_caps; + struct xsc_ib_cqe_comp_caps cqe_comp_caps; + struct xsc_packet_pacing_caps packet_pacing_caps; + __u32 xsc_ib_support_multi_pkt_send_wqes; + __u32 flags; /* Use enum xsc_ib_query_dev_resp_flags */ + struct xsc_ib_sw_parsing_caps sw_parsing_caps; + struct xsc_ib_striding_rq_caps striding_rq_caps; + __u32 tunnel_offloads_caps; /* enum xsc_ib_tunnel_offloads */ + __u32 reserved; +}; + +#endif /* XSC_IB_USER_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib.h b/drivers/infiniband/hw/xsc/xsc_ib.h new file mode 100644 index 0000000000000000000000000000000000000000..0753b3ba1c328aaefc7a8ef64b4b886114502e2a --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib.h @@ -0,0 +1,627 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_IB_H +#define XSC_IB_H + +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" +#include +#include +#include +#include +#include + +#include "xsc_ib_compat.h" + +#define xsc_ib_dbg(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_err(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_ERR) \ + pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_warn(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_WARN) \ + pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_info(dev, format, arg...) 
\ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_INFO) \ + pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +struct xsc_ib_ucontext { + struct ib_ucontext ibucontext; + struct list_head db_page_list; + + /* protect doorbell record alloc/free + */ + struct mutex db_page_mutex; +}; + +#define field_avail(type, fld, sz) (offsetof(type, fld) + \ + sizeof(((type *)0)->fld) <= (sz)) + +static inline struct xsc_ib_ucontext *to_xucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct xsc_ib_ucontext, ibucontext); +} + +struct xsc_ib_pd { + struct ib_pd ibpd; + u32 pdn; + u32 pa_lkey; +}; + +/* Use macros here so that don't have to duplicate + * enum ib_send_flags and enum ib_qp_type for low-level driver + */ + +#define XSC_IB_QPT_REG_UMR IB_QPT_RESERVED1 + +enum { + XSC_PAGE_SHIFT_4K = 12, + XSC_PAGE_SHIFT_64K = 16, + XSC_PAGE_SHIFT_2M = 21, + XSC_PAGE_SHIFT_1G = 30, +}; + +enum { + XSC_PAGE_MODE_4K = 0, + XSC_PAGE_MODE_64K = 1, + XSC_PAGE_MODE_2M = 2, + XSC_PAGE_MODE_1G = 3, +}; + +struct wr_list { + u16 opcode; + u16 next; +}; + +struct xsc_ib_wq { + u64 *wrid; + u32 *wr_data; + struct wr_list *w_list; + unsigned long *wqe_head; + u16 unsig_count; + + /* serialize post to the work queue + */ + spinlock_t lock; + int wqe_cnt; + int ds_cnt; + int max_post; + int max_gs; + int offset; + int wqe_shift; + unsigned int head; + unsigned int tail; + u16 cur_post; + u16 last_poll; + void *qend; + void *hdr_buf; + u32 hdr_size; + dma_addr_t hdr_dma; + int mad_queue_depth; + int mad_index; +}; + +enum { + XSC_QP_USER, + XSC_QP_KERNEL, + XSC_QP_EMPTY +}; + +struct xsc_ib_qp { + struct ib_qp ibqp; + struct xsc_core_qp xqp; + struct xsc_buf buf; + + struct xsc_db db; + struct xsc_ib_wq rq; + + u32 doorbell_qpn; + u8 sq_signal_bits; + u8 fm_cache; + int sq_max_wqes_per_wr; + int sq_spare_wqes; + struct xsc_ib_wq sq; + + struct ib_umem *umem; + int buf_size; + + /* serialize qp state modifications + */ + struct mutex mutex; + u16 xrcdn; + u32 flags; + u8 port; + u8 alt_port; + u8 atomic_rd_en; + u8 resp_depth; + u8 state; + int xsc_type; + int wq_sig; + int scat_cqe; + int max_inline_data; + int has_rq; + + int create_type; + u32 pa_lkey; + /* For QP1 */ + struct ib_ud_header qp1_hdr; + u32 send_psn; + struct xsc_qp_context ctx; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + /* For qp resources */ + spinlock_t lock; +}; + +struct xsc_ib_cq_buf { + struct xsc_buf buf; + struct ib_umem *umem; + int cqe_size; +}; + +enum xsc_ib_qp_flags { + XSC_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0, + XSC_IB_QP_SIGNATURE_HANDLING = 1 << 1, +}; + +struct xsc_shared_mr_info { + int mr_id; + struct ib_umem *umem; +}; + +struct xsc_ib_cq { + struct ib_cq ibcq; + struct xsc_core_cq xcq; + struct xsc_ib_cq_buf buf; + struct xsc_db db; + + /* serialize access to the CQ + */ + spinlock_t lock; + + /* protect resize cq + */ + struct mutex resize_mutex; + struct xsc_ib_cq_resize *resize_buf; + struct ib_umem *resize_umem; + int cqe_size; +}; + +struct xsc_ib_xrcd { + struct ib_xrcd ibxrcd; + u32 xrcdn; +}; + +struct xsc_ib_peer_id; + +struct xsc_ib_mr { + struct ib_mr ibmr; + struct xsc_core_mr mmr; + struct ib_umem *umem; + struct xsc_shared_mr_info *smr_info; + struct list_head list; + int order; + __be64 *pas; + dma_addr_t dma; + int npages; + struct completion done; + enum ib_wc_status status; + struct xsc_ib_peer_id *peer_id; + atomic_t invalidated; + struct completion invalidation_comp; +}; + +struct xsc_ib_peer_id { + struct 
completion comp; + struct xsc_ib_mr *mr; +}; + +struct xsc_cache_ent { + struct list_head head; + /* sync access to the cahce entry + */ + spinlock_t lock; + + struct dentry *dir; + char name[4]; + u32 order; + u32 size; + u32 cur; + u32 miss; + u32 limit; + + struct dentry *fsize; + struct dentry *fcur; + struct dentry *fmiss; + struct dentry *flimit; + + struct xsc_ib_dev *dev; + struct work_struct work; + struct delayed_work dwork; +}; + +struct xsc_mr_cache { + struct workqueue_struct *wq; + struct xsc_cache_ent ent[MAX_MR_CACHE_ENTRIES]; + int stopped; + struct dentry *root; + unsigned long last_add; +}; + +struct xsc_gid { + u8 data[16]; +}; + +struct xsc_sgid_tbl { + struct xsc_gid *tbl; + u32 max; + u32 count; +}; + +struct xsc_ib_res { + struct xsc_sgid_tbl sgid_tbl; +}; + +struct xsc_ib_resources { + struct ib_cq *c0; + struct ib_xrcd *x0; + struct ib_xrcd *x1; + struct ib_pd *p0; + struct ib_srq *s0; +}; + +struct xsc_ib_dev { + struct ib_device ib_dev; + struct uverbs_object_tree_def *driver_trees[6]; + struct net_device *netdev; + struct xsc_core_device *xdev; + XSC_DECLARE_DOORBELL_LOCK(uar_lock); + struct list_head eqs_list; + int num_ports; + int num_comp_vectors; + /* serialize update of capability mask + */ + struct mutex cap_mask_mutex; + u8 ib_active; + /* sync used page count stats + */ + spinlock_t mr_lock; + struct xsc_ib_res ib_res; + struct xsc_ib_resources devr; + struct xsc_mr_cache cache; + u32 crc_32_table[256]; + int cm_pcp; + int cm_dscp; + int force_pcp; + int force_dscp; + int iommu_state; + struct notifier_block nb; +}; + +union xsc_ib_fw_ver { + u64 data; + struct { + u8 ver_major; + u8 ver_minor; + u16 ver_patch; + u32 ver_tweak; + } s; +}; + +struct xsc_pa_chunk { + struct list_head list; + u64 va; + dma_addr_t pa; + size_t length; +}; + +static inline struct xsc_ib_cq *to_xibcq(struct xsc_core_cq *xcq) +{ + return container_of(xcq, struct xsc_ib_cq, xcq); +} + +static inline struct xsc_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) +{ + return container_of(ibxrcd, struct xsc_ib_xrcd, ibxrcd); +} + +static inline struct xsc_ib_dev *to_mdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct xsc_ib_dev, ib_dev); +} + +static inline struct xsc_ib_cq *to_xcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct xsc_ib_cq, ibcq); +} + +static inline struct xsc_ib_qp *to_xibqp(struct xsc_core_qp *xqp) +{ + return container_of(xqp, struct xsc_ib_qp, xqp); +} + +static inline struct xsc_ib_pd *to_mpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct xsc_ib_pd, ibpd); +} + +static inline struct xsc_ib_qp *to_xqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct xsc_ib_qp, ibqp); +} + +static inline struct xsc_ib_mr *to_mmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct xsc_ib_mr, ibmr); +} + +struct xsc_ib_ah { + struct ib_ah ibah; + struct xsc_av av; +}; + +static inline struct xsc_ib_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct xsc_ib_ah, ibah); +} + +static inline struct xsc_ib_dev *xdev2ibdev(struct xsc_core_device *xdev) +{ + return container_of((void *)xdev, struct xsc_ib_dev, xdev); +} + +int xsc_ib_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props); + +int xsc_ib_create_qp(struct ib_qp *ibqp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); + +void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn); +void xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn); + +int xsc_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); +int xsc_ib_modify_qp(struct 
ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); + +int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad_wr); +int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr); + +void *xsc_get_send_wqe(struct xsc_ib_qp *qp, int n); +int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); +struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc); +struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata); +int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); +void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, + int *ncont, int *order); +void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int npages, bool need_to_devide); +const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void); + +int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset); +int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr); +int xsc_wr_invalidate_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr); +int xsc_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, + unsigned long addr, int *npage, int *shift, u64 **pas); + +void xsc_ib_drain_rq(struct ib_qp *qp); +void xsc_ib_drain_sq(struct ib_qp *qp); + +static inline void init_query_mad(struct ib_smp *mad) +{ + mad->base_version = 1; + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + mad->class_version = 1; + mad->method = IB_MGMT_METHOD_GET; +} + +static inline u8 convert_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? XSC_PERM_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? XSC_PERM_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? XSC_PERM_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? XSC_PERM_LOCAL_WRITE : 0) | + XSC_PERM_LOCAL_READ; +} + +static inline enum ib_mtu xsc_net_to_ib_mtu(unsigned int mtu) +{ + mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES + + IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES + + IB_ICRC_BYTES); + + if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096)) + return IB_MTU_4096; + else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024)) + return IB_MTU_1024; + else + return 0; +} + +/** + * UDP source port selection must adhere IANA port allocation ranges. Thus + * we will be using IANA recommendation for Ephemeral port range of: + * 49152-65535, or in hex: 0xC000-0xFFFF. + */ +#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) +#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF) +#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) + +/** + * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based + * on the flow_label + * + * This function will convert the 20 bit flow_label input to a valid RoCE v2 + * UDP src port 14 bit value. All RoCE V2 drivers should use this same + * convention. 
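+ *
+ * Worked example: the fold below XORs the top 6 bits of the flow label
+ * into its low 14 bits and ORs in the ephemeral-port base, so a
+ * flow_label of 0x12345 maps to (0x2345 ^ 0x4) | 0xC000 = 0xE341.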
+ */ +static inline u16 xsc_flow_label_to_udp_sport(u32 fl) +{ + u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000; + + fl_low ^= fl_high >> 14; + return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN); +} + +#define XSC_IB_IOMMU_MAP_DISABLE 0 +#define XSC_IB_IOMMU_MAP_UNKNOWN_DOMAIN 1 +#define XSC_IB_IOMMU_MAP_NORMAL 2 + +static inline int xsc_ib_iommu_dma_map(struct ib_device *ibdev) +{ + return to_mdev(ibdev)->iommu_state; +} + +static inline void *xsc_ib_iova_to_virt(struct ib_device *ibdev, dma_addr_t iova) +{ + phys_addr_t phyaddr; + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(ibdev->dma_device); + if (likely(domain)) { + phyaddr = iommu_iova_to_phys(domain, iova); + phyaddr |= iova & (PAGE_SIZE - 1); + } else { + phyaddr = dma_to_phys(ibdev->dma_device, iova); + } + + return phys_to_virt(phyaddr); +} + +struct ib_mad_list_head { + struct list_head list; + struct ib_cqe cqe; + struct ib_mad_queue *mad_queue; +}; + +#define IB_MAD_SEND_REQ_MAX_SG 2 +struct ib_mad_send_wr_private { + struct ib_mad_list_head mad_list; + struct list_head agent_list; + struct ib_mad_agent_private *mad_agent_priv; + struct ib_mad_send_buf send_buf; + u64 header_mapping; + u64 payload_mapping; + struct ib_ud_wr send_wr; + struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; + __be64 tid; + unsigned long timeout; + int max_retries; + int retries_left; + int retry; + int refcount; + enum ib_wc_status status; + + /* RMPP control */ + struct list_head rmpp_list; + struct ib_rmpp_segment *last_ack_seg; + struct ib_rmpp_segment *cur_seg; + int last_ack; + int seg_num; + int newwin; + int pad; +}; + +struct ib_mad_private_header { + struct ib_mad_list_head mad_list; + struct ib_mad_recv_wc recv_wc; + struct ib_wc wc; + u64 mapping; +} __packed; + +struct ib_mad_private { + struct ib_mad_private_header header; + size_t mad_size; + struct ib_grh grh; + u8 mad[]; +} __packed; + +static inline void *xsc_ib_send_mad_sg_virt_addr(struct ib_device *ibdev, + const struct ib_send_wr *wr, + int sg) +{ + struct ib_mad_send_wr_private *mad_send_wr; + struct ib_mad_list_head *mad_list; + int iommu_state = xsc_ib_iommu_dma_map(ibdev); + + /* direct dma mapping */ + if (!iommu_state) + return phys_to_virt(dma_to_phys(ibdev->dma_device, wr->sg_list[sg].addr)); + + if (iommu_state == XSC_IB_IOMMU_MAP_NORMAL) + return xsc_ib_iova_to_virt(ibdev, wr->sg_list[sg].addr); + + mad_list = container_of(wr->wr_cqe, struct ib_mad_list_head, cqe); + mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, + mad_list); + + /* sg_list[] */ + if (sg == 0) + return mad_send_wr->send_buf.mad; + + /* sg_list[1] */ + if (mad_send_wr->send_buf.seg_count) + return ib_get_rmpp_segment(&mad_send_wr->send_buf, + mad_send_wr->seg_num); + return mad_send_wr->send_buf.mad + mad_send_wr->send_buf.hdr_len; +} + +static inline void *xsc_ib_recv_mad_sg_virt_addr(struct ib_device *ibdev, + struct ib_wc *wc, + u64 sg_addr) +{ + struct ib_mad_private_header *mad_priv_hdr; + struct ib_mad_private *recv; + struct ib_mad_list_head *mad_list; + int iommu_state = xsc_ib_iommu_dma_map(ibdev); + + /* direct dma mapping */ + if (!iommu_state) + return phys_to_virt(dma_to_phys(ibdev->dma_device, sg_addr)); + + if (iommu_state == XSC_IB_IOMMU_MAP_NORMAL) + return xsc_ib_iova_to_virt(ibdev, sg_addr); + + mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); + mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); + recv = container_of(mad_priv_hdr, struct ib_mad_private, header); + return &recv->grh; +} + 
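+/*
+ * Informative note on xsc_net_to_ib_mtu() above: it subtracts the RoCE v2
+ * per-packet overhead (GRH + UDP + BTH + XRC ext + AtomicETH ext + ICRC)
+ * from the netdev MTU before rounding down to an IB MTU enum. Assuming the
+ * usual ib_pack.h byte counts (40 + 8 + 12 + 4 + 28 + 4 = 96), a 1500-byte
+ * Ethernet MTU leaves 1404 bytes and maps to IB_MTU_1024, while a
+ * 4200-byte MTU maps to IB_MTU_4096.
+ */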
+#endif /* XSC_IB_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib_compat.h b/drivers/infiniband/hw/xsc/xsc_ib_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..9d43cfd5d41c362a7cc4704c009ab8bd2a13c0f8 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib_compat.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_IB_COMPAT_H +#define XSC_IB_COMPAT_H + +/* + * adaptive to different ib_core versions + */ + +struct xsc_ib_ucontext; + +int xsc_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *ah_attr, + struct ib_udata *udata); +#define xsc_ib_create_ah_def() int xsc_ib_create_ah(\ + struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) + +int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags); +#define xsc_ib_destroy_ah_def() int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags) +int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); +int xsc_ib_create_cq(struct ib_cq *ibcq, + const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); + +// from main.c static functions +int xsc_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); +int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); + +int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#define xsc_ib_dealloc_pd_def() int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) + +int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +#define xsc_ib_destroy_cq_def() int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) + +#define xsc_ib_destroy_qp_def() int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) +#define xsc_ib_create_cq_def() int xsc_ib_create_cq(struct ib_cq *ibcq,\ + const struct ib_cq_init_attr *attr, struct ib_udata *udata) +#define xsc_ib_dereg_mr_def() int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +#define xsc_ib_alloc_ucontext_def() int xsc_ib_alloc_ucontext(\ + struct ib_ucontext *uctx, struct ib_udata *udata) +#define xsc_ib_dealloc_ucontext_def() void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +#define xsc_ib_alloc_pd_def() int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) + +#define RET_VALUE(x) (x) + +#ifdef IB_ALLOC_MR_HAVE_UDATA +struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata); +#define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ + struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata) +#else +struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg); +#define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ + struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) +#endif + +#endif diff --git a/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..f94f76394b2d74f7b34f4396f926da6e42699a10 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_cmd.h" +#include "xsc_ib.h" + +static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) +{ + struct ib_device *ib_dev = container_of(device, struct ib_device, dev); + struct xsc_core_device *dev = to_mdev(ib_dev)->xdev; + struct pci_dev *pdev = dev->pdev; + + return sprintf(buf, "%x\n", pdev->subsystem_device); +} + +static DEVICE_ATTR_RO(hca_type); + +static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, char *buf) +{ + struct ib_device *ib_dev = container_of(device, struct ib_device, dev); + struct xsc_core_device *dev = to_mdev(ib_dev)->xdev; + u32 hw_ver = 0; + + hw_ver = ((dev->chip_ver_l & 0xffff) << 16) | + (dev->hotfix_num & 0xffff); + return sprintf(buf, "0x%x\n", hw_ver); +} + +static DEVICE_ATTR_RO(hw_rev); + +static struct device_attribute *xsc_ib_attributes[] = { + &dev_attr_hca_type, + &dev_attr_hw_rev, +}; + +void xsc_ib_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + int err = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_ib_attributes); i++) { + err = device_create_file(&ib_dev->dev, xsc_ib_attributes[i]); + if (err) + xsc_core_err(xdev, "Create sysfs file for %s failed.\n", + xsc_ib_attributes[i]->attr.name); + } +} + +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_ib_attributes); i++) + device_remove_file(&ib_dev->dev, xsc_ib_attributes[i]); +} diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..dcf934b61e9bf48154f04a6958b5190519a0885d --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "xsc_ib.h" + +#define XSC_RDMA_CTRL_NAME "rdma_ctrl" + +static void encode_cc_cmd_enable_rp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_enable_rp *cc_cmd = (struct xsc_cc_cmd_enable_rp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_enable_np(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_enable_np *cc_cmd = (struct xsc_cc_cmd_enable_np *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_init_alpha(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_init_alpha *cc_cmd = (struct xsc_cc_cmd_init_alpha *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->alpha = __cpu_to_be32(cc_cmd->alpha); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_g(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_g *cc_cmd = (struct xsc_cc_cmd_g *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->g = __cpu_to_be32(cc_cmd->g); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_ai(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_ai *cc_cmd = (struct xsc_cc_cmd_ai *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->ai = __cpu_to_be32(cc_cmd->ai); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_hai(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_hai *cc_cmd = (struct xsc_cc_cmd_hai *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->hai = __cpu_to_be32(cc_cmd->hai); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_th(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_th *cc_cmd = (struct xsc_cc_cmd_th *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->threshold = __cpu_to_be32(cc_cmd->threshold); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_bc(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_bc *cc_cmd = (struct xsc_cc_cmd_bc *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bytecount = __cpu_to_be32(cc_cmd->bytecount); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_cnp_opcode(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_opcode *cc_cmd = (struct xsc_cc_cmd_cnp_opcode *)data; + + cc_cmd->opcode = __cpu_to_be32(cc_cmd->opcode); +} + +static void encode_cc_cmd_cnp_bth_b(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_bth_b *cc_cmd = (struct xsc_cc_cmd_cnp_bth_b *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bth_b = __cpu_to_be32(cc_cmd->bth_b); +} + +static void encode_cc_cmd_cnp_bth_f(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_bth_f *cc_cmd = (struct xsc_cc_cmd_cnp_bth_f *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bth_f = __cpu_to_be32(cc_cmd->bth_f); 
+} + +static void encode_cc_cmd_cnp_ecn(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_ecn *cc_cmd = (struct xsc_cc_cmd_cnp_ecn *)data; + + cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); +} + +static void encode_cc_cmd_data_ecn(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_data_ecn *cc_cmd = (struct xsc_cc_cmd_data_ecn *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); +} + +static void encode_cc_cmd_cnp_tx_interval(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_tx_interval *cc_cmd = (struct xsc_cc_cmd_cnp_tx_interval *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->interval = __cpu_to_be32(cc_cmd->interval); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_evt_rsttime(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_evt_rsttime *cc_cmd = + (struct xsc_cc_cmd_evt_rsttime *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->period = __cpu_to_be32(cc_cmd->period); +} + +static void encode_cc_cmd_cnp_dscp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_dscp *cc_cmd = (struct xsc_cc_cmd_cnp_dscp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->dscp = __cpu_to_be32(cc_cmd->dscp); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_cnp_pcp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_pcp *cc_cmd = (struct xsc_cc_cmd_cnp_pcp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->pcp = __cpu_to_be32(cc_cmd->pcp); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_evt_period_alpha(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_evt_period_alpha *cc_cmd = (struct xsc_cc_cmd_evt_period_alpha *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->period = __cpu_to_be32(cc_cmd->period); +} + +static void encode_cc_cmd_clamp_tgt_rate(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_clamp_tgt_rate *cc_cmd = (struct xsc_cc_cmd_clamp_tgt_rate *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->clamp_tgt_rate = __cpu_to_be32(cc_cmd->clamp_tgt_rate); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_max_hai_factor(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_max_hai_factor *cc_cmd = (struct xsc_cc_cmd_max_hai_factor *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->max_hai_factor = __cpu_to_be32(cc_cmd->max_hai_factor); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_scale(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_scale *cc_cmd = (struct xsc_cc_cmd_scale *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->scale = __cpu_to_be32(cc_cmd->scale); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_get_cfg(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void decode_cc_get_cfg(void *data) +{ + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; + + cc_cmd->cmd = 
__be16_to_cpu(cc_cmd->cmd); + cc_cmd->len = __be16_to_cpu(cc_cmd->len); + cc_cmd->enable_rp = __be32_to_cpu(cc_cmd->enable_rp); + cc_cmd->enable_np = __be32_to_cpu(cc_cmd->enable_np); + cc_cmd->init_alpha = __be32_to_cpu(cc_cmd->init_alpha); + cc_cmd->g = __be32_to_cpu(cc_cmd->g); + cc_cmd->ai = __be32_to_cpu(cc_cmd->ai); + cc_cmd->hai = __be32_to_cpu(cc_cmd->hai); + cc_cmd->threshold = __be32_to_cpu(cc_cmd->threshold); + cc_cmd->bytecount = __be32_to_cpu(cc_cmd->bytecount); + cc_cmd->opcode = __be32_to_cpu(cc_cmd->opcode); + cc_cmd->bth_b = __be32_to_cpu(cc_cmd->bth_b); + cc_cmd->bth_f = __be32_to_cpu(cc_cmd->bth_f); + cc_cmd->cnp_ecn = __be32_to_cpu(cc_cmd->cnp_ecn); + cc_cmd->data_ecn = __be32_to_cpu(cc_cmd->data_ecn); + cc_cmd->cnp_tx_interval = __be32_to_cpu(cc_cmd->cnp_tx_interval); + cc_cmd->evt_period_rsttime = __be32_to_cpu(cc_cmd->evt_period_rsttime); + cc_cmd->cnp_dscp = __be32_to_cpu(cc_cmd->cnp_dscp); + cc_cmd->cnp_pcp = __be32_to_cpu(cc_cmd->cnp_pcp); + cc_cmd->evt_period_alpha = __be32_to_cpu(cc_cmd->evt_period_alpha); + cc_cmd->clamp_tgt_rate = __be32_to_cpu(cc_cmd->clamp_tgt_rate); + cc_cmd->max_hai_factor = __be32_to_cpu(cc_cmd->max_hai_factor); + cc_cmd->scale = __be32_to_cpu(cc_cmd->scale); + cc_cmd->section = __be32_to_cpu(cc_cmd->section); +} + +static void encode_cc_get_stat(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_get_stat *cc_cmd = (struct xsc_cc_cmd_get_stat *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void decode_cc_get_stat(void *data) +{ + struct xsc_cc_cmd_stat *cc_cmd = (struct xsc_cc_cmd_stat *)data; + + cc_cmd->cnp_handled = __be32_to_cpu(cc_cmd->cnp_handled); + cc_cmd->alpha_recovery = __be32_to_cpu(cc_cmd->alpha_recovery); + cc_cmd->reset_timeout = __be32_to_cpu(cc_cmd->reset_timeout); + cc_cmd->reset_bytecount = __be32_to_cpu(cc_cmd->reset_bytecount); +} + +static int xsc_priv_dev_ioctl_get_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->force_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->force_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *req = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_dscp = req->dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_cma_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct 
xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_pcp *resp = (struct xsc_ioctl_cma_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->cm_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_cma_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_dscp *resp = (struct xsc_ioctl_cma_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->cm_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_cma_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_pcp *req = (struct xsc_ioctl_cma_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->cm_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_cma_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_dscp *req = (struct xsc_ioctl_cma_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->cm_dscp = req->dscp; + return 0; +} + +static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, u16 expect_resp_size, + void (*encode)(void *, u32), void (*decode)(void *)) +{ + struct xsc_cc_mbox_in *in; + struct xsc_cc_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(struct xsc_cc_mbox_in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(struct xsc_cc_mbox_out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + int opcode, ret = 0; + struct xsc_ioctl_attr *hdr; + + hdr = (struct xsc_ioctl_attr *)in; + opcode = hdr->opcode; + switch (opcode) { + case XSC_IOCTL_GET_FORCE_PCP: + ret = xsc_priv_dev_ioctl_get_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_DSCP: + ret = xsc_priv_dev_ioctl_get_force_dscp(xdev, in, out); + break; + case XSC_IOCTL_GET_CMA_PCP: + ret = xsc_priv_dev_ioctl_get_cma_pcp(xdev, in, out); + break; + case XSC_IOCTL_GET_CMA_DSCP: + ret = xsc_priv_dev_ioctl_get_cma_dscp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_PCP: + xsc_core_dbg(xdev, "setting global pcp\n"); + ret = xsc_priv_dev_ioctl_set_force_pcp(xdev, in, out); + 
break; + case XSC_IOCTL_SET_FORCE_DSCP: + xsc_core_dbg(xdev, "setting global dscp\n"); + ret = xsc_priv_dev_ioctl_set_force_dscp(xdev, in, out); + break; + case XSC_IOCTL_SET_CMA_PCP: + ret = xsc_priv_dev_ioctl_set_cma_pcp(xdev, in, out); + break; + case XSC_IOCTL_SET_CMA_DSCP: + ret = xsc_priv_dev_ioctl_set_cma_dscp(xdev, in, out); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_hdr *in; + int in_size; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + switch (hdr.attr.opcode) { + case XSC_IOCTL_GET_FORCE_PCP: + case XSC_IOCTL_GET_FORCE_DSCP: + case XSC_IOCTL_SET_FORCE_PCP: + case XSC_IOCTL_SET_FORCE_DSCP: + case XSC_IOCTL_GET_CMA_PCP: + case XSC_IOCTL_GET_CMA_DSCP: + case XSC_IOCTL_SET_CMA_PCP: + case XSC_IOCTL_SET_CMA_DSCP: + break; + default: + return -EINVAL; + } + in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + in->attr.opcode = hdr.attr.opcode; + in->attr.length = hdr.attr.length; + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + err = _rdma_ctrl_exec_ioctl(xdev, &in->attr, (in_size - sizeof(u32)), in->attr.data, + hdr.attr.length); + in->attr.error = err; + if (copy_to_user(user_hdr, in, in_size)) + err = -EFAULT; + kvfree(in); + return err; +} + +static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_enable_rp), + 0, encode_cc_cmd_enable_rp, NULL); + case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_enable_np), + 0, encode_cc_cmd_enable_np, NULL); + case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_init_alpha), + 0, encode_cc_cmd_init_alpha, NULL); + case XSC_CMD_OP_IOCTL_SET_G: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_g), + 0, encode_cc_cmd_g, NULL); + case XSC_CMD_OP_IOCTL_SET_AI: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_ai), + 0, encode_cc_cmd_ai, NULL); + case XSC_CMD_OP_IOCTL_SET_HAI: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_hai), + 0, encode_cc_cmd_hai, NULL); + case XSC_CMD_OP_IOCTL_SET_TH: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_th), + 0, encode_cc_cmd_th, NULL); + case XSC_CMD_OP_IOCTL_SET_BC_TH: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_bc), + 0, encode_cc_cmd_bc, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_opcode), + 0, encode_cc_cmd_cnp_opcode, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_bth_b), + 
0, encode_cc_cmd_cnp_bth_b, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_bth_f), + 0, encode_cc_cmd_cnp_bth_f, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_ECN: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_ecn), + 0, encode_cc_cmd_cnp_ecn, NULL); + case XSC_CMD_OP_IOCTL_SET_DATA_ECN: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_data_ecn), + 0, encode_cc_cmd_data_ecn, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_tx_interval), + 0, encode_cc_cmd_cnp_tx_interval, NULL); + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_evt_rsttime), + 0, encode_cc_cmd_evt_rsttime, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_dscp), + 0, encode_cc_cmd_cnp_dscp, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_PCP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_pcp), + 0, encode_cc_cmd_cnp_pcp, NULL); + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_evt_period_alpha), + 0, encode_cc_cmd_evt_period_alpha, NULL); + case XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_clamp_tgt_rate), + 0, encode_cc_cmd_clamp_tgt_rate, NULL); + case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_max_hai_factor), + 0, encode_cc_cmd_max_hai_factor, NULL); + case XSC_CMD_OP_IOCTL_SET_SCALE: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_scale), + 0, encode_cc_cmd_scale, NULL); + case XSC_CMD_OP_IOCTL_GET_CC_CFG: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_cfg), + sizeof(struct xsc_cc_cmd_get_cfg), + encode_cc_get_cfg, decode_cc_get_cfg); + case XSC_CMD_OP_IOCTL_GET_CC_STAT: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_stat), + sizeof(struct xsc_cc_cmd_stat), + encode_cc_get_stat, decode_cc_get_stat); + default: + return -EINVAL; + } + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + goto err_exit; + } + + xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length); + + if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out, hdr.attr.length)) + err = -EFAULT; +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int _rdma_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) +{ + struct xsc_core_device *xdev = file->xdev; + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = _rdma_ctrl_ioctl_cmdq(xdev, user_hdr); + break; + case XSC_IOCTL_DRV_GET: + case XSC_IOCTL_DRV_SET: + // TODO refactor to split driver get and set + err = _rdma_ctrl_ioctl_getinfo(xdev, user_hdr); + break; + default: + err = -EFAULT; + break; + } + + return err; +} + +static void _rdma_ctrl_reg_fini(void) +{ + xsc_port_ctrl_cb_dereg(XSC_RDMA_CTRL_NAME); +} + +static int 
_rdma_ctrl_reg_init(void) +{ + int ret; + + ret = xsc_port_ctrl_cb_reg(XSC_RDMA_CTRL_NAME, _rdma_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_RDMA_CTRL_NAME); + + return ret; +} + +void xsc_rdma_ctrl_fini(void) +{ + _rdma_ctrl_reg_fini(); +} + +int xsc_rdma_ctrl_init(void) +{ + return _rdma_ctrl_reg_init(); +} diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..5049377101f9a34c3132ad4b913cbd0b3fa8ec75 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_RDMA_CTRL_H +#define XSC_RDMA_CTRL_H + +void xsc_rdma_ctrl_fini(void); +int xsc_rdma_ctrl_init(void); + +#endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index d6753a9ba00f8122a5348799fb984cbb325b5489..268c84e49194e534d4d702ec4f6c11520632da9d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -86,6 +86,7 @@ source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" +source "drivers/net/ethernet/yunsilicon/Kconfig" config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 5d715f4aff6b9845d0ee80bf1b0be2f39f149b85..423e9edd67771d60498eea0f5ff8d73a56d4cb7b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ +obj-$(CONFIG_NET_VENDOR_YUNSILICON) += yunsilicon/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o diff --git a/drivers/net/ethernet/yunsilicon/Kconfig b/drivers/net/ethernet/yunsilicon/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..a387a8ddeba4b2c5274d131aa6080fa7ba586a66 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config NET_VENDOR_YUNSILICON + bool "Yunsilicon devices" + default y + depends on PCI || NET + depends on ARM64 || X86_64 + help + If you have a network (Ethernet or RDMA) device belonging to this + class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Yunsilicon devices. If you say Y, you will be + asked for your specific card in the following questions. 
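+
+# Example (informative): a typical module build of the XSC stack sets this
+# vendor gate plus the per-driver tristates sourced below, e.g.
+#   CONFIG_NET_VENDOR_YUNSILICON=y
+#   CONFIG_YUNSILICON_XSC_PCI=m
+#   CONFIG_YUNSILICON_XSC_ETH=m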
+ +if NET_VENDOR_YUNSILICON + +source "drivers/net/ethernet/yunsilicon/xsc/net/Kconfig" +source "drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig" + +endif # NET_VENDOR_YUNSILICON diff --git a/drivers/net/ethernet/yunsilicon/Makefile b/drivers/net/ethernet/yunsilicon/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0c603d2bf207e4e589ce7ed68261aff7321879d6 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Makefile for the Yunsilicon device drivers. +# + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc/net/ +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc/pci/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/cq.h b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h new file mode 100644 index 0000000000000000000000000000000000000000..76f0c506444649a12602889936f3c1360ed65c61 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_CORE_CQ_H +#define XSC_CORE_CQ_H + +#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" + +struct xsc_core_cq { + u32 cqn; + int cqe_sz; + u64 arm_db; + u64 ci_db; + struct xsc_core_device *dev; + atomic_t refcount; + struct completion free; + unsigned int vector; + int irqn; + u16 dim_us; + u16 dim_pkts; + void (*comp)(struct xsc_core_cq *cq); + void (*event)(struct xsc_core_cq *cq, enum xsc_event); + u32 cons_index; + unsigned int arm_sn; + struct xsc_rsc_debug *dbg; + int pid; + u32 reg_next_cid; + u32 reg_done_pid; + struct xsc_eq *eq; +}; + +enum { + XSC_CQE_OWNER_MASK = 1, +}; + +enum { + CQE_SIZE_64 = 0, + CQE_SIZE_128 = 1, +}; + +enum { + XSC_CQ_DB_REQ_NOT_SOL = 1, + XSC_CQ_DB_REQ_NOT = 0, +}; + +static inline void xsc_cq_arm(struct xsc_core_cq *cq, u8 solicited) +{ + union xsc_cq_doorbell db; + + db.val = 0; + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + db.arm = solicited; + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. 
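+ *
+ * Note: the 'solicited' argument is written verbatim into db.arm, so
+ * callers are expected to pass one of the XSC_CQ_DB_REQ_NOT* values
+ * defined above (presumably selecting solicited-only vs. any-completion
+ * notification, by analogy with other RoCE HCAs).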
+ */ + wmb(); + writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); +} + +static inline void xsc_cq_set_ci(struct xsc_core_cq *cq) +{ + struct xsc_core_device *xdev = cq->dev; + union xsc_cq_doorbell db; + + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + /* ensure write val visable before doorbell */ + wmb(); + + writel(db.val, REG_ADDR(xdev, cq->ci_db)); +} + +int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_create_cq_mbox_in *in, int inlen); +int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq); +int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_query_cq_mbox_out *out); +int xsc_debug_cq_add(struct xsc_core_device *dev, struct xsc_core_cq *cq); +void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq); + +void xsc_init_cq_table(struct xsc_core_device *dev); +void xsc_cleanup_cq_table(struct xsc_core_device *dev); +#endif /* XSC_CORE_CQ_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/device.h b/drivers/net/ethernet/yunsilicon/xsc/common/device.h new file mode 100644 index 0000000000000000000000000000000000000000..1d1b0be093798ad76fa963b92b02f11a7a3d15ee --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/device.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_DEVICE_H +#define XSC_DEVICE_H + +#include +#include + +enum { + XSC_MAX_COMMANDS = 32, + XSC_CMD_DATA_BLOCK_SIZE = 512, + XSC_PCI_CMD_XPORT = 7, +}; + +enum { + XSC_PERM_LOCAL_READ = 1 << 0, + XSC_PERM_LOCAL_WRITE = 1 << 1, + XSC_PERM_REMOTE_READ = 1 << 2, + XSC_PERM_REMOTE_WRITE = 1 << 3, + XSC_PERM_ATOMIC = 1 << 6, + XSC_PERM_UMR_EN = 1 << 7, +}; + +enum { + XSC_ACCESS_MODE_PA = 0, + XSC_ACCESS_MODE_MTT = 1, + XSC_ACCESS_MODE_KLM = 2 +}; + +enum { + XSC_MKEY_REMOTE_INVAL = 1 << 24, + XSC_MKEY_FLAG_SYNC_UMR = 1 << 29, + XSC_MKEY_BSF_EN = 1 << 30, + XSC_MKEY_LEN64 = 1 << 31, +}; + +enum { + XSC_BF_REGS_PER_PAGE = 4, + XSC_MAX_UAR_PAGES = 1 << 8, + XSC_MAX_UUARS = XSC_MAX_UAR_PAGES * XSC_BF_REGS_PER_PAGE, +}; + +enum { + XSC_DEV_CAP_FLAG_RC = 1LL << 0, + XSC_DEV_CAP_FLAG_UC = 1LL << 1, + XSC_DEV_CAP_FLAG_UD = 1LL << 2, + XSC_DEV_CAP_FLAG_XRC = 1LL << 3, + XSC_DEV_CAP_FLAG_SRQ = 1LL << 6, + XSC_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + XSC_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + XSC_DEV_CAP_FLAG_APM = 1LL << 17, + XSC_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + XSC_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + XSC_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + XSC_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, + XSC_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, + XSC_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, + XSC_DEV_CAP_FLAG_DCT = 1LL << 41, + XSC_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46, +}; + +enum xsc_event { + XSC_EVENT_TYPE_COMP = 0x0, + XSC_EVENT_TYPE_COMM_EST = 0x02,//mad + XSC_EVENT_TYPE_CQ_ERROR = 0x04, + XSC_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + XSC_EVENT_TYPE_INTERNAL_ERROR = 0x08,//tpe私有err,无IB event对应 + XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,//IBV_EVENT_QP_REQ_ERR + XSC_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,//IBV_EVENT_QP_ACCESS_ERR +}; + +struct xsc_cmd_prot_block { + u8 data[XSC_CMD_DATA_BLOCK_SIZE]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 owner_status; //init to 0, dma user should change this val to 1 + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +#define XSC_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum xsc_traffic_types { + XSC_TT_IPV4, + XSC_TT_IPV4_TCP, + XSC_TT_IPV4_UDP, 
+ XSC_TT_IPV6, + XSC_TT_IPV6_TCP, + XSC_TT_IPV6_UDP, + XSC_TT_IPV4_IPSEC_AH, + XSC_TT_IPV6_IPSEC_AH, + XSC_TT_IPV4_IPSEC_ESP, + XSC_TT_IPV6_IPSEC_ESP, + XSC_TT_ANY, + XSC_NUM_TT, +}; + +#define XSC_NUM_INDIR_TIRS XSC_NUM_TT + +enum { + XSC_HASH_FUNC_XOR = 0, + XSC_HASH_FUNC_TOP = 1, + XSC_HASH_FUNC_TOP_SYM = 2, + XSC_HASH_FUNC_RSV = 3, +}; + +enum { + XSC_L3_PROT_TYPE_IPV4 = 1 << 0, + XSC_L3_PROT_TYPE_IPV6 = 1 << 1, +}; + +enum { + XSC_L4_PROT_TYPE_TCP = 1 << 0, + XSC_L4_PROT_TYPE_UDP = 1 << 1, +}; + +struct xsc_tirc_config { + u8 l3_prot_type; + u8 l4_prot_type; + u32 rx_hash_fields; +}; + +static inline u8 hash_func_type(u8 hash_func) +{ + switch (hash_func) { + case ETH_RSS_HASH_TOP: + return XSC_HASH_FUNC_TOP; + case ETH_RSS_HASH_XOR: + return XSC_HASH_FUNC_XOR; + default: + return XSC_HASH_FUNC_TOP; + } +} + +#endif /* XSC_DEVICE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h new file mode 100644 index 0000000000000000000000000000000000000000..6b9fdfb738d8f6947af63353f5f761090df77268 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_DOORBELL_H +#define XSC_DOORBELL_H + +#if BITS_PER_LONG == 64 +/* Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define XSC_DECLARE_DOORBELL_LOCK(name) +#define XSC_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define XSC_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void xsc_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *)val, dest); +} + +#else + +/* Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define XSC_DECLARE_DOORBELL_LOCK(name) spinlock_t name +#define XSC_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define XSC_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void xsc_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32)val[0], dest); + __raw_writel((__force u32)val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* XSC_DOORBELL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/driver.h b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h new file mode 100644 index 0000000000000000000000000000000000000000..03705978a85a68c84a6fa19a7d5ed466bae31efb --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_DRIVER_H +#define XSC_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include "common/device.h" +#include "common/doorbell.h" +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "common/xsc_hsi.h" +#include "common/qpts.h" + +#define LS_64(val, field) (((u64)(val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_64(val, field) ((u64)((val) & field ## _MASK) >> field ## _SHIFT) +#define LS_32(val, field) (((val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_32(val, field) (((val) & field ## _MASK) >> field ## _SHIFT) + +enum { + CMD_OWNER_SW = 0x0, + CMD_OWNER_HW = 0x1, + CMD_STATUS_SUCCESS = 0, +}; + +enum { + XSC_MAX_FW_PORTS = 1, +}; + +enum { + XSC_MAX_IRQ_NAME = 32 +}; + +enum { + XSC_MAX_EQ_NAME = 20 +}; + +enum { + XSC_REG_PCAP = 0x5001, + XSC_REG_PMTU = 0x5003, + XSC_REG_PTYS = 0x5004, + XSC_REG_PAOS = 0x5006, + XSC_REG_PMAOS = 0x5012, + XSC_REG_PUDE = 0x5009, + XSC_REG_PMPE = 0x5010, + XSC_REG_PELC = 0x500e, + XSC_REG_PMLP = 0, /* TBD */ + XSC_REG_NODE_DESC = 0x6001, + XSC_REG_HOST_ENDIANNESS = 0x7004, + XSC_REG_MCIA = 0x9014, +}; + +enum dbg_rsc_type { + XSC_DBG_RSC_QP, + XSC_DBG_RSC_EQ, + XSC_DBG_RSC_CQ, +}; + +struct xsc_field_desc { + struct dentry *dent; + int i; +}; + +struct xsc_rsc_debug { + struct xsc_core_device *xdev; + void *object; + enum dbg_rsc_type type; + struct dentry *root; + struct xsc_field_desc fields[]; +}; + +struct xsc_buf_list { + void *buf; + dma_addr_t map; +}; + +struct xsc_buf { + struct xsc_buf_list direct; + struct xsc_buf_list *page_list; + int nbufs; + int npages; + int page_shift; + int size; +}; + +struct xsc_frag_buf { + struct xsc_buf_list *frags; + int npages; + int size; + u8 page_shift; +}; + +struct xsc_frag_buf_ctrl { + struct xsc_buf_list *frags; + u32 sz_m1; + u16 frag_sz_m1; + u16 strides_offset; + u8 log_sz; + u8 log_stride; + u8 log_frag_strides; +}; + +struct xsc_cq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct xsc_eq { + struct xsc_core_device *dev; + struct xsc_cq_table cq_table; + u32 doorbell;//offset from bar0/2 space start + u32 cons_index; + struct xsc_buf buf; + int size; + unsigned int irqn; + u16 eqn; + int nent; + cpumask_var_t mask; + char name[XSC_MAX_EQ_NAME]; + struct list_head list; + int index; + struct xsc_rsc_debug *dbg; +}; + +struct xsc_core_mr { + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; +}; + +struct xsc_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head comp_eqs_list; + struct xsc_eq pages_eq; + struct xsc_eq async_eq; + struct xsc_eq cmd_eq; + int num_comp_vectors; + int eq_vec_comp_base; + /* protect EQs list + */ + spinlock_t lock; +}; + +struct xsc_irq_info { + cpumask_var_t mask; + char name[XSC_MAX_IRQ_NAME]; +}; + +struct xsc_qp_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct counter_name_map { + int index; + const char *reg_name; +}; + +struct counter_reg_map { + int index; + int reg_addr; +}; + +struct xsc_dev_resource { + struct xsc_qp_table qp_table; + struct xsc_cq_table cq_table; + struct xsc_eq_table eq_table; + struct xsc_irq_info *irq_info; + spinlock_t mkey_lock; /* protect mkey */ + u8 mkey_key; + struct mutex alloc_mutex; /* protect buffer alocation according to numa node */ + int numa_node; + int fw_pages; + int reg_pages; + struct mutex pgdir_mutex; /* protect pgdir_list */ + struct list_head pgdir_list; + struct dentry *qp_debugfs; + struct dentry 
*eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + struct dentry *qptrace_debugfs; + struct dentry *dbg_root; +}; + +struct xsc_db { + __be32 *db; + union { + struct xsc_db_pgdir *pgdir; + struct xsc_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; +}; + +enum { + XSC_COMP_EQ_SIZE = 1024, +}; + +/*replace by struct define in ofed*/ +struct xsc_db_pgdir { + struct list_head list; + unsigned long *bitmap; + __be32 *db_page; + dma_addr_t db_dma; +}; + +static inline void *xsc_buf_offset(struct xsc_buf *buf, int offset) +{ + if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +static inline struct xsc_core_device *pci2xdev(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +extern struct dentry *xsc_debugfs_root; + +static inline void *xsc_vzalloc(unsigned long size) +{ + void *rtn; + + rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!rtn) + rtn = vzalloc(size); + return rtn; +} + +static inline void xsc_vfree(const void *addr) +{ + if (addr && is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +int xsc_dev_init(struct xsc_core_device *xdev); +void xsc_dev_cleanup(struct xsc_core_device *xdev); +int xsc_cmd_init(struct xsc_core_device *xdev); +void xsc_cmd_cleanup(struct xsc_core_device *xdev); +void xsc_cmd_use_events(struct xsc_core_device *xdev); +void xsc_cmd_use_polling(struct xsc_core_device *xdev); +int xsc_cmd_err_handler(struct xsc_core_device *xdev); +void xsc_cmd_resp_handler(struct xsc_core_device *xdev); +int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr); +int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size); +int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, + struct xsc_buf *buf); +void xsc_buf_free(struct xsc_core_device *dev, struct xsc_buf *buf); +int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); +int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); +int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, + struct xsc_register_mr_mbox_in *in, int inlen); +int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr); +void xsc_reg_local_dma_mr(struct xsc_core_device *dev); +int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 *pdn); +int xsc_core_dealloc_pd(struct xsc_core_device *xdev, u32 pdn); +void xsc_register_debugfs(void); +void xsc_unregister_debugfs(void); +int xsc_eq_init(struct xsc_core_device *dev); +void xsc_eq_cleanup(struct xsc_core_device *dev); +struct xsc_eq *xsc_eq_get(struct xsc_core_device *dev, int index); + +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, int npages); +void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages); +void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type); +int xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, + unsigned int *irqn); +void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type); +int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, + int nent, const char *name); +int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq); +int xsc_start_eqs(struct xsc_core_device *dev); +void xsc_stop_eqs(struct xsc_core_device *dev); + +int xsc_qp_debugfs_init(struct xsc_core_device *dev); +void xsc_qp_debugfs_cleanup(struct xsc_core_device 
*dev); +int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write); +int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps); + +int xsc_debug_eq_add(struct xsc_core_device *xdev, struct xsc_eq *eq); +void xsc_debug_eq_remove(struct xsc_core_device *xdev, struct xsc_eq *eq); +int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, + struct xsc_query_eq_mbox_out *out, int outlen); +int xsc_eq_debugfs_init(struct xsc_core_device *dev); +void xsc_eq_debugfs_cleanup(struct xsc_core_device *dev); +int xsc_cq_debugfs_init(struct xsc_core_device *dev); +void xsc_cq_debugfs_cleanup(struct xsc_core_device *dev); + +const char *xsc_command_str(int command); +int xsc_cmdif_debugfs_init(struct xsc_core_device *xdev); +void xsc_cmdif_debugfs_cleanup(struct xsc_core_device *xdev); + +int xsc_qptrace_debugfs_init(struct xsc_core_device *dev); +void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev); + +int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node); +int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, + struct xsc_frag_buf *buf, int node); +void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db); +void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf); + +static inline u32 xsc_mkey_to_idx(u32 mkey) +{ + return mkey >> ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 17 : 18); +} + +static inline u32 xsc_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 17 : 18); +} + +enum { + XSC_PROF_MASK_QP_SIZE = (u64)1 << 0, + XSC_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, + XSC_PROF_MASK_MR_CACHE = (u64)1 << 2, +}; + +#endif /* XSC_DRIVER_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/port.h b/drivers/net/ethernet/yunsilicon/xsc/common/port.h new file mode 100644 index 0000000000000000000000000000000000000000..a44af6c88c0678334898a963ed2bb1ca982edc0f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/port.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_PORT_H__ +#define __XSC_PORT_H__ + +enum xsc_module_id { + XSC_MODULE_ID_SFP = 0x3, + XSC_MODULE_ID_QSFP = 0xC, + XSC_MODULE_ID_QSFP_PLUS = 0xD, + XSC_MODULE_ID_QSFP28 = 0x11, + XSC_MODULE_ID_QSFP_DD = 0x18, + XSC_MODULE_ID_DSFP = 0x1B, + XSC_MODULE_ID_QSFP_PLUS_CMIS = 0x1E, +}; + +#define XSC_EEPROM_MAX_BYTES 32 +#define XSC_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff +#define XSC_I2C_ADDR_LOW 0x50 +#define XSC_I2C_ADDR_HIGH 0x51 +#define XSC_EEPROM_PAGE_LENGTH 256 +#define XSC_EEPROM_HIGH_PAGE_LENGTH 128 + +struct xsc_module_eeprom_query_params { + u16 size; + u16 offset; + u16 i2c_address; + u32 page; + u32 bank; + u32 module_number; +}; + +int xsc_query_module_eeprom(struct xsc_core_device *dev, + u16 offset, u16 size, u8 *data); +int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, + u8 *data); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qp.h b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h new file mode 100644 index 0000000000000000000000000000000000000000..fd3d6ee4a8dfe27ba6ae2999532acd9e8884529f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. 
+ * All rights reserved. + */ + +#ifndef XSC_QP_H +#define XSC_QP_H + +#include "common/xsc_hsi.h" +#include "common/device.h" +#include "common/driver.h" + +enum { + XSC_QP_PM_MIGRATED = 0x3, + XSC_QP_PM_ARMED = 0x0, + XSC_QP_PM_REARM = 0x1 +}; + +enum { + XSC_WQE_CTRL_CQ_UPDATE = 2 << 2, + XSC_WQE_CTRL_SOLICITED = 1 << 1, +}; + +struct xsc_send_wqe_ctrl_seg { + __le32 msg_opcode:8; + __le32 with_immdt:1; + __le32 csum_en:2; + __le32 ds_data_num:5; + __le32 wqe_id:16; + __le32 msg_len; + union { + __le32 opcode_data; + struct { + u8 has_pph:1; + u8 so_type:1; + __le16 so_data_size:14; + u8:8; + u8 so_hdr_len:8; + }; + struct { + __le16 desc_id; + __le16 is_last_wqe:1; + __le16 dst_qp_id:15; + }; + }; + __le32 se:1; + __le32 ce:1; + __le32:30; +}; + +struct xsc_wqe_data_seg { + union { + __le32 in_line:1; + struct { + __le32:1; + __le32 seg_len:31; + __le32 mkey; + __le64 va; + }; + struct { + __le32:1; + __le32 len:7; + u8 in_line_data[15]; + }; + }; +}; + +struct xsc_wqe_ctrl_seg_2 { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + u8 signature; + u8 rsvd[2]; + u8 fm_ce_se; + __be32 imm; +}; + +struct xsc_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + union { + __be16 rlid; + __be16 udp_sport; + }; + u8 reserved0[4]; + u8 rmac[6]; + u8 tclass; + u8 hop_limit; + __be32 grh_gid_fl; + u8 rgid[16]; +}; + +struct xsc_wqe_data_seg_2 { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct xsc_core_qp { + void (*event)(struct xsc_core_qp *qp, int type); + int qpn; + atomic_t refcount; + struct completion free; + struct xsc_rsc_debug *dbg; + int pid; + u16 qp_type; + u16 eth_queue_type; + struct dentry *trace; + struct xsc_qp_trace *trace_info; + u16 qp_type_internal; + u16 grp_id; + u8 mac_id; +}; + +struct xsc_qp_rsc { + struct list_head node; + u32 qpn; + struct completion delayed_release; + struct xsc_core_device *xdev; +}; + +struct xsc_qp_path { + u8 fl; + u8 rsvd3; + u8 free_ar; + u8 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 rsvd1[4]; + u8 sl; + u8 port; + u8 rsvd2[6]; + u8 dmac[6]; + u8 smac[6]; + __be16 af_type; + __be32 sip[4]; + __be32 dip[4]; + __be16 sport; + u8 ecn_dscp; + u8 vlan_valid; + __be16 vlan_id; + u8 dci_cfi_prio_sl; //not left moved yet. 
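+	/* the path fields above carry both IB-style addressing (rlid, rgid, sl) and RoCE addressing (dmac/smac, sip/dip, vlan_id) for a QP */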
+}; + +static inline struct xsc_core_qp *__xsc_qp_lookup(struct xsc_core_device *xdev, u32 qpn) +{ + return radix_tree_lookup(&xdev->dev_res->qp_table.tree, qpn); +} + +int create_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +void destroy_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); + +int xsc_core_create_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp, + struct xsc_create_qp_mbox_in *in, + int inlen); +int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp); +int xsc_core_destroy_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, + struct xsc_query_qp_mbox_out *out, int outlen); + +void xsc_init_qp_table(struct xsc_core_device *xdev); +void xsc_cleanup_qp_table(struct xsc_core_device *xdev); +int xsc_debug_qp_add(struct xsc_core_device *xdev, struct xsc_core_qp *qp); +void xsc_debug_qp_remove(struct xsc_core_device *xdev, struct xsc_core_qp *qp); + +int xsc_create_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); +void xsc_remove_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); + +void xsc_init_delayed_release(void); +void xsc_stop_delayed_release(void); + +int xsc_modify_qp(struct xsc_core_device *xdev, + struct xsc_modify_qp_mbox_in *in, + struct xsc_modify_qp_mbox_out *out, + u32 qpn, u16 status); + +#endif /* XSC_QP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h new file mode 100644 index 0000000000000000000000000000000000000000..57eb829f811b1dd8b4e41f8daf2545caf3565780 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __QPTS_H__ +#define __QPTS_H__ + +struct __packed xsc_qp_trace { + u16 main_ver; + u16 sub_ver; + u32 pid; + u16 qp_type; + u16 af_type; + union { + u32 s_addr4; + u8 s_addr6[16]; + } s_addr; + union { + u32 d_addr4; + u8 d_addr6[16]; + } d_addr; + u16 s_port; + u16 d_port; + u32 affinity_idx; + u64 timestamp; + u32 lqpn; + u32 rqpn; +}; + +struct __packed qpt_update_affinity { + u32 aff_new; + u32 aff_old; +}; + +struct __packed qpt_update_sport { + u16 port_new; + u16 port_old; +}; + +struct __packed qpt_update_data { + u64 timestamp; + u32 qpn; + u32 bus; + u32 dev; + u32 fun; + union { + struct qpt_update_affinity affinity; + struct qpt_update_sport sport; + } update; +}; + +struct __packed xsc_qpt_update_msg { + u16 main_ver; + u16 sub_ver; + u32 type; //0:UPDATE_TYPE_SPORT; 1:UPDATE_TYPE_AFFINITY + struct qpt_update_data data; +}; + +enum { + YS_QPTRACE_UPDATE_TYPE_SPORT = 0, + YS_QPTRACE_UPDATE_TYPE_AFFINITY, +}; + +#define YS_QPTRACE_VER_MAJOR 2 +#define YS_QPTRACE_VER_MINOR 0 + +int qpts_init(void); +void qpts_fini(void); +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h new file mode 100644 index 0000000000000000000000000000000000000000..d259d69f2211023474fb837100032f10eeb7437d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef RES_OBJ_H +#define RES_OBJ_H + +#include +#include +#include "common/xsc_core.h" + +struct xsc_res_obj { + struct list_head node; + struct xsc_bdf_file *file; + void (*release_method)(void *obj); + char *data; + unsigned int datalen; +}; + +struct xsc_pd_obj { + struct xsc_res_obj obj; + unsigned int pdn; +}; + +struct xsc_mr_obj { + struct xsc_res_obj obj; + unsigned int mkey; +}; + +struct xsc_cq_obj { + struct xsc_res_obj obj; + unsigned int cqn; +}; + +struct xsc_qp_obj { + struct xsc_res_obj obj; + unsigned int qpn; +}; + +struct xsc_pct_obj { + struct xsc_res_obj obj; + unsigned int pct_idx; +}; + +struct xsc_wct_obj { + struct xsc_res_obj obj; + unsigned int wct_idx; +}; + +struct xsc_em_obj { + struct xsc_res_obj obj; + unsigned int em_idx[54]; +}; + +struct xsc_flow_pct_v4_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v4_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +enum RES_OBJ_TYPE { + RES_OBJ_PD, + RES_OBJ_MR, + RES_OBJ_CQ, + RES_OBJ_QP, + RES_OBJ_PCT, + RES_OBJ_WCT, + RES_OBJ_EM, + RES_OBJ_MAX +}; + +static inline unsigned long xsc_idx_to_key(unsigned int obj_type, unsigned int idx) +{ + return ((unsigned long)obj_type << 32) | idx; +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, unsigned int pdn, + char *data, unsigned int datalen); +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn); + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, unsigned int mkey, + char *data, unsigned int datalen); +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey); + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen); +void xsc_destroy_cq_obj(struct 
xsc_bdf_file *file, unsigned int cqn); + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen); +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn); + +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen); +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority); + +void xsc_close_bdf_file(struct xsc_bdf_file *file); + +void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/version.h b/drivers/net/ethernet/yunsilicon/xsc/common/version.h new file mode 100644 index 0000000000000000000000000000000000000000..8c7c6e03f5a147afb57981cadda7736cc24b2c5b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/version.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#define BRANCH_VERSION 1 +#define MAJOR_VERSION 2 +#define MINOR_VERSION 0 +#define BUILD_VERSION 367 +#define HOTFIX_NUM 446 diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/vport.h b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h new file mode 100644 index 0000000000000000000000000000000000000000..dad39f12e26590d87560b770da1335db472df7e2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_VPORT_H +#define XSC_VPORT_H + +#include "common/xsc_core.h" +#include +#include "common/xsc_fs.h" + +#define XSC_VPORT_PF_PLACEHOLDER (1u) +#define XSC_VPORT_UPLINK_PLACEHOLDER (1u) +#define XSC_VPORT_ECPF_PLACEHOLDER(dev) (xsc_ecpf_vport_exists(dev) || \ + xsc_core_is_ecpf_esw_manager(dev)) + +#define XSC_SPECIAL_VPORTS(dev) (XSC_VPORT_PF_PLACEHOLDER + \ + XSC_VPORT_UPLINK_PLACEHOLDER + \ + XSC_VPORT_ECPF_PLACEHOLDER(dev)) + +#define XSC_VPORT_MANAGER(dev) (xsc_core_is_vport_manager(dev)) + +enum { + XSC_CAP_INLINE_MODE_L2, + XSC_CAP_INLINE_MODE_VPORT_CONTEXT, + XSC_CAP_INLINE_MODE_NOT_REQUIRED, +}; + +/* Vport number for each function must keep unchanged */ +enum { + XSC_VPORT_PF = 0x0, + XSC_VPORT_FIRST_VF = 0x1, + XSC_VPORT_ECPF = 0xfffe, + XSC_VPORT_UPLINK = 0xffff, +}; + +enum { + XSC_VPORT_ADMIN_STATE_DOWN = 0x0, + XSC_VPORT_ADMIN_STATE_UP = 0x1, + XSC_VPORT_ADMIN_STATE_AUTO = 0x2, +}; + +u8 xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, u16 vport); +int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, u8 other_vport, u8 state); +int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr); +int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr); +int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 *min_inline); +void xsc_query_min_inline(struct xsc_core_device *dev, u8 *min_inline); +int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 min_inline); +int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac); +int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac); +int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu); +int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu); +int 
xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, + u64 *system_image_guid); +int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, + u64 *node_guid); +int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid); +int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid); +int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, + u16 *qkey_viol_cntr); +int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid); +int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey); +int xsc_query_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct xsc_hca_vport_context *rep); +int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev, + u64 *node_guid); +int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size); +int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size); +int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, + u16 vport, + int *promisc, + int *allmcast); +int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev, + bool allmcast_flag, bool promisc_flag, + int allmcast, int promisc); +int xsc_modify_nic_vport_spoofchk(struct xsc_core_device *dev, + u16 vport, int spoofchk); +int xsc_modify_nic_vport_trust(struct xsc_core_device *dev, + u16 vport, bool trust); +int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, + unsigned long *vlans); +int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, + u16 vid, bool add); +int xsc_query_vport_down_stats(struct xsc_core_device *dev, u16 vport, + u8 other_vport, u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down); +int xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz); +int xsc_modify_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req); +int xsc_modify_vport_max_rate(struct xsc_core_device *dev, + u16 vport, u32 rate); + +u16 xsc_eswitch_get_total_vports(const struct xsc_core_device *dev); +int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, + int inlen); +int __xsc_query_nic_vport_context(struct xsc_core_device *dev, + u16 vport, void *out, int outlen, + int force_other); +#endif /* XSC_VPORT_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..4864cb747cdea43b8f3a7f9b417dd022a1b7728b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +/* generated time: + * Thu Feb 29 15:33:50 CST 2024 + */ + +#ifndef XSC_HW_H +#define XSC_HW_H + +//hif_irq_csr_defines.h +#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0xa1100070 + +//hif_cpm_csr_defines.h +#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000104 +#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000108 +#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa000010c +#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 +#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000020 +#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000080 +#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000100 +#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 +#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 +#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 +#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 +#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f +#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x4 +#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000010 + +//mmc_csr_defines.h +#define MMC_MPT_TBL_MEM_DEPTH 32768 +#define MMC_MTT_TBL_MEM_DEPTH 262144 +#define MMC_MPT_TBL_MEM_WIDTH 256 +#define MMC_MTT_TBL_MEM_WIDTH 64 +#define MMC_MPT_TBL_MEM_ADDR 0xa4100000 +#define MMC_MTT_TBL_MEM_ADDR 0xa4200000 + +//clsf_dma_csr_defines.h +#define CLSF_DMA_DMA_UL_BUSY_REG_ADDR 0xa6010048 +#define CLSF_DMA_DMA_DL_DONE_REG_ADDR 0xa60100d0 +#define CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR 0xa60100c0 +#define CLSF_DMA_ERR_CODE_CLR_REG_ADDR 0xa60100d4 +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK 0x7f +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR 0xa6010020 +#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT 16 +#define CLSF_DMA_DMA_RD_ADDR_REG_ADDR 0xa6010024 +#define CLSF_DMA_INDRW_RD_START_REG_ADDR 0xa6010028 + +//hif_tbl_csr_defines.h +#define HIF_TBL_TBL_DL_BUSY_REG_ADDR 0xa1060030 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT 12 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_DL_REQ_REG_ADDR 0xa1060020 +#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_L_REG_ADDR 0xa1060024 +#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_H_REG_ADDR 0xa1060028 +#define HIF_TBL_TBL_DL_START_REG_ADDR 0xa106002c +#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_UL_REQ_REG_ADDR 0xa106007c +#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_L_REG_ADDR 0xa1060080 +#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_H_REG_ADDR 0xa1060084 +#define HIF_TBL_TBL_UL_START_REG_ADDR 0xa1060088 +#define HIF_TBL_MSG_RDY_REG_ADDR 0xa1060044 + +//hif_cmdqm_csr_defines.h +#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1026000 +#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1028000 +#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa102e000 +#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1030000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1022000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1024000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa102a000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa102c000 +#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa1034000 +#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1020020 +#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1020028 +#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1032000 + +//PSV use +//hif_irq_csr_defines.h +#define HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1102000 +#define HIF_IRQ_INT_DB_REG_ADDR 0xa11000b4 +#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa1100114 +#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa11000f0 +#define 
HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 0xa11000ec +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000f4 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000f8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000fc +#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa1100100 +#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa11000e8 + +#endif /* XSC_HW_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..1d5d0e6c8c78dfcc0b8f5a796cd744eda2a0b855 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h @@ -0,0 +1,2513 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_CMD_H +#define XSC_CMD_H + +#define CMDQ_VERSION 0x32 + +#define MAX_MBOX_OUT_LEN 2048 + +#define QOS_PRIO_MAX 7 +#define QOS_DSCP_MAX 63 +#define MAC_PORT_DSCP_SHIFT 6 +#define QOS_PCP_MAX 7 +#define DSCP_PCP_UNSET 255 +#define MAC_PORT_PCP_SHIFT 3 +#define XSC_MAX_MAC_NUM 8 +#define XSC_BOARD_SN_LEN 32 +#define MAX_PKT_LEN 9800 +#define XSC_RTT_CFG_QPN_MAX 32 + +#define XSC_PCIE_LAT_CFG_INTERVAL_MAX 8 +#define XSC_PCIE_LAT_CFG_HISTOGRAM_MAX 9 +#define XSC_PCIE_LAT_EN_DISABLE 0 +#define XSC_PCIE_LAT_EN_ENABLE 1 +#define XSC_PCIE_LAT_PERIOD_MIN 1 +#define XSC_PCIE_LAT_PERIOD_MAX 20 +#define DPU_PORT_WGHT_CFG_MAX 1 + +enum { + XSC_CMD_STAT_OK = 0x0, + XSC_CMD_STAT_INT_ERR = 0x1, + XSC_CMD_STAT_BAD_OP_ERR = 0x2, + XSC_CMD_STAT_BAD_PARAM_ERR = 0x3, + XSC_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + XSC_CMD_STAT_BAD_RES_ERR = 0x5, + XSC_CMD_STAT_RES_BUSY = 0x6, + XSC_CMD_STAT_LIM_ERR = 0x8, + XSC_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + XSC_CMD_STAT_IX_ERR = 0xa, + XSC_CMD_STAT_NO_RES_ERR = 0xf, + XSC_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + XSC_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + XSC_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + XSC_CMD_STAT_BAD_PKT_ERR = 0x30, + XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +enum { + DPU_PORT_WGHT_TARGET_HOST, + DPU_PORT_WGHT_TARGET_SOC, + DPU_PORT_WGHT_TARGET_NUM, +}; + +enum { + DPU_PRIO_WGHT_TARGET_HOST2SOC, + DPU_PRIO_WGHT_TARGET_SOC2HOST, + DPU_PRIO_WGHT_TARGET_HOSTSOC2LAG, + DPU_PRIO_WGHT_TARGET_NUM, +}; + +#define XSC_AP_FEAT_UDP_SPORT_MIN 1024 +#define XSC_AP_FEAT_UDP_SPORT_MAX 65535 + +enum { + XSC_CMD_OP_QUERY_HCA_CAP = 0x100, + XSC_CMD_OP_QUERY_ADAPTER = 0x101, + XSC_CMD_OP_INIT_HCA = 0x102, + XSC_CMD_OP_TEARDOWN_HCA = 0x103, + XSC_CMD_OP_ENABLE_HCA = 0x104, + XSC_CMD_OP_DISABLE_HCA = 0x105, + XSC_CMD_OP_MODIFY_HCA = 0x106, + XSC_CMD_OP_QUERY_PAGES = 0x107, + XSC_CMD_OP_MANAGE_PAGES = 0x108, + XSC_CMD_OP_SET_HCA_CAP = 0x109, + XSC_CMD_OP_QUERY_CMDQ_VERSION = 0x10a, + XSC_CMD_OP_QUERY_MSIX_TBL_INFO = 0x10b, + XSC_CMD_OP_FUNCTION_RESET = 0x10c, + XSC_CMD_OP_DUMMY = 0x10d, + XSC_CMD_OP_SET_DEBUG_INFO = 0x10e, + XSC_CMD_OP_QUERY_PSV_FUNCID = 0x10f, + XSC_CMD_OP_ALLOC_IA_LOCK = 0x110, + XSC_CMD_OP_RELEASE_IA_LOCK = 0x111, + XSC_CMD_OP_ENABLE_RELAXED_ORDER = 0x112, + XSC_CMD_OP_QUERY_GUID = 0x113, + XSC_CMD_OP_ACTIVATE_HW_CONFIG = 0x114, + + XSC_CMD_OP_CREATE_MKEY = 0x200, + XSC_CMD_OP_QUERY_MKEY = 0x201, + XSC_CMD_OP_DESTROY_MKEY = 0x202, + XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + XSC_CMD_OP_REG_MR = 0x204, + XSC_CMD_OP_DEREG_MR = 0x205, + XSC_CMD_OP_SET_MPT = 0x206, + XSC_CMD_OP_SET_MTT = 0x207, + + XSC_CMD_OP_CREATE_EQ = 0x301, + XSC_CMD_OP_DESTROY_EQ = 0x302, + XSC_CMD_OP_QUERY_EQ = 0x303, + + XSC_CMD_OP_CREATE_CQ = 0x400, 
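+	/* opcodes are grouped by object: 0x1xx HCA, 0x2xx MKEY/MR, 0x3xx EQ, 0x4xx CQ, 0x5xx QP, 0x6xx PSV, 0x7xx SRQ, 0x8xx PD/UAR/vport/LAG, 0x9xx flow ioctl, 0x1xxx QoS/stats/feature commands */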
+ XSC_CMD_OP_DESTROY_CQ = 0x401, + XSC_CMD_OP_QUERY_CQ = 0x402, + XSC_CMD_OP_MODIFY_CQ = 0x403, + XSC_CMD_OP_ALLOC_MULTI_VIRTQ_CQ = 0x404, + XSC_CMD_OP_RELEASE_MULTI_VIRTQ_CQ = 0x405, + + XSC_CMD_OP_CREATE_QP = 0x500, + XSC_CMD_OP_DESTROY_QP = 0x501, + XSC_CMD_OP_RST2INIT_QP = 0x502, + XSC_CMD_OP_INIT2RTR_QP = 0x503, + XSC_CMD_OP_RTR2RTS_QP = 0x504, + XSC_CMD_OP_RTS2RTS_QP = 0x505, + XSC_CMD_OP_SQERR2RTS_QP = 0x506, + XSC_CMD_OP_2ERR_QP = 0x507, + XSC_CMD_OP_RTS2SQD_QP = 0x508, + XSC_CMD_OP_SQD2RTS_QP = 0x509, + XSC_CMD_OP_2RST_QP = 0x50a, + XSC_CMD_OP_QUERY_QP = 0x50b, + XSC_CMD_OP_CONF_SQP = 0x50c, + XSC_CMD_OP_MAD_IFC = 0x50d, + XSC_CMD_OP_INIT2INIT_QP = 0x50e, + XSC_CMD_OP_SUSPEND_QP = 0x50f, + XSC_CMD_OP_UNSUSPEND_QP = 0x510, + XSC_CMD_OP_SQD2SQD_QP = 0x511, + XSC_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, + XSC_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, + XSC_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, + XSC_CMD_OP_CREATE_MULTI_QP = 0x515, + XSC_CMD_OP_ALLOC_MULTI_VIRTQ = 0x516, + XSC_CMD_OP_RELEASE_MULTI_VIRTQ = 0x517, + XSC_CMD_OP_QUERY_QP_FLUSH_STATUS = 0x518, + + XSC_CMD_OP_CREATE_PSV = 0x600, + XSC_CMD_OP_DESTROY_PSV = 0x601, + XSC_CMD_OP_QUERY_PSV = 0x602, + XSC_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, + XSC_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, + + XSC_CMD_OP_CREATE_SRQ = 0x700, + XSC_CMD_OP_DESTROY_SRQ = 0x701, + XSC_CMD_OP_QUERY_SRQ = 0x702, + XSC_CMD_OP_ARM_RQ = 0x703, + XSC_CMD_OP_RESIZE_SRQ = 0x704, + + XSC_CMD_OP_ALLOC_PD = 0x800, + XSC_CMD_OP_DEALLOC_PD = 0x801, + XSC_CMD_OP_ALLOC_UAR = 0x802, + XSC_CMD_OP_DEALLOC_UAR = 0x803, + + XSC_CMD_OP_ATTACH_TO_MCG = 0x806, + XSC_CMD_OP_DETACH_FROM_MCG = 0x807, + + XSC_CMD_OP_ALLOC_XRCD = 0x80e, + XSC_CMD_OP_DEALLOC_XRCD = 0x80f, + + XSC_CMD_OP_ACCESS_REG = 0x805, + + XSC_CMD_OP_MODIFY_RAW_QP = 0x81f, + + XSC_CMD_OP_ENABLE_NIC_HCA = 0x810, + XSC_CMD_OP_DISABLE_NIC_HCA = 0x811, + XSC_CMD_OP_MODIFY_NIC_HCA = 0x812, + + XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x820, + XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x821, + XSC_CMD_OP_QUERY_VPORT_STATE = 0x822, + XSC_CMD_OP_MODIFY_VPORT_STATE = 0x823, + XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x824, + XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x825, + XSC_CMD_OP_QUERY_HCA_VPORT_GID = 0x826, + XSC_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x827, + XSC_CMD_OP_QUERY_VPORT_COUNTER = 0x828, + XSC_CMD_OP_QUERY_PRIO_STATS = 0x829, + XSC_CMD_OP_QUERY_PHYPORT_STATE = 0x830, + XSC_CMD_OP_QUERY_EVENT_TYPE = 0x831, + XSC_CMD_OP_QUERY_LINK_INFO = 0x832, + XSC_CMD_OP_QUERY_PFC_PRIO_STATS = 0x833, + XSC_CMD_OP_MODIFY_LINK_INFO = 0x834, + XSC_CMD_OP_QUERY_FEC_PARAM = 0x835, + XSC_CMD_OP_MODIFY_FEC_PARAM = 0x836, + + XSC_CMD_OP_LAG_CREATE = 0x840, + XSC_CMD_OP_LAG_ADD_MEMBER = 0x841, + XSC_CMD_OP_LAG_REMOVE_MEMBER = 0x842, + XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS = 0x843, + XSC_CMD_OP_LAG_UPDATE_HASH_TYPE = 0x844, + XSC_CMD_OP_LAG_DESTROY = 0x845, + + XSC_CMD_OP_LAG_SET_QOS = 0x848, + XSC_CMD_OP_ENABLE_MSIX = 0x850, + + XSC_CMD_OP_IOCTL_FLOW = 0x900, + XSC_CMD_OP_IOCTL_OTHER = 0x901, + + XSC_CMD_OP_IOCTL_SET_DSCP_PMT = 0x1000, + XSC_CMD_OP_IOCTL_GET_DSCP_PMT = 0x1001, + XSC_CMD_OP_IOCTL_SET_TRUST_MODE = 0x1002, + XSC_CMD_OP_IOCTL_GET_TRUST_MODE = 0x1003, + XSC_CMD_OP_IOCTL_SET_PCP_PMT = 0x1004, + XSC_CMD_OP_IOCTL_GET_PCP_PMT = 0x1005, + XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI = 0x1006, + XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI = 0x1007, + XSC_CMD_OP_IOCTL_SET_PFC = 0x1008, + XSC_CMD_OP_IOCTL_GET_PFC = 0x1009, + XSC_CMD_OP_IOCTL_SET_RATE_LIMIT = 0x100a, + XSC_CMD_OP_IOCTL_GET_RATE_LIMIT = 0x100b, + XSC_CMD_OP_IOCTL_SET_SP = 0x100c, + XSC_CMD_OP_IOCTL_GET_SP = 0x100d, 
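+	/* SET/GET pairs continue below: scheduling weights, DPU port/priority weights, PFC watchdog and drop thresholds */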
+ XSC_CMD_OP_IOCTL_SET_WEIGHT = 0x100e, + XSC_CMD_OP_IOCTL_GET_WEIGHT = 0x100f, + XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT = 0x1010, + XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT = 0x1011, + XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT = 0x1012, + XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT = 0x1013, + XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN = 0x1014, + XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN = 0x1015, + XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD = 0x1016, + XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD = 0x1017, + XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH = 0x1018, + XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS = 0x1019, + + XSC_CMD_OP_IOCTL_SET_ENABLE_RP = 0x1030, + XSC_CMD_OP_IOCTL_SET_ENABLE_NP = 0x1031, + XSC_CMD_OP_IOCTL_SET_INIT_ALPHA = 0x1032, + XSC_CMD_OP_IOCTL_SET_G = 0x1033, + XSC_CMD_OP_IOCTL_SET_AI = 0x1034, + XSC_CMD_OP_IOCTL_SET_HAI = 0x1035, + XSC_CMD_OP_IOCTL_SET_TH = 0x1036, + XSC_CMD_OP_IOCTL_SET_BC_TH = 0x1037, + XSC_CMD_OP_IOCTL_SET_CNP_OPCODE = 0x1038, + XSC_CMD_OP_IOCTL_SET_CNP_BTH_B = 0x1039, + XSC_CMD_OP_IOCTL_SET_CNP_BTH_F = 0x103a, + XSC_CMD_OP_IOCTL_SET_CNP_ECN = 0x103b, + XSC_CMD_OP_IOCTL_SET_DATA_ECN = 0x103c, + XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL = 0x103d, + XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME = 0x103e, + XSC_CMD_OP_IOCTL_SET_CNP_DSCP = 0x103f, + XSC_CMD_OP_IOCTL_SET_CNP_PCP = 0x1040, + XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA = 0x1041, + XSC_CMD_OP_IOCTL_GET_CC_CFG = 0x1042, + XSC_CMD_OP_IOCTL_GET_CC_STAT = 0x104b, + XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE = 0x1052, + XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR = 0x1053, + XSC_CMD_OP_IOCTL_SET_SCALE = 0x1054, + + XSC_CMD_OP_IOCTL_SET_HWC = 0x1060, + XSC_CMD_OP_IOCTL_GET_HWC = 0x1061, + + XSC_CMD_OP_SET_MTU = 0x1100, + XSC_CMD_OP_QUERY_ETH_MAC = 0X1101, + + XSC_CMD_OP_QUERY_HW_STATS = 0X1200, + XSC_CMD_OP_QUERY_PAUSE_CNT = 0X1201, + XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS = 0x1202, + XSC_CMD_OP_QUERY_HW_STATS_RDMA = 0X1203, + XSC_CMD_OP_QUERY_HW_STATS_ETH = 0X1204, + XSC_CMD_OP_QUERY_HW_GLOBAL_STATS = 0X1210, + + XSC_CMD_OP_SET_RTT_EN = 0X1220, + XSC_CMD_OP_GET_RTT_EN = 0X1221, + XSC_CMD_OP_SET_RTT_QPN = 0X1222, + XSC_CMD_OP_GET_RTT_QPN = 0X1223, + XSC_CMD_OP_SET_RTT_PERIOD = 0X1224, + XSC_CMD_OP_GET_RTT_PERIOD = 0X1225, + XSC_CMD_OP_GET_RTT_RESULT = 0X1226, + XSC_CMD_OP_GET_RTT_STATS = 0X1227, + + XSC_CMD_OP_SET_LED_STATUS = 0X1228, + + XSC_CMD_OP_AP_FEAT = 0x1400, + XSC_CMD_OP_PCIE_LAT_FEAT = 0x1401, + + XSC_CMD_OP_GET_LLDP_STATUS = 0x1500, + XSC_CMD_OP_SET_LLDP_STATUS = 0x1501, + + XSC_CMD_OP_SET_VPORT_RATE_LIMIT = 0x1600, + + XSC_CMD_OP_SET_PORT_ADMIN_STATUS = 0x1801, + XSC_CMD_OP_USER_EMU_CMD = 0x8000, + + XSC_CMD_OP_MAX +}; + +enum { + XSC_CMD_EVENT_RESP_CHANGE_LINK = 0x0001, + XSC_CMD_EVENT_RESP_TEMP_WARN = 0x0002, + XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION = 0x0004, +}; + +enum xsc_eth_qp_num_sel { + XSC_ETH_QP_NUM_8K_SEL = 0, + XSC_ETH_QP_NUM_8K_8TC_SEL, + XSC_ETH_QP_NUM_SEL_MAX, +}; + +enum xsc_eth_vf_num_sel { + XSC_ETH_VF_NUM_SEL_8 = 0, + XSC_ETH_VF_NUM_SEL_16, + XSC_ETH_VF_NUM_SEL_32, + XSC_ETH_VF_NUM_SEL_64, + XSC_ETH_VF_NUM_SEL_128, + XSC_ETH_VF_NUM_SEL_256, + XSC_ETH_VF_NUM_SEL_512, + XSC_ETH_VF_NUM_SEL_1024, + XSC_ETH_VF_NUM_SEL_MAX +}; + +enum { + LINKSPEED_MODE_UNKNOWN = -1, + LINKSPEED_MODE_10G = 10000, + LINKSPEED_MODE_25G = 25000, + LINKSPEED_MODE_40G = 40000, + LINKSPEED_MODE_50G = 50000, + LINKSPEED_MODE_100G = 100000, + LINKSPEED_MODE_200G = 200000, + LINKSPEED_MODE_400G = 400000, +}; + +enum { + MODULE_SPEED_UNKNOWN, + MODULE_SPEED_10G, + MODULE_SPEED_25G, + MODULE_SPEED_40G_R4, + MODULE_SPEED_50G_R, + MODULE_SPEED_50G_R2, + MODULE_SPEED_100G_R2, + 
MODULE_SPEED_100G_R4, + MODULE_SPEED_200G_R4, + MODULE_SPEED_200G_R8, + MODULE_SPEED_400G_R8, +}; + +enum xsc_dma_direct { + DMA_DIR_TO_MAC, + DMA_DIR_READ, + DMA_DIR_WRITE, + DMA_DIR_LOOPBACK, + DMA_DIR_MAX, +}; + +/* hw feature bitmap, 32bit */ +enum xsc_hw_feature_flag { + XSC_HW_RDMA_SUPPORT = 0x1, + XSC_HW_PFC_PRIO_STATISTIC_SUPPORT = 0x2, + XSC_HW_THIRD_FEATURE = 0x4, + XSC_HW_PFC_STALL_STATS_SUPPORT = 0x8, + XSC_HW_RDMA_CM_SUPPORT = 0x20, + + XSC_HW_LAST_FEATURE = 0x80000000, +}; + +enum xsc_lldp_dcbx_sub_cmd { + XSC_OS_HANDLE_LLDP_STATUS = 0x1, + XSC_DCBX_STATUS +}; + +struct xsc_inbox_hdr { + __be16 opcode; + u8 rsvd[4]; + __be16 ver; +}; + +struct xsc_outbox_hdr { + u8 status; + u8 rsvd[5]; + __be16 ver; +}; + +struct xsc_alloc_ia_lock_mbox_in { + struct xsc_inbox_hdr hdr; + u8 lock_num; + u8 rsvd[7]; +}; + +#define XSC_RES_NUM_IAE_GRP 16 + +struct xsc_alloc_ia_lock_mbox_out { + struct xsc_outbox_hdr hdr; + u8 lock_idx[XSC_RES_NUM_IAE_GRP]; +}; + +struct xsc_release_ia_lock_mbox_in { + struct xsc_inbox_hdr hdr; + u8 lock_idx[XSC_RES_NUM_IAE_GRP]; +}; + +struct xsc_release_ia_lock_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_pci_driver_init_params_in { + struct xsc_inbox_hdr hdr; + __be32 s_wqe_mode; + __be32 r_wqe_mode; + __be32 local_timeout_retrans; + u8 mac_lossless_prio[XSC_MAX_MAC_NUM]; + __be32 group_mod; +}; + +struct xsc_pci_driver_init_params_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*CQ mbox*/ +struct xsc_cq_context { + __be16 eqn; + __be16 pa_num; + __be16 glb_func_id; + u8 log_cq_sz; + u8 cq_type; +}; + +struct xsc_create_cq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_cq_context ctx; + __be64 pas[]; +}; + +struct xsc_create_cq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 cqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*QP mbox*/ +struct xsc_create_qp_request { + __be16 input_qpn; + __be16 pa_num; + u8 qp_type; + u8 log_sq_sz; + u8 log_rq_sz; + u8 dma_direct;//0 for dma read, 1 for dma write + __be32 pdn; + __be16 cqn_send; + __be16 cqn_recv; + __be16 glb_funcid; + /*rsvd,rename logic_port used to transfer logical_port to fw*/ + u8 rsvd[2]; + __be64 pas[]; +}; + +struct xsc_create_qp_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_qp_request req; +}; + +struct xsc_create_qp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_query_qp_flush_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; +}; + +struct xsc_query_qp_flush_status_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_qp_context { + __be32 remote_qpn; + __be32 cqn_send; + __be32 cqn_recv; + __be32 next_send_psn; + __be32 next_recv_psn; + __be32 pdn; + __be16 src_udp_port; + __be16 path_id; + u8 mtu_mode; + u8 lag_sel; + u8 lag_sel_en; + u8 retry_cnt; + u8 rnr_retry; + u8 dscp; + u8 state; + u8 hop_limit; + u8 dmac[6]; + u8 smac[6]; + __be32 dip[4]; + __be32 sip[4]; + __be16 ip_type; + __be16 grp_id; + u8 vlan_valid; + u8 dci_cfi_prio_sl; + __be16 vlan_id; + u8 qp_out_port; + u8 pcie_no; + __be16 lag_id; + __be16 func_id; + __be16 rsvd; +}; + +struct xsc_query_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_query_qp_mbox_out { + 
struct xsc_outbox_hdr hdr; + struct xsc_qp_context ctx; +}; + +struct xsc_modify_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + struct xsc_qp_context ctx; + u8 no_need_wait; +}; + +struct xsc_modify_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_create_multiqp_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_num; + u8 qp_type; + u8 rsvd; + __be32 req_len; + u8 data[]; +}; + +struct xsc_create_multiqp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn_base; +}; + +struct xsc_alloc_multi_virtq_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_or_cq_num; + __be16 pa_num; + __be32 rsvd; + __be32 rsvd2; +}; + +struct xsc_alloc_multi_virtq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qnum_base; + __be32 pa_list_base; + __be32 rsvd; +}; + +struct xsc_release_multi_virtq_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_or_cq_num; + __be16 pa_num; + __be32 qnum_base; + __be32 pa_list_base; +}; + +struct xsc_release_multi_virtq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 rsvd; + __be32 rsvd2; + __be32 rsvd3; +}; + +/* MSIX TABLE mbox */ +struct xsc_msix_table_info_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 index; + u8 rsvd[6]; +}; + +struct xsc_msix_table_info_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 addr_lo; + __be32 addr_hi; + __be32 data; +}; + +/*EQ mbox*/ +struct xsc_eq_context { + __be16 vecidx; + __be16 pa_num; + u8 log_eq_sz; + __be16 glb_func_id; + u8 is_async_eq; + u8 rsvd; +}; + +struct xsc_create_eq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_eq_context ctx; + __be64 pas[]; +}; + +struct xsc_create_eq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_eq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; + +}; + +struct xsc_destroy_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*PD mbox*/ +struct xsc_alloc_pd_request { + u8 rsvd[8]; +}; + +struct xsc_alloc_pd_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_alloc_pd_request req; +}; + +struct xsc_alloc_pd_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct xsc_dealloc_pd_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; + +}; + +struct xsc_dealloc_pd_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*MR mbox*/ +struct xsc_register_mr_request { + __be32 pdn; + __be32 pa_num; + __be32 len; + __be32 mkey; + u8 rsvd; + u8 acc; + u8 page_mode; + u8 map_en; + __be64 va_base; + __be64 pas[]; +}; + +struct xsc_register_mr_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_register_mr_request req; +}; + +struct xsc_register_mr_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct xsc_unregister_mr_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct xsc_unregister_mr_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_mpt_item { + __be32 pdn; + __be32 pa_num; + __be32 len; + __be32 mkey; + u8 rsvd[5]; + u8 acc; + u8 page_mode; + u8 map_en; + __be64 va_base; +}; + +struct xsc_set_mpt_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_mpt_item mpt_item; +}; + +struct xsc_set_mpt_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mtt_base; + u8 rsvd[4]; +}; + +struct xsc_mtt_setting { + __be32 mtt_base; + __be32 pa_num; + __be64 pas[]; +}; + +struct xsc_set_mtt_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_mtt_setting mtt_setting; +}; + +struct xsc_set_mtt_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_create_mkey_mbox_in { + struct 
xsc_inbox_hdr hdr; + u8 rsvd[4]; +}; + +struct xsc_create_mkey_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mkey; +}; + +struct xsc_destroy_mkey_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 mkey; +}; + +struct xsc_destroy_mkey_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd; +}; + +struct xsc_access_reg_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 register_id; + __be32 arg; + __be32 data[]; +}; + +struct xsc_access_reg_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + __be32 data[]; +}; + +struct xsc_mad_ifc_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 remote_lid; + u8 rsvd0; + u8 port; + u8 rsvd1[4]; + u8 data[256]; +}; + +struct xsc_mad_ifc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[256]; +}; + +struct xsc_query_eq_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct xsc_query_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + struct xsc_eq_context ctx; +}; + +struct xsc_query_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct xsc_query_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_cq_context ctx; + u8 rsvd6[16]; + __be64 pas[]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 cmdq_ver; + u8 rsvd[6]; +}; + +struct xsc_cmd_dummy_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_dummy_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_fw_version { + u8 fw_version_major; + u8 fw_version_minor; + __be16 fw_version_patch; + __be32 fw_version_tweak; + u8 fw_version_extra_flag; + u8 rsvd[7]; +}; + +struct xsc_hca_cap { + u8 rsvd1[12]; + u8 send_seg_num; + u8 send_wqe_shift; + u8 recv_seg_num; + u8 recv_wqe_shift; + u8 log_max_srq_sz; + u8 log_max_qp_sz; + u8 log_max_mtt; + u8 log_max_qp; + u8 log_max_strq_sz; + u8 log_max_srqs; + u8 rsvd4[2]; + u8 log_max_tso; + u8 log_max_cq_sz; + u8 rsvd6; + u8 log_max_cq; + u8 log_max_eq_sz; + u8 log_max_mkey; + u8 log_max_msix; + u8 log_max_eq; + u8 max_indirection; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_sz; + u8 log_max_klm_list_sz; + u8 rsvd_8_0; + u8 log_max_ra_req_dc; + u8 rsvd_8_1; + u8 log_max_ra_res_dc; + u8 rsvd9; + u8 log_max_ra_req_qp; + u8 log_max_qp_depth; + u8 log_max_ra_res_qp; + __be16 max_vfs; + __be16 raweth_qp_id_end; + __be16 raw_tpe_qp_num; + __be16 max_qp_count; + __be16 raweth_qp_id_base; + u8 rsvd13; + u8 local_ca_ack_delay; + u8 max_num_eqs; + u8 num_ports; + u8 log_max_msg; + u8 mac_port; + __be16 raweth_rss_qp_id_base; + __be16 stat_rate_support; + u8 rsvd16[2]; + __be64 flags; + u8 rsvd17; + u8 uar_sz; + u8 rsvd18; + u8 log_pg_sz; + __be16 bf_log_bf_reg_size; + __be16 msix_base; + __be16 msix_num; + __be16 max_desc_sz_sq; + u8 rsvd20[2]; + __be16 max_desc_sz_rq; + u8 rsvd21[2]; + __be16 max_desc_sz_sq_dc; + u8 rsvd22[4]; + __be16 max_qp_mcg; + u8 rsvd23; + u8 log_max_mcg; + u8 rsvd24; + u8 log_max_pd; + u8 rsvd25; + u8 log_max_xrcd; + u8 rsvd26[40]; + __be32 uar_page_sz; + u8 rsvd27[8]; + __be32 hw_feature_flag;/*enum xsc_hw_feature_flag*/ + __be16 pf0_vf_funcid_base; + __be16 pf0_vf_funcid_top; + __be16 pf1_vf_funcid_base; + __be16 pf1_vf_funcid_top; + __be16 pcie0_pf_funcid_base; + __be16 pcie0_pf_funcid_top; + __be16 pcie1_pf_funcid_base; + __be16 pcie1_pf_funcid_top; + u8 log_msx_atomic_size_qp; + u8 pcie_host; + u8 rsvd28; + u8 log_msx_atomic_size_dc; + u8 board_sn[XSC_BOARD_SN_LEN]; + u8 max_tc; + 
u8 mac_bit; + __be16 funcid_to_logic_port; + u8 rsvd29[6]; + u8 nif_port_num; + u8 reg_mr_via_cmdq; + __be32 hca_core_clock; + __be32 max_rwq_indirection_tables;/*rss_caps*/ + __be32 max_rwq_indirection_table_size;/*rss_caps*/ + __be32 chip_ver_h; + __be32 chip_ver_m; + __be32 chip_ver_l; + __be32 hotfix_num; + __be32 feature_flag; + __be32 rx_pkt_len_max; + __be32 glb_func_id; + __be64 tx_db; + __be64 rx_db; + __be64 complete_db; + __be64 complete_reg; + __be64 event_db; + __be32 qp_rate_limit_min; + __be32 qp_rate_limit_max; + struct xsc_fw_version fw_ver; + u8 lag_logic_port_ofst; +}; + +struct xsc_cmd_query_hca_cap_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 cpu_num; + u8 rsvd[6]; +}; + +struct xsc_cmd_query_hca_cap_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_hca_cap hca_cap; +}; + +struct xsc_cmd_enable_hca_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 vf_num; + __be16 max_msix_vec; + __be16 cpu_num; + u8 pp_bypass; + u8 esw_mode; +}; + +struct xsc_cmd_enable_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_cmd_disable_hca_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 vf_num; + u8 pp_bypass; + u8 esw_mode; +}; + +struct xsc_cmd_disable_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_cmd_modify_hca_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pp_bypass; + u8 esw_mode; + u8 rsvd0[6]; +}; + +struct xsc_cmd_modify_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_query_special_ctxs_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_query_special_ctxs_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 dump_fill_mkey; + __be32 reserved_lkey; +}; + +/* vport mbox */ +struct xsc_nic_vport_context { + __be32 min_wqe_inline_mode:3; + __be32 disable_mc_local_lb:1; + __be32 disable_uc_local_lb:1; + __be32 roce_en:1; + + __be32 arm_change_event:1; + __be32 event_on_mtu:1; + __be32 event_on_promisc_change:1; + __be32 event_on_vlan_change:1; + __be32 event_on_mc_address_change:1; + __be32 event_on_uc_address_change:1; + __be32 affiliation_criteria:4; + __be32 affiliated_vhca_id; + + __be16 mtu; + + __be64 system_image_guid; + __be64 port_guid; + __be64 node_guid; + + __be32 qkey_violation_counter; + + __be16 spoofchk:1; + __be16 trust:1; + __be16 promisc:1; + __be16 allmcast:1; + __be16 vlan_allowed:1; + __be16 allowed_list_type:3; + __be16 allowed_list_size:10; + + __be16 vlan_proto; + __be16 vlan; + u8 qos; + u8 permanent_address[6]; + u8 current_address[6]; + u8 current_uc_mac_address[0][2]; +}; + +enum { + XSC_HCA_VPORT_SEL_PORT_GUID = 1 << 0, + XSC_HCA_VPORT_SEL_NODE_GUID = 1 << 1, + XSC_HCA_VPORT_SEL_STATE_POLICY = 1 << 2, +}; + +struct xsc_hca_vport_context { + u32 field_select; + u32 port_physical_state:4; + u32 vport_state_policy:4; + u32 port_state:4; + u32 vport_state:4; + u32 rcvd0:16; + + u64 system_image_guid; + u64 port_guid; + u64 node_guid; + + u16 qkey_violation_counter; + u16 pkey_violation_counter; +}; + +struct xsc_query_nic_vport_context_out { + struct xsc_outbox_hdr hdr; + struct xsc_nic_vport_context nic_vport_ctx; +}; + +struct xsc_query_nic_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 allowed_list_type:3; + u32 rsvd:12; +}; + +struct xsc_modify_nic_vport_context_out { + struct xsc_outbox_hdr hdr; + __be16 outer_vlan_id; + u8 rsvd[2]; +}; + +struct xsc_modify_nic_vport_field_select { + __be32 affiliation:1; + __be32 disable_uc_local_lb:1; + __be32 disable_mc_local_lb:1; + __be32 node_guid:1; + 
__be32 port_guid:1; + __be32 min_inline:1; + __be32 mtu:1; + __be32 change_event:1; + __be32 promisc:1; + __be32 allmcast:1; + __be32 permanent_address:1; + __be32 current_address:1; + __be32 addresses_list:1; + __be32 roce_en:1; + __be32 spoofchk:1; + __be32 trust:1; + __be32 rsvd:16; +}; + +struct xsc_modify_nic_vport_context_in { + struct xsc_inbox_hdr hdr; + __be32 other_vport:1; + __be32 vport_number:16; + __be32 rsvd:15; + __be16 caps; + __be16 caps_mask; + __be16 lag_id; + + struct xsc_modify_nic_vport_field_select field_select; + struct xsc_nic_vport_context nic_vport_ctx; +}; + +struct xsc_query_hca_vport_context_out { + struct xsc_outbox_hdr hdr; + struct xsc_hca_vport_context hca_vport_ctx; +}; + +struct xsc_query_hca_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; +}; + +struct xsc_modify_hca_vport_context_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_modify_hca_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + + struct xsc_hca_vport_context hca_vport_ctx; +}; + +struct xsc_array128 { + u8 array128[16]; +}; + +struct xsc_query_hca_vport_gid_out { + struct xsc_outbox_hdr hdr; + u16 gids_num; + struct xsc_array128 gid[]; +}; + +struct xsc_query_hca_vport_gid_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + u16 gid_index; +}; + +struct xsc_pkey { + u16 pkey; +}; + +struct xsc_query_hca_vport_pkey_out { + struct xsc_outbox_hdr hdr; + struct xsc_pkey pkey[]; +}; + +struct xsc_query_hca_vport_pkey_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + u16 pkey_index; +}; + +struct xsc_query_vport_state_out { + struct xsc_outbox_hdr hdr; + u8 admin_state:4; + u8 state:4; +}; + +struct xsc_query_vport_state_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 rsvd0:15; +}; + +struct xsc_modify_vport_state_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_modify_vport_state_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 rsvd0:15; + u8 admin_state:4; + u8 rsvd1:4; +}; + +struct xsc_traffic_counter { + u64 packets; + u64 bytes; +}; + +struct xsc_query_vport_counter_out { + struct xsc_outbox_hdr hdr; + struct xsc_traffic_counter received_errors; + struct xsc_traffic_counter transmit_errors; + struct xsc_traffic_counter received_ib_unicast; + struct xsc_traffic_counter transmitted_ib_unicast; + struct xsc_traffic_counter received_ib_multicast; + struct xsc_traffic_counter transmitted_ib_multicast; + struct xsc_traffic_counter received_eth_broadcast; + struct xsc_traffic_counter transmitted_eth_broadcast; + struct xsc_traffic_counter received_eth_unicast; + struct xsc_traffic_counter transmitted_eth_unicast; + struct xsc_traffic_counter received_eth_multicast; + struct xsc_traffic_counter transmitted_eth_multicast; +}; + +struct xsc_query_vport_counter_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; +}; + +/* ioctl mbox */ +struct xsc_ioctl_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 len; + __be16 rsvd; + u8 data[]; +}; + +struct xsc_ioctl_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 error; + __be16 len; + __be16 rsvd; + u8 data[]; +}; + +struct xsc_modify_raw_qp_request { + u16 qpn; + u16 lag_id; + u16 func_id; + u8 dma_direct; + u8 prio; + u8 qp_out_port; + u8 
rsvd[7]; +}; + +struct xsc_modify_raw_qp_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pcie_no; + u8 rsv[7]; + struct xsc_modify_raw_qp_request req; +}; + +struct xsc_modify_raw_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +#define ETH_ALEN 6 + +struct xsc_create_lag_request { + __be16 lag_id; + u8 lag_type; + u8 lag_sel_mode; + u8 mac_idx; + u8 netdev_addr[ETH_ALEN]; + u8 bond_mode; + u8 slave_status; +}; + +struct xsc_add_lag_member_request { + __be16 lag_id; + u8 lag_type; + u8 lag_sel_mode; + u8 mac_idx; + u8 netdev_addr[ETH_ALEN]; + u8 bond_mode; + u8 slave_status; + u8 mad_mac_idx; +}; + +struct xsc_remove_lag_member_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 mad_mac_idx; + u8 bond_mode; + u8 is_roce_lag_xdev; + u8 not_roce_lag_xdev_mask; +}; + +struct xsc_update_lag_member_status_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 bond_mode; + u8 slave_status; + u8 rsvd; +}; + +struct xsc_update_lag_hash_type_request { + __be16 lag_id; + u8 lag_sel_mode; + u8 rsvd[5]; +}; + +struct xsc_destroy_lag_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 bond_mode; + u8 slave_status; + u8 rsvd[3]; +}; + +struct xsc_set_lag_qos_request { + __be16 lag_id; + u8 member_idx; + u8 lag_op; + u8 resv[4]; +}; + +struct xsc_create_lag_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_lag_request req; +}; + +struct xsc_create_lag_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_add_lag_member_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_add_lag_member_request req; +}; + +struct xsc_add_lag_member_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_remove_lag_member_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_remove_lag_member_request req; +}; + +struct xsc_remove_lag_member_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_update_lag_member_status_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_update_lag_member_status_request req; +}; + +struct xsc_update_lag_member_status_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_update_lag_hash_type_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_update_lag_hash_type_request req; +}; + +struct xsc_update_lag_hash_type_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_destroy_lag_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_destroy_lag_request req; +}; + +struct xsc_destroy_lag_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_set_lag_qos_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_set_lag_qos_request req; +}; + +struct xsc_set_lag_qos_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*ioctl qos*/ +struct xsc_qos_req_prfx { + u8 mac_port; + u8 rsvd[7]; +}; + +struct xsc_qos_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_qos_req_prfx req_prfx; + u8 data[]; +}; + +struct xsc_qos_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct xsc_prio_stats { + u64 tx_bytes; + u64 rx_bytes; + u64 tx_pkts; + u64 rx_pkts; +}; + +struct xsc_prio_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pport; +}; + +struct xsc_prio_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_prio_stats prio_stats[QOS_PRIO_MAX + 1]; +}; + +struct xsc_pfc_prio_stats { + u64 tx_pause; + u64 tx_pause_duration; + u64 rx_pause; + u64 rx_pause_duration; +}; + +struct xsc_pfc_prio_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pport; +}; + +struct xsc_pfc_prio_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_pfc_prio_stats 
prio_stats[QOS_PRIO_MAX + 1]; +}; + +struct xsc_hw_stats_rdma_pf { + /*by mac port*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; + u64 np_cnp_sent; + u64 rp_cnp_handled; + u64 np_ecn_marked_roce_packets; + u64 rp_cnp_ignored; + u64 read_rsp_out_of_seq; + u64 implied_nak_seq_err; + /*by function*/ + u64 out_of_sequence; + u64 packet_seq_err; + u64 out_of_buffer; + u64 rnr_nak_retry_err; + u64 local_ack_timeout_err; + u64 rx_read_requests; + u64 rx_write_requests; + u64 duplicate_requests; + u64 rdma_tx_pkts_func; + u64 rdma_tx_payload_bytes; + u64 rdma_rx_pkts_func; + u64 rdma_rx_payload_bytes; + /*global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; +}; + +struct xsc_hw_stats_rdma_vf { + /*by function*/ + u64 rdma_tx_pkts_func; + u64 rdma_tx_payload_bytes; + u64 rdma_rx_pkts_func; + u64 rdma_rx_payload_bytes; + + u64 out_of_sequence; + u64 packet_seq_err; + u64 out_of_buffer; + u64 rnr_nak_retry_err; + u64 local_ack_timeout_err; + u64 rx_read_requests; + u64 rx_write_requests; + u64 duplicate_requests; +}; + +struct xsc_hw_stats_rdma { + u8 is_pf; + u8 rsv[3]; + union { + struct xsc_hw_stats_rdma_pf pf_stats; + struct xsc_hw_stats_rdma_vf vf_stats; + } stats; +}; + +struct xsc_hw_stats_eth_pf { + /*by mac port*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; + u64 tx_pause; + u64 rx_pause; + u64 rx_fcs_errors; + u64 rx_discards; + u64 tx_multicast_phy; + u64 tx_broadcast_phy; + u64 rx_multicast_phy; + u64 rx_broadcast_phy; + /*by global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; +}; + +struct xsc_hw_stats_eth_vf { + /*by function*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; +}; + +struct xsc_hw_stats_eth { + u8 is_pf; + u8 rsv[3]; + union { + struct xsc_hw_stats_eth_pf pf_stats; + struct xsc_hw_stats_eth_vf vf_stats; + } stats; +}; + +struct xsc_hw_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; + u8 is_lag; + u8 lag_member_num; + u8 member_port[]; +}; + +struct xsc_hw_stats_rdma_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_stats_rdma hw_stats; +}; + +struct xsc_hw_stats_eth_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_stats_eth hw_stats; +}; + +struct xsc_hw_global_stats_rdma { + /*by global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; + u64 rx_icrc_encapsulated; + u64 req_cqe_error; + u64 resp_cqe_error; + u64 cqe_msg_code_error; +}; + +struct xsc_hw_global_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsv[4]; +}; + +struct xsc_hw_global_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_global_stats_rdma hw_stats; +}; + +struct xsc_pfc_stall_stats { + /*by mac port*/ + u64 tx_pause_storm_triggered; +}; + +struct xsc_pfc_stall_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; +}; + +struct xsc_pfc_stall_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_pfc_stall_stats pfc_stall_stats; +}; + +struct xsc_dscp_pmt_set { + u8 dscp; + u8 priority; + u8 rsvd[6]; +}; + +struct xsc_dscp_pmt_get { + u8 prio_map[QOS_DSCP_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_trust_mode_set { + u8 is_pcp; + u8 rsvd[7]; +}; + +struct xsc_trust_mode_get { + u8 is_pcp; + u8 rsvd[7]; +}; + +struct xsc_pcp_pmt_set { + u8 pcp; + u8 priority; + u8 rsvd[6]; +}; + +struct xsc_pcp_pmt_get { + u8 prio_map[QOS_PCP_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_default_pri_set { + u8 priority; + u8 rsvd[7]; +}; + +struct xsc_default_pri_get { + u8 priority; + u8 rsvd[7]; +}; + +#define 
PFC_WATCHDOG_EN_OFF 0 +#define PFC_WATCHDOG_EN_ON 1 +struct xsc_watchdog_en_set { + u8 en; +}; + +struct xsc_watchdog_en_get { + u8 en; +}; + +#define PFC_WATCHDOG_PERIOD_MIN 1 +#define PFC_WATCHDOG_PERIOD_MAX 4000000 +struct xsc_watchdog_period_set { + u32 period; +}; + +struct xsc_watchdog_period_get { + u32 period; +}; + +struct xsc_event_resp { + u8 resp_cmd_type; /* bitmap:0x0001: link up/down */ +}; + +struct xsc_event_linkstatus_resp { + u8 linkstatus; /*0:down, 1:up*/ +}; + +struct xsc_event_linkinfo { + u8 linkstatus; /*0:down, 1:up*/ + u8 port; + u8 duplex; + u8 autoneg; + u32 linkspeed; + u64 supported; + u64 advertising; + u64 supported_fec; /* reserved, not support currently */ + u64 advertised_fec; /* reserved, not support currently */ + u64 supported_speed[2]; + u64 advertising_speed[2]; +}; + +struct xsc_lldp_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 os_handle_lldp; + u8 sub_type; +}; + +struct xsc_lldp_status_mbox_out { + struct xsc_outbox_hdr hdr; + union { + __be32 os_handle_lldp; + __be32 dcbx_status; + } status; +}; + +struct xsc_vport_rate_limit_mobox_in { + struct xsc_inbox_hdr hdr; + u8 other_vport; + __be16 vport_number; + __be16 rsvd0; + __be32 rate; +}; + +struct xsc_vport_rate_limit_mobox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_event_query_type_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_type_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_resp ctx; +}; + +struct xsc_event_query_linkstatus_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_linkstatus_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_linkstatus_resp ctx; +}; + +struct xsc_event_query_linkinfo_mbox_in { + struct xsc_inbox_hdr hdr; +}; + +struct xsc_event_query_linkinfo_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_linkinfo ctx; +}; + +struct xsc_event_modify_linkinfo_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_event_linkinfo ctx; +}; + +struct xsc_event_modify_linkinfo_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_set_port_admin_status_mbox_in { + struct xsc_inbox_hdr hdr; + u16 admin_status; + +}; + +struct xsc_event_set_port_admin_status_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_set_led_status_mbox_in { + struct xsc_inbox_hdr hdr; + u8 port_id; +}; + +struct xsc_event_set_led_status_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_modify_fecparam_mbox_in { + struct xsc_inbox_hdr hdr; + u32 fec; +}; + +struct xsc_event_modify_fecparam_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_query_fecparam_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_fecparam_mbox_out { + struct xsc_outbox_hdr hdr; + u32 active_fec; + u32 fec_cfg; + u32 status; +}; + +#define PFC_ON_PG_PRFL_IDX 0 +#define PFC_OFF_PG_PRFL_IDX 1 +#define PFC_ON_QMU_VALUE 0 +#define PFC_OFF_QMU_VALUE 1 + +#define NIF_PFC_EN_ON 1 +#define NIF_PFC_EN_OFF 0 + +#define PFC_CFG_CHECK_TIMEOUT_US 8000000 +#define PFC_CFG_CHECK_SLEEP_TIME_US 200 +#define PFC_CFG_CHECK_MAX_RETRY_TIMES \ + (PFC_CFG_CHECK_TIMEOUT_US / PFC_CFG_CHECK_SLEEP_TIME_US) +#define PFC_CFG_CHECK_VALID_CNT 3 + +enum { + PFC_OP_ENABLE = 0, + PFC_OP_DISABLE, + PFC_OP_MODIFY, + PFC_OP_TYPE_MAX, +}; + +enum { + DROP_TH_CLEAR = 0, + DROP_TH_RECOVER, + DROP_TH_RECOVER_LOSSY, + DROP_TH_RECOVER_LOSSLESS, +}; + +struct xsc_pfc_cfg { + u8 req_prio; + u8 req_pfc_en; + u8 curr_prio; + u8 curr_pfc_en; 
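+	/* pfc_op takes one of the PFC_OP_* values defined above */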
+ u8 pfc_op; + u8 lossless_num; +}; + +#define LOSSLESS_NUM_INVAILD 9 +struct xsc_pfc_set { + u8 priority; + u8 pfc_on; + u8 type; + u8 src_prio; + u8 lossless_num; +}; + +#define PFC_PRIO_MAX 7 +struct xsc_pfc_get { + u8 pfc_on[PFC_PRIO_MAX + 1]; + u8 max_prio; +}; + +struct xsc_pfc_set_drop_th_mbox_in { + struct xsc_inbox_hdr hdr; + u8 prio; + u8 cfg_type; +}; + +struct xsc_pfc_set_drop_th_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_pfc_get_cfg_status_mbox_in { + struct xsc_inbox_hdr hdr; + u8 prio; +}; + +struct xsc_pfc_get_cfg_status_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_rate_limit_set { + u32 rate_cir; + u32 limit_id; + u8 limit_level; + u8 rsvd[7]; +}; + +struct xsc_rate_limit_get { + u32 rate_cir[QOS_PRIO_MAX + 1]; + u32 max_limit_id; + u8 limit_level; + u8 rsvd[3]; +}; + +struct xsc_sp_set { + u8 sp[QOS_PRIO_MAX + 1]; +}; + +struct xsc_sp_get { + u8 sp[QOS_PRIO_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_weight_set { + u8 weight[QOS_PRIO_MAX + 1]; +}; + +struct xsc_weight_get { + u8 weight[QOS_PRIO_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_dpu_port_weight_set { + u8 target; + u8 weight[DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsv[5]; +}; + +struct xsc_dpu_port_weight_get { + u8 weight[DPU_PORT_WGHT_TARGET_NUM][DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsvd[4]; +}; + +struct xsc_dpu_prio_weight_set { + u8 target; + u8 weight[QOS_PRIO_MAX + 1]; + u8 rsv[7]; +}; + +struct xsc_dpu_prio_weight_get { + u8 weight[DPU_PRIO_WGHT_TARGET_NUM][QOS_PRIO_MAX + 1]; +}; + +struct xsc_cc_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_cc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct xsc_cc_ctrl_cmd { + u16 cmd; + u16 len; + u8 val[]; +}; + +struct xsc_cc_cmd_enable_rp { + u16 cmd; + u16 len; + u32 enable; + u32 section; +}; + +struct xsc_cc_cmd_enable_np { + u16 cmd; + u16 len; + u32 enable; + u32 section; +}; + +struct xsc_cc_cmd_init_alpha { + u16 cmd; + u16 len; + u32 alpha; + u32 section; +}; + +struct xsc_cc_cmd_g { + u16 cmd; + u16 len; + u32 g; + u32 section; +}; + +struct xsc_cc_cmd_ai { + u16 cmd; + u16 len; + u32 ai; + u32 section; +}; + +struct xsc_cc_cmd_hai { + u16 cmd; + u16 len; + u32 hai; + u32 section; +}; + +struct xsc_cc_cmd_th { + u16 cmd; + u16 len; + u32 threshold; + u32 section; +}; + +struct xsc_cc_cmd_bc { + u16 cmd; + u16 len; + u32 bytecount; + u32 section; +}; + +struct xsc_cc_cmd_cnp_opcode { + u16 cmd; + u16 len; + u32 opcode; +}; + +struct xsc_cc_cmd_cnp_bth_b { + u16 cmd; + u16 len; + u32 bth_b; +}; + +struct xsc_cc_cmd_cnp_bth_f { + u16 cmd; + u16 len; + u32 bth_f; +}; + +struct xsc_cc_cmd_cnp_ecn { + u16 cmd; + u16 len; + u32 ecn; +}; + +struct xsc_cc_cmd_data_ecn { + u16 cmd; + u16 len; + u32 ecn; +}; + +struct xsc_cc_cmd_cnp_tx_interval { + u16 cmd; + u16 len; + u32 interval; // us + u32 section; +}; + +struct xsc_cc_cmd_evt_rsttime { + u16 cmd; + u16 len; + u32 period; +}; + +struct xsc_cc_cmd_cnp_dscp { + u16 cmd; + u16 len; + u32 dscp; + u32 section; +}; + +struct xsc_cc_cmd_cnp_pcp { + u16 cmd; + u16 len; + u32 pcp; + u32 section; +}; + +struct xsc_cc_cmd_evt_period_alpha { + u16 cmd; + u16 len; + u32 period; +}; + +struct xsc_cc_cmd_clamp_tgt_rate { + u16 cmd; + u16 len; + u32 clamp_tgt_rate; + u32 section; +}; + +struct xsc_cc_cmd_max_hai_factor { + u16 cmd; + u16 len; + u32 max_hai_factor; + u32 section; +}; + +struct xsc_cc_cmd_scale { + u16 cmd; + u16 len; + u32 scale; + u32 section; +}; + +struct xsc_cc_cmd_get_cfg { + u16 cmd; + u16 len; + u32 enable_rp; + u32 
enable_np; + u32 init_alpha; + u32 g; + u32 ai; + u32 hai; + u32 threshold; + u32 bytecount; + u32 opcode; + u32 bth_b; + u32 bth_f; + u32 cnp_ecn; + u32 data_ecn; + u32 cnp_tx_interval; + u32 evt_period_rsttime; + u32 cnp_dscp; + u32 cnp_pcp; + u32 evt_period_alpha; + u32 clamp_tgt_rate; + u32 max_hai_factor; + u32 scale; + u32 section; +}; + +struct xsc_cc_cmd_get_stat { + u16 cmd; + u16 len; + u32 section; +}; + +struct xsc_cc_cmd_stat { + u32 cnp_handled; + u32 alpha_recovery; + u32 reset_timeout; + u32 reset_bytecount; +}; + +struct xsc_set_mtu_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 mtu; + __be16 rx_buf_sz_min; + u8 mac_port; + u8 rsvd; +}; + +struct xsc_hwc_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_hwc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct hwc_set_t { + u8 type; + u8 s_wqe_mode; + u8 r_wqe_mode; + u8 ack_timeout; + u8 group_mode; + u8 lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; + u8 retry_cnt_th; + u8 adapt_to_other; + u8 alloc_qp_id_mode; + u16 vf_num_per_pf; + u16 max_vf_num_per_pf; + u8 eth_pkt_offset; + u8 rdma_pkt_offset; + u8 tso_eth_pkt_offset; + u8 tx_dedi_pref; + u8 reg_mr_via_cmdq; + u8 per_dst_grp_thr; + u8 per_dst_grp_cnt; + u8 dcbx_status[XSC_MAX_MAC_NUM]; + u8 dcbx_port_cnt; +}; + +struct hwc_get_t { + u8 cur_s_wqe_mode; + u8 next_s_wqe_mode; + u8 cur_r_wqe_mode; + u8 next_r_wqe_mode; + u8 cur_ack_timeout; + u8 next_ack_timeout; + u8 cur_group_mode; + u8 next_group_mode; + u8 cur_lossless_prio[XSC_MAX_MAC_NUM]; + u8 next_lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; + u8 cur_retry_cnt_th; + u8 next_retry_cnt_th; + u8 cur_adapt_to_other; + u8 next_adapt_to_other; + u16 cur_vf_num_per_pf; + u16 next_vf_num_per_pf; + u16 cur_max_vf_num_per_pf; + u16 next_max_vf_num_per_pf; + u8 cur_eth_pkt_offset; + u8 next_eth_pkt_offset; + u8 cur_rdma_pkt_offset; + u8 next_rdma_pkt_offset; + u8 cur_tso_eth_pkt_offset; + u8 next_tso_eth_pkt_offset; + u8 cur_alloc_qp_id_mode; + u8 next_alloc_qp_id_mode; + u8 cur_tx_dedi_pref; + u8 next_tx_dedi_pref; + u8 cur_reg_mr_via_cmdq; + u8 next_reg_mr_via_cmdq; + u8 cur_per_dst_grp_thr; + u8 next_per_dst_grp_thr; + u8 cur_per_dst_grp_cnt; + u8 next_per_dst_grp_cnt; + u8 cur_dcbx_status[XSC_MAX_MAC_NUM]; + u8 next_dcbx_status[XSC_MAX_MAC_NUM]; + u8 dcbx_port_cnt; +}; + +struct xsc_set_mtu_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_query_eth_mac_mbox_in { + struct xsc_inbox_hdr hdr; + u8 index; +}; + +struct xsc_query_eth_mac_mbox_out { + struct xsc_outbox_hdr hdr; + u8 mac[6]; +}; + +struct xsc_query_pause_cnt_mbox_in { + struct xsc_inbox_hdr hdr; + u16 mac_port; + u16 cnt_type; + u32 reg_addr; +}; + +struct xsc_query_pause_cnt_mbox_out { + struct xsc_outbox_hdr hdr; + u64 val; +}; + +enum { + XSC_TBM_CAP_HASH_PPH = 0, + XSC_TBM_CAP_RSS, + XSC_TBM_CAP_PP_BYPASS, + XSC_TBM_CAP_PCT_DROP_CONFIG, +}; + +struct xsc_nic_attr { + __be16 caps; + __be16 caps_mask; + u8 mac_addr[6]; +}; + +struct xsc_rss_attr { + u8 rss_en; + u8 hfunc; + __be16 rqn_base; + __be16 rqn_num; + __be32 hash_tmpl; +}; + +struct xsc_cmd_enable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_attr nic; + struct xsc_rss_attr rss; +}; + +struct xsc_cmd_enable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[2]; +}; + +struct xsc_nic_dis_attr { + __be16 caps; +}; + +struct xsc_cmd_disable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_dis_attr nic; +}; + +struct xsc_cmd_disable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + 
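+/*
+ * Selectors for the modify-NIC-HCA RSS update path. Judging by the names,
+ * these correspond to the fields of struct xsc_rss_modify_attr below
+ * (hash key, hash template, hash function, receive queue set) and are
+ * carried in its caps_mask field.
+ */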
+enum { + XSC_RSS_HASH_KEY_UPDATE = 0, + XSC_RSS_HASH_TEMP_UPDATE, + XSC_RSS_HASH_FUNC_UPDATE, + XSC_RSS_RXQ_UPDATE, + XSC_RSS_RXQ_DROP, +}; + +struct xsc_rss_modify_attr { + u8 caps_mask; + u8 rss_en; + __be16 rqn_base; + __be16 rqn_num; + u8 hfunc; + __be32 hash_tmpl; + u8 hash_key[52]; +}; + +struct xsc_cmd_modify_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_attr nic; + struct xsc_rss_modify_attr rss; +}; + +struct xsc_cmd_modify_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_function_reset_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 glb_func_id; + u8 rsvd[6]; +}; + +struct xsc_function_reset_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +enum { + XSC_PCIE_LAT_FEAT_SET_EN = 0, + XSC_PCIE_LAT_FEAT_GET_EN, + XSC_PCIE_LAT_FEAT_SET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_HISTOGRAM, + XSC_PCIE_LAT_FEAT_GET_PEAK, + XSC_PCIE_LAT_FEAT_HW, + XSC_PCIE_LAT_FEAT_HW_INIT, +}; + +struct xsc_pcie_lat { + u8 pcie_lat_enable; + u32 pcie_lat_interval[XSC_PCIE_LAT_CFG_INTERVAL_MAX]; + u32 pcie_lat_histogram[XSC_PCIE_LAT_CFG_HISTOGRAM_MAX]; + u32 pcie_lat_peak; +}; + +struct xsc_pcie_lat_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + +struct xsc_pcie_lat_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + +struct xsc_reg_mcia { + u8 module; + u8 status; + + u8 i2c_device_address; + u8 page_number; + u8 device_address; + + u8 size; + + u8 dword_0[0x20]; + u8 dword_1[0x20]; + u8 dword_2[0x20]; + u8 dword_3[0x20]; + u8 dword_4[0x20]; + u8 dword_5[0x20]; + u8 dword_6[0x20]; + u8 dword_7[0x20]; + u8 dword_8[0x20]; + u8 dword_9[0x20]; + u8 dword_10[0x20]; + u8 dword_11[0x20]; +}; + +struct xsc_rtt_en_mbox_in { + struct xsc_inbox_hdr hdr; + u8 en;//0-disable, 1-enable + u8 rsvd[7]; +}; + +struct xsc_rtt_en_mbox_out { + struct xsc_outbox_hdr hdr; + u8 en;//0-disable, 1-enable + u8 rsvd[7]; +}; + +struct xsc_rtt_qpn_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn[32]; +}; + +struct xsc_rtt_qpn_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_get_rtt_qpn_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn[32]; +}; + +struct xsc_rtt_period_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 period; //ms +}; + +struct xsc_rtt_period_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 period; //ms + u8 rsvd[4]; +}; + +struct xsc_rtt_result_mbox_out { + struct xsc_outbox_hdr hdr; + __be64 result[32]; +}; + +struct rtt_stats { + u64 rtt_succ_snd_req_cnt; + u64 rtt_succ_snd_rsp_cnt; + u64 rtt_fail_snd_req_cnt; + u64 rtt_fail_snd_rsp_cnt; + u64 rtt_rcv_req_cnt; + u64 rtt_rcv_rsp_cnt; + u64 rtt_rcv_unk_cnt; + u64 rtt_grp_invalid_cnt; +}; + +struct xsc_rtt_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct rtt_stats stats; +}; + +enum { + XSC_AP_FEAT_SET_UDP_SPORT = 0, +}; + +struct xsc_ap_feat_set_udp_sport { + u32 qpn; + u32 udp_sport; +}; + +struct xsc_ap { + struct xsc_ap_feat_set_udp_sport set_udp_sport; +}; + +struct xsc_ap_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_ap_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_set_debug_info_mbox_in { + struct xsc_inbox_hdr hdr; + u8 set_field; + u8 log_level; + u8 cmd_verbose; + u8 rsvd[5]; +}; + +struct xsc_set_debug_info_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + 
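+/*
+ * The remaining commands follow the common mailbox framing used throughout
+ * this header: each request begins with struct xsc_inbox_hdr and each reply
+ * with struct xsc_outbox_hdr, padded with reserved bytes where the command
+ * carries no additional payload.
+ */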
+struct xsc_cmd_enable_relaxed_order_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_enable_relaxed_order_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_out { + struct xsc_outbox_hdr hdr; + __be64 guid; +}; + +struct xsc_cmd_activate_hw_config_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_activate_hw_config_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +#endif /* XSC_CMD_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h new file mode 100644 index 0000000000000000000000000000000000000000..122b06a87991d4c798fed3c6aab2712232744545 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h @@ -0,0 +1,1315 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_CORE_H +#define XSC_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_macro.h" +#include "common/xsc_cmd.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_auto_hw.h" +#include "common/driver.h" +#include "common/xsc_reg.h" +#include "common/xsc_eswitch.h" + +extern uint xsc_debug_mask; +extern unsigned int xsc_log_level; + +#ifndef mmiowb +#define mmiowb() +#endif + +#define XSC_PCI_VENDOR_ID 0x1f67 + +#define XSC_MC_PF_DEV_ID 0x1011 +#define XSC_MC_VF_DEV_ID 0x1012 + +#define XSC_MF_HOST_PF_DEV_ID 0x1051 +#define XSC_MF_HOST_VF_DEV_ID 0x1052 +#define XSC_MF_SOC_PF_DEV_ID 0x1053 + +#define XSC_MS_PF_DEV_ID 0x1111 +#define XSC_MS_VF_DEV_ID 0x1112 + +#define XSC_MV_HOST_PF_DEV_ID 0x1151 +#define XSC_MV_HOST_VF_DEV_ID 0x1152 +#define XSC_MV_SOC_PF_DEV_ID 0x1153 + +#define REG_ADDR(dev, offset) \ + (xsc_core_is_pf(dev) ? ((dev->bar) + ((offset) - 0xA0000000)) : ((dev->bar) + (offset))) + +#define REG_WIDTH_TO_STRIDE(width) ((width) / 8) +#define QPM_PAM_TBL_NUM 4 +#define QPM_PAM_TBL_NUM_MASK 3 +#define QPM_PAM_TBL_INDEX_SHIFT 2 +#define QPM_PAM_PAGE_SHIFT 12 + +#define XSC_SUB_DEV_ID_MC_50 0xC050 +#define XSC_SUB_DEV_ID_MC_100 0xC100 +#define XSC_SUB_DEV_ID_MC_200 0xC200 +#define XSC_SUB_DEV_ID_MC_400S 0xC400 +#define XSC_SUB_DEV_ID_MF_50 0xF050 +#define XSC_SUB_DEV_ID_MF_200 0xF200 +#define XSC_SUB_DEV_ID_MS_50 0xA050 +#define XSC_SUB_DEV_ID_MS_100Q 0xA104 +#define XSC_SUB_DEV_ID_MS_200 0xA200 +#define XSC_SUB_DEV_ID_MS_200S 0xA201 +#define XSC_SUB_DEV_ID_MS_400M 0xA202 +#define XSC_SUB_DEV_ID_MS_200_OCP 0xA203 +#define XSC_SUB_DEV_ID_MV_100 0xD100 +#define XSC_SUB_DEV_ID_MV_200 0xD200 + +#define XSC_MAX_PRODUCT_NAME_LEN 32 + +enum { + XSC_LOG_LEVEL_DBG = 0, + XSC_LOG_LEVEL_INFO = 1, + XSC_LOG_LEVEL_WARN = 2, + XSC_LOG_LEVEL_ERR = 3, +}; + +enum { + XSC_CHIP_MC, + XSC_CHIP_MF, + XSC_CHIP_MS, + XSC_CHIP_MV, + XSC_CHIP_UNKNOWN, +}; + +#ifndef dev_fmt +#define dev_fmt(fmt) fmt +#endif + +#define xsc_dev_log(condition, level, dev, fmt, ...) \ +do { \ + if (condition) \ + dev_printk(level, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +} while (0) + +#define xsc_core_dbg(__dev, format, ...) 
\ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_DBG, KERN_DEBUG, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_dbg_once(__dev, format, ...) \ + dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define xsc_core_dbg_mask(__dev, mask, format, ...) \ +do { \ + if ((mask) & xsc_debug_mask) \ + xsc_core_dbg(__dev, format, ##__VA_ARGS__); \ +} while (0) + +#define xsc_core_err(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_ERR, KERN_ERR, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_err_rl(__dev, format, ...) \ + dev_err_ratelimited(&(__dev)->pdev->dev, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define xsc_core_warn(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_WARN, KERN_WARNING, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_info(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_INFO, KERN_INFO, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_pr_debug(format, ...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug(format, ##__VA_ARGS__); \ +} while (0) + +#define assert(__dev, expr) \ +do { \ + if (!(expr)) { \ + dev_err(&(__dev)->pdev->dev, \ + "Assertion failed! %s, %s, %s, line %d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } \ +} while (0) + +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#define XSC_PCIE_NO_HOST 0x0 +#define XSC_PCIE_NO_SOC 0x1 +#define XSC_PCIE_NO_UNSET 0xFF + +enum xsc_driver_mode { + HOST_MODE, + SOC_MODE, +}; + +u8 xsc_get_driver_work_mode(void); + +enum xsc_dev_event { + XSC_DEV_EVENT_SYS_ERROR, + XSC_DEV_EVENT_PORT_UP, + XSC_DEV_EVENT_PORT_DOWN, + XSC_DEV_EVENT_PORT_INITIALIZED, + XSC_DEV_EVENT_LID_CHANGE, + XSC_DEV_EVENT_PKEY_CHANGE, + XSC_DEV_EVENT_GUID_CHANGE, + XSC_DEV_EVENT_CLIENT_REREG, +}; + +enum { + /* one minute for the sake of bringup. 
Generally, commands must always + * complete and we may need to increase this timeout value + */ + XSC_CMD_TIMEOUT_MSEC = 10 * 1000, + XSC_CMD_WQ_MAX_NAME = 32, +}; + +enum { + XSC_MAX_NAME_LEN = 32, +}; + +enum { + XSC_MAX_PORTS = 2, +}; + +enum { + MAX_MR_CACHE_ENTRIES = 16, +}; + +enum { + XSC_CMD_DATA, /* print command payload only */ + XSC_CMD_TIME, /* print command execution time */ +}; + +enum xsc_rdma_driver_id { + RDMA_DRIVER_XSC_UNKNOWN, + RDMA_DRIVER_XSC5, + RDMA_DRIVER_XSC4, +}; + +/* mutex for interface device list */ +extern struct mutex xsc_intf_mutex; + +#define GROUP_REFER_CNT_SIZE 1024 + +struct qp_group_refer { + spinlock_t lock; /* protect refer_cnt[] */ + u16 refer_cnt[GROUP_REFER_CNT_SIZE]; +}; + +struct xsc_priv_device { + char device_name[IB_DEVICE_NAME_MAX]; + dev_t devno; + struct cdev cdev; + struct list_head mem_list; + spinlock_t mem_lock; /* protect mem_list */ + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ +}; + +enum xsc_pci_status { + XSC_PCI_STATUS_DISABLED, + XSC_PCI_STATUS_ENABLED, +}; + +enum xsc_device_state { + XSC_DEVICE_STATE_UNINITIALIZED, + XSC_DEVICE_STATE_UP, + XSC_DEVICE_STATE_INTERNAL_ERROR, +}; + +enum xsc_interface_state { + XSC_INTERFACE_STATE_UP = BIT(0), + XSC_INTERFACE_STATE_TEARDOWN = BIT(1), +}; + +enum { + XSC_INTERFACE_PROTOCOL_IB = 0, + XSC_INTERFACE_PROTOCOL_ETH = 1, +}; + +enum { + XSC_INTERFACE_ADDED, + XSC_INTERFACE_ATTACHED, +}; + +#define CONFIG_XSC_SRIOV 1 + +enum xsc_coredev_type { + XSC_COREDEV_PF, + XSC_COREDEV_VF, + XSC_COREDEV_SF +}; + +enum { + XSC_PCI_DEV_IS_VF = 1 << 0, +}; + +enum port_state_policy { + XSC_POLICY_DOWN = 0, + XSC_POLICY_UP = 1, + XSC_POLICY_FOLLOW = 2, + XSC_POLICY_INVALID = 0xffffffff +}; + +enum { + XSC_CAP_PORT_TYPE_IB = 0x0, + XSC_CAP_PORT_TYPE_ETH = 0x1, +}; + +enum xsc_inline_modes { + XSC_INLINE_MODE_NONE, + XSC_INLINE_MODE_L2, + XSC_INLINE_MODE_IP, + XSC_INLINE_MODE_TCP_UDP, +}; + +struct xsc_core_device; + +struct xsc_vf_context { + int enabled; + u64 port_guid; + u64 node_guid; + enum port_state_policy policy; +}; + +struct xsc_sriov_vf { + struct xsc_core_device *dev; + struct kobject kobj; + int vf; +}; + +struct xsc_pci_sriov { + /* standard SRIOV capability fields, mostly for debug */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs of PF */ + u16 initial_vfs; /* initial VFs of PF */ + u16 num_vfs; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device; /* VF device ID */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ +}; + +struct xsc_core_sriov { + int num_vfs; + u16 max_vfs; + u16 vf_bdf_base; + u8 probe_vf; + struct xsc_vf_context *vfs_ctx; + struct kobject *config; + struct kobject *groups_config; + struct kobject node_guid_kobj; + struct xsc_sriov_vf *vfs; + struct xsc_pci_sriov pci_sriov; +}; + +struct xsc_vgroup { + struct xsc_core_device *dev; + u32 group_id; + u32 num_vports; + u32 tsar_ix; + u32 max_rate; + u32 min_rate; + u32 bw_share; + struct kobject kobj; + struct list_head list; +}; + +struct xsc_vport_info { + u8 mac[ETH_ALEN]; + u16 vlan; + u8 qos; + __be16 vlan_proto; + u64 node_guid; + int link_state; + u32 min_rate; + u32 max_rate; + u8 spoofchk; + u8 trusted; + u8 roce; + /* the admin approved vlan list */ + DECLARE_BITMAP(vlan_trunk_8021q_bitmap, VLAN_N_VID); + u32 group; +}; + +#define 
XSC_L2_ADDR_HASH_SIZE 8 + +enum xsc_eswitch_vport_event { + XSC_VPORT_UC_ADDR_CHANGE = BIT(0), + XSC_VPORT_MC_ADDR_CHANGE = BIT(1), + XSC_VPORT_PROMISC_CHANGE = BIT(2), + XSC_VPORT_VLAN_CHANGE = BIT(3), +}; + +struct xsc_vport { + struct xsc_core_device *dev; + u16 vport; + struct hlist_head uc_list[XSC_L2_ADDR_HASH_SIZE]; + struct hlist_head mc_list[XSC_L2_ADDR_HASH_SIZE]; + /* The requested vlan list from the vport side */ + DECLARE_BITMAP(req_vlan_bitmap, VLAN_N_VID); + /* Actual accepted vlans on the acl tables */ + DECLARE_BITMAP(acl_vlan_8021q_bitmap, VLAN_N_VID); + struct work_struct vport_change_handler; + + struct xsc_vport_info info; + + struct { + u8 enabled; + u32 esw_tsar_ix; + u32 bw_share; + u32 min_rate; + u32 max_rate; + } qos; + + u8 enabled; + enum xsc_eswitch_vport_event enabled_events; + u16 match_id; + u32 bond_metadata; + u16 vlan_id; + u8 vlan_qos; + __be16 vlan_proto; +}; + +struct xsc_eswitch { + struct xsc_core_device *dev; + u32 flags; + int total_vports; + int enabled_vports; + int num_vfs; + struct xsc_vport *vports; + struct workqueue_struct *work_queue; + + /* Synchronize between vport change events + * and async SRIOV admin state changes + */ + struct mutex state_lock; + + /* Protects eswitch mode changes occurring via sriov + * state change, devlink commands. + */ + struct mutex mode_lock; + int mode; + int nvports; + u16 manager_vport; + u16 first_host_vport; +}; + +struct xsc_core_health { + u8 sick; +}; + +struct xsc_priv { + char name[XSC_MAX_NAME_LEN]; + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; /* protect ctx_list */ + int numa_node; + struct xsc_core_sriov sriov; + struct xsc_eswitch *eswitch; + struct xsc_core_health health; +}; + +struct xsc_port_ctrl { + struct list_head node; + dev_t devid; + struct cdev cdev; + struct device *device; + struct list_head file_list; + spinlock_t file_lock; /* protect file_list */ +}; + +typedef int (*restore_func_t)(struct xsc_core_device *dev); + +struct xsc_bdf_file { + unsigned long key; + struct radix_tree_root obj_tree; /* protect obj_tree */ + spinlock_t obj_lock; + struct xsc_core_device *xdev; + restore_func_t restore_nic_fn; +}; + +struct xsc_port_ctrl_file { + struct list_head file_node; + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ + struct xsc_bdf_file *root_bdf; + struct xsc_port_ctrl *ctrl; +}; + +struct xsc_port_caps { + int gid_table_len; + int pkey_table_len; +}; + +struct xsc_caps { + u8 log_max_eq; + u8 log_max_cq; + u8 log_max_qp; + u8 log_max_mkey; + u8 log_max_pd; + u8 log_max_srq; + u8 log_max_msix; + u32 max_cqes; + u32 max_wqes; + u32 max_sq_desc_sz; + u32 max_rq_desc_sz; + u64 flags; + u16 stat_rate_support; + u32 log_max_msg; + u32 num_ports; + u32 max_ra_res_qp; + u32 max_ra_req_qp; + u32 max_srq_wqes; + u32 bf_reg_size; + u32 bf_regs_per_page; + struct xsc_port_caps port[XSC_MAX_PORTS]; + u8 ext_port_cap[XSC_MAX_PORTS]; + u32 reserved_lkey; + u8 local_ca_ack_delay; + u8 log_max_mcg; + u16 max_qp_mcg; + u32 min_page_sz; + u32 send_ds_num; + u32 send_wqe_shift; + u32 recv_ds_num; + u32 recv_wqe_shift; + u32 rx_pkt_len_max; + + u32 msix_enable:1; + u32 port_type:1; + u32 embedded_cpu:1; + u32 eswitch_manager:1; + u32 ecpf_vport_exists:1; + u32 vport_group_manager:1; + u32 sf:1; + u32 wqe_inline_mode:3; + u32 raweth_qp_id_base:15; + u32 rsvd0:7; + + u16 max_vfs; + u8 log_max_qp_depth; + u8 log_max_current_uc_list; + u8 log_max_current_mc_list; + u16 log_max_vlan_list; + u8 fdb_multi_path_to_table; + u8 
log_esw_max_sched_depth; + + u8 max_num_sf_partitions; + u8 log_max_esw_sf; + u16 sf_base_id; + + u32 max_tc:8; + u32 ets:1; + u32 dcbx:1; + u32 dscp:1; + u32 sbcam_reg:1; + u32 qos:1; + u32 port_buf:1; + u32 rsvd1:2; + u32 raw_tpe_qp_num:16; + u32 max_num_eqs:8; + u32 mac_port:8; + u32 raweth_rss_qp_id_base:16; + u16 msix_base; + u16 msix_num; + u8 log_max_mtt; + u8 log_max_tso; + u32 hca_core_clock; + u32 max_rwq_indirection_tables;/*rss_caps*/ + u32 max_rwq_indirection_table_size;/*rss_caps*/ + u16 raweth_qp_id_end; + u32 qp_rate_limit_min; + u32 qp_rate_limit_max; + u32 hw_feature_flag; + u16 pf0_vf_funcid_base; + u16 pf0_vf_funcid_top; + u16 pf1_vf_funcid_base; + u16 pf1_vf_funcid_top; + u16 pcie0_pf_funcid_base; + u16 pcie0_pf_funcid_top; + u16 pcie1_pf_funcid_base; + u16 pcie1_pf_funcid_top; + u8 nif_port_num; + u8 pcie_host; + u8 mac_bit; + u16 funcid_to_logic_port; + u8 lag_logic_port_ofst; +}; + +struct cache_ent { + /* protect block chain allocations + */ + spinlock_t lock; + struct list_head head; +}; + +struct cmd_msg_cache { + struct cache_ent large; + struct cache_ent med; + +}; + +#define CMD_FIRST_SIZE 8 +struct xsc_cmd_first { + __be32 data[CMD_FIRST_SIZE]; +}; + +struct xsc_cmd_mailbox { + void *buf; + dma_addr_t dma; + struct xsc_cmd_mailbox *next; +}; + +struct xsc_cmd_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_cmd_first first; + struct xsc_cmd_mailbox *next; +}; + +#define RSP_FIRST_SIZE 14 +struct xsc_rsp_first { + __be32 data[RSP_FIRST_SIZE]; //can be larger, xsc_rsp_layout +}; + +struct xsc_rsp_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_rsp_first first; + struct xsc_cmd_mailbox *next; +}; + +typedef void (*xsc_cmd_cbk_t)(int status, void *context); + +//hw will use this for some records(e.g. 
vf_id) +struct cmdq_rsv { + u16 vf_id; + u8 rsv[2]; +}; + +//related with hw, won't change +#define CMDQ_ENTRY_SIZE 64 + +struct xsc_cmd_layout { + struct cmdq_rsv rsv0; + __be32 inlen; + __be64 in_ptr; + __be32 in[CMD_FIRST_SIZE]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, arm will check this bit to make sure mem written +}; + +struct xsc_rsp_layout { + struct cmdq_rsv rsv0; + __be32 out[RSP_FIRST_SIZE]; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, driver will check this bit to make sure mem written +}; + +struct xsc_cmd_work_ent { + struct xsc_cmd_msg *in; + struct xsc_rsp_msg *out; + int idx; + struct completion done; + struct xsc_cmd *cmd; + struct work_struct work; + struct xsc_cmd_layout *lay; + struct xsc_rsp_layout *rsp_lay; + int ret; + u8 status; + u8 token; + struct timespec64 ts1; + struct timespec64 ts2; +}; + +struct xsc_cmd_debug { + struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry *dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; + void *in_msg; + void *out_msg; + u8 status; + u16 inlen; + u16 outlen; +}; + +struct xsc_cmd_stats { + u64 sum; + u64 n; + struct dentry *root; + struct dentry *avg; + struct dentry *count; + /* protect command average calculations */ + spinlock_t lock; +}; + +struct xsc_cmd_reg { + u32 req_pid_addr; + u32 req_cid_addr; + u32 rsp_pid_addr; + u32 rsp_cid_addr; + u32 req_buf_h_addr; + u32 req_buf_l_addr; + u32 rsp_buf_h_addr; + u32 rsp_buf_l_addr; + u32 msix_vec_addr; + u32 element_sz_addr; + u32 q_depth_addr; + u32 interrupt_stat_addr; +}; + +enum xsc_cmd_status { + XSC_CMD_STATUS_NORMAL, + XSC_CMD_STATUS_TIMEDOUT, +}; + +struct xsc_cmd { + struct xsc_cmd_reg reg; + void *cmd_buf; + void *cq_buf; + dma_addr_t dma; + dma_addr_t cq_dma; + u16 cmd_pid; + u16 cq_cid; + u8 owner_bit; + u8 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + int events; + u32 __iomem *vector; + + spinlock_t alloc_lock; /* protect command queue allocations */ + spinlock_t token_lock; /* protect token allocations */ + spinlock_t doorbell_lock; /* protect cmdq req pid doorbell */ + u8 token; + unsigned long bitmask; + char wq_name[XSC_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; + struct task_struct *cq_task; + struct semaphore sem; + int mode; + struct xsc_cmd_work_ent *ent_arr[XSC_MAX_COMMANDS]; + struct dma_pool *pool; + struct xsc_cmd_debug dbg; + struct cmd_msg_cache cache; + int checksum_disabled; + struct xsc_cmd_stats stats[XSC_CMD_OP_MAX]; + unsigned int irqn; + u8 ownerbit_learned; + u8 cmd_status; +}; + +struct xsc_lock { + spinlock_t lock; /* xsc spin lock */ +}; + +struct xsc_reg_addr { + u64 tx_db; + u64 rx_db; + u64 complete_db; + u64 complete_reg; + u64 event_db; + u64 cpm_get_lock; + u64 cpm_put_lock; + u64 cpm_lock_avail; + u64 cpm_data_mem; + u64 cpm_cmd; + u64 cpm_addr; + u64 cpm_busy; +}; + +struct xsc_board_info { + u32 board_id; + char board_sn[XSC_BOARD_SN_LEN]; + __be64 guid; + u8 guid_valid; + u8 hw_config_activated; +}; + +/* our core device */ +struct xsc_core_device { + struct pci_dev *pdev; + struct device *device; + struct xsc_priv priv; + struct xsc_dev_resource *dev_res; + void *xsc_ib_dev; + void *netdev; + void *eth_priv; + void *ovs_priv; + void __iomem *bar; + int bar_num; + + u8 mac_port; /* mac port */ + u8 pcie_no; /* pcie number */ + u8 pf_id; + u16 vf_id; + u16 glb_func_id; /* function id */ + + u16 gsi_qpn; /* logic qpn for gsi*/ + u16 msix_vec_base; + + 
struct mutex pci_status_mutex; /* protect pci_status */ + enum xsc_pci_status pci_status; + struct mutex intf_state_mutex; /* protect intf_state */ + unsigned long intf_state; + enum xsc_coredev_type coredev_type; + struct xsc_caps caps; + atomic_t num_qps; + struct xsc_cmd cmd; + struct xsc_lock reg_access_lock; + + void *counters_priv; + struct xsc_priv_device priv_device; + struct xsc_board_info *board_info; + void (*event)(struct xsc_core_device *dev, + enum xsc_dev_event event, unsigned long param); + + void (*event_handler)(void *adapter); + + struct xsc_reg_addr regs; + u32 chip_ver_h; + u32 chip_ver_m; + u32 chip_ver_l; + u32 hotfix_num; + u32 feature_flag; + u16 cmdq_ver; + u8 fw_version_major; + u8 fw_version_minor; + u16 fw_version_patch; + u32 fw_version_tweak; + u8 fw_version_extra_flag; + cpumask_var_t xps_cpumask; + + u8 reg_mr_via_cmdq; + u8 user_mode; + + struct xsc_port_ctrl port_ctrl; + + void *rtt_priv; + void *ap_priv; + void *pcie_lat; + + u8 bond_id; + struct list_head slave_node; +}; + +struct xsc_feature_flag { + u8 fpga_type:2; + u8 hps_ddr:2; + u8 onchip_ft:1; + u8 rdma_icrc:1; + u8 ma_xbar:1; + u8 anlt_fec:1; + u8 pp_tbl_dma:1; + u8 pct_exp:1; +}; + +struct xsc_interface { + struct list_head list; + int protocol; + + void *(*add)(struct xsc_core_device *dev); + void (*remove)(struct xsc_core_device *dev, void *context); + int (*attach)(struct xsc_core_device *dev, void *context); + void (*detach)(struct xsc_core_device *dev, void *context); + void (*event)(struct xsc_core_device *dev, void *context, + enum xsc_dev_event event, unsigned long param); + void *(*get_dev)(void *context); +}; + +struct xsc_device_context { + struct list_head list; + struct xsc_interface *intf; + void *context; + unsigned long state; +}; + +struct xsc_mem_entry { + struct list_head list; + char task_name[TASK_COMM_LEN]; + struct xsc_ioctl_mem_info mem_info; +}; + +struct xsc_device_product_info { + u16 vendor; + u16 device; + u16 subdevice; + char product_name[XSC_MAX_PRODUCT_NAME_LEN]; +}; + +#define XSC_DEVICE_PRODUCT_INFO(vend, dev, subdev, name) \ + .vendor = (vend), .device = (dev), \ + .subdevice = (subdev), .product_name = (name) + +static inline bool xsc_fw_is_available(struct xsc_core_device *dev) +{ + return dev->cmd.cmd_status == XSC_CMD_STATUS_NORMAL; +} + +int xsc_debugfs_init(struct xsc_core_device *dev); +void xsc_debugfs_fini(struct xsc_core_device *dev); +void xsc_register_debugfs(void); +void xsc_unregister_debugfs(void); + +bool xsc_device_registered(struct xsc_core_device *dev); +int xsc_register_device(struct xsc_core_device *dev); +void xsc_unregister_device(struct xsc_core_device *dev); +void xsc_attach_device(struct xsc_core_device *dev); +void xsc_detach_device(struct xsc_core_device *dev); +int xsc_register_interface(struct xsc_interface *intf); +void xsc_unregister_interface(struct xsc_interface *intf); +void xsc_reload_interface(struct xsc_core_device *dev, int protocol); +void xsc_reload_interfaces(struct xsc_core_device *dev, + int protocol1, int protocol2, + bool valid1, bool valid2); + +void xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol); +void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol); +void xsc_dev_list_lock(void); +void xsc_dev_list_unlock(void); +int xsc_dev_list_trylock(void); + +int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, + int out_size, int func_id); +int xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, + void *out, int out_size); +int 
xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out); +int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out); +int xsc_reg_mr(struct xsc_core_device *dev, void *in, void *out); +int xsc_dereg_mr(struct xsc_core_device *dev, void *in, void *out); +int xsc_eth_reset(struct xsc_core_device *dev); +int xsc_tbm_init(struct xsc_core_device *dev); +int xsc_qos_init(struct xsc_core_device *xdev); + +bool xsc_chk_chip_ver(struct xsc_core_device *dev); + +int xsc_alloc_iae_idx(struct xsc_core_device *dev, int *iae_idx); +void xsc_release_iae_idx(struct xsc_core_device *dev, int *iae_idx); +int xsc_get_iae_idx(struct xsc_core_device *dev); + +int xsc_create_res(struct xsc_core_device *dev); +void xsc_destroy_res(struct xsc_core_device *dev); + +int xsc_counters_init(struct ib_device *ib_dev, + struct xsc_core_device *dev); +void xsc_counters_fini(struct ib_device *ib_dev, + struct xsc_core_device *dev); + +int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev); +void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev); + +int xsc_priv_alloc_chrdev_region(void); +void xsc_priv_unregister_chrdev_region(void); + +int xsc_eth_sysfs_create(struct net_device *netdev, struct xsc_core_device *dev); +void xsc_eth_sysfs_remove(struct net_device *netdev, struct xsc_core_device *dev); +int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); +void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev); + +void xsc_ib_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev); + +int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, + struct xsc_caps *caps); +int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix); +int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num); +int xsc_cmd_modify_hca(struct xsc_core_device *dev); +int xsc_query_guid(struct xsc_core_device *dev); +void xsc_free_board_info(void); + +int xsc_irq_eq_create(struct xsc_core_device *dev); +int xsc_irq_eq_destroy(struct xsc_core_device *dev); + +int xsc_sriov_init(struct xsc_core_device *dev); +void xsc_sriov_cleanup(struct xsc_core_device *dev); +int xsc_sriov_attach(struct xsc_core_device *dev); +void xsc_sriov_detach(struct xsc_core_device *dev); +int xsc_core_sriov_configure(struct pci_dev *dev, int num_vfs); +int xsc_sriov_sysfs_init(struct xsc_core_device *dev); +void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev); +int xsc_create_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); +void xsc_destroy_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); +int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, + u32 group_id, struct kobject *group_kobj); +void xsc_destroy_vf_group_sysfs(struct xsc_core_device *dev, + struct kobject *group_kobj); +u32 xsc_eth_pcie_read32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, + u32 eth_ip_inter_addr); +void xsc_eth_pcie_write32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, + u32 eth_ip_inter_addr, u32 val); +struct cpumask *xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector); +void mask_cpu_by_node(int node, struct cpumask *dstp); +int xsc_get_link_speed(struct xsc_core_device *dev); +int xsc_chip_type(struct xsc_core_device *dev); +int xsc_eth_restore_nic_hca(struct xsc_core_device *dev); + +#define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) + +static inline bool xsc_sriov_is_enabled(struct xsc_core_device *dev) +{ + return 
pci_num_vf(dev->pdev) ? true : false; +} + +static inline u16 xsc_core_max_vfs(const struct xsc_core_device *dev) +{ + return dev->priv.sriov.max_vfs; +} + +static inline int xsc_core_vfs_num(const struct xsc_core_device *dev) +{ + return dev->priv.sriov.num_vfs; +} + +static inline bool xsc_core_is_pf(const struct xsc_core_device *dev) +{ + return dev->coredev_type == XSC_COREDEV_PF; +} + +static inline bool xsc_core_is_sf(const struct xsc_core_device *dev) +{ + return dev->coredev_type == XSC_COREDEV_SF; +} + +static inline bool xsc_core_is_ecpf(struct xsc_core_device *dev) +{ + return dev->caps.embedded_cpu; +} + +#define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) +#define ESW_ALLOWED(esw) ((esw) && XSC_ESWITCH_MANAGER((esw)->dev)) + +static inline bool +xsc_core_is_ecpf_esw_manager(const struct xsc_core_device *dev) +{ + return dev->caps.embedded_cpu && dev->caps.eswitch_manager; +} + +static inline bool +xsc_ecpf_vport_exists(const struct xsc_core_device *dev) +{ + return xsc_core_is_pf(dev) && dev->caps.ecpf_vport_exists; +} + +static inline bool +xsc_core_is_vport_manager(const struct xsc_core_device *dev) +{ + return dev->caps.vport_group_manager && xsc_core_is_pf(dev); +} + +static inline bool xsc_rl_is_supported(struct xsc_core_device *dev) +{ + return false; +} + +/* define in andes */ +#define HIF_CPM_IDA_DATA_MEM_STRIDE 0x40 + +#define CPM_IAE_CMD_READ 0 +#define CPM_IAE_CMD_WRITE 1 + +#define CPM_IAE_ADDR_REG_STRIDE HIF_CPM_IDA_ADDR_REG_STRIDE + +#define CPM_IAE_DATA_MEM_STRIDE HIF_CPM_IDA_DATA_MEM_STRIDE + +#define CPM_IAE_DATA_MEM_MAX_LEN 16 + +struct iae_cmd { + union { + struct { + u32 iae_idx:HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH; + u32 iae_len:HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH; + u32 iae_r0w1:HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH; + }; + unsigned int raw_data; + }; +}; + +static inline void acquire_ia_lock(struct xsc_core_device *xdev, int *iae_idx) +{ + int lock_val; + int lock_vld; + + lock_val = readl(REG_ADDR(xdev, xdev->regs.cpm_get_lock)); + lock_vld = lock_val >> HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT; + if (lock_vld) + *iae_idx = lock_val & HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK; + else + *iae_idx = -1; +} + +#define ACQUIRE_IA_LOCK(bp, iae_idx) \ + do { \ + int idx; \ + acquire_ia_lock(bp, &idx); \ + iae_idx = idx; \ + } while (0) + +static inline void release_ia_lock(struct xsc_core_device *xdev, int lock_idx) +{ + writel(lock_idx, REG_ADDR(xdev, xdev->regs.cpm_put_lock)); +} + +#define RELEASE_IA_LOCK(bp, iae_idx) release_ia_lock(bp, iae_idx) + +static inline void ia_write_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx) +{ + int i; + int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE; + + for (i = 0; i < n; i++) { + writel(*(ptr++), REG_ADDR(xdev, offset)); + offset += sizeof(*ptr); + } +} + +static inline void ia_read_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx) +{ + int i; + int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE; + u32 *pptr = ptr; + + for (i = 0; i < n; i++) { + *(pptr) = readl(REG_ADDR(xdev, offset)); + offset += sizeof(*ptr); + pptr = pptr + 1; + } +} + +static inline void ia_write_reg_addr(struct xsc_core_device *xdev, u32 reg, int iae_idx) +{ + int offset = xdev->regs.cpm_addr + (iae_idx) * CPM_IAE_ADDR_REG_STRIDE; + + writel(reg, REG_ADDR(xdev, offset)); +} + +static inline void initiate_ia_cmd(struct xsc_core_device *xdev, int iae_idx, int length, int r0w1) +{ + struct iae_cmd cmd; + int addr = xdev->regs.cpm_cmd; + + cmd.iae_r0w1 = r0w1; + cmd.iae_len = 
length - 1; + cmd.iae_idx = iae_idx; + writel(cmd.raw_data, REG_ADDR(xdev, addr)); +} + +static inline void initiate_ia_write_cmd(struct xsc_core_device *xdev, int iae_idx, int length) +{ + initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_WRITE); +} + +static inline void initiate_ia_read_cmd(struct xsc_core_device *xdev, int iae_idx, int length) +{ + initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_READ); +} + +static inline void wait_for_complete(struct xsc_core_device *xdev, int iae_idx) +{ + while ((readl(REG_ADDR(xdev, xdev->regs.cpm_busy)) & (1 << iae_idx))) + ; +} + +static inline void ia_write_reg_mr(struct xsc_core_device *xdev, u32 reg, + u32 *ptr, int n, int idx) +{ + ia_write_data(xdev, ptr, n, idx); + ia_write_reg_addr(xdev, reg, idx); + initiate_ia_write_cmd(xdev, idx, n); +} + +#define IA_WRITE_REG_MR(bp, reg, ptr, n, idx) ia_write_reg_mr(bp, reg, ptr, n, idx) + +static inline void ia_write(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) +{ + int iae_idx; + + acquire_ia_lock(xdev, &iae_idx); + ia_write_data(xdev, ptr, n, iae_idx); + ia_write_reg_addr(xdev, reg, iae_idx); + initiate_ia_write_cmd(xdev, iae_idx, n); + release_ia_lock(xdev, iae_idx); +} + +#define IA_WRITE(bp, reg, ptr, n) ia_write(bp, reg, ptr, n) + +static inline void ia_read(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) +{ + int iae_idx; + + acquire_ia_lock(xdev, &iae_idx); + ia_write_reg_addr(xdev, reg, iae_idx); + initiate_ia_read_cmd(xdev, iae_idx, n); + wait_for_complete(xdev, iae_idx); + ia_read_data(xdev, ptr, n, iae_idx); + release_ia_lock(xdev, iae_idx); +} + +#define IA_READ(bp, reg, ptr, n) ia_read(bp, reg, ptr, n) + +static inline u32 reg_read32(struct xsc_core_device *dev, u32 offset) +{ + u32 val = 0; + + if (xsc_core_is_pf(dev)) + val = readl(REG_ADDR(dev, offset)); + else + IA_READ(dev, offset, &val, 1); + + return val; +} + +static inline void reg_write32(struct xsc_core_device *dev, u32 offset, u32 val) +{ + u32 *ptr = &val; + + if (xsc_core_is_pf(dev)) + writel(val, REG_ADDR(dev, offset)); + else + IA_WRITE(dev, offset, ptr, 1); +} + +#define REG_RD32(dev, offset) reg_read32(dev, offset) +#define REG_WR32(dev, offset, val) reg_write32(dev, offset, val) + +static inline unsigned long bdf_to_key(unsigned int domain, unsigned int bus, unsigned int devfn) +{ + return ((unsigned long)domain << 32) | ((bus & 0xff) << 16) | (devfn & 0xff); +} + +static inline void +funcid_to_pf_vf_index(struct xsc_caps *caps, u16 func_id, u8 *pf_no, u8 *pf_id, u16 *vf_id) +{ + if (func_id >= caps->pf0_vf_funcid_base && func_id <= caps->pf0_vf_funcid_top) { + *pf_id = 0; + *pf_no = caps->pcie_host; + *vf_id = func_id - caps->pf0_vf_funcid_base; + } else if (func_id >= caps->pf1_vf_funcid_base && func_id <= caps->pf1_vf_funcid_top) { + *pf_id = 1; + *pf_no = caps->pcie_host; + *vf_id = func_id - caps->pf1_vf_funcid_base; + } else if (func_id >= caps->pcie0_pf_funcid_base && func_id <= caps->pcie0_pf_funcid_top) { + *pf_id = func_id - caps->pcie0_pf_funcid_base; + *pf_no = 0; + *vf_id = -1; + } else { + *pf_id = func_id - caps->pcie1_pf_funcid_base; + *pf_no = 1; + *vf_id = -1; + } +} + +static inline bool +is_support_rdma(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_RDMA_SUPPORT) + return true; + + return false; +} + +static inline bool is_support_rdma_cm(struct xsc_core_device *dev) +{ + return dev->caps.hw_feature_flag & XSC_HW_RDMA_CM_SUPPORT; +} + +static inline bool +is_support_pfc_prio_statistic(struct xsc_core_device *dev) +{ + if 
(!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_PFC_PRIO_STATISTIC_SUPPORT) + return true; + + return false; +} + +static inline bool +is_support_pfc_stall_stats(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_PFC_STALL_STATS_SUPPORT) + return true; + + return false; +} + +static inline bool is_support_hw_pf_stats(struct xsc_core_device *dev) +{ + return xsc_core_is_pf(dev); +} + +static inline void xsc_set_user_mode(struct xsc_core_device *dev, u8 mode) +{ + dev->user_mode = mode; +} + +static inline u8 xsc_get_user_mode(struct xsc_core_device *dev) +{ + return dev->user_mode; +} + +void xsc_pci_exit(void); + +void xsc_remove_eth_driver(void); + +void xsc_remove_rdma_driver(void); + +void xsc_set_exit_flag(void); +bool xsc_get_exit_flag(void); +bool exist_incomplete_qp_flush(void); +#endif /* XSC_CORE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h new file mode 100644 index 0000000000000000000000000000000000000000..9da4396d66eedabd4b2461c3460cdbc781d9aeb9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ESWITCH_H +#define XSC_ESWITCH_H + +enum { + XSC_ESWITCH_NONE, + XSC_ESWITCH_LEGACY, + XSC_ESWITCH_OFFLOADS +}; + +enum { + REP_ETH, + REP_IB, + NUM_REP_TYPES, +}; + +enum { + REP_UNREGISTERED, + REP_REGISTERED, + REP_LOADED, +}; + +enum xsc_switchdev_event { + XSC_SWITCHDEV_EVENT_PAIR, + XSC_SWITCHDEV_EVENT_UNPAIR, +}; + +enum { + SET_VLAN_STRIP = BIT(0), + SET_VLAN_INSERT = BIT(1), + CLR_VLAN_STRIP = BIT(2), + CLR_VLAN_INSERT = BIT(3), +}; + +#endif /* XSC_ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h new file mode 100644 index 0000000000000000000000000000000000000000..97cbded4a2f20bb98a7971de5e6420979e9b1af4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_FS_H +#define XSC_FS_H + +#include +#include +#include + +enum xsc_list_type { + XSC_NVPRT_LIST_TYPE_UC = 0x0, + XSC_NVPRT_LIST_TYPE_MC = 0x1, + XSC_NVPRT_LIST_TYPE_VLAN = 0x2, + XSC_NVPRT_LIST_TYPE_VLAN_OFFLOAD = 0x03, +}; + +enum xsc_vlan_rule_type { + XSC_VLAN_RULE_TYPE_UNTAGGED, + XSC_VLAN_RULE_TYPE_ANY_CTAG_VID, + XSC_VLAN_RULE_TYPE_ANY_STAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, +}; + +struct xsc_vlan_table { + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); + DECLARE_BITMAP(active_outer_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_outer_svlans, VLAN_N_VID); + u8 cvlan_filter_disabled; +}; + +struct xsc_l2_table { + struct hlist_head netdev_uc[XSC_L2_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[XSC_L2_ADDR_HASH_SIZE]; + u8 broadcast_enabled; + u8 allmulti_enabled; + u8 promisc_enabled; +}; + +struct xsc_flow_steering { + struct xsc_vlan_table vlan; + struct xsc_l2_table l2; +}; + +int xsc_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +int xsc_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +void xsc_set_rx_mode_work(struct work_struct *work); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h new file mode 100644 index 0000000000000000000000000000000000000000..d1fa8b207607a2852951647c61eb04d0fcce7b81 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_HSI_H +#define XSC_HSI_H + +#include + +#include +#include +#include "common/xsc_macro.h" + +#ifdef MSIX_SUPPORT +#else +#define NEED_CREATE_RX_THREAD +#endif + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (_AC(1, UL) << PAGE_SHIFT_4K) +#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) + +#ifndef EQ_NUM_MAX +#define EQ_NUM_MAX 1024 +#endif +#ifndef EQ_SIZE_MAX +#define EQ_SIZE_MAX 1024 +#endif + +#define XSC_RSS_INDIR_TBL_S 256 +#define XSC_MAX_TSO_PAYLOAD 0x10000/*64kb*/ + +#define MAX_BOARD_NUM 32 + +#define DMA_LO_LE(x) __cpu_to_le32(lower_32_bits(x)) +#define DMA_HI_LE(x) __cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) + +#define WR_LE_16(x, val) (x = __cpu_to_le16(val)) +#define WR_LE_32(x, val) (x = __cpu_to_le32(val)) +#define WR_LE_64(x, val) (x = __cpu_to_le64(val)) +#define WR_LE_R64(x, val) (DMA_REGPAIR_LE(x, val)) +#define WR_BE_32(x, val) (x = __cpu_to_be32(val)) + +#define RD_LE_16(x) __le16_to_cpu(x) +#define RD_LE_32(x) __le32_to_cpu(x) +#define RD_BE_32(x) __be32_to_cpu(x) + +#define WR_REG(addr, val) mmio_write64_le(addr, val) +#define RD_REG(addr) mmio_read64_le(addr) + +#define XSC_MPT_MAP_EN 0 + +/* FIXME: 32-byte alignment for SW descriptors for Amber for now */ +#define XSC_DESC_ALIGNMENT 32 + +/* each ds holds one fragment in skb */ +#define XSC_MAX_RX_FRAGS 4 +#define XSC_RX_FRAG_SZ_ORDER 0 +#define XSC_RX_FRAG_SZ (PAGE_SIZE << XSC_RX_FRAG_SZ_ORDER) +#define DEFAULT_FRAG_SIZE (2048) + +/* message opcode */ +enum { + XSC_MSG_OPCODE_SEND = 0, + XSC_MSG_OPCODE_RDMA_WRITE = 1, + XSC_MSG_OPCODE_RDMA_READ = 2, + XSC_MSG_OPCODE_MAD = 3, + XSC_MSG_OPCODE_RDMA_ACK = 4, + XSC_MSG_OPCODE_RDMA_ACK_READ = 5, + XSC_MSG_OPCODE_RDMA_CNP = 6, + XSC_MSG_OPCODE_RAW = 7, + XSC_MSG_OPCODE_VIRTIO_NET = 
8, + XSC_MSG_OPCODE_VIRTIO_BLK = 9, + XSC_MSG_OPCODE_RAW_TPE = 10, + XSC_MSG_OPCODE_INIT_QP_REQ = 11, + XSC_MSG_OPCODE_INIT_QP_RSP = 12, + XSC_MSG_OPCODE_INIT_PATH_REQ = 13, + XSC_MSG_OPCODE_INIT_PATH_RSP = 14, +}; + +/* TODO: sw cqe opcode*/ +enum { + XSC_OPCODE_RDMA_REQ_SEND = 0, + XSC_OPCODE_RDMA_REQ_SEND_IMMDT = 1, + XSC_OPCODE_RDMA_RSP_RECV = 2, + XSC_OPCODE_RDMA_RSP_RECV_IMMDT = 3, + XSC_OPCODE_RDMA_REQ_WRITE = 4, + XSC_OPCODE_RDMA_REQ_WRITE_IMMDT = 5, + XSC_OPCODE_RDMA_RSP_WRITE_IMMDT = 6, + XSC_OPCODE_RDMA_REQ_READ = 7, + XSC_OPCODE_RDMA_REQ_ERROR = 8, + XSC_OPCODE_RDMA_RSP_ERROR = 9, + XSC_OPCODE_RDMA_CQE_ERROR = 10, + XSC_OPCODE_RDMA_MAD_REQ_SEND, + XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +enum { + XSC_REQ = 0, + XSC_RSP = 1, +}; + +enum { + XSC_WITHOUT_IMMDT = 0, + XSC_WITH_IMMDT = 1, +}; + +enum { + XSC_ERR_CODE_NAK_RETRY = 0x40, + XSC_ERR_CODE_NAK_OPCODE = 0x41, + XSC_ERR_CODE_NAK_MR = 0x42, + XSC_ERR_CODE_NAK_OPERATION = 0x43, + XSC_ERR_CODE_NAK_RNR = 0x44, + XSC_ERR_CODE_LOCAL_MR = 0x45, + XSC_ERR_CODE_LOCAL_LEN = 0x46, + XSC_ERR_CODE_LOCAL_OPCODE = 0x47, + XSC_ERR_CODE_CQ_OVER_FLOW = 0x48, + XSC_ERR_CODE_STRG_ACC_GEN_CQE = 0x4c, + XSC_ERR_CODE_CQE_ACC = 0x4d, + XSC_ERR_CODE_FLUSH = 0x4e, + XSC_ERR_CODE_MALF_WQE_HOST = 0x50, + XSC_ERR_CODE_MALF_WQE_INFO = 0x51, + XSC_ERR_CODE_MR_NON_NAK = 0x52, + XSC_ERR_CODE_OPCODE_GEN_CQE = 0x61, + XSC_ERR_CODE_MANY_READ = 0x62, + XSC_ERR_CODE_LEN_GEN_CQE = 0x63, + XSC_ERR_CODE_MR = 0x65, + XSC_ERR_CODE_MR_GEN_CQE = 0x66, + XSC_ERR_CODE_OPERATION = 0x67, + XSC_ERR_CODE_MALF_WQE_INFO_GEN_NAK = 0x68, +}; + +/* QP type */ +enum { + XSC_QUEUE_TYPE_RDMA_RC = 0, + XSC_QUEUE_TYPE_RDMA_MAD = 1, + XSC_QUEUE_TYPE_RAW = 2, + XSC_QUEUE_TYPE_VIRTIO_NET = 3, + XSC_QUEUE_TYPE_VIRTIO_BLK = 4, + XSC_QUEUE_TYPE_RAW_TPE = 5, + XSC_QUEUE_TYPE_RAW_TSO = 6, + XSC_QUEUE_TYPE_RAW_TX = 7, + XSC_QUEUE_TYPE_INVALID = 0xFF, +}; + +/* CQ type */ +enum { + XSC_CQ_TYPE_NORMAL = 0, + XSC_CQ_TYPE_VIRTIO = 1, +}; + +enum xsc_qp_state { + XSC_QP_STATE_RST = 0, + XSC_QP_STATE_INIT = 1, + XSC_QP_STATE_RTR = 2, + XSC_QP_STATE_RTS = 3, + XSC_QP_STATE_SQER = 4, + XSC_QP_STATE_SQD = 5, + XSC_QP_STATE_ERR = 6, + XSC_QP_STATE_SQ_DRAINING = 7, + XSC_QP_STATE_SUSPENDED = 9, + XSC_QP_NUM_STATE +}; + +enum { + XSC_SEND_SEG_MAX = 32, + XSC_BASE_WQE_SHIFT = 4, + XSC_SEND_SEG_NUM = 4, + XSC_SEND_WQE_SHIFT = 6, + XSC_CTRL_SEG_NUM = 1, + XSC_RADDR_SEG_NUM = 1, +}; + +enum { + XSC_RECV_SEG_MAX = 4, + XSC_RECV_SEG_NUM = 1, + XSC_RECV_WQE_SHIFT = 4, +}; + +enum { + XSC_INLINE_SIZE_MAX = 15, +}; + +/* Descriptors that are allocated by SW and accessed by HW, 32-byte aligned + * this is to keep descriptor structures packed + */ +struct regpair { + __le32 lo; + __le32 hi; +}; + +struct xsc_cqe { + union { + u8 msg_opcode; + struct { + u8 error_code:7; + u8 is_error:1; + }; + }; + __le32 qp_id:15; + u8 rsv1:1; + u8 se:1; + u8 has_pph:1; + u8 type:1; + u8 with_immdt:1; + u8 csum_err:4; + __le32 imm_data; + __le32 msg_len; + __le32 vni; + __le64 ts:48; + __le16 wqe_id; + __le16 rsv[3]; + __le16 rsv2:15; + u8 owner:1; +}; + +/* CQ doorbell */ +union xsc_cq_doorbell { + struct{ + u32 cq_next_cid:16; + u32 cq_id:15; + u32 arm:1; + }; + u32 val; +}; + +/* EQE TBD */ +struct xsc_eqe { + u8 type; + u8 sub_type; + __le16 queue_id:15; + u8 rsv1:1; + u8 err_code; + u8 rsvd[2]; + u8 rsv2:7; + u8 owner:1; +}; + +/* EQ doorbell */ +union xsc_eq_doorbell { + struct{ + u32 eq_next_cid : 11; + u32 eq_id : 11; + u32 arm : 1; + }; + u32 val; +}; + +/*for beryl tcam table .begin*/ +#define XSC_TBM_PCT_DW_SIZE_MAX 20 
+#define XSC_TCAM_REG_ADDR_STRIDE 4 + +enum xsc_tbm_tcam_type { + XSC_TBM_TCAM_PCT = 0, + XSC_TBM_TCAM_PRS_STAGE0, + XSC_TBM_TCAM_PRS_STAGE1, + XSC_TBM_TCAM_PRS_STAGE2, +}; + +enum xsc_tbm_tcam_oper { + XSC_TCAM_OP_X_WRITE = 0, + XSC_TCAM_OP_Y_WRITE, + XSC_TCAM_OP_ACTION_WRITE, + XSC_TCAM_OP_X_READ, + XSC_TCAM_OP_Y_READ, + XSC_TCAM_OP_ACTION_READ, + XSC_TCAM_OP_TCAM_FLUSH, + XSC_TCAM_OP_ACTION_FLUSH, + XSC_TCAM_OP_CPU_SEARCH, + XSC_TCAM_OP_LONG_X_WRT, + XSC_TCAM_OP_LONG_Y_WRT +}; + +enum xsc_tbm_prs_stage_encode { + XSC_PRS_STAGE0_HDR_TYPE_NONE = 0x00, + XSC_PRS_STAGE0_HDR_TYPE_ETH0 = 0x01, + XSC_PRS_STAGE1_HDR_TYPE_NONE = 0x10, + XSC_PRS_STAGE1_HDR_TYPE_RSV = 0x11, + XSC_PRS_STAGE1_HDR_TYPE_IPV4 = 0x12, + XSC_PRS_STAGE1_HDR_TYPE_IPV6 = 0x13, + XSC_PRS_STAGE2_HDR_TYPE_NONE = 0x20, + XSC_PRS_STAGE2_HDR_TYPE_TCP = 0x21, + XSC_PRS_STAGE2_HDR_TYPE_UDP = 0x22, + XSC_PRS_STAGE2_HDR_TYPE_GRE = 0x23, + XSC_PRS_STAGE2_HDR_TYPE_RSV = 0x24, + XSC_PRS_STAGE2_HDR_TYPE_IFA_TCP = 0x25, + XSC_PRS_STAGE2_HDR_TYPE_IFA_UDP = 0x26, + XSC_PRS_STAGE2_HDR_TYPE_IFA_GRE = 0x27, + XSC_PRS_STAGE6_HDR_TYPE_ICMP = 0x63, + XSC_PRS_STAGEX_HDR_TYPE_PAYLOAD = 0xa0, + XSC_PRS_STAGEX_HDR_TYPE_BTH = 0xa1, +}; + +enum xsc_tbm_prs_eth_hdr_type_encode { + ETH_HDR_TYPE_MAC0 = 0x0, + ETH_HDR_TYPE_MAC0_VLANA = 0x2, + ETH_HDR_TYPE_MAC0_VLANA_VLANB = 0x3, +}; + +enum xsc_tbm_pct_pkttype { + XSC_PCT_RDMA_NORMAL = 0x0, + XSC_PCT_RDMA_CNP, + XSC_PCT_RDMA_MAD, + XSC_PCT_RAW, + XSC_PCT_RAW_TPE, + XSC_PCT_VIRTIO_NET_TO_HOST, + XSC_PCT_SOC_WITH_PPH, +}; + +enum xsc_tbm_pct_inport { + XSC_PCT_PORT_NIF0 = 0x0, + XSC_PCT_PORT_NIF1, + XSC_PCT_PORT_PCIE0_PF0, + XSC_PCT_PORT_PCIE0_PF1, + XSC_PCT_PORT_PCIE1_PF0, +}; + +/*for beryl tcam table .end*/ + +/* Size of WQE */ +#define XSC_SEND_WQE_SIZE BIT(XSC_SEND_WQE_SHIFT) +#define XSC_RECV_WQE_SIZE BIT(XSC_RECV_WQE_SHIFT) + +union xsc_db_data { + struct { + __le32 sq_next_pid:16; + __le32 sqn:15; + __le32:1; + }; + struct { + __le32 rq_next_pid:13; + __le32 rqn:15; + __le32:4; + }; + struct { + __le32 cq_next_cid:16; + __le32 cqn:15; + __le32 solicited:1; + + }; + __le32 raw_data; +}; + +#define XSC_BROADCASTID_MAX 2 +#define XSC_TBM_BOMT_DESTINFO_SHIFT (XSC_BROADCASTID_MAX / 2) + +enum { + XSC_EQ_VEC_ASYNC = 0, + XSC_VEC_CMD = 1, + XSC_VEC_CMD_EVENT = 2, + XSC_DMA_READ_DONE_VEC = 3, + XSC_EQ_VEC_COMP_BASE, +}; + +struct rxe_bth { + u8 opcode; + u8 flags; + __be16 pkey; + __be32 qpn; + __be32 apsn; +}; + +struct rxe_deth { + __be32 qkey; + __be32 sqp; +}; + +#endif /* XSC_HSI_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..e2355cf91a02da774ab5c43b030936ca31b300f7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_IOCTL_H +#define XSC_IOCTL_H + +#include +#include + +/* Documentation/ioctl/ioctl-number.txt */ +#define XSC_IOCTL_MAGIC (0x1b) /* TBD */ +#define XSC_IOCTL_CMDQ \ + _IOWR(XSC_IOCTL_MAGIC, 1, struct xsc_ioctl_hdr) +#define XSC_IOCTL_DRV_GET \ + _IOR(XSC_IOCTL_MAGIC, 2, struct xsc_ioctl_hdr) +#define XSC_IOCTL_DRV_SET \ + _IOWR(XSC_IOCTL_MAGIC, 3, struct xsc_ioctl_hdr) +#define XSC_IOCTL_MEM \ + _IOWR(XSC_IOCTL_MAGIC, 4, struct xsc_ioctl_hdr) +#define XSC_IOCTL_CMDQ_RAW \ + _IOWR(XSC_IOCTL_MAGIC, 5, struct xsc_ioctl_hdr) +#define XSC_IOCTL_USER_MODE \ + _IOWR(XSC_IOCTL_MAGIC, 8, struct xsc_ioctl_hdr) + +#define XSC_IOCTL_CHECK_FILED 0x01234567 +enum { + XSC_IOCTL_OP_GET_LOCAL, + XSC_IOCTL_OP_GET_VF_INFO, + XSC_IOCTL_OP_GET_CONTEXT, + XSC_IOCTL_OP_GET_INFO_BY_BDF, + XSC_IOCTL_OP_GET_MAX +}; + +enum { + XSC_IOCTL_GET_PHY_INFO = 0x100, + XSC_IOCTL_GET_FORCE_PCP = 0x101, + XSC_IOCTL_GET_FORCE_DSCP = 0x102, + XSC_IOCTL_GET_CMA_PCP = 0x103, + XSC_IOCTL_GET_CMA_DSCP = 0x104, + XSC_IOCTL_GET_CONTEXT = 0x105, + XSC_IOCTL_GAT_MAX +}; + +enum { + XSC_IOCTL_SET_QP_STATUS = 0x200, + XSC_IOCTL_SET_FORCE_PCP = 0x201, + XSC_IOCTL_SET_FORCE_DSCP = 0x202, + XSC_IOCTL_SET_CMA_PCP = 0x203, + XSC_IOCTL_SET_CMA_DSCP = 0x204, + XSC_IOCTL_SET_MAX +}; + +enum { + XSC_IOCTL_MEM_ALLOC = 0x300, + XSC_IOCTL_MEM_FREE, + XSC_IOCTL_MEM_MAX +}; + +enum { + XSC_IOCTL_GET_VECTOR_MATRIX = 0x400, + XSC_IOCTL_SET_LOG_LEVEL = 0x401, + XSC_IOCTL_SET_CMD_VERBOSE = 0x402, + XSC_IOCTL_DRIVER_MAX +}; + +enum { + XSC_IOCTL_OPCODE_ENABLE_USER_MODE = 0x600, +}; + +enum xsc_flow_tbl_id { + XSC_FLOW_TBL_IPAT, //IN_PORT_ATTR + XSC_FLOW_TBL_IPVLANMT, //IN_PORT_VLAN_MEMBER + XSC_FLOW_TBL_IN_VLAN_M, //IN_VLAN_MAPPING + XSC_FLOW_TBL_HOST_VLAN_M, //HOST_VLAN_MAPPING + XSC_FLOW_TBL_PCT_V4, //PACKET_CLASSIFIER_V4 + XSC_FLOW_TBL_PCT_V6, //PACKET_CLASSIFIER_V6 + XSC_FLOW_TBL_WCT_KP, //WCT_KEY_PROFILE + XSC_FLOW_TBL_WCT, //WILDCARD_TBL + XSC_FLOW_TBL_FKP, //FLOW_KEY_PROFILE + XSC_FLOW_TBL_EM, //EXACT_MATCH + XSC_FLOW_TBL_FAT, //FLOW_ACTION + XSC_FLOW_TBL_TNL_ECP, //TUNNEL_ENCAP + XSC_FLOW_TBL_ERP_HDR, //ERSPAN_HDR_INFO + XSC_FLOW_TBL_MIR_IDX, //MIRROR_INDEX + XSC_FLOW_TBL_MIR, //MIRROR_TBL + XSC_FLOW_TBL_MIR_HDR, //ENCAP_MIRROR_HDR + XSC_FLOW_TBL_VER, //VERSION_TBL + XSC_FLOW_TBL_LCMT, //LCMT_TBL + XSC_FLOW_TBL_CT, //CONN_TRACK + XSC_FLOW_TBL_EPAT, //EG_PORT_ATTR + XSC_FLOW_TBL_OPVLANMT, //OUT_PORT_VLAN_MEMBER + XSC_FLOW_TBL_RSS_HASH, //RSS_HASH + XSC_FLOW_TBL_MDF_MAC, //MODIFY_MAC + XSC_FLOW_TBL_MDF_IP, //MODIFY_IP + XSC_FLOW_TBL_MDF_TPID, //MODIFY_TPID + XSC_FLOW_TBL_ECP_HDR, //ENCAP_HDR + XSC_FLOW_TBL_ECP_MAC, //ENCAP_MAC + XSC_FLOW_TBL_ECP_IP, //ENCAP_IP + XSC_FLOW_TBL_ECP_TPID, //ENCAP_TPID + XSC_FLOW_TBL_ECP_TP_TNL, //ENCAP_TP_TUNNEL + XSC_FLOW_TBL_ECP_DPORT, //ENCAP_DPORT + XSC_FLOW_TBL_VFSO, //VF_START_OFST + XSC_FLOW_TBL_IACL, //INGRESS_ACL + XSC_FLOW_TBL_IACL_CNT, //INGRESS_ACL_COUNTER + XSC_FLOW_TBL_EACL, //EGRESS_ACL + XSC_FLOW_TBL_EACL_CNT, //EGRESS_ACL_COUNTER + XSC_FLOW_TBL_EM_EXT, //EXACT_MATCH_EXT + XSC_FLOW_TBL_EM_EXT_2M_HASH_ADR, //EM_EXT_2M_HASH_ADDR + XSC_FLOW_TBL_EM_EXT_1G_HASH_ADR, //EM_EXT_1G_HASH_ADDR + XSC_FLOW_TBL_EM_EXT_2M_KEY_ADR, //EM_EXT_2M_KEY_ADDR + XSC_FLOW_TBL_EM_EXT_1G_KEY_ADR, //EM_EXT_1G_KEY_ADDR + XSC_FLOW_TBL_PG_QP_SET_ID, //PG_QP_SET_ID + XSC_FLOW_DIR_REGISTER, //DIR_REGISTER + XSC_FLOW_INDIR_REGISTER, //INDIR_REGISTER + XSC_FLOW_TBL_BM_PCT_V4, //BIM MATCH PACKET_CLASSIFIER_V4 + XSC_FLOW_TBL_BM_PCT_V6, //BIM MATCH PACKET_CLASSIFIER_V6 + XSC_FLOW_TBL_BM_WCT, //BIM MATCH WILDCARD_TBL + 
XSC_FLOW_TBL_BM_IACL, //BIM MATCH INGRESS_ACL + XSC_FLOW_TBL_BMT, //BROADCAST MEMBER + XSC_FLOW_TBL_BOMT, //BROADCAST OUTPUT + XSC_FLOW_TBL_PST, //pst + XSC_FLOW_DMA_WR, //DMA WRITE + XSC_FLOW_DMA_RD, //DMA READ + XSC_FLOW_PARSER_TBL, //PARSER_TBL + XSC_FLOW_UDF_AWARE_TBL, //UDF_AWARE_TBL + XSC_FLOW_UDF_UNAWARE_TBL, //UDF_UNAWARE_TBL + XSC_FLOW_MTR_CTRL_TBL, //MTR_CTRL_TBL + XSC_FLOW_MTR_FLOW_PD, //MTR_FLOW_PD + XSC_FLOW_MTR_VPORT_PD, //MTR_VPORT_PD + XSC_FLOW_MTR_VPG_PD, //MTR_VPG_PD + XSC_FLOW_MTR_FLOW_SCAN, //MTR_FLOW_SCAN + XSC_FLOW_MTR_VPORT_SCAN, //MTR_VPORT_SCAN + XSC_FLOW_MTR_VPG_SCAN, //MTR_VPG_SCAN + XSC_FLOW_MTR_MAPPING, //MTR_MAPPING + XSC_FLOW_PRG_ACT_IDX, //PRG_ACT_INDEX + XSC_FLOW_PRG_ACT0, //PRG_ACT0 + XSC_FLOW_PRG_ACT1, //PRG_ACT1 + XSC_FLOW_PRG_ACT2, //PRG_ACT2 + XSC_FLOW_NIF_PRI_CNT, //NIF_PRI_CNT + XSC_FLOW_PRS2CLSF_SRC_PORT_CNT, //PRS2CLSF_SRC_PORT_CNT + XSC_FLOW_QUEUE_RX_CNT, //QUEUE_TX_CNT + XSC_FLOW_QUEUE_TX_CNT, //QUEUE_TX_CNT + XSC_FLOW_MAC_LAG_PORT_SEL, //MAC_LAG_PORT_SEL + XSC_FLOW_EXT_CT_CLR, //EXT_CT_CLR + XSC_FLOW_IP_TBL_CFG, //IP_TBL_CFG + XSC_FLOW_RSS_HASH_INIT_KEY_CFG, //SS_HASH_INIT_KEY_CFG + XSC_FLOW_QP_ID_BASE_CFG, //QP_ID_BASE_CFG + XSC_FLOW_PSS_INFO, //CLSF_CTRL_PSS_INFO + XSC_FLOW_SNAPSHOT, //SNAPSHOT + XSC_FLOW_PSS_MATCH_KEY, //PSS_MATCH_KEY + XSC_FLOW_PSS_CLR, //PSS_CLEAR + XSC_FLOW_PSS_START, //PSS_START + XSC_FLOW_PSS_DONE, //PSS_DONE + XSC_FLOW_MAC_PORT_MTU, //MAC_PORT_MTU + XSC_FLOW_ECP_PKT_LEN_INC, //ECP_PKT_LEN_INC + XSC_FLOW_TCP_FLAGS_CFG, //TCP_FLAGS_CFG + XSC_FLOW_DBG_CNT, //DBG_CNT + XSC_FLOW_PRS_REC_PORT_UDF_SEL, + XSC_FLOW_TBL_MAX +}; + +enum xsc_other_tbl_id { + XSC_OTHER_TBL_MAX +}; + +enum xsc_ioctl_op { + XSC_IOCTL_OP_ADD, + XSC_IOCTL_OP_DEL, + XSC_IOCTL_OP_GET, + XSC_IOCTL_OP_CLR, + XSC_IOCTL_OP_MOD, + XSC_IOCTL_OP_MAX +}; + +struct xsc_ioctl_mem_info { + u32 mem_num; + u32 size; + u64 vir_addr; + u64 phy_addr; +}; + +/* get phy info */ +struct xsc_ioctl_get_phy_info_attr { + u16 bdf; + u16 rsvd; +}; + +struct xsc_ioctl_qp_range { + u16 opcode; + int num; + u32 qpn; +}; + +struct xsc_ioctl_get_phy_info_res { + u32 domain; + u32 bus; + u32 devfn; + u32 pcie_no; //pcie number + u32 func_id; //pf glb func id + u32 pcie_host; //host pcie number + u32 mac_phy_port; //mac port + u32 funcid_to_logic_port_off; + u16 lag_id; + u16 raw_qp_id_base; + u16 raw_rss_qp_id_base; + u16 pf0_vf_funcid_base; + u16 pf0_vf_funcid_top; + u16 pf1_vf_funcid_base; + u16 pf1_vf_funcid_top; + u16 pcie0_pf_funcid_base; + u16 pcie0_pf_funcid_top; + u16 pcie1_pf_funcid_base; + u16 pcie1_pf_funcid_top; + u16 lag_port_start; + u16 raw_tpe_qp_num; + int send_seg_num; + int recv_seg_num; + u8 on_chip_tbl_vld; + u8 dma_rw_tbl_vld; + u8 pct_compress_vld; + u32 chip_version; + u32 hca_core_clock; + u8 mac_bit; + u8 esw_mode; + u32 board_id; +}; + +struct xsc_ioctl_get_vf_info_res { + u16 vf_id; //start from 1, 0 is reserved for pf + u16 phy_port; //pcie0=0, pcie1=1 + u16 pf_id; //pf0=0, pf1=1 + u32 func_id; + u32 logic_port; +}; + +struct xsc_alloc_ucontext_req { + u32 domain; + u32 bus; + u32 devfn; +}; + +struct xsc_ioctl_force_pcp { + int pcp; +}; + +struct xsc_ioctl_force_dscp { + int dscp; +}; + +struct xsc_alloc_ucontext_resp { + int max_cq; + int max_qp; + u32 max_rwq_indirection_table_size; + u64 qpm_tx_db; + u64 qpm_rx_db; + u64 cqm_next_cid_reg; + u64 cqm_armdb; + u32 send_ds_num; + u32 recv_ds_num; + u32 send_ds_shift; + u32 recv_ds_shift; + u32 glb_func_id; + u32 max_wqes; +}; + +struct xsc_ioctl_cma_pcp { + int pcp; +}; + +struct xsc_ioctl_cma_dscp { + int 
dscp; +}; + +struct xsc_ioctl_set_debug_info { + unsigned int log_level; + unsigned int cmd_verbose; +}; + +struct xsc_ioctl_user_mode_attr { + u8 enable; +}; + +/* type-value */ +struct xsc_ioctl_data_tl { + u16 table; /* table id */ + u16 opmod; /* add/del/mod */ + u16 length; + u16 rsvd; +}; + +/* public header */ +struct xsc_ioctl_attr { + u16 opcode; /* ioctl cmd */ + u16 length; /* data length */ + u32 error; /* ioctl error info */ + u16 ver; + u16 rsvd; + u8 data[]; /* specific table info */ +}; + +struct xsc_ioctl_emu_hdr { + u16 in_length; /* cmd req length */ + u16 out_length; /* cmd rsp length */ + u8 data[]; /* emu cmd content start from here */ +}; + +struct xsc_ioctl_hdr { + u32 check_filed; /* Validity verification fileds */ + u32 domain; + u32 bus; + u32 devfn; + struct xsc_ioctl_attr attr; +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h new file mode 100644 index 0000000000000000000000000000000000000000..24aa39a15e9d16dd21df005275364ffe411edb91 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_LAG_H +#define XSC_LAG_H + +#define XSC_BOARD_LAG_MAX XSC_MAX_PORTS + +enum lag_event_type { + XSC_LAG_CREATE, + XSC_LAG_ADD_MEMBER, + XSC_LAG_REMOVE_MEMBER, + XSC_LAG_UPDATE_MEMBER_STATUS, + XSC_LAG_UPDATE_HASH_TYPE, + XSC_LAG_DESTROY, + XSC_LAG_EVENT_MAX +}; + +enum lag_slave_status { + XSC_LAG_SLAVE_INACTIVE, + XSC_LAG_SLAVE_ACTIVE, + XSC_LAG_SLAVE_STATUS_MAX, +}; + +enum { + XSC_SLEEP, + XSC_WAKEUP, + XSC_EXIT, +}; + +enum { + XSC_LAG_FLAG_ROCE = 1 << 0, + XSC_LAG_FLAG_SRIOV = 1 << 1, + XSC_LAG_FLAG_KERNEL = 1 << 2, +}; + +enum xsc_lag_hash { + XSC_LAG_HASH_L23, + XSC_LAG_HASH_L34, + XSC_LAG_HASH_E23, + XSC_LAG_HASH_E34, +}; + +enum { + QOS_LAG_OP_CREATE = 0, + QOS_LAG_OP_ADD_MEMBER = 1, + QOS_LAG_OP_DEL_MEMBER = 2, + QOS_LAG_OP_DESTROY = 3, +}; + +#define BOND_ID_INVALID U8_MAX +#define BOARD_ID_INVALID U32_MAX +#define LAG_ID_INVALID U16_MAX + +#define XSC_LAG_MODE_FLAGS (XSC_LAG_FLAG_ROCE | XSC_LAG_FLAG_SRIOV | XSC_LAG_FLAG_KERNEL) + +struct xsc_lag { + struct net_device *bond_dev; + u8 bond_mode; + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; + u8 lag_type; + u16 lag_id; + atomic_t qp_cnt[XSC_MAX_PORTS]; + struct list_head slave_list; + u8 xsc_member_cnt; + u32 board_id; + int mode_changes_in_progress; + u8 not_roce_lag_xdev_mask; +}; + +struct xsc_lag_event { + struct list_head node; + enum lag_event_type event_type; + struct xsc_core_device *xdev; + struct xsc_core_device *roce_lag_xdev; + u8 bond_mode; + u8 lag_type; + u8 hash_type; + u8 lag_sel_mode; + u16 lag_id; + enum lag_slave_status slave_status; + u8 is_roce_lag_xdev; + u8 not_roce_lag_xdev_mask; +}; + +struct lag_event_list { + struct list_head head; + spinlock_t lock; /* protect lag_event_list */ + struct task_struct *bond_poll_task; + wait_queue_head_t wq; + int wait_flag; + u8 event_type; +}; + +struct xsc_board_lag { + struct xsc_lag xsc_lag[XSC_BOARD_LAG_MAX]; + u32 board_id; + struct kref ref; + u8 bond_valid_mask; + struct lag_event_list lag_event_list; + struct notifier_block nb; + struct mutex lock; /* protects board_lag */ +}; + +void xsc_lag_add_xdev(struct xsc_core_device *xdev); +void xsc_lag_remove_xdev(struct xsc_core_device *xdev); +void xsc_lag_add_netdev(struct net_device *ndev); +void 
xsc_lag_remove_netdev(struct net_device *ndev); +void xsc_lag_disable(struct xsc_core_device *xdev); +void xsc_lag_enable(struct xsc_core_device *xdev); +bool xsc_lag_is_roce(struct xsc_core_device *xdev); +struct xsc_lag *xsc_get_lag(struct xsc_core_device *xdev); +struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev); +u16 xsc_get_lag_id(struct xsc_core_device *xdev); +struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev); + +static inline void xsc_board_lag_lock(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (xsc_core_is_pf(xdev)) + mutex_lock(&board_lag->lock); +} + +static inline void xsc_board_lag_unlock(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (xsc_core_is_pf(xdev)) + mutex_unlock(&board_lag->lock); +} + +#endif /* XSC_LAG_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h new file mode 100644 index 0000000000000000000000000000000000000000..db23b910f8e3c51152cdf660d8d79c225ac4cd11 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_MACRO_H +#define XSC_MACRO_H + +#ifndef NO_MSIX_SUPPORT +#define MSIX_SUPPORT +#endif + +#ifndef NO_RSS_SUPPORT +#define XSC_RSS_SUPPORT +#endif + +#ifndef NO_BQL_SUPPORT +#define XSC_BQL_SUPPORT +#endif + +#endif /*XSC_MACRO_H*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..665103ac4dfa12e4232a1478e22a623b68d44ff0 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_PORT_CTRL_H +#define XSC_PORT_CTRL_H + +/*mmap msg encode*/ +enum { + XSC_MMAP_MSG_SQDB = 0, + XSC_MMAP_MSG_RQDB = 1, + XSC_MMAP_MSG_CQDB = 2, + XSC_MMAP_MSG_ARM_CQDB = 3, +}; + +#define TRY_NEXT_CB 0x1a2b3c4d + +typedef int (*port_ctrl_cb)(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data); + +void xsc_port_ctrl_remove(struct xsc_core_device *dev); +int xsc_port_ctrl_probe(struct xsc_core_device *dev); +int xsc_port_ctrl_cb_reg(const char *name, port_ctrl_cb cb, void *data); +void xsc_port_ctrl_cb_dereg(const char *name); + +void xsc_port_ctrl_fini(void); +int xsc_port_ctrl_init(void); +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h new file mode 100644 index 0000000000000000000000000000000000000000..c200ba8928974d743cf5bab124fbd9cd13583211 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_PP_H +#define XSC_PP_H + +enum { + XSC_HASH_FIELD_SEL_SRC_IP = 1 << 0, + XSC_HASH_FIELD_SEL_PROTO = 1 << 1, + XSC_HASH_FIELD_SEL_DST_IP = 1 << 2, + XSC_HASH_FIELD_SEL_SPORT = 1 << 3, + XSC_HASH_FIELD_SEL_DPORT = 1 << 4, + XSC_HASH_FIELD_SEL_SRC_IPV6 = 1 << 5, + XSC_HASH_FIELD_SEL_DST_IPV6 = 1 << 6, + XSC_HASH_FIELD_SEL_SPORT_V6 = 1 << 7, + XSC_HASH_FIELD_SEL_DPORT_V6 = 1 << 8, +}; + +#define XSC_HASH_IP (XSC_HASH_FIELD_SEL_SRC_IP |\ + XSC_HASH_FIELD_SEL_DST_IP |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP_PORTS (XSC_HASH_FIELD_SEL_SRC_IP |\ + XSC_HASH_FIELD_SEL_DST_IP |\ + XSC_HASH_FIELD_SEL_SPORT |\ + XSC_HASH_FIELD_SEL_DPORT |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP6 (XSC_HASH_FIELD_SEL_SRC_IPV6 |\ + XSC_HASH_FIELD_SEL_DST_IPV6 |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP6_PORTS (XSC_HASH_FIELD_SEL_SRC_IPV6 |\ + XSC_HASH_FIELD_SEL_DST_IPV6 |\ + XSC_HASH_FIELD_SEL_SPORT_V6 |\ + XSC_HASH_FIELD_SEL_DPORT_V6 |\ + XSC_HASH_FIELD_SEL_PROTO) + +enum { + XSC_HASH_TMPL_IDX_IP_PORTS_IP6_PORTS = 0, + XSC_HASH_TMPL_IDX_IP_IP6, + XSC_HASH_TMPL_IDX_IP_PORTS_IP6, + XSC_HASH_TMPL_IDX_IP_IP6_PORTS, + XSC_HASH_TMPL_IDX_MAX, +}; + +#endif /* XSC_PP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h new file mode 100644 index 0000000000000000000000000000000000000000..fec39d7137f57cb17fd2eafb248c060fa90caabf --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_PPH_H +#define XSC_PPH_H + +#define XSC_PPH_HEAD_LEN 64 + +enum { + L4_PROTO_NONE = 0, + L4_PROTO_TCP = 1, + L4_PROTO_UDP = 2, + L4_PROTO_ICMP = 3, + L4_PROTO_GRE = 4, +}; + +enum { + L3_PROTO_NONE = 0, + L3_PROTO_IP = 2, + L3_PROTO_IP6 = 3, +}; + +struct epp_pph { + u16 outer_eth_type; //2 bytes + u16 inner_eth_type; //4 bytes + + u16 rsv1:1; + u16 outer_vlan_flag:2; + u16 outer_ip_type:2; + u16 outer_ip_ofst:5; + u16 outer_ip_len:6; //6 bytes + + u16 rsv2:1; + u16 outer_tp_type:3; + u16 outer_tp_csum_flag:1; + u16 outer_tp_ofst:7; + u16 ext_tunnel_type:4; //8 bytes + + u8 tunnel_ofst; //9 bytes + u8 inner_mac_ofst; //10 bytes + + u32 rsv3:2; + u32 inner_mac_flag:1; + u32 inner_vlan_flag:2; + u32 inner_ip_type:2; + u32 inner_ip_ofst:8; + u32 inner_ip_len:6; + u32 inner_tp_type:2; + u32 inner_tp_csum_flag:1; + u32 inner_tp_ofst:8; //14 bytees + + u16 rsv4:1; + u16 payload_type:4; + u16 payload_ofst:8; + u16 pkt_type:3; //16 bytes + + u16 rsv5:2; + u16 pri:3; + u16 logical_in_port:11; + u16 vlan_info; + u8 error_bitmap:8; //21 bytes + + u8 rsv6:7; + u8 recirc_id_vld:1; + u16 recirc_id; //24 bytes + + u8 rsv7:7; + u8 recirc_data_vld:1; + u32 recirc_data; //29 bytes + + u8 rsv8:6; + u8 mark_tag_vld:2; + u16 mark_tag; //32 bytes + + u8 rsv9:4; + u8 upa_to_soc:1; + u8 upa_from_soc:1; + u8 upa_re_up_call:1; + u8 upa_pkt_drop:1; //33 bytes + + u8 ucdv; + u16 rsv10:2; + u16 pkt_len:14; //36 bytes + + u16 rsv11:2; + u16 pkt_hdr_ptr:14; //38 bytes + + u64 rsv12:5; + u64 csum_ofst:8; + u64 csum_val:29; + u64 csum_plen:14; + u64 rsv11_0:8; //46 bytes + + u64 rsv11_1; + u64 rsv11_2; + u16 rsv11_3; +}; + +#define OUTER_L3_BIT BIT(3) +#define OUTER_L4_BIT BIT(2) +#define INNER_L3_BIT BIT(1) +#define INNER_L4_BIT BIT(0) +#define OUTER_BIT (OUTER_L3_BIT | OUTER_L4_BIT) +#define INNER_BIT (INNER_L3_BIT | INNER_L4_BIT) +#define OUTER_AND_INNER (OUTER_BIT | 
INNER_BIT) + +#define PACKET_UNKNOWN BIT(4) + +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET (6UL) +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK (0XF00) +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET (8) + +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET (20UL) +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK (0XFF) +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET (0) + +#define XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(PPH_BASE_ADDR) \ + ((*(u16 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET) & \ + EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK) >> EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET) + +#define XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(PPH_BASE_ADDR) \ + ((*(u8 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET) & \ + EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK) >> EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET) + +#define PPH_OUTER_IP_TYPE_OFF (4UL) +#define PPH_OUTER_IP_TYPE_MASK (0x3) +#define PPH_OUTER_IP_TYPE_SHIFT (11) +#define PPH_OUTER_IP_TYPE(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_TYPE_OFF)) >> \ + PPH_OUTER_IP_TYPE_SHIFT) & PPH_OUTER_IP_TYPE_MASK) + +#define PPH_OUTER_IP_OFST_OFF (4UL) +#define PPH_OUTER_IP_OFST_MASK (0x1f) +#define PPH_OUTER_IP_OFST_SHIFT (6) +#define PPH_OUTER_IP_OFST(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_OFST_OFF)) >> \ + PPH_OUTER_IP_OFST_SHIFT) & PPH_OUTER_IP_OFST_MASK) + +#define PPH_OUTER_IP_LEN_OFF (4UL) +#define PPH_OUTER_IP_LEN_MASK (0x3f) +#define PPH_OUTER_IP_LEN_SHIFT (0) +#define PPH_OUTER_IP_LEN(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_LEN_OFF)) >> \ + PPH_OUTER_IP_LEN_SHIFT) & PPH_OUTER_IP_LEN_MASK) + +#define PPH_OUTER_TP_TYPE_OFF (6UL) +#define PPH_OUTER_TP_TYPE_MASK (0x7) +#define PPH_OUTER_TP_TYPE_SHIFT (12) +#define PPH_OUTER_TP_TYPE(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_TP_TYPE_OFF)) >> \ + PPH_OUTER_TP_TYPE_SHIFT) & PPH_OUTER_TP_TYPE_MASK) + +#define PPH_PAYLOAD_OFST_OFF (14UL) +#define PPH_PAYLOAD_OFST_MASK (0xff) +#define PPH_PAYLOAD_OFST_SHIFT (3) +#define PPH_PAYLOAD_OFST(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_PAYLOAD_OFST_OFF)) >> \ + PPH_PAYLOAD_OFST_SHIFT) & PPH_PAYLOAD_OFST_MASK) + +#define PPH_CSUM_OFST_OFF (38UL) +#define PPH_CSUM_OFST_MASK (0xff) +#define PPH_CSUM_OFST_SHIFT (51) +#define PPH_CSUM_OFST(base) \ + ((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_OFST_OFF)) >> \ + PPH_CSUM_OFST_SHIFT) & PPH_CSUM_OFST_MASK) + +#define PPH_CSUM_VAL_OFF (38UL) +#define PPH_CSUM_VAL_MASK (0xeffffff) +#define PPH_CSUM_VAL_SHIFT (22) +#define PPH_CSUM_VAL(base) \ + ((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_VAL_OFF)) >> \ + PPH_CSUM_VAL_SHIFT) & PPH_CSUM_VAL_MASK) +#endif /* XSC_TBM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..6b2c84017c18cafc5568743aa7c94d2d27b74f2a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_REG_H +#define XSC_REG_H +#define CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0x0 +#define CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0x4 +#define CMDQM_HOST_REQ_PID_MEM_ADDR 0x8 +#define CMDQM_HOST_REQ_CID_MEM_ADDR 0xc +#define CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0x10 +#define CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0x14 +#define CMDQM_HOST_RSP_PID_MEM_ADDR 0x18 +#define CMDQM_HOST_RSP_CID_MEM_ADDR 0x1c +#define CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0x20 +#define CMDQM_VECTOR_ID_MEM_ADDR 0x24 +#define CMDQM_Q_ELEMENT_SZ_REG_ADDR 0x28 +#define CMDQM_HOST_Q_DEPTH_REG_ADDR 0x2c + +#define CPM_LOCK_GET_REG_ADDR 0x30 +#define CPM_LOCK_PUT_REG_ADDR 0x34 +#define CPM_LOCK_AVAIL_REG_ADDR 0x38 +#define CPM_IDA_DATA_MEM_ADDR 0x3c +#define CPM_IDA_CMD_REG_ADDR 0x83c +#define CPM_IDA_ADDR_REG_ADDR 0x840 +#define CPM_IDA_BUSY_REG_ADDR 0x8c0 + +#define DB_CQ_FUNC_MEM_ADDR 0x8c4 +#define DB_EQ_FUNC_MEM_ADDR 0x8c8 +#define DB_CQ_CID_DIRECT_MEM_ADDR 0x8cc +#define TX_DB_FUNC_MEM_ADDR 0x8d0 +#define RX_DB_FUNC_MEM_ADDR 0x8d4 + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..30889caa96034016e41721361f99b3911365cba2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config YUNSILICON_XSC_ETH + tristate "Yunsilicon XSC ethernet driver" + default n + depends on YUNSILICON_XSC_PCI + help + This driver provides ethernet support for + Yunsilicon XSC devices. + + To compile this driver as a module, choose M here. The module + will be called xsc_eth. diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Makefile b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a6b1a4a300aa8e203f83731ddf2a769e2e3fc993 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc_eth.o + +xsc_eth-y := main.o xsc_eth_ctrl.o xsc_eth_tx.o xsc_eth_rx.o xsc_eth_txrx.o \ + ut_main.o xsc_eth_ethtool.o xsc_eth_stats.o xsc_dcbnl.o xsc_hw_comm.o \ + xsc_eth_sysfs.o xsc_fs.o xsc_eth_dim.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/main.c b/drivers/net/ethernet/yunsilicon/xsc/net/main.c new file mode 100644 index 0000000000000000000000000000000000000000..3ed7be4e5d7d8b7ea5eafb60da381b02a73b05b9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/main.c @@ -0,0 +1,3397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" +#include "common/qp.h" +#include "common/xsc_lag.h" +#include "common/xsc_pp.h" + +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_ethtool.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "xsc_accel.h" +#include "xsc_eth_ctrl.h" +#include "../pci/eswitch.h" + +#include "common/xsc_fs.h" +#include "common/vport.h" +#include "common/qp.h" +#include "xsc_eth_dim.h" + +MODULE_LICENSE("GPL"); + +#define MAX_VF_NUM_MINIDUMP 1024 + +static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq); +static void xsc_eth_remove(struct xsc_core_device *xdev, void *context); + +static int xsc_eth_open(struct net_device *netdev); +static int xsc_eth_close(struct net_device *netdev); +static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc); + +#ifdef NEED_CREATE_RX_THREAD +extern uint32_t xsc_eth_rx_thread_create(struct xsc_adapter *adapter); +#endif + +static inline void xsc_set_feature(netdev_features_t *features, + netdev_features_t feature, + bool enable) +{ + if (enable) + *features |= feature; + else + *features &= ~feature; +} + +typedef int (*xsc_feature_handler)(struct net_device *netdev, bool enable); + +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status); + +static void xsc_eth_build_queue_param(struct xsc_adapter *adapter, + struct xsc_queue_attr *attr, u8 type) +{ + struct xsc_core_device *xdev = adapter->xdev; + + if (adapter->nic_param.sq_size == 0) + adapter->nic_param.sq_size = BIT(xdev->caps.log_max_qp_depth); + if (adapter->nic_param.rq_size == 0) + adapter->nic_param.rq_size = BIT(xdev->caps.log_max_qp_depth); + + if (type == XSC_QUEUE_TYPE_EQ) { + attr->q_type = XSC_QUEUE_TYPE_EQ; + attr->ele_num = XSC_EQ_ELE_NUM; + attr->ele_size = XSC_EQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_EQ_ELE_SZ); + attr->q_log_size = order_base_2(XSC_EQ_ELE_NUM); + } else if (type == XSC_QUEUE_TYPE_RQCQ) { + attr->q_type = XSC_QUEUE_TYPE_RQCQ; + attr->ele_num = min_t(int, XSC_RQCQ_ELE_NUM, xdev->caps.max_cqes); + attr->ele_size = XSC_RQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_RQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQCQ) { + attr->q_type = XSC_QUEUE_TYPE_SQCQ; + attr->ele_num = min_t(int, XSC_SQCQ_ELE_NUM, xdev->caps.max_cqes); + attr->ele_size = XSC_SQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_SQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_RQ) { + attr->q_type = XSC_QUEUE_TYPE_RQ; + attr->ele_num = adapter->nic_param.rq_size; + attr->ele_size = xdev->caps.recv_ds_num * XSC_RECV_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQ) { + attr->q_type = XSC_QUEUE_TYPE_SQ; + attr->ele_num = adapter->nic_param.sq_size; + attr->ele_size = xdev->caps.send_ds_num * XSC_SEND_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } +} + +static void xsc_eth_init_frags_partition(struct xsc_rq *rq) +{ + struct xsc_wqe_frag_info next_frag = {}; + struct xsc_wqe_frag_info *prev; + int i; + + next_frag.di = &rq->wqe.di[0]; + next_frag.offset = 0; + prev = NULL; + + for (i = 0; i 
< xsc_wq_cyc_get_size(&rq->wqe.wq); i++) { + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct xsc_wqe_frag_info *frag = + &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { + if (next_frag.offset + frag_info[f].frag_stride > + XSC_RX_FRAG_SZ) { + next_frag.di++; + next_frag.offset = 0; + if (prev) + prev->last_in_page = 1; + } + *frag = next_frag; + + /* prepare next */ + next_frag.offset += frag_info[f].frag_stride; + prev = frag; + } + } + + if (prev) + prev->last_in_page = 1; +} + +static int xsc_eth_init_di_list(struct xsc_rq *rq, int wq_sz, int cpu) +{ + int len = wq_sz << rq->wqe.info.log_num_frags; + + rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), + GFP_KERNEL, cpu_to_node(cpu)); + if (!rq->wqe.di) + return -ENOMEM; + + xsc_eth_init_frags_partition(rq); + + return 0; +} + +static void xsc_eth_free_di_list(struct xsc_rq *rq) +{ + kvfree(rq->wqe.di); +} + +int xsc_rx_alloc_page_cache(struct xsc_rq *rq, int node, u8 log_init_sz) +{ + struct xsc_page_cache *cache = &rq->page_cache; + + cache->sz = 1 << log_init_sz; + cache->page_cache = kvzalloc_node(cache->sz * sizeof(*cache->page_cache), + GFP_KERNEL, node); + if (!cache->page_cache) + return -ENOMEM; + + return 0; +} + +void xsc_rx_free_page_cache(struct xsc_rq *rq) +{ + struct xsc_page_cache *cache = &rq->page_cache; + u32 i; + + for (i = cache->head; i != cache->tail; i = (i + 1) & (cache->sz - 1)) { + struct xsc_dma_info *dma_info = &cache->page_cache[i]; + + xsc_page_release_dynamic(rq, dma_info, false); + } + kvfree(cache->page_cache); +} + +int xsc_eth_reset(struct xsc_core_device *dev) +{ + return 0; +} + +void xsc_eth_cq_error_event(struct xsc_core_cq *xcq, enum xsc_event event) +{ + struct xsc_cq *xsc_cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = xsc_cq->xdev; + + if (event != XSC_EVENT_TYPE_CQ_ERROR) { + xsc_core_err(xdev, "Unexpected event type %d on CQ %06x\n", + event, xcq->cqn); + return; + } + + xsc_core_err(xdev, "Eth catch CQ ERROR:%x, cqn: %d\n", event, xcq->cqn); +} + +void xsc_eth_completion_event(struct xsc_core_cq *xcq) +{ + struct xsc_cq *cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = cq->xdev; + struct xsc_rq *rq = NULL; + + if (unlikely(!cq->channel)) { + xsc_core_warn(xdev, "cq%d->channel is null\n", xcq->cqn); + return; + } + + rq = &cq->channel->qp.rq[0]; + + set_bit(XSC_CHANNEL_NAPI_SCHED, &cq->channel->flags); + cq->channel->stats->poll = 0; + cq->channel->stats->poll_tx = 0; + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + xsc_core_warn(xdev, "ch%d_cq%d, napi_flag=0x%lx\n", + cq->channel->chl_idx, xcq->cqn, cq->napi->state); + + napi_schedule(cq->napi); + cq->event_ctr++; + cq->channel->stats->events++; +} + +static inline int xsc_cmd_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *xcq) +{ + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(xcq->cqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to destroy cq, err=%d out.status=%u\n", + err, out.hdr.status); + return -ENOEXEC; + } + + xcq->cqn = 0; + return 0; +} + +int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, + struct xsc_create_cq_mbox_in *in, int insize) +{ + int err, ret = -1; + struct 
xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_create_cq_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create cq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + xcq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + xcq->cons_index = 0; + xcq->arm_sn = 0; + atomic_set(&xcq->refcount, 1); + init_completion(&xcq->free); + + spin_lock_irq(&table->lock); + ret = radix_tree_insert(&table->tree, xcq->cqn, xcq); + spin_unlock_irq(&table->lock); + if (ret) + goto err_insert_cq; + return 0; + +err_insert_cq: + err = xsc_cmd_destroy_cq(xdev, xcq); + if (err) + xsc_core_warn(xdev, "failed to destroy cqn=%d, err=%d\n", xcq->cqn, err); + return ret; +} + +int xsc_eth_destroy_cq(struct xsc_core_device *xdev, struct xsc_cq *cq) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->xcq.cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + err = -ENOENT; + goto err_delete_cq; + } + + if (tmp != &cq->xcq) { + err = -EINVAL; + goto err_delete_cq; + } + + err = xsc_cmd_destroy_cq(xdev, &cq->xcq); + if (err) + goto err_destroy_cq; + + if (atomic_dec_and_test(&cq->xcq.refcount)) + complete(&cq->xcq.free); + wait_for_completion(&cq->xcq.free); + return 0; + +err_destroy_cq: + xsc_core_warn(xdev, "failed to destroy cqn=%d, err=%d\n", + cq->xcq.cqn, err); + return err; +err_delete_cq: + xsc_core_warn(xdev, "cqn=%d not found in tree, err=%d\n", + cq->xcq.cqn, err); + return err; +} + +void xsc_eth_free_cq(struct xsc_cq *cq) +{ + xsc_eth_wq_destroy(&cq->wq_ctrl); +} + +int xsc_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, + struct xsc_create_multiqp_mbox_in *in, + int insize, + int *prqn_base) +{ + int ret; + struct xsc_create_multiqp_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MULTI_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, + "failed to create rss rq, qp_num=%d, type=%d, err=%d out.status=%u\n", + in->qp_num, in->qp_type, ret, out.hdr.status); + return -ENOEXEC; + } + + *prqn_base = be32_to_cpu(out.qpn_base) & 0xffffff; + return 0; +} + +void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) +{ + struct xsc_rq *rq; + struct xsc_sq *sq; + struct xsc_core_device *xdev; + + if (qp->eth_queue_type == XSC_RES_RQ) { + rq = container_of(qp, struct xsc_rq, cqp); + xdev = rq->cq.xdev; + } else if (qp->eth_queue_type == XSC_RES_SQ) { + sq = container_of(qp, struct xsc_sq, cqp); + xdev = sq->cq.xdev; + } else { + pr_err("%s:Unknown eth qp type %d\n", __func__, type); + return; + } + + switch (type) { + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + xsc_core_err(xdev, "%s:Async event %x on QP %d\n", __func__, type, qp->qpn); + break; + default: + xsc_core_err(xdev, "%s: Unexpected event type %d on QP %d\n", + __func__, type, qp->qpn); + return; + } +} + +int xsc_eth_create_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq, + struct xsc_create_qp_mbox_in *in, int insize) +{ + int ret = -1; + struct xsc_create_qp_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create rq, err=%d out.status=%u\n", + ret, 
out.hdr.status); + return -ENOEXEC; + } + + prq->rqn = be32_to_cpu(out.qpn) & 0xffffff; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + + ret = create_resource_common(xdev, &prq->cqp); + if (ret) { + xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", __func__, prq->rqn, ret); + return ret; + } + + return 0; +} + +int xsc_eth_destroy_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + err = xsc_eth_modify_qp_status(xdev, prq->rqn, XSC_CMD_OP_2RST_QP); + if (err) { + xsc_core_warn(xdev, "failed to set rq%d status=rst, err=%d\n", prq->rqn, err); + return err; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(prq->rqn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy rq%d, err=%d out.status=%u\n", + prq->rqn, err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static void xsc_eth_free_rx_wqe(struct xsc_rq *rq) +{ + u16 wqe_ix; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + + while (!xsc_wq_cyc_is_empty(wq)) { + wqe_ix = xsc_wq_cyc_get_tail(wq); + rq->dealloc_wqe(rq, wqe_ix); + xsc_wq_cyc_pop(wq); + } +} + +static void xsc_free_qp_rq(struct xsc_rq *rq) +{ + if (rq->page_cache.page_cache) + xsc_rx_free_page_cache(rq); + + kvfree(rq->wqe.frags); + kvfree(rq->wqe.di); + + if (rq->page_pool) + page_pool_destroy(rq->page_pool); + + xsc_eth_wq_destroy(&rq->wq_ctrl); +} + +int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, + struct xsc_create_qp_mbox_in *in, int insize) +{ + struct xsc_create_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + psq->sqn = be32_to_cpu(out.qpn) & 0xffffff; + + return 0; +} + +int xsc_eth_modify_qp_sq(struct xsc_core_device *xdev, struct xsc_modify_raw_qp_mbox_in *in) +{ + struct xsc_modify_raw_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_RAW_QP); + + ret = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + &out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to modify sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + err = xsc_eth_modify_qp_status(xdev, psq->sqn, XSC_CMD_OP_2RST_QP); + if (err) { + xsc_core_warn(xdev, "failed to set sq%d status=rst, err=%d\n", psq->sqn, err); + return err; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(psq->sqn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy sq%d, err=%d out.status=%u\n", + psq->sqn, err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static void xsc_free_qp_sq_db(struct xsc_sq *sq) +{ + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); +} + +static void xsc_free_qp_sq(struct xsc_sq *sq) +{ + xsc_free_qp_sq_db(sq); + 
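+	/* sq->db.wqe_info and sq->db.dma_fifo were released above; the only
+	 * remaining resource is the cyclic WQ buffer, torn down just below.
+	 */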
xsc_eth_wq_destroy(&sq->wq_ctrl); +} + +static int xsc_eth_alloc_qp_sq_db(struct xsc_sq *sq, int numa) +{ + int wq_sz = xsc_wq_cyc_get_size(&sq->wq); + struct xsc_core_device *xdev = sq->cq.xdev; + int df_sz = wq_sz * xdev->caps.send_ds_num; + + sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, sizeof(*sq->db.dma_fifo)), + GFP_KERNEL, numa); + sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.wqe_info)), + GFP_KERNEL, numa); + + if (!sq->db.dma_fifo || !sq->db.wqe_info) { + xsc_free_qp_sq_db(sq); + return -ENOMEM; + } + + sq->dma_fifo_mask = df_sz - 1; + + return 0; +} + +static int xsc_eth_alloc_cq(struct xsc_channel *c, struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret; + struct xsc_core_device *xdev = c->adapter->xdev; + struct xsc_core_cq *core_cq = &pcq->xcq; + u32 i; + u8 q_log_size = pcq_param->cq_attr.q_log_size; + u8 ele_log_size = pcq_param->cq_attr.ele_log_size; + + pcq_param->wq.db_numa_node = cpu_to_node(c->cpu); + pcq_param->wq.buf_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_cqwq_create(xdev, &pcq_param->wq, + q_log_size, ele_log_size, &pcq->wq, + &pcq->wq_ctrl); + if (ret) + return ret; + + core_cq->cqe_sz = pcq_param->cq_attr.ele_num; + core_cq->comp = xsc_eth_completion_event; + core_cq->event = xsc_eth_cq_error_event; + core_cq->vector = c->chl_idx; + + for (i = 0; i < xsc_cqwq_get_size(&pcq->wq); i++) { + struct xsc_cqe *cqe = xsc_cqwq_get_wqe(&pcq->wq, i); + + cqe->owner = 1; + } + pcq->xdev = xdev; + + return ret; +} + +#ifdef NEED_CREATE_RX_THREAD +static int xsc_eth_set_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret = XSCALE_RET_SUCCESS; + struct xsc_create_cq_mbox_in *in; + int inlen; + int hw_npages; + + hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /*mbox size + pas size*/ + inlen = sizeof(struct xsc_create_cq_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + /*construct param of in struct*/ + in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(c->adapter->xdev->glb_func_id); + + xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, + &in->pas[0], hw_npages); + + ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen); + + kfree(in); + xsc_core_info(c->adapter->xdev, "create cqn%d, func_id=%d, ret=%d\n", + pcq->xcq.cqn, c->adapter->xdev->glb_func_id, ret); + return ret; +} +#else +static int xsc_eth_set_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret = XSCALE_RET_SUCCESS; + struct xsc_core_device *xdev = c->adapter->xdev; + struct xsc_create_cq_mbox_in *in; + int inlen; + int eqn, irqn; + int hw_npages; + + hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /*mbox size + pas size*/ + inlen = sizeof(struct xsc_create_cq_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + /*construct param of in struct*/ + ret = xsc_vector2eqn(xdev, c->chl_idx, &eqn, &irqn); + if (ret) + goto err; + + in->ctx.eqn = eqn; + in->ctx.eqn = cpu_to_be16(in->ctx.eqn); + in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id); + + xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, &in->pas[0], hw_npages); + + ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen); + if (ret == 0) { + pcq->xcq.irqn = irqn; + pcq->xcq.eq = xsc_eq_get(xdev, 
pcq->xcq.vector); + } + +err: + kvfree(in); + xsc_core_info(c->adapter->xdev, "create ch%d cqn%d, eqn=%d, func_id=%d, ret=%d\n", + c->chl_idx, pcq->xcq.cqn, eqn, xdev->glb_func_id, ret); + return ret; +} +#endif + +static int xsc_eth_open_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret; + + ret = xsc_eth_alloc_cq(c, pcq, pcq_param); + if (ret) + return ret; + + ret = xsc_eth_set_cq(c, pcq, pcq_param); + if (ret) + goto err_set_cq; + + xsc_cq_notify_hw_rearm(pcq); + + pcq->napi = &c->napi; + pcq->channel = c; + pcq->rx = (pcq_param->cq_attr.q_type == XSC_QUEUE_TYPE_RQCQ) ? 1 : 0; + + return 0; + +err_set_cq: + xsc_eth_free_cq(pcq); + return ret; +} + +static int xsc_eth_close_cq(struct xsc_channel *c, struct xsc_cq *pcq) +{ + int ret; + struct xsc_core_device *xdev = c->adapter->xdev; + + ret = xsc_eth_destroy_cq(xdev, pcq); + if (ret) { + xsc_core_warn(xdev, "failed to close ch%d cq%d, ret=%d\n", + c->chl_idx, pcq->xcq.cqn, ret); + return ret; + } + + xsc_eth_free_cq(pcq); + + return 0; +} + +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status) +{ + struct xsc_modify_qp_mbox_in in; + struct xsc_modify_qp_mbox_out out; + + return xsc_modify_qp(xdev, &in, &out, qpn, status); +} + +int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu, u16 rx_buf_sz) +{ + struct xsc_set_mtu_mbox_in in; + struct xsc_set_mtu_mbox_out out; + int ret; + + memset(&in, 0, sizeof(struct xsc_set_mtu_mbox_in)); + memset(&out, 0, sizeof(struct xsc_set_mtu_mbox_out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTU); + in.mtu = cpu_to_be16(mtu); + in.rx_buf_sz_min = cpu_to_be16(rx_buf_sz); + in.mac_port = dev->mac_port; + + ret = xsc_cmd_exec(dev, &in, sizeof(struct xsc_set_mtu_mbox_in), &out, + sizeof(struct xsc_set_mtu_mbox_out)); + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to set hw_mtu=%u rx_buf_sz=%u, err=%d, status=%d\n", + mtu, rx_buf_sz, ret, out.hdr.status); + ret = -ENOEXEC; + } + + return ret; +} + +int xsc_eth_get_mac(struct xsc_core_device *dev, char *mac) +{ + struct xsc_query_eth_mac_mbox_out *out; + struct xsc_query_eth_mac_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_ETH_MAC); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err || out->hdr.status) { + xsc_core_warn(dev, "get mac failed! 
err=%d, out.status=%u\n", err, out->hdr.status); + err = -ENOEXEC; + goto exit; + } + + memcpy(mac, out->mac, 6); + xsc_core_dbg(dev, "get mac %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + +exit: + kfree(out); + + return err; +} + +int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel *c) +{ + int ret = 0; + int i; + + for (i = 0; i < c->qp.rq_num; i++) { + c->qp.rq[i].post_wqes(&c->qp.rq[i]); + ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.rq[i].rqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.sq[i].sqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + return 0; +} + +int xsc_eth_modify_qps(struct xsc_adapter *adapter, + struct xsc_eth_channels *chls) +{ + int ret; + int i; + + for (i = 0; i < chls->num_chl; i++) { + struct xsc_channel *c = &chls->c[i]; + + ret = xsc_eth_modify_qps_channel(adapter, c); + if (ret) + return ret; + } + + return 0; +} + +u32 xsc_rx_get_linear_frag_sz(u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + + return XSC_SKB_FRAG_SZ(byte_count); +} + +bool xsc_rx_is_linear_skb(u32 mtu) +{ + u32 linear_frag_sz = xsc_rx_get_linear_frag_sz(mtu); + + return linear_frag_sz <= PAGE_SIZE; +} + +static int xsc_eth_alloc_rq(struct xsc_channel *c, + struct xsc_rq *prq, + struct xsc_rq_param *prq_param) +{ + struct xsc_adapter *adapter = c->adapter; + u8 q_log_size = prq_param->rq_attr.q_log_size; + struct page_pool_params pagepool_params = { 0 }; + u32 pool_size = 1 << q_log_size; + u8 ele_log_size = prq_param->rq_attr.ele_log_size; + struct xsc_stats *stats = c->adapter->stats; + struct xsc_channel_stats *channel_stats = + &stats->channel_stats[c->chl_idx]; + int cache_init_sz = 0; + int wq_sz; + int i, f; + int ret = 0; + + prq->stats = &channel_stats->rq; + prq_param->wq.db_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_wq_cyc_create(c->adapter->xdev, &prq_param->wq, + q_log_size, ele_log_size, &prq->wqe.wq, + &prq->wq_ctrl); + if (ret) + return ret; + + wq_sz = xsc_wq_cyc_get_size(&prq->wqe.wq); + + prq->wqe.info = prq_param->frags_info; + prq->wqe.frags = kvzalloc_node(array_size((wq_sz << prq->wqe.info.log_num_frags), + sizeof(*prq->wqe.frags)), + GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!prq->wqe.frags) { + ret = -ENOMEM; + goto err_alloc_frags; + } + + ret = xsc_eth_init_di_list(prq, wq_sz, c->cpu); + if (ret) + goto err_init_di; + + prq->buff.map_dir = DMA_FROM_DEVICE; +#ifdef XSC_PAGE_CACHE + cache_init_sz = wq_sz << prq->wqe.info.log_num_frags; + ret = xsc_rx_alloc_page_cache(prq, cpu_to_node(c->cpu), ilog2(cache_init_sz)); + if (ret) + goto err_create_pool; +#endif + + /* Create a page_pool and register it with rxq */ + pool_size = wq_sz << prq->wqe.info.log_num_frags; + pagepool_params.order = XSC_RX_FRAG_SZ_ORDER; + pagepool_params.flags = 0; /* No-internal DMA mapping in page_pool */ + pagepool_params.pool_size = pool_size; + pagepool_params.nid = cpu_to_node(c->cpu); + pagepool_params.dev = c->adapter->dev; + pagepool_params.dma_dir = prq->buff.map_dir; + + prq->page_pool = page_pool_create(&pagepool_params); + if (IS_ERR(prq->page_pool)) { + ret = PTR_ERR(prq->page_pool); + prq->page_pool = NULL; + goto err_create_pool; + } + + if (c->chl_idx == 0) + xsc_core_dbg(adapter->xdev, + "page pool: size=%d, cpu=%d, pool_numa=%d, cache_size=%d, mtu=%d, wqe_numa=%d\n", + pool_size, c->cpu, pagepool_params.nid, + cache_init_sz, adapter->nic_param.mtu, + 
prq_param->wq.buf_numa_node); + + for (i = 0; i < wq_sz; i++) { + struct xsc_eth_rx_wqe_cyc *wqe = + xsc_wq_cyc_get_wqe(&prq->wqe.wq, i); + + for (f = 0; f < prq->wqe.info.num_frags; f++) { + u32 frag_size = prq->wqe.info.arr[f].frag_size; + + wqe->data[f].seg_len = cpu_to_le32(frag_size); + wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); + } + + for (; f < prq->wqe.info.frags_max_num; f++) { + wqe->data[f].seg_len = 0; + wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); + wqe->data[f].va = 0; + } + } + + prq->post_wqes = xsc_eth_post_rx_wqes; + prq->handle_rx_cqe = xsc_eth_handle_rx_cqe; + prq->dealloc_wqe = xsc_eth_dealloc_rx_wqe; + prq->wqe.skb_from_cqe = xsc_rx_is_linear_skb(adapter->nic_param.mtu) ? + xsc_skb_from_cqe_linear : + xsc_skb_from_cqe_nonlinear; + prq->ix = c->chl_idx; + prq->frags_sz = adapter->nic_param.rq_frags_size; + + if (adapter->nic_param.rx_dim_enabled) { + INIT_WORK(&prq->dim_obj.dim.work, xsc_rx_dim_work); + prq->dim_obj.dim.mode = + adapter->nic_param.rx_cq_moderation.cq_period_mode; + hrtimer_init(&prq->cq.cq_reduce.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + prq->cq.cq_reduce.timer.function = xsc_dim_reduce_timer_fn; + set_bit(XSC_ETH_RQ_STATE_AM, &prq->state); + } + + return 0; + +err_create_pool: + xsc_eth_free_di_list(prq); +err_init_di: + kvfree(prq->wqe.frags); +err_alloc_frags: + xsc_eth_wq_destroy(&prq->wq_ctrl); + return ret; +} + +#ifdef XSC_RSS_SUPPORT +static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, + struct xsc_rq_param *prq_param, + struct xsc_eth_channels *chls, + unsigned int num_chl) +{ + int ret = 0, err = 0; + struct xsc_create_multiqp_mbox_in *in; + struct xsc_create_qp_request *req; + u8 q_log_size = prq_param->rq_attr.q_log_size; + int paslen = 0; + struct xsc_rq *prq; + struct xsc_channel *c; + int rqn_base; + int inlen; + int entry_len; + int i, j, n; + int hw_npages; + + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + ret = xsc_eth_alloc_rq(c, prq, prq_param); + if (ret) + goto err_alloc_rqs; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /*support different npages number smoothly*/ + entry_len = sizeof(struct xsc_create_qp_request) + + sizeof(__be64) * hw_npages; + + paslen += entry_len; + } + } + + inlen = sizeof(struct xsc_create_multiqp_mbox_in) + paslen; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_create_rss_rqs; + } + + in->qp_num = cpu_to_be16(num_chl); + in->qp_type = XSC_QUEUE_TYPE_RAW; + in->req_len = cpu_to_be32(inlen); + + req = (struct xsc_create_qp_request *)&in->data[0]; + n = 0; + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /* no use for eth */ + req->input_qpn = cpu_to_be16(0); + req->qp_type = XSC_QUEUE_TYPE_RAW; + req->log_rq_sz = ilog2(adapter->xdev->caps.recv_ds_num) + + q_log_size; + req->pa_num = cpu_to_be16(hw_npages); + req->cqn_recv = cpu_to_be16(prq->cq.xcq.cqn); + req->cqn_send = req->cqn_recv; + req->glb_funcid = cpu_to_be16(adapter->xdev->glb_func_id); + + xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &req->pas[0], hw_npages); + n++; + req = (struct xsc_create_qp_request *)(&in->data[0] + entry_len * n); + } + } + + ret = xsc_eth_create_rss_qp_rqs(adapter->xdev, in, inlen, &rqn_base); + kvfree(in); + if (ret) + goto err_create_rss_rqs; + + n = 0; + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; 
j++) { + prq = &c->qp.rq[j]; + prq->rqn = rqn_base + n; + prq->cqp.qpn = prq->rqn; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + ret = create_resource_common(adapter->xdev, &prq->cqp); + if (ret) { + err = ret; + xsc_core_err(adapter->xdev, + "create resource common error qp:%d errno:%d\n", + prq->rqn, ret); + continue; + } + + n++; + } + } + if (err) + return err; + + adapter->channels.rqn_base = rqn_base; + xsc_core_info(adapter->xdev, "rqn_base=%d, rq_num=%d, state=0x%lx\n", + rqn_base, num_chl, prq->state); + return 0; + +err_create_rss_rqs: + i = num_chl; +err_alloc_rqs: + for (--i; i >= 0; i--) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + xsc_free_qp_rq(prq); + } + } + return ret; +} + +#else +static int xsc_eth_open_qp_rq(struct xsc_channel *c, + struct xsc_rq *prq, + struct xsc_rq_param *prq_param, + u32 rq_idx) +{ + struct xsc_adapter *adapter = c->adapter; + struct xsc_core_device *xdev = adapter->xdev; + u8 q_log_size = prq_param->rq_attr.q_log_size; + struct xsc_create_qp_mbox_in *in; + int hw_npages; + int inlen; + int ret = 0; + + ret = xsc_eth_alloc_rq(c, prq, prq_param); + if (ret) + goto out; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); + inlen = sizeof(struct xsc_create_qp_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_alloc_rq; + } + + in->req.input_qpn = cpu_to_be16(XSC_QPN_RQN_STUB); /*no use for eth*/ + in->req.qp_type = XSC_QUEUE_TYPE_RAW; + in->req.log_rq_sz = ilog2(xdev->caps.recv_ds_num) + q_log_size; + in->req.pa_num = cpu_to_be16(hw_npages); + in->req.cqn_recv = cpu_to_be16(prq->cq.xcq.cqn); + in->req.cqn_send = in->req.cqn_recv; + in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); + + xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &in->req.pas[0], hw_npages); + + ret = xsc_eth_create_qp_rq(xdev, prq, in, inlen); + if (ret) + goto err_create_rq; + + prq->cqp.qpn = prq->rqn; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + + ret = create_resource_common(xdev, &prq->cqp); + if (ret) { + xsc_core_err(xdev, "failed to init rqn%d, err=%d\n", + prq->rqn, ret); + goto err_destroy_rq; + } + + xsc_core_info(c->adapter->xdev, "rqn=%d ch_num=%d state=0x%llx\n", + prq->rqn, c->chl_idx, prq->state); + + kvfree(in); + + return 0; + +err_destroy_rq: + xsc_eth_destroy_qp_rq(xdev, prq); +err_create_rq: + kvfree(in); +err_alloc_rq: + xsc_free_qp_rq(prq); +out: + return ret; +} +#endif + +static int xsc_eth_close_qp_rq(struct xsc_channel *c, struct xsc_rq *prq) +{ + int ret; + struct xsc_core_device *xdev = c->adapter->xdev; + + destroy_resource_common(xdev, &prq->cqp); + + ret = xsc_eth_destroy_qp_rq(xdev, prq); + if (ret) + return ret; + + xsc_eth_free_rx_wqe(prq); + xsc_free_qp_rq(prq); + + return 0; +} + +static int xsc_eth_open_qp_sq(struct xsc_channel *c, + struct xsc_sq *psq, + struct xsc_sq_param *psq_param, + u32 sq_idx) +{ + struct xsc_adapter *adapter = c->adapter; + struct xsc_core_device *xdev = adapter->xdev; + u8 q_log_size = psq_param->sq_attr.q_log_size; + u8 ele_log_size = psq_param->sq_attr.ele_log_size; + struct xsc_stats *stats = adapter->stats; + struct xsc_channel_stats *channel_stats = + &stats->channel_stats[c->chl_idx]; + struct xsc_create_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_in *modify_in; + int hw_npages; + int inlen; + int ret; + + psq->stats = &channel_stats->sq[sq_idx]; + psq_param->wq.db_numa_node = cpu_to_node(c->cpu); + + ret = 
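+	/* build the cyclic send WQ that backs this TX queue; q_log_size and
+	 * ele_log_size come from the XSC_QUEUE_TYPE_SQ attributes filled in
+	 * by xsc_eth_build_queue_param()
+	 */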
xsc_eth_wq_cyc_create(xdev, &psq_param->wq, + q_log_size, ele_log_size, &psq->wq, + &psq->wq_ctrl); + if (ret) + return ret; + + hw_npages = DIV_ROUND_UP(psq->wq_ctrl.buf.size, PAGE_SIZE_4K); + inlen = sizeof(struct xsc_create_qp_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_sq_wq_destroy; + } + in->req.input_qpn = cpu_to_be16(XSC_QPN_SQN_STUB); /*no use for eth*/ + in->req.qp_type = XSC_QUEUE_TYPE_RAW_TSO; /*default sq is tso qp*/ + in->req.log_sq_sz = ilog2(xdev->caps.send_ds_num) + q_log_size; + in->req.pa_num = cpu_to_be16(hw_npages); + in->req.cqn_send = cpu_to_be16(psq->cq.xcq.cqn); + in->req.cqn_recv = in->req.cqn_send; + in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); + + xsc_fill_page_frag_array(&psq->wq_ctrl.buf, + &in->req.pas[0], hw_npages); + + ret = xsc_eth_create_qp_sq(xdev, psq, in, inlen); + if (ret) + goto err_sq_in_destroy; + + psq->cqp.qpn = psq->sqn; + psq->cqp.event = xsc_eth_qp_event; + psq->cqp.eth_queue_type = XSC_RES_SQ; + + ret = create_resource_common(xdev, &psq->cqp); + if (ret) { + xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", + __func__, psq->sqn, ret); + goto err_sq_destroy; + } + + psq->channel = c; + psq->ch_ix = c->chl_idx; + psq->txq_ix = psq->ch_ix + sq_idx * adapter->channels.num_chl; + + /*need to querify from hardware*/ + psq->hw_mtu = XSC_ETH_HW_MTU_SEND; + psq->stop_room = 1; + + ret = xsc_eth_alloc_qp_sq_db(psq, psq_param->wq.db_numa_node); + if (ret) + goto err_sq_common_destroy; + + inlen = sizeof(struct xsc_modify_raw_qp_mbox_in); + modify_in = kvzalloc(inlen, GFP_KERNEL); + if (!modify_in) { + ret = -ENOMEM; + goto err_sq_common_destroy; + } + + modify_in->req.qp_out_port = xdev->pf_id; + modify_in->pcie_no = xdev->pcie_no; + modify_in->req.qpn = cpu_to_be16((u16)(psq->sqn)); + modify_in->req.func_id = cpu_to_be16(xdev->glb_func_id); + modify_in->req.dma_direct = DMA_DIR_TO_MAC; + modify_in->req.prio = sq_idx; + ret = xsc_eth_modify_qp_sq(xdev, modify_in); + if (ret) + goto err_sq_modify_in_destroy; + + kvfree(modify_in); + kvfree(in); + + if (adapter->nic_param.tx_dim_enabled) { + INIT_WORK(&psq->dim_obj.dim.work, xsc_tx_dim_work); + psq->dim_obj.dim.mode = adapter->nic_param.tx_cq_moderation.cq_period_mode; + hrtimer_init(&psq->cq.cq_reduce.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + psq->cq.cq_reduce.timer.function = xsc_dim_reduce_timer_fn; + set_bit(XSC_ETH_SQ_STATE_AM, &psq->state); + } + + xsc_core_info(c->adapter->xdev, + "open sq ok, ch%d_sq%d_qpn=%d, state=0x%lx, db_numa=%d, buf_numa=%d\n", + c->chl_idx, sq_idx, psq->sqn, psq->state, + psq_param->wq.db_numa_node, psq_param->wq.buf_numa_node); + + return 0; + +err_sq_modify_in_destroy: + kvfree(modify_in); + +err_sq_common_destroy: + destroy_resource_common(xdev, &psq->cqp); + +err_sq_destroy: + xsc_eth_destroy_qp_sq(xdev, psq); + +err_sq_in_destroy: + kvfree(in); + +err_sq_wq_destroy: + xsc_eth_wq_destroy(&psq->wq_ctrl); + return ret; +} + +static int xsc_eth_close_qp_sq(struct xsc_channel *c, struct xsc_sq *psq) +{ + struct xsc_core_device *xdev = c->adapter->xdev; + int ret; + + destroy_resource_common(xdev, &psq->cqp); + + ret = xsc_eth_destroy_qp_sq(xdev, psq); + if (ret) + return ret; + + xsc_free_tx_wqe(c->adapter->dev, psq); + xsc_free_qp_sq(psq); + + return 0; +} + +int xsc_eth_open_channel(struct xsc_adapter *adapter, + int idx, + struct xsc_channel *c, + struct xsc_channel_param *chl_param) +{ + int ret = 0; + struct net_device *netdev = adapter->netdev; + struct xsc_stats *stats = 
adapter->stats; + struct xsc_core_device *xdev = adapter->xdev; + int i, j, eqn, irqn; + const struct cpumask *aff; + + c->adapter = adapter; + c->netdev = adapter->netdev; + c->chl_idx = idx; + c->num_tc = adapter->nic_param.num_tc; + c->stats = &stats->channel_stats[idx].ch; + + /*1rq per channel, and may have multi sqs per channel*/ + c->qp.rq_num = 1; + c->qp.sq_num = c->num_tc; + + if (xdev->caps.msix_enable) { + ret = xsc_vector2eqn(xdev, c->chl_idx, &eqn, &irqn); + if (ret) + goto err; + aff = irq_get_affinity_mask(irqn); + c->aff_mask = aff; + c->cpu = cpumask_first(aff); + } + + if (c->qp.sq_num > XSC_MAX_NUM_TC || c->qp.rq_num > XSC_MAX_NUM_TC) { + ret = -EINVAL; + goto err; + } + + for (i = 0; i < c->qp.rq_num; i++) { + ret = xsc_eth_open_cq(c, &c->qp.rq[i].cq, &chl_param->rqcq_param); + if (ret) { + j = i - 1; + goto err_open_rq_cq; + } + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_open_cq(c, &c->qp.sq[i].cq, &chl_param->sqcq_param); + if (ret) { + j = i - 1; + goto err_open_sq_cq; + } + } + +#ifndef XSC_RSS_SUPPORT + for (i = 0; i < c->qp.rq_num; i++) { + ret = xsc_eth_open_qp_rq(c, &c->qp.rq[i], &chl_param->rq_param, i); + if (ret) { + j = i - 1; + goto err_open_rq; + } + } +#endif + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_open_qp_sq(c, &c->qp.sq[i], &chl_param->sq_param, i); + if (ret) { + j = i - 1; + goto err_open_sq; + } + } + + netif_napi_add(netdev, &c->napi, xsc_eth_napi_poll); + + xsc_core_dbg(adapter->xdev, "open channel%d ok\n", idx); + return 0; + +err_open_sq: + for (; j >= 0; j--) + xsc_eth_close_qp_sq(c, &c->qp.sq[j]); + j = (c->qp.rq_num - 1); +#ifndef XSC_RSS_SUPPORT +err_open_rq: + for (; j >= 0; j--) + xsc_eth_close_qp_rq(c, &c->qp.rq[j]); + j = (c->qp.sq_num - 1); +#endif +err_open_sq_cq: + for (; j >= 0; j--) + xsc_eth_close_cq(c, &c->qp.sq[j].cq); + j = (c->qp.rq_num - 1); +err_open_rq_cq: + for (; j >= 0; j--) + xsc_eth_close_cq(c, &c->qp.rq[j].cq); +err: + xsc_core_warn(adapter->xdev, + "failed to open channel: ch%d, sq_num=%d, rq_num=%d, err=%d\n", + idx, c->qp.sq_num, c->qp.rq_num, ret); + return ret; +} + +static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + int frag_stride; + int i = 0; + + if (xsc_rx_is_linear_skb(mtu)) { + frag_stride = xsc_rx_get_linear_frag_sz(mtu); + frag_stride = roundup_pow_of_two(frag_stride); + + frags_info->arr[0].frag_size = byte_count; + frags_info->arr[0].frag_stride = frag_stride; + frags_info->num_frags = 1; + frags_info->wqe_bulk = PAGE_SIZE / frag_stride; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + goto out; + } + + if (byte_count <= DEFAULT_FRAG_SIZE) { + frags_info->arr[0].frag_size = DEFAULT_FRAG_SIZE; + frags_info->arr[0].frag_stride = DEFAULT_FRAG_SIZE; + frags_info->num_frags = 1; + } else if (byte_count <= PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 1; + } else if (byte_count <= (PAGE_SIZE_4K + DEFAULT_FRAG_SIZE)) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else if (byte_count <= 2 * PAGE_SIZE_4K) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + 
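+			/* a 4K kernel page cannot hold the whole frame here,
+			 * so spread it over two 4K fragments; the else branch
+			 * below uses a single 8K fragment instead
+			 */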
frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else { + if (PAGE_SIZE < 4 * PAGE_SIZE_4K) { + frags_info->num_frags = roundup(byte_count, PAGE_SIZE_4K) / PAGE_SIZE_4K; + for (i = 0; i < frags_info->num_frags; i++) { + frags_info->arr[i].frag_size = PAGE_SIZE_4K; + frags_info->arr[i].frag_stride = PAGE_SIZE_4K; + } + } else { + frags_info->arr[0].frag_size = 4 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 4 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } + + if (PAGE_SIZE <= PAGE_SIZE_4K) { + frags_info->wqe_bulk_min = 4; + frags_info->wqe_bulk = max_t(u8, frags_info->wqe_bulk_min, 8); + } else if (PAGE_SIZE <= 2 * PAGE_SIZE_4K) { + frags_info->wqe_bulk = 2; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } else { + frags_info->wqe_bulk = + PAGE_SIZE / (frags_info->num_frags * frags_info->arr[0].frag_size); + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } + +out: + frags_info->log_num_frags = order_base_2(frags_info->num_frags); + + return frags_info->num_frags * frags_info->arr[0].frag_size; +} + +static void xsc_build_rq_frags_info(struct xsc_queue_attr *attr, + struct xsc_rq_frags_info *frags_info, + struct xsc_eth_params *params) +{ + params->rq_frags_size = xsc_get_rq_frag_info(frags_info, params->mtu); + frags_info->frags_max_num = attr->ele_size / XSC_RECV_WQE_DS; +} + +static void xsc_eth_build_channel_param(struct xsc_adapter *adapter, + struct xsc_channel_param *chl_param) +{ + xsc_eth_build_queue_param(adapter, &chl_param->rqcq_param.cq_attr, + XSC_QUEUE_TYPE_RQCQ); + chl_param->rqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sqcq_param.cq_attr, + XSC_QUEUE_TYPE_SQCQ); + chl_param->sqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sq_param.sq_attr, + XSC_QUEUE_TYPE_SQ); + chl_param->sq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->rq_param.rq_attr, + XSC_QUEUE_TYPE_RQ); + chl_param->rq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_build_rq_frags_info(&chl_param->rq_param.rq_attr, + &chl_param->rq_param.frags_info, + &adapter->nic_param); +} + +int xsc_eth_open_channels(struct xsc_adapter *adapter) +{ + int ret = 0; + int i; + struct xsc_channel_param *chl_param; + struct xsc_eth_channels *chls = &adapter->channels; + struct xsc_core_device *xdev = adapter->xdev; + bool free_rq = false; + + chls->num_chl = adapter->nic_param.num_channels; + chls->c = kcalloc_node(chls->num_chl, sizeof(struct xsc_channel), + GFP_KERNEL, xdev->priv.numa_node); + if (!chls->c) { + ret = -ENOMEM; + goto err; + } + + chl_param = kvzalloc(sizeof(*chl_param), GFP_KERNEL); + if (!chl_param) { + ret = -ENOMEM; + goto err_free_ch; + } + + xsc_eth_build_channel_param(adapter, chl_param); + + for (i = 0; i < chls->num_chl; i++) { + ret = xsc_eth_open_channel(adapter, i, &chls->c[i], chl_param); + if (ret) + goto err_open_channel; +#ifndef XSC_RSS_SUPPORT + free_rq = true; +#endif + } + +#ifdef XSC_RSS_SUPPORT + ret = xsc_eth_open_rss_qp_rqs(adapter, &chl_param->rq_param, chls, chls->num_chl); + if (ret) + goto err_open_channel; + free_rq = true; +#endif + + for (i = 0; i < 
chls->num_chl; i++) + napi_enable(&chls->c[i].napi); + + /* flush cache to memory before interrupt and napi_poll running */ + smp_wmb(); + + ret = xsc_eth_modify_qps(adapter, chls); + if (ret) + goto err_modify_qps; + + kvfree(chl_param); + xsc_core_info(adapter->xdev, "open %d channels ok\n", chls->num_chl); + return 0; + +err_modify_qps: + i = chls->num_chl; +err_open_channel: + for (--i; i >= 0; i--) + xsc_eth_close_channel(&chls->c[i], free_rq); + + kvfree(chl_param); +err_free_ch: + kfree(chls->c); +err: + chls->num_chl = 0; + xsc_core_warn(adapter->xdev, "failed to open %d channels, err=%d\n", + chls->num_chl, ret); + return ret; +} + +static void xsc_eth_activate_txqsq(struct xsc_channel *c) +{ + int tc = c->num_tc; + struct xsc_sq *psq; + + for (tc = 0; tc < c->num_tc; tc++) { + psq = &c->qp.sq[tc]; + psq->txq = netdev_get_tx_queue(psq->channel->netdev, psq->txq_ix); + set_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state); + netdev_tx_reset_queue(psq->txq); + netif_tx_start_queue(psq->txq); + } +} + +static void xsc_eth_deactivate_txqsq(struct xsc_channel *c) +{ + int tc = c->num_tc; + struct xsc_sq *psq; + + for (tc = 0; tc < c->num_tc; tc++) { + psq = &c->qp.sq[tc]; + clear_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state); + } +} + +static void xsc_activate_rq(struct xsc_channel *c) +{ + int i; + + for (i = 0; i < c->qp.rq_num; i++) + set_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state); +} + +static void xsc_deactivate_rq(struct xsc_channel *c) +{ + int i; + + for (i = 0; i < c->qp.rq_num; i++) + clear_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state); +} + +void xsc_eth_activate_channel(struct xsc_channel *c) +{ + xsc_eth_activate_txqsq(c); + xsc_activate_rq(c); +} + +void xsc_eth_deactivate_channel(struct xsc_channel *c) +{ + xsc_deactivate_rq(c); + xsc_eth_deactivate_txqsq(c); +} + +static void xsc_eth_activate_channels(struct xsc_eth_channels *chs) +{ + int i; + + for (i = 0; i < chs->num_chl; i++) + xsc_eth_activate_channel(&chs->c[i]); +} + +static void xsc_eth_deactivate_channels(struct xsc_eth_channels *chs) +{ + int i; + + for (i = 0; i < chs->num_chl; i++) + xsc_eth_deactivate_channel(&chs->c[i]); + + /* Sync with all NAPIs to wait until they stop using queues. 
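+ * The RQ/SQ ENABLED state bits were cleared above, so any NAPI poll that + * starts after synchronize_net() returns will see the queues as disabled.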
*/ + synchronize_net(); + + for (i = 0; i < chs->num_chl; i++) + /* last doorbell out */ + napi_disable(&chs->c[i].napi); +} + +static void xsc_eth_build_tx2sq_maps(struct xsc_adapter *adapter) +{ + struct xsc_channel *c; + struct xsc_sq *psq; + int i, tc; + + for (i = 0; i < adapter->channels.num_chl; i++) { + c = &adapter->channels.c[i]; + for (tc = 0; tc < c->num_tc; tc++) { + psq = &c->qp.sq[tc]; + adapter->txq2sq[psq->txq_ix] = psq; + adapter->channel_tc2realtxq[i][tc] = + i + tc * adapter->channels.num_chl; + } + } +} + +void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter) +{ + int num_txqs; + struct net_device *netdev = adapter->netdev; + + num_txqs = adapter->channels.num_chl * adapter->nic_param.num_tc; + xsc_netdev_set_tcs(adapter, adapter->channels.num_chl, adapter->nic_param.num_tc); + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, adapter->channels.num_chl); + + xsc_eth_build_tx2sq_maps(adapter); + xsc_eth_activate_channels(&adapter->channels); + netif_tx_start_all_queues(adapter->netdev); +} + +void xsc_eth_deactivate_priv_channels(struct xsc_adapter *adapter) +{ + netif_tx_disable(adapter->netdev); + xsc_eth_deactivate_channels(&adapter->channels); +} + +static int xsc_eth_sw_init(struct xsc_adapter *adapter) +{ + int ret; + + ret = xsc_eth_open_channels(adapter); + if (ret) + return ret; + + xsc_eth_activate_priv_channels(adapter); + + return 0; +} + +static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq) +{ + int i; + + for (i = 0; i < c->qp.rq_num; i++) { + if (free_rq) + xsc_eth_close_qp_rq(c, &c->qp.rq[i]); + xsc_eth_close_cq(c, &c->qp.rq[i].cq); + memset(&c->qp.rq[i], 0, sizeof(struct xsc_rq)); + } + + for (i = 0; i < c->qp.sq_num; i++) { + xsc_eth_close_qp_sq(c, &c->qp.sq[i]); + xsc_eth_close_cq(c, &c->qp.sq[i].cq); + } + + netif_napi_del(&c->napi); +} + +static void xsc_eth_close_channels(struct xsc_adapter *adapter) +{ + int i; + struct xsc_channel *c = NULL; + + for (i = 0; i < adapter->channels.num_chl; i++) { + c = &adapter->channels.c[i]; + xsc_core_dbg(adapter->xdev, "start to close channel%d\n", c->chl_idx); + + xsc_eth_close_channel(c, true); + } + + kfree(adapter->channels.c); + adapter->channels.num_chl = 0; +} + +static void xsc_eth_sw_deinit(struct xsc_adapter *adapter) +{ + xsc_eth_deactivate_priv_channels(adapter); + + return xsc_eth_close_channels(adapter); +} + +int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter) +{ + int err; + + struct xsc_event_set_led_status_mbox_in in; + struct xsc_event_set_led_status_mbox_out out; + + /*query linkstatus cmd*/ + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_LED_STATUS); + in.port_id = id; + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.status) { + xsc_core_err(adapter->xdev, "failed to set led to %d, err=%d, status=%d\n", + id, err, out.status); + return -1; + } + + return 0; +} + +bool xsc_eth_get_link_status(struct xsc_adapter *adapter) +{ + bool link_up; + struct xsc_core_device *xdev = adapter->xdev; + u16 vport = xsc_core_is_pf(xdev) ? 0 : (xdev->vf_id + 1); + + link_up = xsc_query_vport_state(xdev, XSC_CMD_OP_QUERY_VPORT_STATE, vport); + + xsc_core_dbg(adapter->xdev, "link_status=%d\n", link_up); + + return link_up ? 
true : false; +} + +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo) +{ + struct xsc_event_query_linkinfo_mbox_in in; + struct xsc_event_query_linkinfo_mbox_out out; + int i, err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_LINK_INFO); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get link info, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + memcpy(plinkinfo, &out.ctx, sizeof(*plinkinfo)); + + plinkinfo->linkspeed = be32_to_cpu(plinkinfo->linkspeed); + plinkinfo->supported = be64_to_cpu(plinkinfo->supported); + plinkinfo->advertising = be64_to_cpu(plinkinfo->advertising); + for (i = 0; i < ARRAY_SIZE(plinkinfo->supported_speed); i++) { + plinkinfo->supported_speed[i] = be64_to_cpu(plinkinfo->supported_speed[i]); + plinkinfo->advertising_speed[i] = be64_to_cpu(plinkinfo->advertising_speed[i]); + } + + return 0; +} + +int xsc_eth_set_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo) +{ + struct xsc_event_modify_linkinfo_mbox_in in; + struct xsc_event_modify_linkinfo_mbox_out out; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_LINK_INFO); + memcpy(&in.ctx, plinkinfo, sizeof(*plinkinfo)); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set link info, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + return err; +} + +int xsc_get_link_speed(struct xsc_core_device *dev) +{ + struct xsc_adapter *adapter = netdev_priv(dev->netdev); + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) { + xsc_core_err(adapter->xdev, "fail to get linkspeed, return 25G\n"); + return MODULE_SPEED_25G; + } + + return linkinfo.linkspeed; +} +EXPORT_SYMBOL(xsc_get_link_speed); + +#if defined(MSIX_SUPPORT) +int xsc_eth_change_link_status(struct xsc_adapter *adapter) +{ + bool link_up; + + link_up = xsc_eth_get_link_status(adapter); + + if (link_up && !netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link up\n"); + netif_carrier_on(adapter->netdev); + } else if (!link_up && netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link down\n"); + netif_carrier_off(adapter->netdev); + } + + return 0; +} + +static void xsc_eth_event_work(struct work_struct *work) +{ + int err; + struct xsc_event_query_type_mbox_in in; + struct xsc_event_query_type_mbox_out out; + struct xsc_adapter *adapter = container_of(work, struct xsc_adapter, event_work); + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + return; + + /*query cmd_type cmd*/ + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EVENT_TYPE); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to query event type, err=%d, stats=%d\n", + err, out.hdr.status); + goto failed; + } + + switch (out.ctx.resp_cmd_type) { + case XSC_CMD_EVENT_RESP_CHANGE_LINK: + err = xsc_eth_change_link_status(adapter); + if (err) { + xsc_core_err(adapter->xdev, "failed to change linkstatus, err=%d\n", err); + goto failed; + } + + xsc_core_dbg(adapter->xdev, "event cmdtype=%04x\n", out.ctx.resp_cmd_type); + break; + case XSC_CMD_EVENT_RESP_TEMP_WARN: + xsc_core_warn(adapter->xdev, "[Minor]nic chip temperature high warning\n"); + break; + case XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION: + 
xsc_core_warn(adapter->xdev, "[Critical]nic chip was over-temperature\n"); + break; + default: + xsc_core_info(adapter->xdev, "unknown event cmdtype=%04x\n", + out.ctx.resp_cmd_type); + break; + } + +failed: + return; +} + +void xsc_eth_event_handler(void *arg) +{ + struct xsc_adapter *adapter = (struct xsc_adapter *)arg; + + queue_work(adapter->workq, &adapter->event_work); +} +#endif + +int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + struct xsc_cmd_enable_nic_hca_mbox_in in = {}; + struct xsc_cmd_enable_nic_hca_mbox_out out = {}; + u16 caps = 0; + u16 caps_mask = 0; + int err; + + if (xsc_get_user_mode(xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_NIC_HCA); + +#ifdef XSC_RSS_SUPPORT + in.rss.rss_en = 1; + in.rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in.rss.rqn_num = cpu_to_be16(adapter->channels.num_chl); + in.rss.hash_tmpl = cpu_to_be32(adapter->rss_params.rss_hash_tmpl); + in.rss.hfunc = hash_func_type(adapter->rss_params.hfunc); +#else + in.rss.rss_en = 0; + if (adapter->channels.c) + in.rss.rqn_base = cpu_to_be16(adapter->channels.c[0].qp.rq[0].rqn - + xdev->caps.raweth_rss_qp_id_base); +#endif + caps_mask |= BIT(XSC_TBM_CAP_RSS); + + if (netdev->features & NETIF_F_RXCSUM) + caps |= BIT(XSC_TBM_CAP_HASH_PPH); + caps_mask |= BIT(XSC_TBM_CAP_HASH_PPH); + + if (xsc_get_pp_bypass_res(adapter->xdev, false)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS); + + if (xsc_get_pct_drop_config(xdev) && !(netdev->flags & IFF_SLAVE)) + caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + caps_mask |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + + memcpy(in.nic.mac_addr, netdev->dev_addr, ETH_ALEN); + + in.nic.caps = cpu_to_be16(caps); + in.nic.caps_mask = cpu_to_be16(caps_mask); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! err=%d, status=%d\n", err, out.hdr.status); + return -ENOEXEC; + } + + xsc_core_info(xdev, "caps=0x%x, caps_mask=0x%x\n", caps, caps_mask); + + return 0; +} + +int xsc_eth_restore_nic_hca(struct xsc_core_device *dev) +{ + return xsc_eth_enable_nic_hca((struct xsc_adapter *)dev->eth_priv); +} +EXPORT_SYMBOL(xsc_eth_restore_nic_hca); + +int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + struct xsc_cmd_disable_nic_hca_mbox_in in = {}; + struct xsc_cmd_disable_nic_hca_mbox_out out = {}; + int err; + u16 caps = 0; + + if (xsc_get_user_mode(xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_NIC_HCA); + + if (xsc_get_pp_bypass_res(adapter->xdev, false)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + + if (xsc_get_pct_drop_config(xdev) && !(netdev->priv_flags & IFF_BONDING)) + caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + + in.nic.caps = cpu_to_be16(caps); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! 
err=%d, status=%d\n", err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +void xsc_eth_rss_params_change(struct xsc_adapter *adapter, u32 change, void *modify) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_rss_params *rss = &adapter->rss_params; + struct xsc_eth_params *params = &adapter->nic_param; + struct xsc_cmd_modify_nic_hca_mbox_in *in = + (struct xsc_cmd_modify_nic_hca_mbox_in *)modify; + u32 hash_field = 0; + int key_len; + u8 rss_caps_mask = 0; + + if (xsc_get_user_mode(xdev)) + return; + + if (change & BIT(XSC_RSS_RXQ_DROP)) { + in->rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in->rss.rqn_num = 0; + rss_caps_mask |= BIT(XSC_RSS_RXQ_DROP); + goto rss_caps; + } + + if (change & BIT(XSC_RSS_RXQ_UPDATE)) { + in->rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in->rss.rqn_num = cpu_to_be16(params->num_channels); + rss_caps_mask |= BIT(XSC_RSS_RXQ_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_KEY_UPDATE)) { + key_len = min(sizeof(in->rss.hash_key), sizeof(rss->toeplitz_hash_key)); + memcpy(&in->rss.hash_key, rss->toeplitz_hash_key, key_len); + rss_caps_mask |= BIT(XSC_RSS_HASH_KEY_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_TEMP_UPDATE)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP] | + rss->rx_hash_fields[XSC_TT_IPV6_TCP]; + in->rss.hash_tmpl = cpu_to_be32(hash_field); + rss_caps_mask |= BIT(XSC_RSS_HASH_TEMP_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_FUNC_UPDATE)) { + in->rss.hfunc = hash_func_type(rss->hfunc); + rss_caps_mask |= BIT(XSC_RSS_HASH_FUNC_UPDATE); + } + +rss_caps: + if (rss_caps_mask) { + in->rss.caps_mask = rss_caps_mask; + in->rss.rss_en = 1; + in->nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_RSS)); + in->nic.caps = in->nic.caps_mask; + } +} + +int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 flags) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_cmd_modify_nic_hca_mbox_in in = {}; + struct xsc_cmd_modify_nic_hca_mbox_out out = {}; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); + + xsc_eth_rss_params_change(adapter, flags, &in); + if (in.rss.caps_mask) { + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! 
err=%d, status=%u\n", + err, out.hdr.status); + return -ENOEXEC; + } + } + + return 0; +} + +static void xsc_set_default_xps_cpumasks(struct xsc_adapter *priv, + struct xsc_eth_params *params) +{ +#ifdef MSIX_SUPPORT + struct xsc_core_device *xdev = priv->xdev; + int num_comp_vectors, irq; + + num_comp_vectors = priv->nic_param.comp_vectors; + cpumask_clear(xdev->xps_cpumask); + + for (irq = 0; irq < num_comp_vectors; irq++) { + mask_cpu_by_node(xdev->priv.numa_node, xdev->xps_cpumask); + netif_set_xps_queue(priv->netdev, xdev->xps_cpumask, irq); + } +#endif +} + +static int xsc_set_port_admin_status(struct xsc_adapter *adapter, + enum xsc_port_status status) +{ + struct xsc_event_set_port_admin_status_mbox_in in; + struct xsc_event_set_port_admin_status_mbox_out out; + int ret = 0; + + if (!xsc_core_is_pf(adapter->xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_PORT_ADMIN_STATUS); + in.admin_status = cpu_to_be16(status); + + ret = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set port admin status, err=%d, status=%d\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return ret; +} + +int xsc_eth_open(struct net_device *netdev) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_device *xdev = adapter->xdev; + int ret = XSCALE_RET_SUCCESS; + + mutex_lock(&adapter->state_lock); + if (adapter->status == XSCALE_ETH_DRIVER_OK) { + xsc_core_warn(adapter->xdev, "unnormal ndo_open when status=%d\n", + adapter->status); + goto ret; + } + + spin_lock_init(&adapter->lock); + + ret = xsc_eth_sw_init(adapter); + if (ret) + goto ret; + + ret = xsc_eth_reset(xdev); + if (ret) + goto sw_deinit; + + ret = xsc_eth_enable_nic_hca(adapter); + if (ret) + goto sw_deinit; + +#ifdef NEED_CREATE_RX_THREAD + ret = xsc_eth_rx_thread_create(adapter); + if (ret) { + xsc_core_warn(xdev, "xsc_eth_rx_thread_create failed, err=%d\n", ret); + goto sw_deinit; + } +#endif + +#if defined(MSIX_SUPPORT) + /*INIT_WORK*/ + INIT_WORK(&adapter->event_work, xsc_eth_event_work); + xdev->event_handler = xsc_eth_event_handler; + + if (xsc_eth_get_link_status(adapter)) { + netdev_info(netdev, "Link up\n"); + netif_carrier_on(adapter->netdev); + } else { + netdev_info(netdev, "Link down\n"); + } +#else + netif_carrier_on(netdev); +#endif + + adapter->status = XSCALE_ETH_DRIVER_OK; + + xsc_set_default_xps_cpumasks(adapter, &adapter->nic_param); + + xsc_set_port_admin_status(adapter, XSC_PORT_UP); + + goto ret; + +sw_deinit: + xsc_eth_sw_deinit(adapter); + +ret: + mutex_unlock(&adapter->state_lock); + xsc_core_info(xdev, "open %s %s, ret=%d\n", + netdev->name, ret ? 
"failed" : "ok", ret); + if (ret) + return XSCALE_RET_ERROR; + else + return XSCALE_RET_SUCCESS; +} + +int xsc_eth_close(struct net_device *netdev) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + mutex_lock(&adapter->state_lock); + + if (!netif_device_present(netdev)) { + ret = -ENODEV; + goto ret; + } + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + goto ret; + + adapter->status = XSCALE_ETH_DRIVER_CLOSE; + +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif + + netif_carrier_off(adapter->netdev); + + xsc_eth_sw_deinit(adapter); + + ret = xsc_eth_disable_nic_hca(adapter); + if (ret) + xsc_core_warn(adapter->xdev, "failed to disable nic hca, err=%d\n", ret); + + xsc_set_port_admin_status(adapter, XSC_PORT_DOWN); + +ret: + mutex_unlock(&adapter->state_lock); + xsc_core_info(adapter->xdev, "close device %s %s, ret=%d\n", + adapter->netdev->name, ret ? "failed" : "ok", ret); + + return ret; +} + +static int xsc_eth_set_mac(struct net_device *netdev, void *addr) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct sockaddr *saddr = addr; + struct xsc_core_device *xdev = adapter->xdev; + int ret; + u16 vport = xsc_core_is_pf(xdev) ? 0 : (xdev->vf_id + 1); + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + ret = xsc_modify_nic_vport_mac_address(xdev, vport, saddr->sa_data, false); + if (ret) + xsc_core_err(adapter->xdev, "%s: xsc set mac addr failed\n", __func__); + + netif_addr_lock_bh(netdev); + eth_hw_addr_set(netdev, saddr->sa_data); + netif_addr_unlock_bh(netdev); + + return 0; +} + +static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc) +{ + int tc; + + netdev_reset_tc(priv->netdev); + + if (ntc == 1) + return; + + netdev_set_num_tc(priv->netdev, ntc); + + /* Map netdev TCs to offset 0 + * We have our own UP to TXQ mapping for QoS + */ + for (tc = 0; tc < ntc; tc++) + netdev_set_tc_queue(priv->netdev, tc, nch, 0); +} + +static int xsc_update_netdev_queues(struct xsc_adapter *priv) +{ + struct net_device *netdev = priv->netdev; + int num_txqs, num_rxqs, nch, ntc; + int old_num_txqs, old_ntc; + int err; +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + bool disabling; +#endif + + old_num_txqs = netdev->real_num_tx_queues; + old_ntc = netdev->num_tc ? : 1; + + nch = priv->nic_param.num_channels; + ntc = priv->nic_param.num_tc; + num_txqs = nch * ntc; + num_rxqs = nch;// * priv->profile->rq_groups; + +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + disabling = num_txqs < netdev->real_num_tx_queues; +#endif + + xsc_netdev_set_tcs(priv, nch, ntc); + + err = netif_set_real_num_tx_queues(netdev, num_txqs); + if (err) { + netdev_warn(netdev, + "netif_set_real_num_tx_queues failed, txqs=%d->%d, tc=%d->%d, err=%d\n", + old_num_txqs, num_txqs, old_ntc, ntc, err); + goto err_tcs; + } + + err = netif_set_real_num_rx_queues(netdev, num_rxqs); + if (err) { + netdev_warn(netdev, "netif_set_real_num_rx_queues failed, rxqs=%d, err=%d\n", + num_rxqs, err); + goto err_txqs; + } + +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + if (disabling) + synchronize_net(); +#endif + + return 0; + +err_txqs: + /* netif_set_real_num_rx_queues could fail only when nch increased. Only + * one of nch and ntc is changed in this function. That means, the call + * to netif_set_real_num_tx_queues below should not fail, because it + * decreases the number of TX queues. 
+ */ + WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); + +err_tcs: + xsc_netdev_set_tcs(priv, old_num_txqs / old_ntc, old_ntc); + return err; +} + +void xsc_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) +{ + int i; + + for (i = 0; i < len; i++) + indirection_rqt[i] = i % num_channels; +} + +int xsc_eth_num_channels_changed(struct xsc_adapter *priv) +{ + struct net_device *netdev = priv->netdev; + u16 count = priv->nic_param.num_channels; + int err; + + err = xsc_update_netdev_queues(priv); + if (err) + goto err; + + if (!netif_is_rxfh_configured(priv->netdev)) + xsc_build_default_indir_rqt(priv->rss_params.indirection_rqt, + XSC_INDIR_RQT_SIZE, count); + + return 0; + +err: + netdev_err(netdev, "%s: failed to change rss rxq number %d, err=%d\n", + __func__, count, err); + return err; +} + +int xsc_safe_switch_channels(struct xsc_adapter *adapter, + xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate) +{ + struct net_device *netdev = adapter->netdev; + int carrier_ok; + int ret = 0; + + adapter->status = XSCALE_ETH_DRIVER_CLOSE; + + carrier_ok = netif_carrier_ok(netdev); + netif_carrier_off(netdev); +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif + ret = xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_DROP)); + if (ret) + goto close_channels; + + xsc_eth_deactivate_priv_channels(adapter); + xsc_eth_close_channels(adapter); + + if (preactivate) { + ret = preactivate(adapter); + if (ret) + goto out; + } + + ret = xsc_eth_open_channels(adapter); + if (ret) + goto close_channels; + + if (postactivate) { + ret = postactivate(adapter); + if (ret) + goto close_channels; + } + + xsc_eth_activate_priv_channels(adapter); + ret = xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_UPDATE)); + if (ret) + goto close_channels; + +#ifdef NEED_CREATE_RX_THREAD + ret = xsc_eth_rx_thread_create(adapter); + if (ret) + goto close_channels; +#endif + + adapter->status = XSCALE_ETH_DRIVER_OK; + + goto out; + +close_channels: + xsc_eth_deactivate_priv_channels(adapter); + xsc_eth_close_channels(adapter); + +out: + if (carrier_ok) + netif_carrier_on(netdev); + xsc_core_dbg(adapter->xdev, "channels=%d, mtu=%d, err=%d\n", + adapter->nic_param.num_channels, + adapter->nic_param.mtu, ret); + return ret; +} + +int xsc_eth_nic_mtu_changed(struct xsc_adapter *priv) +{ + u32 new_mtu = priv->nic_param.mtu; + int ret; + + ret = xsc_eth_set_hw_mtu(priv->xdev, XSC_SW2HW_MTU(new_mtu), + XSC_SW2HW_RX_PKT_LEN(new_mtu)); + + return ret; +} + +static int xsc_eth_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int ret = 0; + int max_buf_len = 0; + + if (new_mtu > netdev->max_mtu || new_mtu < netdev->min_mtu) { + netdev_err(netdev, "%s: Bad MTU (%d), valid range is: [%d..%d]\n", + __func__, new_mtu, netdev->min_mtu, netdev->max_mtu); + return -EINVAL; + } + + if (!xsc_rx_is_linear_skb(new_mtu)) { + max_buf_len = adapter->xdev->caps.recv_ds_num * PAGE_SIZE; + if (new_mtu > max_buf_len) { + netdev_err(netdev, "Bad MTU (%d), max buf len is %d\n", + new_mtu, max_buf_len); + return -EINVAL; + } + } + mutex_lock(&adapter->state_lock); + adapter->nic_param.mtu = new_mtu; + if (adapter->status != XSCALE_ETH_DRIVER_OK) { + ret = xsc_eth_nic_mtu_changed(adapter); + if (ret) + adapter->nic_param.mtu = old_mtu; + else + netdev->mtu = adapter->nic_param.mtu; + goto out; + } + + ret = xsc_safe_switch_channels(adapter, xsc_eth_nic_mtu_changed, NULL); + 
if (ret) + goto out; + + netdev->mtu = adapter->nic_param.mtu; + +out: + mutex_unlock(&adapter->state_lock); + xsc_core_info(adapter->xdev, "mtu change from %d to %d, new_mtu=%d, err=%d\n", + old_mtu, netdev->mtu, new_mtu, ret); + return ret; +} + +static void xsc_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + + xsc_fold_sw_stats64(adapter, stats); +} + +static void xsc_set_rx_mode(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + queue_work(priv->workq, &priv->set_rx_mode_work); +} + +int xsc_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov; + struct xsc_core_device *xdev = adapter->xdev; + int ret; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + ret = xsc_eswitch_set_vport_mac(xdev->priv.eswitch, vf + 1, mac); + if (ret) + xsc_core_err(xdev, "xsc set mac addr failed\n"); + + return ret; +} + +static int xsc_set_vf_trust(struct net_device *dev, int vf, bool setting) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + return xsc_eswitch_set_vport_trust(xdev->priv.eswitch, vf + 1, setting); +} + +static int xsc_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + return xsc_eswitch_set_vport_spoofchk(xdev->priv.eswitch, vf + 1, setting); +} + +static int xsc_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_vport *evport = xsc_eswitch_get_vport(xdev->priv.eswitch, vf + 1); + int err; + + if (!(dev->features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { + xsc_core_err(xdev, "dev features not support STAG_RX %llu STAG_TX %llu\n", + dev->features & NETIF_F_HW_VLAN_STAG_RX, + dev->features & NETIF_F_HW_VLAN_STAG_TX); + return -EOPNOTSUPP; + } + + if (vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) + return -EPROTONOSUPPORT; + + err = xsc_eswitch_set_vport_vlan(xdev->priv.eswitch, vf + 1, + vlan, qos, vlan_proto); + if (err) { + xsc_core_err(xdev, "fail to set vf %d vlan %u qos %u err=%d\n", + vf, vlan, qos, err); + return err; + } + + if (evport) { + evport->vlan_id = vlan; + evport->vlan_qos = qos; + evport->vlan_proto = vlan_proto; + } + + return 0; +} + +int xsc_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_core_sriov *sriov = &xdev->priv.sriov; + int err; + + if (!netif_device_present(dev) || sriov->num_vfs > MAX_VF_NUM_MINIDUMP) + return -EOPNOTSUPP; + + err = xsc_eswitch_get_vport_config(esw, vf + 1, ivi); + + return err; +} + +int xsc_set_vf_link_state(struct net_device *dev, int vf, + int link_state) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + + return xsc_eswitch_set_vport_state(esw, vf + 1, link_state); +} + +int set_feature_rxcsum(struct net_device *netdev, bool enable) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_cmd_modify_nic_hca_mbox_in in = {}; + struct 
xsc_cmd_modify_nic_hca_mbox_out out = {}; + int err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); + in.nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_HASH_PPH)); + in.nic.caps = cpu_to_be16(enable << XSC_TBM_CAP_HASH_PPH); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + netdev_err(netdev, "failed to change rxcsum=%d, err=%d, status=%d\n", + enable, err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int set_feature_vlan_offload(struct net_device *netdev, bool enable) +{ + int err = 0, i; + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_vport *evport = NULL; + + if (!enable) { + for (i = 0; i < adapter->xdev->priv.eswitch->num_vfs; i++) { + evport = xsc_eswitch_get_vport(adapter->xdev->priv.eswitch, + i + 1); + if (evport && (evport->vlan_id || evport->vlan_qos)) { + evport->vlan_id = 0; + evport->vlan_qos = 0; + err = xsc_eswitch_set_vport_vlan(adapter->xdev->priv.eswitch, + i + 1, evport->vlan_id, + evport->vlan_qos, + evport->vlan_proto); + if (err) + xsc_core_err(adapter->xdev, "fail to clear vf vlan offload vf=%d err=%d\n", + i, err); + } + } + } + + return 0; +} + +static int xsc_handle_feature(struct net_device *netdev, + netdev_features_t *features, + netdev_features_t wanted_features, + netdev_features_t feature, + xsc_feature_handler feature_handler) +{ + netdev_features_t changes = wanted_features ^ netdev->features; + bool enable = !!(wanted_features & feature); + int err; + + if (!(changes & feature)) + return 0; + + err = feature_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s feature %pNF failed, err %d\n", + enable ? "Enable" : "Disable", &feature, err); + return err; + } + + xsc_set_feature(features, feature, enable); + + return 0; +} + +int xsc_eth_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t oper_features = netdev->features; + int err = 0; + +#define XSC_HANDLE_FEATURE(feature, handler) \ + xsc_handle_feature(netdev, &oper_features, features, feature, handler) + + err |= XSC_HANDLE_FEATURE(NETIF_F_RXCSUM, set_feature_rxcsum); + err |= XSC_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_RX, set_feature_vlan_offload); + err |= XSC_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_TX, set_feature_vlan_offload); + if (err) { + netdev->features = oper_features; + return -EINVAL; + } + + return 0; +} + +static netdev_features_t xsc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_RX)) + features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_RX; + return features; +} + +#ifdef HAVE_NETDEVICE_OPS_SELECT_QUEUE_FALLBACK +u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + int txq_ix = fallback(dev, skb, NULL); + u16 num_channels; + int up = 0; + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!adapter) { + pr_err("%s adapter is null\n", __func__); + return txq_ix; + } + + if (!netdev_get_num_tc(dev)) + return txq_ix; + + if (skb_vlan_tag_present(skb)) { + up = skb->vlan_tci >> VLAN_PRIO_SHIFT; + if (adapter->nic_param.num_tc > 1) + up = up % (adapter->nic_param.num_tc - 1) + 1; + else + up = 0; + } + + /* channel_ix can be larger than num_channels since + * dev->num_real_tx_queues = num_channels * num_tc + */ + num_channels = adapter->channels.num_chl; + if (txq_ix >= num_channels) + txq_ix = adapter->txq2sq[txq_ix]->ch_ix; + + return adapter->channel_tc2realtxq[txq_ix][up]; 
+} +#else +u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + int txq_ix, up = 0; + u16 num_channels; + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!adapter) { + pr_err("%s adapter is null\n", __func__); + return txq_ix; + } + + txq_ix = netdev_pick_tx(dev, skb, NULL); + if (!netdev_get_num_tc(dev)) + return txq_ix; + + if (skb_vlan_tag_present(skb)) { + up = skb_vlan_tag_get_prio(skb); + if (adapter->nic_param.num_tc > 1) + up = up % (adapter->nic_param.num_tc - 1) + 1; + else + up = 0; + } + + /* channel_ix can be larger than num_channels since + * dev->num_real_tx_queues = num_channels * num_tc + */ + num_channels = adapter->channels.num_chl; + if (txq_ix >= num_channels) + txq_ix = adapter->txq2sq[txq_ix]->ch_ix; + + return adapter->channel_tc2realtxq[txq_ix][up]; +} +#endif + +static int xsc_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_core_device *pf_xdev; + struct net_device *pf_netdev; + struct pci_dev *pdev = xdev->pdev; + int ret = len; + + if (!pdev) + return -EOPNOTSUPP; + if (!xsc_core_is_pf(xdev)) { + if (!pdev->physfn) + return -EOPNOTSUPP; + pf_xdev = pci_get_drvdata(pdev->physfn); + if (!pf_xdev || !pf_xdev->netdev) + return -EOPNOTSUPP; + pf_netdev = (struct net_device *)pf_xdev->netdev; + ret = snprintf(buf, len, "%s_%d", + pf_netdev->name, xdev->vf_id); + } else { + return -EOPNOTSUPP; + } + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + +static int xsc_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov; + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + u16 vport; + int err = 0; + u32 rate = 0; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + if (min_tx_rate > 0) + return -EOPNOTSUPP; + + vport = vf + 1; + xsc_core_dbg(xdev, "set vf rate %d Mbps\n", max_tx_rate); + + rate = (u32)max_tx_rate; + err = xsc_eswitch_set_vport_rate(esw, vport, rate, 0); + if (err) { + xsc_core_err(xdev, "set_vf_rate failed!! 
err=%d\n", err); + return -EINVAL; + } + + return 0; +} + +static const struct net_device_ops xsc_netdev_ops = { + .ndo_open = xsc_eth_open, + .ndo_stop = xsc_eth_close, + .ndo_start_xmit = xsc_eth_xmit_start, + + .ndo_set_rx_mode = xsc_set_rx_mode, + .ndo_validate_addr = NULL, + .ndo_set_mac_address = xsc_eth_set_mac, + .ndo_change_mtu = xsc_eth_change_mtu, + + .ndo_tx_timeout = NULL, + .ndo_set_tx_maxrate = NULL, + .ndo_vlan_rx_add_vid = xsc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = xsc_vlan_rx_kill_vid, + .ndo_do_ioctl = NULL, + .ndo_set_vf_mac = xsc_set_vf_mac, + .ndo_set_vf_vlan = xsc_set_vf_vlan, + .ndo_set_vf_rate = xsc_set_vf_rate, + .ndo_set_vf_spoofchk = xsc_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = NULL, + .ndo_set_vf_trust = xsc_set_vf_trust, + .ndo_get_vf_config = xsc_get_vf_config, + .ndo_set_vf_link_state = xsc_set_vf_link_state, + .ndo_get_stats64 = xsc_get_stats, + .ndo_setup_tc = NULL, + .ndo_set_features = xsc_eth_set_features, + .ndo_fix_features = xsc_fix_features, + .ndo_fdb_add = NULL, + .ndo_bridge_setlink = NULL, + .ndo_bridge_getlink = NULL, + .ndo_dfwd_add_station = NULL, + .ndo_dfwd_del_station = NULL, + .ndo_get_phys_port_name = xsc_get_phys_port_name, + +#ifdef HAVE_NETDEVICE_OPS_UDP_TUNNEL + .ndo_udp_tunnel_add = NULL, + .ndo_udp_tunnel_del = NULL, +#endif + .ndo_features_check = NULL, + .ndo_select_queue = xsc_select_queue, +}; + +static int xsc_get_max_num_channels(struct xsc_core_device *xdev) +{ +#ifdef NEED_CREATE_RX_THREAD + return 8; +#else + return min_t(int, xdev->dev_res->eq_table.num_comp_vectors, + XSC_ETH_MAX_NUM_CHANNELS); +#endif +} + +static int xsc_eth_netdev_init(struct xsc_adapter *adapter) +{ + unsigned int node, tc, nch; + + tc = adapter->nic_param.num_tc; + nch = adapter->nic_param.max_num_ch; + node = dev_to_node(adapter->dev); + adapter->txq2sq = kcalloc_node(nch * tc, + sizeof(*adapter->txq2sq), GFP_KERNEL, node); + if (!adapter->txq2sq) + goto err_out; + + mutex_init(&adapter->state_lock); + + INIT_WORK(&adapter->set_rx_mode_work, xsc_set_rx_mode_work); + + adapter->workq = create_singlethread_workqueue("xsc_eth"); + if (!adapter->workq) + goto err_free_priv; + + netif_carrier_off(adapter->netdev); + + return 0; + +err_free_priv: + kfree(adapter->txq2sq); +err_out: + return -ENOMEM; +} + +static const struct xsc_tirc_config tirc_default_config[XSC_NUM_INDIR_TIRS] = { + [XSC_TT_IPV4] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP, + }, + [XSC_TT_IPV4_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV4_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV6] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP6, + }, + [XSC_TT_IPV6_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, + [XSC_TT_IPV6_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, +}; + +struct xsc_tirc_config xsc_tirc_get_default_config(enum xsc_traffic_types tt) +{ + return tirc_default_config[tt]; +} + +void xsc_build_rss_params(struct xsc_rss_params *rss_params, u16 num_channels) +{ + enum xsc_traffic_types tt; + + rss_params->hfunc = ETH_RSS_HASH_TOP; + netdev_rss_key_fill(rss_params->toeplitz_hash_key, + 
sizeof(rss_params->toeplitz_hash_key)); + + xsc_build_default_indir_rqt(rss_params->indirection_rqt, + XSC_INDIR_RQT_SIZE, num_channels); + + for (tt = 0; tt < XSC_NUM_INDIR_TIRS; tt++) { + rss_params->rx_hash_fields[tt] = + tirc_default_config[tt].rx_hash_fields; + } + rss_params->rss_hash_tmpl = XSC_HASH_IP_PORTS | XSC_HASH_IP6_PORTS; +} + +void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_num) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eth_params *params = &adapter->nic_param; + + params->mtu = SW_DEFAULT_MTU; + params->num_tc = tc_num; + + params->comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; + params->max_num_ch = ch_num; + params->num_channels = ch_num; + + params->rq_max_size = BIT(xdev->caps.log_max_qp_depth); + params->sq_max_size = BIT(xdev->caps.log_max_qp_depth); + xsc_build_rss_params(&adapter->rss_params, adapter->nic_param.num_channels); + + if (params->num_channels > XSC_NET_DIM_ENABLE_THRESHOLD) { + params->rx_dim_enabled = 1; + params->tx_dim_enabled = 1; + xsc_set_rx_cq_mode_params(params, XSC_CQ_PERIOD_MODE_START_FROM_EQE); + xsc_set_tx_cq_mode_params(params, XSC_CQ_PERIOD_MODE_START_FROM_EQE); + } + + xsc_core_info(xdev, "mtu=%d, num_ch=%d(max=%d), num_tc=%d\n", + params->mtu, params->num_channels, + params->max_num_ch, params->num_tc); +} + +void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct xsc_core_device *xdev = adapter->xdev; + + /* Set up network device as normal. */ + netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + netdev->netdev_ops = &xsc_netdev_ops; + +#ifdef CONFIG_XSC_CORE_EN_DCB + netdev->dcbnl_ops = &xsc_dcbnl_ops; +#endif + eth_set_ethtool_ops(netdev); + + netdev->min_mtu = SW_MIN_MTU; + netdev->max_mtu = SW_MAX_MTU; + /*mtu - macheaderlen - ipheaderlen should be aligned in 8B*/ + netdev->mtu = SW_DEFAULT_MTU; + + netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;//NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO;//NETIF_F_TSO_ECN + netdev->vlan_features |= NETIF_F_TSO6; + //todo: enable rx csum + netdev->vlan_features |= NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_RXHASH; + netdev->vlan_features |= NETIF_F_GSO_PARTIAL; + + netdev->hw_features = netdev->vlan_features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + + if (xsc_vxlan_allowed(xdev) || xsc_geneve_tx_allowed(xdev) || + xsc_any_tunnel_proto_supported(xdev)) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_TSO; //NETIF_F_TSO_ECN + netdev->hw_enc_features |= NETIF_F_TSO6; + netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; + } + + netdev->features |= netdev->hw_features; + netdev->features |= NETIF_F_HIGHDMA; +} + +static int xsc_eth_nic_init(struct xsc_adapter *adapter, + void *rep_priv, u32 ch_num, u32 tc_num) +{ + int err = -1; + + xsc_eth_build_nic_params(adapter, ch_num, tc_num); + + err = xsc_eth_netdev_init(adapter); + if (err) + return err; + + xsc_eth_build_nic_netdev(adapter); + + return 0; +} + +static void xsc_eth_nic_cleanup(struct xsc_adapter *adapter) +{ + destroy_workqueue(adapter->workq); + kfree(adapter->txq2sq); +} + +/* create xdev resource,pd/domain/mkey */ +int xsc_eth_create_xdev_resources(struct xsc_core_device *xdev) +{ + return 0; +} + +static int xsc_eth_init_nic_tx(struct xsc_adapter 
*adapter) +{ + /*create tis table*/ +#ifdef CONFIG_XSC_CORE_EN_DCB + xsc_dcbnl_initialize(adapter); +#endif + + return 0; +} + +static int xsc_eth_cleanup_nic_tx(struct xsc_adapter *adapter) +{ + return 0; +} + +/* init tx: create hw resource, set register according to spec */ +int xsc_eth_init_nic_rx(struct xsc_adapter *adapter) +{ + /* create rqt and tir table + * tir table:base on traffic type like ip4_tcp/ipv6_tcp/ + * each rqt table for a traffic type + */ + + return 0; +} + +static int xsc_eth_cleanup_nic_rx(struct xsc_adapter *adapter) +{ + return 0; +} + +static void xsc_eth_l2_addr_init(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + char mac[6] = {0}; + int ret = 0; + + ret = xsc_eth_get_mac(adapter->xdev, mac); + if (ret) { + xsc_core_warn(adapter->xdev, "get mac failed %d, generate random mac...", ret); + eth_random_addr(mac); + } + dev_addr_mod(netdev, 0, mac, 6); + + if (!is_valid_ether_addr(netdev->perm_addr)) + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); +} + +static int xsc_eth_nic_enable(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + + if (xsc_core_is_pf(xdev)) + xsc_lag_add_netdev(adapter->netdev); + xsc_eth_l2_addr_init(adapter); + + xsc_eth_set_hw_mtu(xdev, XSC_SW2HW_MTU(adapter->nic_param.mtu), + XSC_SW2HW_RX_PKT_LEN(adapter->nic_param.mtu)); + +#ifdef CONFIG_XSC_CORE_EN_DCB + xsc_dcbnl_init_app(adapter); +#endif + + rtnl_lock(); + netif_device_attach(adapter->netdev); + rtnl_unlock(); + + return 0; +} + +static void xsc_eth_nic_disable(struct xsc_adapter *adapter) +{ + rtnl_lock(); + if (netif_running(adapter->netdev)) + xsc_eth_close(adapter->netdev); + netif_device_detach(adapter->netdev); + rtnl_unlock(); + + if (xsc_core_is_pf(adapter->xdev)) + xsc_lag_remove_netdev(adapter->netdev); +} + +/* call init tx/rx, enable function about nic init */ +static int xsc_attach_netdev(struct xsc_adapter *adapter) +{ + int err = -1; + + err = xsc_eth_init_nic_tx(adapter); + if (err) + return err; + + err = xsc_eth_init_nic_rx(adapter); + if (err) + return err; + + err = xsc_eth_nic_enable(adapter); + if (err) + return err; + + xsc_core_info(adapter->xdev, "%s ok\n", __func__); + return 0; +} + +static void xsc_detach_netdev(struct xsc_adapter *adapter) +{ + xsc_eth_nic_disable(adapter); + + flush_workqueue(adapter->workq); + + xsc_eth_cleanup_nic_rx(adapter); + xsc_eth_cleanup_nic_tx(adapter); + adapter->status = XSCALE_ETH_DRIVER_DETACH; +} + +static int xsc_eth_attach(struct xsc_core_device *xdev, struct xsc_adapter *adapter) +{ + int err = -1; + + if (netif_device_present(adapter->netdev)) + return 0; + + err = xsc_eth_create_xdev_resources(xdev); + if (err) + return err; + + err = xsc_attach_netdev(adapter); + if (err) + return err; + + xsc_core_info(adapter->xdev, "%s ok\n", __func__); + return 0; +} + +static void xsc_eth_detach(struct xsc_core_device *xdev, struct xsc_adapter *adapter) +{ + if (!netif_device_present(adapter->netdev)) + return; + + xsc_detach_netdev(adapter); +} + +static void *xsc_eth_add(struct xsc_core_device *xdev) +{ + int err = -1; + int num_chl, num_tc; + struct net_device *netdev; + struct xsc_adapter *adapter = NULL; + void *rep_priv = NULL; + + num_chl = xsc_get_max_num_channels(xdev); + num_tc = xdev->caps.max_tc; + + /* Allocate ourselves a network device with room for our info */ + netdev = alloc_etherdev_mqs(sizeof(struct xsc_adapter), + num_chl * num_tc, num_chl); + if (unlikely(!netdev)) { + xsc_core_warn(xdev, "alloc_etherdev_mqs failed, txq=%d, 
rxq=%d\n", + (num_chl * num_tc), num_chl); + return NULL; + } + + /* Set up our device-specific information */ + netdev->dev.parent = &xdev->pdev->dev; + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = xdev->pdev; + adapter->dev = &adapter->pdev->dev; + adapter->xdev = (void *)xdev; + xdev->eth_priv = adapter; + + err = xsc_eth_nic_init(adapter, rep_priv, num_chl, num_tc); + if (err) { + xsc_core_warn(xdev, "xsc_nic_init failed, num_ch=%d, num_tc=%d, err=%d\n", + num_chl, num_tc, err); + goto err_free_netdev; + } + + err = xsc_eth_attach(xdev, adapter); + if (err) { + xsc_core_warn(xdev, "xsc_eth_attach failed, err=%d\n", err); + goto err_cleanup_netdev; + } + + adapter->stats = kvzalloc(sizeof(*adapter->stats), GFP_KERNEL); + if (unlikely(!adapter->stats)) + goto err_detach; + + err = register_netdev(netdev); + if (err) { + xsc_core_warn(xdev, "register_netdev failed, err=%d\n", err); + goto err_reg_netdev; + } + + err = xsc_eth_sysfs_create(netdev, xdev); + if (err) + goto err_sysfs_create; + + xdev->netdev = (void *)netdev; + adapter->status = XSCALE_ETH_DRIVER_INIT; + + return adapter; + +err_sysfs_create: + unregister_netdev(adapter->netdev); +err_reg_netdev: + kfree(adapter->stats); +err_detach: + xsc_eth_detach(xdev, adapter); +err_cleanup_netdev: + xsc_eth_nic_cleanup(adapter); +err_free_netdev: + free_netdev(netdev); + + return NULL; +} + +static void xsc_eth_remove(struct xsc_core_device *xdev, void *context) +{ + struct xsc_adapter *adapter = NULL; + + if (!xdev) + return; + + adapter = xdev->eth_priv; + if (!adapter) { + xsc_core_warn(xdev, "failed! adapter is null\n"); + return; + } + + xsc_core_info(adapter->xdev, "remove netdev %s entry\n", adapter->netdev->name); + + xsc_eth_sysfs_remove(adapter->netdev, xdev); + + unregister_netdev(adapter->netdev); + + kfree(adapter->stats); + + xsc_eth_detach(xdev, adapter); + xsc_eth_nic_cleanup(adapter); + + free_netdev(adapter->netdev); + + xdev->netdev = NULL; + xdev->eth_priv = NULL; +} + +static struct xsc_interface xsc_interface = { + .add = xsc_eth_add, + .remove = xsc_eth_remove, + .event = NULL, + .protocol = XSC_INTERFACE_PROTOCOL_ETH, +}; + +int xsc_net_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc net driver recv %lu event\n", action); + if (xsc_get_exit_flag()) + return NOTIFY_OK; + xsc_remove_eth_driver(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_net_nb = { + .notifier_call = xsc_net_reboot_event_handler, + .next = NULL, + .priority = 1, +}; + +void xsc_remove_eth_driver(void) +{ + pr_info("remove ethernet driver\n"); + xsc_eth_ctrl_fini(); + xsc_unregister_interface(&xsc_interface); +} + +static __init int xsc_net_driver_init(void) +{ + int ret; + + pr_info("add ethernet driver\n"); + ret = xsc_register_interface(&xsc_interface); + if (ret != 0) { + pr_err("failed to register interface\n"); + goto out; + } + + ret = xsc_eth_ctrl_init(); + if (ret != 0) { + pr_err("failed to register port control node\n"); + xsc_unregister_interface(&xsc_interface); + goto out; + } + + register_reboot_notifier(&xsc_net_nb); + return 0; +out: + return -1; +} + +static __exit void xsc_net_driver_exit(void) +{ + unregister_reboot_notifier(&xsc_net_nb); + xsc_remove_eth_driver(); +} + +module_init(xsc_net_driver_init); +module_exit(xsc_net_driver_exit); diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c new file mode 100644 index 
0000000000000000000000000000000000000000..6c4afad1be8fbc6af4400917d1c2e545e7c88b98 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" + +#include "xsc_eth.h" +#include "xsc_accel.h" +#include +#include +#include "xsc_eth_txrx.h" +#include "xsc_eth_stats.h" +#include "xsc_eth_debug.h" + +#ifdef NEED_CREATE_RX_THREAD + +extern void xsc_cq_notify_hw(struct xsc_cq *cq); + +DEFINE_PER_CPU(bool, txcqe_get); +EXPORT_PER_CPU_SYMBOL(txcqe_get); + +u32 xsc_eth_process_napi(struct xsc_adapter *adapter) +{ + int work_done = 0; + bool err = false; + int budget = 1; + int i, chl; + int errtx = false; + struct xsc_channel *c; + struct xsc_rq *prq; + struct xsc_ch_stats *ch_stats; + + if (adapter->status == XSCALE_ETH_DRIVER_OK) { + for (chl = 0; chl < adapter->channels.num_chl; chl++) { + c = &adapter->channels.c[chl]; + prq = &c->qp.rq[0]; + ch_stats = c->stats; + ch_stats->poll++; + + for (i = 0; i < c->num_tc; i++) { + errtx |= xsc_poll_tx_cq(&c->qp.sq[i].cq, budget); + ETH_DEBUG_LOG("errtx=%u.\r\n", errtx); + if (likely(__this_cpu_read(txcqe_get))) { + xsc_cq_notify_hw(&c->qp.sq[i].cq); + __this_cpu_write(txcqe_get, false); + } + } + + work_done = xsc_poll_rx_cq(&prq->cq, budget); + + ETH_DEBUG_LOG("work_done=%d.\r\n", work_done); + + if (work_done != 0) { + xsc_cq_notify_hw(&prq->cq); + err |= prq->post_wqes(prq); + + ETH_DEBUG_LOG("err=%u.\r\n", err); + } else { + ETH_DEBUG_LOG("no-load.\r\n"); + } + + ch_stats->arm++; + } + } + + return XSCALE_RET_SUCCESS; +} + +int xsc_eth_rx_thread(void *arg) +{ + u32 ret = XSCALE_RET_SUCCESS; + struct xsc_adapter *adapter = (struct xsc_adapter *)arg; + + while (kthread_should_stop() == 0) { + if (need_resched()) + schedule(); + ret = xsc_eth_process_napi(adapter); + if (ret != XSCALE_RET_SUCCESS) + ETH_DEBUG_LOG("unexpected branch.\r\n"); + + ETH_DEBUG_LOG("adapter=%p\r\n", adapter); + } + ETH_DEBUG_LOG("do_exit.\r\n"); + + return XSCALE_RET_SUCCESS; +} + +u32 g_thread_count; +u32 xsc_eth_rx_thread_create(struct xsc_adapter *adapter) +{ + struct task_struct *task = NULL; + + task = kthread_create(xsc_eth_rx_thread, (void *)adapter, + "xsc_rx%i", g_thread_count); + if (!task) + return XSCALE_RET_ERROR; + + ETH_DEBUG_LOG("thread_count=%d\r\n", g_thread_count); + + kthread_bind(task, g_thread_count); + wake_up_process(task); + adapter->task = task; + + g_thread_count++; + + return XSCALE_RET_SUCCESS; +} +#endif /* NEED_CREATE_RX_THREAD */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h new file mode 100644 index 0000000000000000000000000000000000000000..1378be66b6156f6e6e16df29e5ffcdaf4a20a7b4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_ACCEL_H +#define XSC_ACCEL_H + +#include +#include +#include "common/xsc_core.h" + +static inline void xsc_udp_gso_handle_tx_skb(struct sk_buff *skb) +{ + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + + udp_hdr(skb)->len = htons(payload_len); +} + +static inline struct sk_buff *xsc_accel_handle_tx(struct sk_buff *skb) +{ + /*no not consider tls and ipsec*/ + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + xsc_udp_gso_handle_tx_skb(skb); + return skb; +} + +static inline bool xsc_vxlan_allowed(struct xsc_core_device *dev) +{ + return false; +} + +static inline bool xsc_geneve_tx_allowed(struct xsc_core_device *dev) +{ + return false; +} + +static inline bool xsc_any_tunnel_proto_supported(struct xsc_core_device *dev) +{ + return false; +} + +#endif /* XSC_ACCEL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c new file mode 100644 index 0000000000000000000000000000000000000000..36503b3113f78769d66d5c0b9108c7a3503be58e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c @@ -0,0 +1,1482 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "common/vport.h" +#include "xsc_eth.h" +#include "xsc_eth_debug.h" +#include "xsc_hw_comm.h" + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +#define XSC_100MB (100000) +#define XSC_1GB (1000000) +#define XSC_RATE_LIMIT_BASE (16000) +#define XSC_WRR_DIV_BASE 10 +#define XSC_WRR_DEFAULT_WEIGHT 10 +#define XSC_DCBX_WFQ_TOTAL_WEIGHT 100 +#define XSC_DCBX_MAX_TC 8 + +#define XSC_CEE_STATE_UP 1 +#define XSC_CEE_STATE_DOWN 0 + +/* Max supported cable length is 1000 meters */ +#define XSC_MAX_CABLE_LENGTH 1000 + +enum { + XSC_VENDOR_TC_GROUP_NUM = 7, + XSC_LOWEST_PRIO_GROUP = 0, +}; + +#ifdef CONFIG_XSC_CORE_EN_DCB +static int xsc_set_trust_state(struct xsc_adapter *priv, u8 trust_state); +static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio); +static u8 xsc_dcbnl_setall(struct net_device *netdev); + +static int xsc_max_tc(struct xsc_core_device *dev) +{ + u8 num_tc = dev->caps.max_tc ? : 8; + + if (num_tc > XSC_DCBX_MAX_TC) + num_tc = XSC_DCBX_MAX_TC; + + return num_tc - 1; +} + +static void xsc_pfc_array2bitmap(u8 *pfcbitmap, u8 *array) +{ + u8 i; + + *pfcbitmap = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (array[i]) + *pfcbitmap = *pfcbitmap | (1 << i); + } +} + +static void xsc_pfc_bitmap2array(u8 pfcbitmap, u8 *array) +{ + u8 i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if ((pfcbitmap >> i) & 0x1) + array[i] = 1; + } +} + +static int xsc_query_port_prio_tc(struct xsc_core_device *xdev, int prio, u8 *tc) +{ + /* user priotity to tc 0:0; 1:1; 2:2; 3:3 ... 
7:7 */ + *tc = (u8)prio; + return 0; +} + +static int xsc_set_port_prio_tc(struct xsc_core_device *xdev, u8 *prio_tc) +{ + u8 i; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + prio_tc[i] = i; + + return 0; +} + +static int xsc_wfq_to_wrr_adpat(struct xsc_core_device *xdev, u8 *dst_bw, + u8 *src_bw, u8 ets_cnt, u8 min_weight) +{ + u8 i, index; + u8 max_commom_div = 1; + u8 flag[XSC_DCBX_WFQ_TOTAL_WEIGHT] = {0}; + + if (min_weight >= XSC_DCBX_WFQ_TOTAL_WEIGHT || !ets_cnt) + return 0; + + for (index = 1; index <= min_weight; index++) { + for (i = 0; i < ets_cnt; i++) { + /*any ets bw can not div by whole,flag = 1*/ + if (src_bw[i] % index) { + flag[index] = 1; + break; + } + } + } + + for (index = 1; index <= min_weight; index++) { + if (flag[index] == 0) + max_commom_div = index; + } + + xsc_core_dbg(xdev, "max_commom_div = %d, min_weight = %d\n", max_commom_div, min_weight); + + for (i = 0; i < ets_cnt; i++) { + dst_bw[i] = src_bw[i] / max_commom_div; + xsc_core_dbg(xdev, "dst_bw[%d] = %d\n", i, dst_bw[i]); + } + + return 0; +} + +static int xsc_wrr_to_wfq_adpat(struct xsc_core_device *xdev, + struct xsc_weight_get *wrr, u8 *bandwidth) +{ + u8 i, wrr_cnt = 0, index; + u16 wrr_total_weight = 0, wfq_tatal_weight = 0; + u16 portion = 0; + u16 rmndr = 0; + u16 temp[IEEE_8021QAZ_MAX_TCS] = {0}; + + /*1 calc cur wrr weight total*/ + for (i = 0; i <= wrr->max_prio; i++) { + if (wrr->weight[i] > 0) { + wrr_total_weight += wrr->weight[i]; + wrr_cnt++; + } + } + + xsc_core_dbg(xdev, "%s: wrr_total_weight = %d max_prio = %d\n", + __func__, wrr_total_weight, wrr->max_prio); + + if (!wrr_total_weight || wrr_total_weight > XSC_DCBX_WFQ_TOTAL_WEIGHT) + return -EINVAL; + + portion = XSC_DCBX_WFQ_TOTAL_WEIGHT / wrr_total_weight; + rmndr = XSC_DCBX_WFQ_TOTAL_WEIGHT % wrr_total_weight; + + /*2 calc major wfq weight*/ + for (i = 0; i <= wrr->max_prio; i++) { + if (wrr->weight[i] > 0) { + temp[i] = wrr->weight[i] * portion; + wfq_tatal_weight += temp[i]; + } + } + + xsc_core_dbg(xdev, "portion = %d, rmndr = %d, wfq_tatal = %d\n", + portion, rmndr, wfq_tatal_weight); + + /*3 average remainder to every prio*/ + if (rmndr > 0) { + for (i = 0; i < rmndr; i++) { + index = i % wrr_cnt; + temp[index] = temp[index] + 1; + } + } + for (i = 0; i <= wrr->max_prio; i++) + bandwidth[i] = (u8)temp[i]; + + return 0; +} + +static int xsc_query_port_ets_rate_limit(struct xsc_core_device *xdev, u64 *ratelimit) +{ + u8 i; + int err = 0; + struct xsc_rate_limit_get req; + struct xsc_rate_limit_get rsp; + + memset(&req, 0, sizeof(struct xsc_rate_limit_get)); + memset(&rsp, 0, sizeof(struct xsc_rate_limit_get)); + /*0--port rate limit; 1--priority rate limit*/ + req.limit_level = 1; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_RATE_LIMIT, &req, &rsp); + if (err) + return err; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + ratelimit[i] = (u64)(rsp.rate_cir[i]); + + return 0; +} + +static int xsc_modify_port_ets_rate_limit(struct xsc_core_device *xdev, u64 *ratelimit) +{ + u8 i; + struct xsc_rate_limit_set req; + + memset(&req, 0, sizeof(struct xsc_rate_limit_set)); + req.limit_level = 1; + + for (i = 0; i <= xsc_max_tc(xdev); i++) { + req.rate_cir = (u32)ratelimit[i]; + req.limit_id = i; + xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_RATE_LIMIT, &req, NULL); + } + + return 0; +} + +static int xsc_query_port_bw_config(struct xsc_core_device *xdev, u8 *bandwidth) +{ + u8 i; + u8 sp_cnt = 0; + int err = 0; + struct xsc_sp_get sp_rsp; + struct xsc_weight_get weight_rsp; + + memset(&sp_rsp, 0, sizeof(struct xsc_sp_get)); + err = 
xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_SP, NULL, &sp_rsp); + if (err) + return err; + /*SP enable,bandwidth is 0*/ + for (i = 0; i <= sp_rsp.max_prio; i++) { + if (sp_rsp.sp[i]) { + sp_cnt++; + bandwidth[i] = 0; + } + } + + xsc_core_dbg(xdev, "sp_cnt = %d, max_prio = %d\n", sp_cnt, sp_rsp.max_prio); + + memset(&weight_rsp, 0, sizeof(struct xsc_weight_get)); + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_WEIGHT, NULL, &weight_rsp); + if (err) + return err; + + xsc_core_dbg(xdev, "weight_rsp.max_prio = %d\n", weight_rsp.max_prio); + for (i = 0; i <= weight_rsp.max_prio; i++) + xsc_core_dbg(xdev, "i = %d, weight = %d\n", i, weight_rsp.weight[i]); + + xsc_wrr_to_wfq_adpat(xdev, &weight_rsp, bandwidth); + + return 0; +} + +static int xsc_query_port_pfc(struct xsc_core_device *xdev, u8 *pfc_bitmap) +{ + int err = 0; + struct xsc_pfc_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_pfc_get)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC, NULL, &rsp); + if (err) + return err; + + xsc_pfc_array2bitmap(pfc_bitmap, rsp.pfc_on); + + return 0; +} + +static int xsc_query_port_stats(struct xsc_core_device *xdev, struct ieee_pfc *pfc) +{ + u8 i; + int err = 0; + struct xsc_pfc_prio_stats_mbox_in req; + struct xsc_pfc_prio_stats_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_prio_stats_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_prio_stats_mbox_out)); + + req.pport = xdev->mac_port; + req.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_QUERY_PFC_PRIO_STATS); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_QUERY_PFC_PRIO_STATS, &req, &rsp); + if (err == 0 && rsp.hdr.status == 0) { + for (i = 0; i <= xsc_max_tc(xdev); i++) { + pfc->requests[i] = rsp.prio_stats[i].tx_pause; + pfc->indications[i] = rsp.prio_stats[i].rx_pause; + } + } + + return 0; +} + +static int xsc_query_port_pfc_stats(struct xsc_core_device *xdev, struct ieee_pfc *pfc) +{ + xsc_query_port_stats(xdev, pfc); + + xsc_query_port_pfc(xdev, &pfc->pfc_en); + + return 0; +} + +static int xsc_set_port_pfc(struct xsc_core_device *xdev, u8 pfcbitmap) +{ + u8 i; + u8 pfc_en[IEEE_8021QAZ_MAX_TCS] = {0}; + struct xsc_pfc_set req; + struct xsc_pfc_set rsp; + + xsc_pfc_bitmap2array(pfcbitmap, pfc_en); + + memset(&req, 0, sizeof(struct xsc_pfc_set)); + for (i = 0; i <= xsc_max_tc(xdev); i++) { + req.pfc_on = pfc_en[i]; + req.priority = i; + xsc_core_dbg(xdev, "%s: prio %d, pfc %d\n", __func__, i, req.pfc_on); + xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC, &req, &rsp); + } + return 0; +} + +static int xsc_cmd_set_dscp2prio(struct xsc_core_device *xdev, u8 dscp, u8 prio) +{ + int err = 0; + struct xsc_dscp_pmt_set req; + + memset(&req, 0, sizeof(struct xsc_dscp_pmt_set)); + req.dscp = dscp; + req.priority = prio; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_DSCP_PMT, &req, NULL); + if (err) + return err; + + xsc_core_dbg(xdev, "%s: dscp %d mapping to prio %d\n", __func__, dscp, prio); + + return 0; +} + +static int xsc_cmd_set_trust_state(struct xsc_core_device *xdev, u8 trust_state) +{ + int err = 0; + struct xsc_trust_mode_set req; + + memset(&req, 0, sizeof(struct xsc_trust_mode_set)); + + /*set trust state,0,DSCP mdoe; 1,PCP mode*/ + if (trust_state == XSC_QPTS_TRUST_PCP) + req.is_pcp = 1; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_TRUST_MODE, &req, NULL); + if (err) + return err; + + return 0; +} + +static int xsc_cmd_get_trust_state(struct xsc_core_device *xdev, u8 *trust_state) +{ + int err; + struct xsc_trust_mode_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_trust_mode_get)); + + err = 
xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_TRUST_MODE, NULL, &rsp); + if (err) + return err; + + if (rsp.is_pcp) + *trust_state = XSC_QPTS_TRUST_PCP; + else + *trust_state = XSC_QPTS_TRUST_DSCP; + + return 0; +} + +static int xsc_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int err = 0; + int i; + + if (!priv->dcbx.enable || !xdev->caps.ets) + return -EOPNOTSUPP; + + memset(ets, 0, sizeof(*ets)); + ets->willing = 1; + ets->ets_cap = xsc_max_tc(priv->xdev) + 1; + for (i = 0; i < ets->ets_cap; i++) { + /*get prio->tc mapping*/ + xsc_query_port_prio_tc(xdev, i, &ets->prio_tc[i]); + } + + err = xsc_query_port_bw_config(xdev, ets->tc_tx_bw); + if (err) + return err; + + for (i = 0; i < ets->ets_cap; i++) { + if (!ets->tc_tx_bw[i]) + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT; + else if (ets->tc_tx_bw[i] < XSC_MAX_BW_ALLOC) + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + + xsc_core_dbg(xdev, "%s: tc%d, bw=%d\n", + __func__, i, ets->tc_tx_bw[i]); + } + + memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); + + return err; +} + +static void xsc_build_tc_tx_bw_sch(struct xsc_core_device *xdev, + struct ieee_ets *ets, u8 *tc_tx_bw, + u8 *tc_sp_enable, int max_tc) +{ + u8 i; + u8 ets_cnt = 0; + u8 min_weight = 0xff; + + for (i = 0; i <= max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + tc_tx_bw[i] = 1; + tc_sp_enable[i] = i + 1; + break; + case IEEE_8021QAZ_TSA_ETS: + ets_cnt++; + if (ets->tc_tx_bw[i] <= min_weight) + min_weight = ets->tc_tx_bw[i]; + break; + } + } + xsc_wfq_to_wrr_adpat(xdev, tc_tx_bw, ets->tc_tx_bw, ets_cnt, min_weight); +} + +static int xsc_set_port_tx_bw_sch(struct xsc_core_device *xdev, u8 *tc_sp_enable, u8 *tc_tx_bw) +{ + u8 i; + int err = 0; + struct xsc_sp_set req_sch; + struct xsc_weight_set req_weight; + + memset(&req_sch, 0, sizeof(struct xsc_sp_set)); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + req_sch.sp[i] = tc_sp_enable[i]; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_SP, &req_sch, NULL); + if (err) + return err; + + memset(&req_weight, 0, sizeof(struct xsc_weight_set)); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + req_weight.weight[i] = tc_tx_bw[i]; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_WEIGHT, &req_weight, NULL); + if (err) + return err; + + return 0; +} + +int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets) +{ + struct xsc_core_device *xdev = priv->xdev; + u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = {1}; + u8 tc_sp_enable[IEEE_8021QAZ_MAX_TCS]; + int max_tc = xsc_max_tc(xdev); + int err = 0; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(tc_sp_enable, 0, IEEE_8021QAZ_MAX_TCS); + xsc_build_tc_tx_bw_sch(xdev, ets, tc_tx_bw, tc_sp_enable, max_tc); + xsc_set_port_prio_tc(xdev, ets->prio_tc); + + err = xsc_set_port_tx_bw_sch(xdev, tc_sp_enable, tc_tx_bw); + if (err) + return err; + + memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + + return err; +} + +static int xsc_dbcnl_validate_ets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + bool have_ets_tc = false; + int bw_sum = 0; + int i; + + if (!priv->dcbx.enable) + return 0; + + /* Validate Priority */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] >= XSC_MAX_PRIORITY) { + netdev_err(netdev, + "Failed to validate ETS: priority value greater than max(%d)\n", 
+ XSC_MAX_PRIORITY); + return -EINVAL; + } + } + + /* Validate Bandwidth Sum */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { + /* do not allow ets with 0 weight */ + have_ets_tc = true; + if (!ets->tc_tx_bw[i]) + return -EINVAL; + bw_sum += ets->tc_tx_bw[i]; + } + } + + xsc_core_dbg(xdev, "%s bw_sum = %d\n", __func__, bw_sum); + + if (have_ets_tc && bw_sum != 100) { + netdev_err(netdev, "Failed to validate ETS: BW sum is illegal\n"); + return -EINVAL; + } + return 0; +} + +static int xsc_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err; + + if (!priv->dcbx.enable) + return 0; + + if (!priv->xdev->caps.ets) + return -EOPNOTSUPP; + + err = xsc_dbcnl_validate_ets(dev, ets); + if (err) + return err; + + err = xsc_dcbnl_ieee_setets_core(priv, ets); + if (err) + return err; + + return 0; +} + +static int xsc_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + pfc->pfc_cap = xsc_max_tc(xdev) + 1; + pfc->pfc_en = 0; + if (xdev->caps.port_buf) + pfc->delay = priv->dcbx.cable_len; + xsc_query_port_pfc_stats(xdev, pfc); + + xsc_core_dbg(xdev, "%s: pfc_en=0x%x\n", __func__, pfc->pfc_en); + + return 0; +} + +static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + u8 curr_pfc_en; + int ret = 0; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + /* pfc_en */ + xsc_query_port_pfc(xdev, &curr_pfc_en); + if (pfc->pfc_en != curr_pfc_en) { + ret = xsc_set_port_pfc(xdev, pfc->pfc_en); + if (ret) + return ret; + } + + xsc_core_dbg(xdev, "%s: new_pfc_en=0x%x, cur_pfc_en=0x%x\n", + __func__, pfc->pfc_en, curr_pfc_en); + return ret; +} + +static u8 xsc_dcbnl_getdcbx(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + xsc_core_dbg(xdev, "%s: dcbx->cap=0x%x\n", __func__, priv->dcbx.cap); + return priv->dcbx.cap; +} + +static u8 xsc_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_dcbx *dcbx = &priv->dcbx; + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + struct xsc_lldp_status_mbox_in req; + struct xsc_lldp_status_mbox_out rsp; + int err = 0; + + memset(&req, 0, sizeof(struct xsc_lldp_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_lldp_status_mbox_out)); + + req.sub_type = XSC_OS_HANDLE_LLDP_STATUS; + req.os_handle_lldp = cpu_to_be32(1); + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_SET_LLDP_STATUS, &req, &rsp); + if (err) { + xsc_core_err(xdev, "set LLDP status fail,err %d\n", err); + return err; + } + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + xsc_core_dbg(xdev, "%s: mode=%d, dcbx->cap = %d\n", __func__, mode, dcbx->cap); + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return -EINVAL; + + if (mode == dcbx->cap) + return 0; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + /*mode switch, set base config*/ + if (mode & DCB_CAP_DCBX_VER_IEEE) { + xsc_dcbnl_ieee_setets(dev, &ets); + 
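/* the zero-initialized PFC config is applied as well, so the switch to IEEE mode starts with PFC disabled on every priority */ +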
xsc_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + xsc_dcbnl_setall(dev); + } + + dcbx->cap = mode; + + return 0; +} + +static int xsc_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct dcb_app temp; + bool is_new; + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!priv->xdev->caps.dscp) + return -EOPNOTSUPP; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) + return -EINVAL; + + /* Save the old entry info */ + temp.selector = IEEE_8021QAZ_APP_SEL_DSCP; + temp.protocol = app->protocol; + temp.priority = priv->dcbx_dp.dscp2prio[app->protocol]; + + /* Check if need to switch to dscp trust state */ + if (!priv->dcbx.dscp_app_cnt) { + err = xsc_set_trust_state(priv, XSC_QPTS_TRUST_DSCP); + if (err) + return err; + } + + /* Skip the fw command if new and old mapping are the same */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) { + err = xsc_set_dscp2prio(priv, app->protocol, app->priority); + if (err) + goto fw_err; + } + + /* Delete the old entry if exists */ + is_new = false; + err = dcb_ieee_delapp(dev, &temp); + if (err) + is_new = true; + + /* Add new entry and update counter */ + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + if (is_new) + priv->dcbx.dscp_app_cnt++; + + return err; + +fw_err: + xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + return err; +} + +static int xsc_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!priv->xdev->caps.dscp) + return -EOPNOTSUPP; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) + return -EINVAL; + + /* Skip if no dscp app entry */ + if (!priv->dcbx.dscp_app_cnt) + return -ENOENT; + + /* Check if the entry matches fw setting */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) + return -ENOENT; + + /* Delete the app entry */ + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + /* Reset the priority mapping back to zero */ + err = xsc_set_dscp2prio(priv, app->protocol, 0); + if (err) + goto fw_err; + + priv->dcbx.dscp_app_cnt--; + + /* Check if need to switch to pcp trust state */ + if (!priv->dcbx.dscp_app_cnt) + err = xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + + return err; + +fw_err: + xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + return err; +} + +static int xsc_dcbnl_ieee_getmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u64 max_bw_value[IEEE_8021QAZ_MAX_TCS] = {0}; + int i, err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate)); + + err = xsc_query_port_ets_rate_limit(xdev, max_bw_value); + if (err) + return err; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + maxrate->tc_maxrate[i] = max_bw_value[i] * XSC_RATE_LIMIT_BASE / XSC_1GB; + + return 0; +} + +static int xsc_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u64 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + int i; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(max_bw_value, 0, sizeof(max_bw_value)); + + for (i = 0; i <= xsc_max_tc(xdev); i++) { + if (!maxrate->tc_maxrate[i]) + continue; + 
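/* scale to the hardware rate-limit granularity of 16 kbps; this is the inverse of the conversion in xsc_dcbnl_ieee_getmaxrate */ +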
max_bw_value[i] = maxrate->tc_maxrate[i] * XSC_1GB / XSC_RATE_LIMIT_BASE; + xsc_core_dbg(xdev, "%s: tc_%d <=> max_bw %llu * 16kbps\n", + __func__, i, max_bw_value[i]); + } + + return xsc_modify_port_ets_rate_limit(xdev, max_bw_value); +} + +static u8 xsc_dcbnl_setall(struct net_device *netdev) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + struct xsc_core_device *xdev = priv->xdev; + struct ieee_ets ets; + struct ieee_pfc pfc; + int err = -EOPNOTSUPP; + int i; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!xdev->caps.ets) + goto out; + + memset(&ets, 0, sizeof(ets)); + memset(&pfc, 0, sizeof(pfc)); + + ets.ets_cap = IEEE_8021QAZ_MAX_TCS; + for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { + ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i]; + } + + err = xsc_dbcnl_validate_ets(netdev, &ets); + if (err) + goto out; + + err = xsc_dcbnl_ieee_setets_core(priv, &ets); + if (err) { + netdev_err(netdev, + "%s, Failed to set ETS: %d\n", __func__, err); + goto out; + } + + /* Set PFC */ + pfc.pfc_cap = xsc_max_tc(xdev) + 1; + if (!cee_cfg->pfc_enable) + pfc.pfc_en = 0; + else + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + pfc.pfc_en |= cee_cfg->pfc_setting[i] << i; + + err = xsc_dcbnl_ieee_setpfc(netdev, &pfc); + if (err) { + netdev_err(netdev, + "%s, Failed to set PFC: %d\n", __func__, err); + goto out; + } +out: + return err ? XSC_DCB_NO_CHG : XSC_DCB_CHG_RESET; +} + +static u8 xsc_dcbnl_getstate(struct net_device *netdev) +{ + return XSC_CEE_STATE_UP; +} + +static void xsc_dcbnl_getpermhwaddr(struct net_device *netdev, + u8 *perm_addr) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + + if (!priv->dcbx.enable || !perm_addr) + return; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + xsc_query_nic_vport_mac_address(priv->xdev, 0, perm_addr); +} + +static void xsc_dcbnl_setpgtccfgtx(struct net_device *netdev, + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: prio=%d, type=%d, pgid=%d, bw_pct=%d, up_map=%d\n", + __func__, priority, prio_type, pgid, + bw_pct, up_map); + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + cee_cfg->prio_to_pg_map[priority] = pgid; +} + +static void xsc_dcbnl_setpgtccfgrx(struct net_device *netdev, + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "Nothing to be done pgtccfg rx, not support\n"); +} + +static void xsc_dcbnl_setpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: pgid=%d, bw_pct=%d\n", + __func__, pgid, bw_pct); + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + cee_cfg->pg_bw_pct[pgid] 
= bw_pct; +} + +static void xsc_dcbnl_setpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "Nothing to be done pgbwgcfg rx, not support\n"); +} + +static void xsc_dcbnl_getpgtccfgtx(struct net_device *netdev, + int priority, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + if (!xdev->caps.ets) { + netdev_err(netdev, "%s, ets is not supported\n", __func__); + return; + } + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + xsc_query_port_prio_tc(xdev, priority, pgid); + + *up_map = *pgid; + *prio_type = 0; + *bw_pct = 100; + + xsc_core_dbg(xdev, "%s: prio=%d, pgid=%d, bw_pct=%d\n", + __func__, priority, *pgid, *bw_pct); +} + +static void xsc_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "pgtccfgrx Nothing to get; No RX support\n"); + + *prio_type = *pgid = *bw_pct = *up_map = 0; +} + +static void xsc_dcbnl_getpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct ieee_ets ets; + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + xsc_dcbnl_ieee_getets(netdev, &ets); + *bw_pct = ets.tc_tx_bw[pgid]; + xsc_core_dbg(xdev, "%s: pgid=%d, bw_pct=%d\n", + __func__, pgid, *bw_pct); +} + +static void xsc_dcbnl_setpfccfg(struct net_device *netdev, + int priority, u8 setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: prio=%d, setting=%d\n", + __func__, priority, setting); + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (setting > 1) + return; + + cee_cfg->pfc_setting[priority] = setting; +} + +static void xsc_dcbnl_getpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "bwgcfgrx Nothing to get; No RX support\n"); + + *bw_pct = 0; +} + +static int xsc_dcbnl_get_priority_pfc(struct net_device *netdev, + int priority, u8 *setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct ieee_pfc pfc; + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + err = xsc_dcbnl_ieee_getpfc(netdev, &pfc); + + if (err) + *setting = 0; + else + *setting = (pfc.pfc_en >> priority) & 0x01; + + xsc_core_dbg(xdev, "%s: prio=%d, setting=%d\n", + __func__, priority, *setting); + return err; +} + +static void xsc_dcbnl_getpfccfg(struct net_device *netdev, + int priority, u8 *setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + + if (!priv->dcbx.enable) + return; + + if (priority >= CEE_DCBX_MAX_PRIO) { + 
netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (!setting) + return; + + xsc_dcbnl_get_priority_pfc(netdev, priority, setting); +} + +static u8 xsc_dcbnl_getcap(struct net_device *netdev, + int capid, u8 *cap) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u8 rval = 0; + + if (!priv->dcbx.enable) + return rval; + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 1 << xsc_max_tc(xdev); + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << xsc_max_tc(xdev); + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcbx.cap | + DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_VER_IEEE; + break; + default: + *cap = 0; + rval = 1; + break; + } + + xsc_core_dbg(xdev, "%s: capid=%d, cap=%d, ret=%d\n", + __func__, capid, *cap, rval); + return rval; +} + +static int xsc_dcbnl_getnumtcs(struct net_device *netdev, + int tcs_id, u8 *num) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + switch (tcs_id) { + case DCB_NUMTCS_ATTR_PG: + case DCB_NUMTCS_ATTR_PFC: + *num = xsc_max_tc(xdev) + 1; + break; + default: + return -EINVAL; + } + + xsc_core_dbg(xdev, "%s: tcs_id=%d, tc_num=%d\n", + __func__, tcs_id, *num); + return 0; +} + +static u8 xsc_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct ieee_pfc pfc; + + if (!priv->dcbx.enable) + return XSC_CEE_STATE_DOWN; + + if (xsc_dcbnl_ieee_getpfc(netdev, &pfc)) + return XSC_CEE_STATE_DOWN; + + return pfc.pfc_en ? 
XSC_CEE_STATE_UP : XSC_CEE_STATE_DOWN; +} + +static void xsc_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + if (state != XSC_CEE_STATE_UP && state != XSC_CEE_STATE_DOWN) + return; + + cee_cfg->pfc_enable = state; +} + +const struct dcbnl_rtnl_ops xsc_dcbnl_ops = { + .ieee_getets = xsc_dcbnl_ieee_getets, + .ieee_setets = xsc_dcbnl_ieee_setets, + .ieee_getmaxrate = xsc_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = xsc_dcbnl_ieee_setmaxrate, + .ieee_getpfc = xsc_dcbnl_ieee_getpfc, + .ieee_setpfc = xsc_dcbnl_ieee_setpfc, + .ieee_setapp = xsc_dcbnl_ieee_setapp, + .ieee_delapp = xsc_dcbnl_ieee_delapp, + .getdcbx = xsc_dcbnl_getdcbx, + .setdcbx = xsc_dcbnl_setdcbx, + + /* CEE interfaces */ + .setall = xsc_dcbnl_setall, + .getstate = xsc_dcbnl_getstate, + .getpermhwaddr = xsc_dcbnl_getpermhwaddr, + + .setpgtccfgtx = xsc_dcbnl_setpgtccfgtx, + .setpgtccfgrx = xsc_dcbnl_setpgtccfgrx, + .setpgbwgcfgtx = xsc_dcbnl_setpgbwgcfgtx, + .setpgbwgcfgrx = xsc_dcbnl_setpgbwgcfgrx, + + .getpgtccfgtx = xsc_dcbnl_getpgtccfgtx, + .getpgtccfgrx = xsc_dcbnl_getpgtccfgrx, + .getpgbwgcfgtx = xsc_dcbnl_getpgbwgcfgtx, + .getpgbwgcfgrx = xsc_dcbnl_getpgbwgcfgrx, + + .setpfccfg = xsc_dcbnl_setpfccfg, + .getpfccfg = xsc_dcbnl_getpfccfg, + .getcap = xsc_dcbnl_getcap, + .getnumtcs = xsc_dcbnl_getnumtcs, + .getpfcstate = xsc_dcbnl_getpfcstate, + .setpfcstate = xsc_dcbnl_setpfcstate, +}; + +static void xsc_dcbnl_query_dcbx_mode(struct xsc_core_device *xdev, + enum xsc_dcbx_oper_mode *mode) +{ + int err = 0; + struct xsc_lldp_status_mbox_in req; + struct xsc_lldp_status_mbox_out rsp; + + *mode = XSC_DCBX_PARAM_VER_OPER_HOST; + + memset(&req, 0, sizeof(struct xsc_lldp_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_lldp_status_mbox_out)); + + req.sub_type = XSC_OS_HANDLE_LLDP_STATUS; + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_GET_LLDP_STATUS, &req, &rsp); + if (err) { + xsc_core_err(xdev, "get LLDP status fail,err %d\n", err); + return; + } + + rsp.status.os_handle_lldp = be32_to_cpu(rsp.status.os_handle_lldp); + xsc_core_dbg(xdev, "%s: lldp os handle = %u\n", __func__, rsp.status.os_handle_lldp); + if (rsp.status.os_handle_lldp != XSC_DCBX_PARAM_VER_OPER_HOST) + *mode = XSC_DCBX_PARAM_VER_OPER_AUTO; +} + +static void xsc_ets_init(struct xsc_adapter *priv) +{ + struct ieee_ets ets; + int err; + int i; + + if (!priv->xdev->caps.ets) + return; + memset(&ets, 0, sizeof(ets)); + ets.ets_cap = xsc_max_tc(priv->xdev) + 1; + for (i = 0; i < ets.ets_cap; i++) { + ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + ets.prio_tc[i] = i; + ets.tc_tx_bw[i] = XSC_WRR_DEFAULT_WEIGHT; + } + + err = xsc_dcbnl_ieee_setets_core(priv, &ets); + if (err) + netdev_err(priv->netdev, + "%s, Failed to init ETS: %d\n", __func__, err); +} + +enum { + INIT, + DELETE, +}; + +static void xsc_dcbnl_dscp_app(struct xsc_adapter *priv, int action) +{ + struct dcb_app temp; + struct xsc_core_device *xdev = priv->xdev; + int i; + + xsc_core_dbg(xdev, "%s: action=%d\n", __func__, action); + if (!priv->xdev->caps.dscp) + return; + + /* No SEL_DSCP entry in non DSCP state */ + if (priv->dcbx_dp.trust_state != XSC_QPTS_TRUST_DSCP) + return; + + temp.selector = IEEE_8021QAZ_APP_SEL_DSCP; + for (i = 0; i < XSC_MAX_DSCP; i++) { + temp.protocol = i; + temp.priority = priv->dcbx_dp.dscp2prio[i]; + if (action == INIT) + dcb_ieee_setapp(priv->netdev, &temp); + else + dcb_ieee_delapp(priv->netdev, &temp); + } + +
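/* the loop above adds or removes every XSC_MAX_DSCP entry in one pass, so the app counter is either full or zero afterwards */ +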
priv->dcbx.dscp_app_cnt = (action == INIT) ? XSC_MAX_DSCP : 0; +} + +void xsc_dcbnl_init_app(struct xsc_adapter *priv) +{ + xsc_dcbnl_dscp_app(priv, INIT); +} + +void xsc_dcbnl_delete_app(struct xsc_adapter *priv) +{ + xsc_dcbnl_dscp_app(priv, DELETE); +} + +static int xsc_query_trust_state(struct xsc_core_device *xdev, u8 *trust) +{ + int err = 0; + + err = xsc_cmd_get_trust_state(xdev, trust); + if (err) + return err; + + return 0; +} + +static int xsc_set_trust_state(struct xsc_adapter *priv, u8 trust_state) +{ + int err = 0; + + err = xsc_cmd_set_trust_state(priv->xdev, trust_state); + if (err) + return err; + + priv->dcbx_dp.trust_state = trust_state; + + return err; +} + +static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio) +{ + int err = 0; + struct xsc_core_device *xdev = priv->xdev; + + xsc_core_dbg(xdev, "%s: dscp=%d, prio=%d\n", + __func__, dscp, prio); + + err = xsc_cmd_set_dscp2prio(priv->xdev, dscp, prio); + if (err) + return err; + + priv->dcbx_dp.dscp2prio[dscp] = prio; + return err; +} + +static int xsc_query_dscp2prio(struct xsc_core_device *xdev, u8 *dscp2prio) +{ + int err = 0; + struct xsc_dscp_pmt_get rsp; + + memset(&rsp, 0, sizeof(rsp)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_DSCP_PMT, NULL, &rsp); + if (err) + return err; + + memcpy(dscp2prio, rsp.prio_map, sizeof(u8) * XSC_MAX_DSCP); + + return 0; +} + +static int xsc_trust_initialize(struct xsc_adapter *priv) +{ + struct xsc_core_device *xdev = priv->xdev; + int err; + + priv->dcbx_dp.trust_state = XSC_QPTS_TRUST_PCP; + + if (!xdev->caps.dscp) + return 0; + + err = xsc_query_trust_state(xdev, &priv->dcbx_dp.trust_state); + if (err) + return err; + + err = xsc_query_dscp2prio(xdev, priv->dcbx_dp.dscp2prio); + if (err) + return err; + + return 0; +} + +#define XSC_BUFFER_CELL_SHIFT 7 +static u16 xsc_query_port_buffers_cell_size(struct xsc_adapter *priv) +{ + return (1 << XSC_BUFFER_CELL_SHIFT); +} + +static void xsc_cee_init(struct xsc_adapter *priv) +{ + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + struct xsc_core_device *xdev = priv->xdev; + int i, max_tc; + u8 pfc_bitmap; + + memset(cee_cfg, 0, sizeof(*cee_cfg)); + + cee_cfg->pfc_enable = 1; + + xsc_query_port_pfc(xdev, &pfc_bitmap); + + xsc_pfc_bitmap2array(pfc_bitmap, cee_cfg->pfc_setting); + + max_tc = xsc_max_tc(priv->xdev) + 1; + for (i = 0; i < max_tc; i++) + cee_cfg->prio_to_pg_map[i] = i % max_tc; +} + +static u8 xsc_dcbnl_get_dcbx_status(struct xsc_core_device *xdev) +{ + u8 enable = 0; + int err; + struct xsc_lldp_status_mbox_in req; + struct xsc_lldp_status_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_hwc_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_hwc_mbox_out)); + + req.sub_type = XSC_DCBX_STATUS; + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_GET_LLDP_STATUS, &req, &rsp); + if (err) + return 0; + + enable = (u8)be32_to_cpu(rsp.status.dcbx_status); + + return enable; +} + +void xsc_dcbnl_initialize(struct xsc_adapter *priv) +{ + struct xsc_dcbx *dcbx = &priv->dcbx; + struct xsc_core_device *xdev = priv->xdev; + + xsc_trust_initialize(priv); + + if (!priv->xdev->caps.qos) + return; + + if (priv->xdev->caps.dcbx) + xsc_dcbnl_query_dcbx_mode(xdev, &dcbx->mode); + + priv->dcbx.enable = xsc_dcbnl_get_dcbx_status(xdev); + + if (priv->dcbx.enable) { + priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE; + + if (priv->dcbx.mode == XSC_DCBX_PARAM_VER_OPER_HOST) + priv->dcbx.cap = priv->dcbx.cap | DCB_CAP_DCBX_HOST; + + priv->dcbx.port_buff_cell_sz = xsc_query_port_buffers_cell_size(priv); + 
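/* default to automatic buffer management with the default cable length */ +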
priv->dcbx.manual_buffer = 0; + priv->dcbx.cable_len = XSC_DEFAULT_CABLE_LEN; + + xsc_cee_init(priv); + xsc_ets_init(priv); + } +} +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h new file mode 100644 index 0000000000000000000000000000000000000000..be7e6d89c9f6dc2555de2f5245f2cd9187a12b0a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_H +#define XSC_ETH_H + +#include "common/qp.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "common/version.h" +#include +#include "common/xsc_fs.h" + +#define XSC_INVALID_LKEY 0x100 + +#define XSCALE_ETH_PHYPORT_DOWN 0 +#define XSCALE_ETH_PHYPORT_UP 1 +#ifdef CONFIG_DCB +#define CONFIG_XSC_CORE_EN_DCB 1 +#endif +#define XSC_PAGE_CACHE 1 + +#define XSCALE_DRIVER_NAME "xsc_eth" +#define XSCALE_RET_SUCCESS 0 +#define XSCALE_RET_ERROR 1 + +enum { + XSCALE_ETH_DRIVER_INIT, + XSCALE_ETH_DRIVER_OK, + XSCALE_ETH_DRIVER_CLOSE, + XSCALE_ETH_DRIVER_DETACH, +}; + +#define XSCALE_ETH_QP_NUM_MAX 1 +#define XSCALE_RX_THREAD_MAX 128 + +enum { + XSC_BW_NO_LIMIT = 0, + XSC_100_MBPS_UNIT = 3, + XSC_GBPS_UNIT = 4, +}; + +struct xsc_cee_config { + /* bw pct for priority group */ + u8 pg_bw_pct[CEE_DCBX_MAX_PGS]; + u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO]; + u8 pfc_setting[CEE_DCBX_MAX_PRIO]; + u8 pfc_enable; +}; + +enum { + XSC_DCB_CHG_RESET, + XSC_DCB_NO_CHG, + XSC_DCB_CHG_NO_RESET, +}; + +enum xsc_qpts_trust_state { + XSC_QPTS_TRUST_PCP = 1, + XSC_QPTS_TRUST_DSCP = 2, +}; + +enum xsc_dcbx_oper_mode { + XSC_DCBX_PARAM_VER_OPER_HOST = 0x0, + XSC_DCBX_PARAM_VER_OPER_AUTO = 0x3, +}; + +enum { + XSC_PORT_BUFFER_CABLE_LEN = BIT(0), + XSC_PORT_BUFFER_PFC = BIT(1), + XSC_PORT_BUFFER_PRIO2BUFFER = BIT(2), + XSC_PORT_BUFFER_SIZE = BIT(3), +}; + +struct xsc_dcbx { + u8 enable; + enum xsc_dcbx_oper_mode mode; + struct xsc_cee_config cee_cfg; /* pending configuration */ + u8 dscp_app_cnt; + + /* The only setting that cannot be read from FW */ + u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; + u8 cap; + + /* Buffer configuration */ + u8 manual_buffer; + u32 cable_len; + u32 xoff; + u16 port_buff_cell_sz; +}; + +struct xsc_bufferx_reg { + u8 lossy; + u8 epsb; + u32 size; + u32 xoff; + u32 xon; +}; + +struct xsc_port_buffer { + u32 port_buffer_size; + u32 spare_buffer_size; + struct xsc_bufferx_reg buffer[XSC_MAX_BUFFER]; +}; + +struct xsc_dcbx_dp { + u8 dscp2prio[XSC_MAX_DSCP]; + u8 trust_state; +}; + +struct xsc_rss_params { + u32 indirection_rqt[XSC_INDIR_RQT_SIZE]; + u32 rx_hash_fields[XSC_NUM_INDIR_TIRS]; + u8 toeplitz_hash_key[52]; + u8 hfunc; + u32 rss_hash_tmpl; +}; + +struct xsc_vlan_params { + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); +}; + +struct xsc_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct device *dev; + struct xsc_core_device *xdev; + + struct xsc_eth_params nic_param; + struct xsc_rss_params rss_params; + struct xsc_vlan_params vlan_params; + + struct xsc_flow_steering fs; + + struct workqueue_struct *workq; + struct work_struct update_carrier_work; + struct work_struct set_rx_mode_work; + struct work_struct event_work; + + struct xsc_eth_channels channels; + struct xsc_sq **txq2sq; + + u32 status; + spinlock_t lock; /* adapter lock */ + + struct mutex state_lock; /* Protects Interface state */ + struct xsc_stats *stats; + + 
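/* DCB configuration (IEEE/CEE) and the datapath DSCP-to-priority/trust state */ +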
struct xsc_dcbx dcbx; + struct xsc_dcbx_dp dcbx_dp; + + u32 msglevel; + + struct task_struct *task; + + int channel_tc2realtxq[XSC_ETH_MAX_NUM_CHANNELS][XSC_MAX_NUM_TC]; +}; + +struct xsc_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + u32 len; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + u32 page_offset; +#else + u16 page_offset; +#endif + u16 pagecnt_bias; +}; + +struct xsc_tx_buffer { + struct sk_buff *skb; + unsigned long *h_skb_data; + dma_addr_t dma; + u32 len; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + u32 page_offset; +#else + u16 page_offset; +#endif + u16 pagecnt_bias; +}; + +struct xsc_tx_wqe { + struct xsc_send_wqe_ctrl_seg ctrl; + struct xsc_wqe_data_seg data[]; +}; + +typedef int (*xsc_eth_fp_preactivate)(struct xsc_adapter *priv); +typedef int (*xsc_eth_fp_postactivate)(struct xsc_adapter *priv); + +int xsc_safe_switch_channels(struct xsc_adapter *adapter, + xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate); +int xsc_eth_num_channels_changed(struct xsc_adapter *priv); +int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 change); +bool xsc_eth_get_link_status(struct xsc_adapter *adapter); +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo); +int xsc_eth_set_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo); + +int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter); + +/* Use this function to get max num channels after netdev was created */ +static inline int xsc_get_netdev_max_channels(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + return min_t(unsigned int, netdev->num_rx_queues, + netdev->num_tx_queues); +} + +static inline int xsc_get_netdev_max_tc(struct xsc_adapter *adapter) +{ + return adapter->nic_param.num_tc; +} + +#ifdef CONFIG_XSC_CORE_EN_DCB +extern const struct dcbnl_rtnl_ops xsc_dcbnl_ops; +int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets); +void xsc_dcbnl_initialize(struct xsc_adapter *priv); +void xsc_dcbnl_init_app(struct xsc_adapter *priv); +void xsc_dcbnl_delete_app(struct xsc_adapter *priv); +#endif +#endif /* XSC_ETH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h new file mode 100644 index 0000000000000000000000000000000000000000..49550e1f87d205686f8d13b0b9ad5658e78fc131 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_ETH_COMMON_H +#define XSC_ETH_COMMON_H + +#include "xsc_queue.h" +#include "xsc_eth_compat.h" +#include "common/xsc_pph.h" +#include "common/xsc_hsi.h" + +#define SW_MIN_MTU 64 +#define SW_DEFAULT_MTU 1500 +#define SW_MAX_MTU 9600 + +#define XSC_ETH_HW_MTU_SEND 9800 /*need to obtain from hardware*/ +#define XSC_ETH_HW_MTU_RECV 9800 /*need to obtain from hardware*/ +#define XSC_SW2HW_MTU(mtu) ((mtu) + 14 + 4) +#define XSC_SW2HW_FRAG_SIZE(mtu) ((mtu) + 14 + 8 + 4 + XSC_PPH_HEAD_LEN) +#define XSC_SW2HW_RX_PKT_LEN(mtu) ((mtu) + 14 + 256) + +#define XSC_RX_MAX_HEAD (256) +#define XSC_RX_HEADROOM NET_SKB_PAD + +#define XSC_QPN_SQN_STUB 1025 +#define XSC_QPN_RQN_STUB 1024 + +#define XSC_LOG_INDIR_RQT_SIZE 0x8 + +#define XSC_INDIR_RQT_SIZE BIT(XSC_LOG_INDIR_RQT_SIZE) +#ifdef XSC_RSS_SUPPORT +#define XSC_ETH_MIN_NUM_CHANNELS 2 +#else +#define XSC_ETH_MIN_NUM_CHANNELS 1 +#endif +#define XSC_ETH_MAX_NUM_CHANNELS XSC_INDIR_RQT_SIZE + +#define XSC_TX_NUM_TC 1 +#define XSC_MAX_NUM_TC 8 +#define XSC_ETH_MAX_TC_TOTAL (XSC_ETH_MAX_NUM_CHANNELS * XSC_MAX_NUM_TC) +#define XSC_ETH_MAX_QP_NUM_PER_CH (XSC_MAX_NUM_TC + 1) + +#define XSC_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define XSC_MIN_SKB_FRAG_SZ (XSC_SKB_FRAG_SZ(XSC_RX_HEADROOM)) +#define XSC_LOG_MAX_RX_WQE_BULK \ + (ilog2(PAGE_SIZE / roundup_pow_of_two(XSC_MIN_SKB_FRAG_SZ))) + +#define XSC_MIN_LOG_RQ_SZ (1 + XSC_LOG_MAX_RX_WQE_BULK) +#define XSC_DEF_LOG_RQ_SZ 0xa +#define XSC_MAX_LOG_RQ_SZ 0xd + +#define XSC_MIN_LOG_SQ_SZ 0x6 +#define XSC_DEF_LOG_SQ_SZ 0xa +#define XSC_MAX_LOG_SQ_SZ 0xd + +#define XSC_SQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_SQ_SZ) +#define XSC_RQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_RQ_SZ) + +#define XSC_LOG_RQCQ_SZ 0xb +#define XSC_LOG_SQCQ_SZ 0xa + +#define XSC_RQCQ_ELE_NUM BIT(XSC_LOG_RQCQ_SZ) +#define XSC_SQCQ_ELE_NUM BIT(XSC_LOG_SQCQ_SZ) +#define XSC_RQ_ELE_NUM XSC_RQ_ELE_NUM_DEF //ds number of a wqebb +#define XSC_SQ_ELE_NUM XSC_SQ_ELE_NUM_DEF //DS number +#define XSC_EQ_ELE_NUM XSC_SQ_ELE_NUM_DEF //number of eq entry??? 
+ +#define XSC_RQCQ_ELE_SZ 32 //size of a rqcq entry +#define XSC_SQCQ_ELE_SZ 32 //size of a sqcq entry +#define XSC_RQ_ELE_SZ XSC_RECV_WQE_BB +#define XSC_SQ_ELE_SZ XSC_SEND_WQE_BB +#define XSC_EQ_ELE_SZ 8 //size of a eq entry + +#define XSC_CQ_POLL_BUDGET 64 +#define XSC_TX_POLL_BUDGET 128 + +#define XSC_NET_DIM_ENABLE_THRESHOLD 16 + +#define XSC_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ +#define XSC_MAX_PRIORITY 8 +#define XSC_MAX_DSCP 64 +#define XSC_MAX_BUFFER 8 +#define XSC_DEFAULT_CABLE_LEN 7 /* 7 meters */ + +enum xsc_port_status { + XSC_PORT_DOWN = 0, + XSC_PORT_UP = 1, +}; + +/*all attributes of queue, MAYBE no use for some special queue*/ + +enum xsc_queue_type { + XSC_QUEUE_TYPE_EQ = 0, + XSC_QUEUE_TYPE_RQCQ, + XSC_QUEUE_TYPE_SQCQ, + XSC_QUEUE_TYPE_RQ, + XSC_QUEUE_TYPE_SQ, + XSC_QUEUE_TYPE_MAX, +}; + +struct xsc_queue_attr { + u8 q_type; + u32 ele_num; + u32 ele_size; + u8 ele_log_size; + u8 q_log_size; +}; + +/*MUST set value before create queue*/ +struct xsc_eth_eq_attr { + struct xsc_queue_attr xsc_eq_attr; +}; + +struct xsc_eth_cq_attr { + struct xsc_queue_attr xsc_cq_attr; +}; + +struct xsc_eth_rq_attr { + struct xsc_queue_attr xsc_rq_attr; +}; + +struct xsc_eth_sq_attr { + struct xsc_queue_attr xsc_sq_attr; +}; + +struct xsc_eth_qp_attr { + struct xsc_queue_attr xsc_qp_attr; +}; + +struct xsc_eth_rx_wqe_cyc { +#ifdef DECLARE_FLEX_ARRAY + DECLARE_FLEX_ARRAY(struct xsc_wqe_data_seg, data); +#else + struct xsc_wqe_data_seg data[0]; +#endif +}; + +struct xsc_eq_param { + struct xsc_queue_attr eq_attr; +}; + +struct xsc_cq_param { + struct xsc_wq_param wq; + struct cq_cmd { + u8 abc[16]; + } cqc; + struct xsc_queue_attr cq_attr; +}; + +struct xsc_rq_param { + struct xsc_wq_param wq; + struct xsc_queue_attr rq_attr; + struct xsc_rq_frags_info frags_info; + +}; + +struct xsc_sq_param { +// struct xsc_rq_cmd_param sqc; + struct xsc_wq_param wq; + struct xsc_queue_attr sq_attr; +}; + +struct xsc_qp_param { +// struct xsc_qp_cmd_param qpc; + struct xsc_queue_attr qp_attr; +}; + +struct xsc_channel_param { + struct xsc_cq_param rqcq_param; + struct xsc_cq_param sqcq_param; + struct xsc_rq_param rq_param; + struct xsc_sq_param sq_param; + struct xsc_qp_param qp_param; +}; + +struct xsc_eth_qp { + u16 rq_num; + u16 sq_num; + struct xsc_rq rq[XSC_MAX_NUM_TC]; /*may be use one only*/ + struct xsc_sq sq[XSC_MAX_NUM_TC]; /*reserved to tc*/ +}; + +enum channel_flags { + XSC_CHANNEL_NAPI_SCHED = 1, +}; + +struct xsc_channel { + /* data path */ + struct xsc_eth_qp qp; + struct napi_struct napi; + u8 num_tc; + int chl_idx; + + /*relationship*/ + struct xsc_adapter *adapter; + struct net_device *netdev; + int cpu; + unsigned long flags; + + /* data path - accessed per napi poll */ + const struct cpumask *aff_mask; + struct irq_desc *irq_desc; + struct xsc_ch_stats *stats; +} ____cacheline_aligned_in_smp; + +enum xsc_eth_priv_flag { + XSC_PFLAG_RX_NO_CSUM_COMPLETE, + XSC_PFLAG_SNIFFER, + XSC_PFLAG_DROPLESS_RQ, + XSC_PFLAG_RX_COPY_BREAK, + XSC_PFLAG_RX_CQE_BASED_MODER, + XSC_PFLAG_TX_CQE_BASED_MODER, + XSC_NUM_PFLAGS, /* Keep last */ +}; + +#define XSC_SET_PFLAG(params, pflag, enable) \ + do { \ + if (enable) \ + (params)->pflags |= BIT(pflag); \ + else \ + (params)->pflags &= ~(BIT(pflag)); \ + } while (0) + +#define XSC_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag)))) + +struct xsc_eth_params { + u16 num_channels; + u16 max_num_ch; + u8 num_tc; + u32 mtu; + u32 hard_mtu; + u32 comp_vectors; + u32 sq_size; + u32 sq_max_size; + u8 rq_wq_type; + u32 rq_size; + u32 
rq_max_size; + u32 rq_frags_size; + + u16 num_rl_txqs; + u8 rx_cqe_compress_def; + u8 tunneled_offload_en; + u8 lro_en; + u8 tx_min_inline_mode; + u8 vlan_strip_disable; + u8 scatter_fcs_en; + u8 rx_dim_enabled; + u8 tx_dim_enabled; + u32 rx_dim_usecs_low; + u32 rx_dim_frames_low; + u32 tx_dim_usecs_low; + u32 tx_dim_frames_low; + u32 lro_timeout; + u32 pflags; + + xsc_dim_cq_moder_t rx_cq_moderation; + xsc_dim_cq_moder_t tx_cq_moderation; +}; + +struct xsc_eth_channels { + struct xsc_channel *c; + unsigned int num_chl; + u32 rqn_base; +}; + +struct xsc_eth_redirect_rqt_param { + u8 is_rss; + union { + u32 rqn; /* Direct RQN (Non-RSS) */ + struct { + u8 hfunc; + struct xsc_eth_channels *channels; + } rss; /* RSS data */ + }; +}; + +union xsc_send_doorbell { + struct{ + s32 next_pid : 16; + u32 qp_num : 15; + }; + u32 send_data; +}; + +union xsc_recv_doorbell { + struct{ + s32 next_pid : 13; + u32 qp_num : 15; + }; + u32 recv_data; +}; + +#endif /* XSC_ETH_COMMON_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..5e34982faa46aece80d0052c50956b059e3badef --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_COMPAT_H +#define XSC_ETH_COMPAT_H + +#define xsc_netdev_xmit_more(skb) netdev_xmit_more() + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..ccf21b8c704bf513f15f38fcd864206d79d4c732 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c @@ -0,0 +1,654 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "xsc_hw_comm.h" + +#define XSC_ETH_CTRL_NAME "eth_ctrl" + +struct mutex pfc_mutex; /* protect pfc operation */ + +static void encode_watchdog_set(void *data, u32 mac_port) +{ + struct xsc_watchdog_period_set *req = + (struct xsc_watchdog_period_set *)data; + + req->period = __cpu_to_be32(req->period); +} + +static void decode_watchdog_get(void *data) +{ + struct xsc_watchdog_period_get *resp = + (struct xsc_watchdog_period_get *)data; + + resp->period = __be32_to_cpu(resp->period); +} + +static void encode_rlimit_set(void *data, u32 mac_port) +{ + struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *)data; + + req->rate_cir = __cpu_to_be32(req->rate_cir); + req->limit_id = __cpu_to_be32(req->limit_id); +} + +static void decode_rlimit_get(void *data) +{ + struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *)data; + int i; + + for (i = 0; i <= QOS_PRIO_MAX; i++) + resp->rate_cir[i] = __be32_to_cpu(resp->rate_cir[i]); + + resp->max_limit_id = __be32_to_cpu(resp->max_limit_id); +} + +static int xsc_get_port_pfc(struct xsc_core_device *xdev, u8 *pfc, u8 pfc_size) +{ + int err = 0; + struct xsc_pfc_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_pfc_get)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC, NULL, &rsp); + if (err) { + xsc_core_err(xdev, "failed to get pfc, err: %d\n", err); + return err; + } + + memcpy(pfc, rsp.pfc_on, pfc_size); + + return 0; +} + +static int xsc_set_port_pfc_drop_th(struct xsc_core_device *xdev, u8 prio, u8 cfg_type) +{ + int err = 0; + struct xsc_pfc_set_drop_th_mbox_in req; + struct xsc_pfc_set_drop_th_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_set_drop_th_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_set_drop_th_mbox_out)); + + req.prio = prio; + req.cfg_type = cfg_type; + req.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH, &req, &rsp); + if (err) { + xsc_core_err(xdev, + "failed to set pfc drop th, err: %d, prio: %d, cfg_type: %d\n", + err, prio, cfg_type); + return err; + } + + return 0; +} + +static int xsc_set_drop_th(struct xsc_core_device *xdev, + const struct xsc_pfc_cfg *pfc_cfg, + u8 cfg_type) +{ + int err = 0; + + if (cfg_type == DROP_TH_CLEAR) { + err = xsc_set_port_pfc_drop_th(xdev, pfc_cfg->req_prio, cfg_type); + if (pfc_cfg->pfc_op == PFC_OP_MODIFY) + err |= xsc_set_port_pfc_drop_th(xdev, pfc_cfg->curr_prio, cfg_type); + } else if (cfg_type == DROP_TH_RECOVER) { + if (pfc_cfg->pfc_op == PFC_OP_DISABLE) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSY); + } else if (pfc_cfg->pfc_op == PFC_OP_ENABLE) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSLESS); + } else if (pfc_cfg->pfc_op == PFC_OP_MODIFY) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSLESS); + err |= xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->curr_prio, + DROP_TH_RECOVER_LOSSY); + } + } + + return err; +} + +static int xsc_get_port_pfc_cfg_status(struct xsc_core_device *xdev, u8 prio, int *status) +{ + int err = 0; + struct xsc_pfc_get_cfg_status_mbox_in req; + struct xsc_pfc_get_cfg_status_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_get_cfg_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_get_cfg_status_mbox_out)); + + req.prio = prio; + req.hdr.opcode 
= __cpu_to_be16(XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS, &req, &rsp); + if (err) { + xsc_core_err(xdev, "failed to get pfc cfg status, err: %d, prio: %d\n", err, prio); + return err; + } + + *status = rsp.hdr.status; + + return 0; +} + +static int xsc_get_cfg_status(struct xsc_core_device *xdev, + struct xsc_pfc_cfg *pfc_cfg, + int *status) +{ + int err = 0; + + err = xsc_get_port_pfc_cfg_status(xdev, pfc_cfg->req_prio, status); + if (pfc_cfg->pfc_op == PFC_OP_MODIFY) + err |= xsc_get_port_pfc_cfg_status(xdev, pfc_cfg->curr_prio, status); + + return err; +} + +static int xsc_wait_pfc_check_complete(struct xsc_core_device *xdev, + struct xsc_pfc_cfg *pfc_cfg) +{ + int err = 0; + int status = 0; + u32 valid_cnt = 0; + u32 retry_cnt = 0; + + while (retry_cnt < PFC_CFG_CHECK_MAX_RETRY_TIMES) { + err = xsc_get_cfg_status(xdev, pfc_cfg, &status); + + if (err || status) { + valid_cnt = 0; + } else { + valid_cnt++; + if (valid_cnt >= PFC_CFG_CHECK_VALID_CNT) + break; + } + + retry_cnt++; + usleep_range(PFC_CFG_CHECK_SLEEP_TIME_US, + PFC_CFG_CHECK_SLEEP_TIME_US + 1); + } + + if (retry_cnt >= PFC_CFG_CHECK_MAX_RETRY_TIMES) { + xsc_core_err(xdev, "pfc check timeout, req_prio: %d, curr_prio:%d\n", + pfc_cfg->req_prio, pfc_cfg->curr_prio); + err = -EFAULT; + } + + return err | status; +} + +static int xsc_set_port_pfc(struct xsc_core_device *xdev, u8 prio, + u8 pfc_on, u8 pfc_op, u8 *lossless_num) +{ + int err = 0; + struct xsc_pfc_set req; + struct xsc_pfc_set rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_set)); + req.priority = prio; + req.pfc_on = pfc_on; + req.type = pfc_op; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC, &req, &rsp); + if (err) { + xsc_core_err(xdev, "failed to set pfc, err: %d, prio: %d, pfc_on: %d\n", + err, prio, pfc_on); + return err; + } + + *lossless_num = rsp.lossless_num; + + return 0; +} + +static int xsc_set_pfc(struct xsc_core_device *xdev, struct xsc_pfc_cfg *pfc_cfg) +{ + int err = 0; + u8 lossless_num = LOSSLESS_NUM_INVAILD; + + switch (pfc_cfg->pfc_op) { + case PFC_OP_DISABLE: + err = xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_OFF, + pfc_cfg->pfc_op, &lossless_num); + break; + case PFC_OP_ENABLE: + err = xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_ON, + pfc_cfg->pfc_op, &lossless_num); + break; + case PFC_OP_MODIFY: + err = xsc_set_port_pfc(xdev, pfc_cfg->curr_prio, NIF_PFC_EN_OFF, + pfc_cfg->pfc_op, &lossless_num); + err |= xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_ON, + pfc_cfg->pfc_op, &lossless_num); + break; + default: + xsc_core_err(xdev, "unsupported pfc operation: %d\n", pfc_cfg->pfc_op); + err = -EINVAL; + } + + pfc_cfg->lossless_num = lossless_num; + return err; +} + +static int handle_pfc_cfg(struct xsc_core_device *xdev, + struct xsc_qos_mbox_in *in, int in_size, + struct xsc_qos_mbox_out *out, int out_size) +{ + const struct xsc_pfc_set *req = (struct xsc_pfc_set *)in->data; + struct xsc_pfc_set *rsp = (struct xsc_pfc_set *)out->data; + struct xsc_pfc_cfg pfc_cfg; + u8 curr_pfc[PFC_PRIO_MAX + 1] = {0}; + int idx; + int err = 0; + bool invalid_op = false; + + if (!mutex_trylock(&pfc_mutex)) { + xsc_core_err(xdev, "pfc is configuring by other user\n"); + return -EBUSY; + } + + memcpy(rsp, req, sizeof(struct xsc_pfc_set)); + memset(&pfc_cfg, 0, sizeof(struct xsc_pfc_cfg)); + + if (req->priority < 0 || req->priority > PFC_PRIO_MAX) { + xsc_core_err(xdev, "invalid req priority: %d\n", req->priority); + err = -EINVAL; + goto err_process; + } + + 
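/* record the requested setting; the current port PFC state is queried below to classify the request as enable, disable or modify */ +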
pfc_cfg.req_prio = req->priority; + pfc_cfg.req_pfc_en = req->pfc_on; + pfc_cfg.curr_pfc_en = 0; + pfc_cfg.pfc_op = PFC_OP_TYPE_MAX; + pfc_cfg.lossless_num = LOSSLESS_NUM_INVAILD; + + err = xsc_get_port_pfc(xdev, curr_pfc, sizeof(curr_pfc)); + if (err) + goto err_process; + + for (idx = 0; idx < PFC_PRIO_MAX + 1; idx++) { + if (curr_pfc[idx] == NIF_PFC_EN_ON) { + pfc_cfg.curr_prio = idx; + pfc_cfg.curr_pfc_en = 1; + break; + } + } + + if (pfc_cfg.curr_pfc_en && pfc_cfg.req_pfc_en) { + if (pfc_cfg.curr_prio != pfc_cfg.req_prio) + pfc_cfg.pfc_op = PFC_OP_MODIFY; + else + invalid_op = true; + } else if (pfc_cfg.curr_pfc_en && !pfc_cfg.req_pfc_en) { + if (pfc_cfg.curr_prio == pfc_cfg.req_prio) + pfc_cfg.pfc_op = PFC_OP_DISABLE; + else + invalid_op = true; + } else if (!pfc_cfg.curr_pfc_en && pfc_cfg.req_pfc_en) { + pfc_cfg.pfc_op = PFC_OP_ENABLE; + } else { + invalid_op = true; + } + + if (invalid_op) { + xsc_core_err(xdev, "invalid operation, req_pfc_cfg:%d,%d curr_pfc_cfg:%d,%d\n", + pfc_cfg.req_prio, pfc_cfg.req_pfc_en, + pfc_cfg.curr_prio, pfc_cfg.curr_pfc_en); + err = 0; + goto err_process; + } + + xsc_core_dbg(xdev, "req_pfc_cfg:%d, %d curr_pfc_cfg: %d,%d, pfc_op: %d\n", + pfc_cfg.req_prio, pfc_cfg.req_pfc_en, + pfc_cfg.curr_prio, pfc_cfg.curr_pfc_en, pfc_cfg.pfc_op); + + err = xsc_set_drop_th(xdev, &pfc_cfg, DROP_TH_CLEAR); + if (err) + goto err_process; + + err = xsc_wait_pfc_check_complete(xdev, &pfc_cfg); + if (!err) + err = xsc_set_pfc(xdev, &pfc_cfg); + + err |= xsc_set_drop_th(xdev, &pfc_cfg, DROP_TH_RECOVER); + +err_process: + mutex_unlock(&pfc_mutex); + + if (pfc_cfg.pfc_op == PFC_OP_MODIFY) + rsp->src_prio = pfc_cfg.curr_prio; + else + rsp->src_prio = pfc_cfg.req_prio; + + rsp->lossless_num = pfc_cfg.lossless_num; + rsp->type = pfc_cfg.pfc_op; + out->hdr.status = err; + xsc_core_dbg(xdev, "response lossless_num: %d, src_prio: %d, type: %d, hdr status: %d\n", + rsp->lossless_num, rsp->src_prio, rsp->type, out->hdr.status); + return err; +} + +static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_qos_mbox_in *in; + struct xsc_qos_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->req_prfx.mac_port = xdev->mac_port; + + if (encode) + encode((void *)in->data, xdev->mac_port); + + if (hdr->attr.opcode == XSC_CMD_OP_IOCTL_SET_PFC) + err = handle_pfc_cfg(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + else + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_hwc_mbox_in *in; + struct xsc_hwc_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dscp_pmt_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dscp_pmt_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_trust_mode_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_trust_mode_get), NULL, NULL); + 
case XSC_CMD_OP_IOCTL_SET_PCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_pcp_pmt_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_PCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_pcp_pmt_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_default_pri_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_default_pri_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_PFC: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_pfc_set), + sizeof(struct xsc_pfc_set), + NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_PFC: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_pfc_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_rate_limit_set), 0, + encode_rlimit_set, NULL); + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, sizeof(struct xsc_rate_limit_get), + sizeof(struct xsc_rate_limit_get), + NULL, decode_rlimit_get); + case XSC_CMD_OP_IOCTL_SET_SP: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_sp_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_SP: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_sp_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_weight_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dpu_port_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dpu_port_weight_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dpu_prio_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dpu_prio_weight_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_HWC: + return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr, + sizeof(struct hwc_set_t), sizeof(struct hwc_set_t), + NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_HWC: + return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr, sizeof(struct hwc_get_t), + sizeof(struct hwc_get_t), + NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_watchdog_en_set), 0, + NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_watchdog_en_get), + NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_watchdog_period_set), 0, + encode_watchdog_set, NULL); + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_watchdog_period_get), + NULL, decode_watchdog_get); + default: + return TRY_NEXT_CB; + } + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, 
user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + goto err_exit; + } + + xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length); + + if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out, hdr.attr.length)) + err = -EFAULT; +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int _eth_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) +{ + struct xsc_core_device *xdev = file->xdev; + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = _eth_ctrl_ioctl_cmdq(xdev, user_hdr); + break; + default: + err = TRY_NEXT_CB; + break; + } + + return err; +} + +static void _eth_ctrl_reg_fini(void) +{ + xsc_port_ctrl_cb_dereg(XSC_ETH_CTRL_NAME); +} + +static int _eth_ctrl_reg_init(void) +{ + int ret; + + ret = xsc_port_ctrl_cb_reg(XSC_ETH_CTRL_NAME, _eth_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_ETH_CTRL_NAME); + + return ret; +} + +static void _pfc_global_res_init(void) +{ + mutex_init(&pfc_mutex); +} + +void xsc_eth_ctrl_fini(void) +{ + _eth_ctrl_reg_fini(); +} + +int xsc_eth_ctrl_init(void) +{ + _pfc_global_res_init(); + return _eth_ctrl_reg_init(); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e93f0afc4197c699b47839e9e560badf5e49f2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_CTRL_H +#define XSC_ETH_CTRL_H + +void xsc_eth_ctrl_fini(void); +int xsc_eth_ctrl_init(void); + +#endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..61850c2ea9dee622de2c0ca6b43e00bc68bd2ab5 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_DEBUG_H +#define XSC_ETH_DEBUG_H + +#include "common/xsc_core.h" +#include +#include "xsc_eth.h" + +static bool debug; +#define FUN_LINE_FMT "%s %d " + +#define ETH_DEBUG_LOG(fmt, ...) do { } while (0) + +#define XSC_MSG_LEVEL (NETIF_MSG_LINK) // | NETIF_MSG_HW) + +#define xsc_eth_dbg(mlevel, priv, format, ...) 
\ +do { \ + if (NETIF_MSG_##mlevel & (priv)->msglevel) \ + netdev_warn(priv->netdev, format, \ + ##__VA_ARGS__); \ +} while (0) + +#define WQE_CSEG_DUMP(seg_name, seg) \ + do { \ + ETH_DEBUG_LOG("dump %s:\n", seg_name); \ + ETH_DEBUG_LOG("cseg->has_pph: %d\n", (seg)->has_pph); \ + ETH_DEBUG_LOG("cseg->so_type: %d\n", (seg)->so_type); \ + ETH_DEBUG_LOG("cseg->so_hdr_len: %d\n", (seg)->so_hdr_len); \ + ETH_DEBUG_LOG("cseg->so_data_size: %d\n", (seg)->so_data_size); \ + ETH_DEBUG_LOG("cseg->msg_opcode: %d\n", (seg)->msg_opcode); \ + ETH_DEBUG_LOG("cseg->wqe_id: %d\n", (seg)->wqe_id); \ + ETH_DEBUG_LOG("cseg->ds_data_num: %d\n", (seg)->ds_data_num); \ + ETH_DEBUG_LOG("cseg->msg_len: %d\n", (seg)->msg_len); \ + } while (0) + +#define WQE_DSEG_DUMP(seg_name, seg) \ + do { \ + ETH_DEBUG_LOG("dump %s:\n", seg_name); \ + ETH_DEBUG_LOG("dseg->va: %#llx\n", (seg)->va); \ + ETH_DEBUG_LOG("dseg->in_line: %d\n", (seg)->in_line); \ + ETH_DEBUG_LOG("dseg->mkey: %d\n", (seg)->mkey); \ + ETH_DEBUG_LOG("dseg->seg_len: %d\n", (seg)->seg_len); \ + } while (0) + +static inline void skbdata_debug_dump(struct sk_buff *skb, u16 headlen, int direct) +{ + if (!debug) + return; + + netdev_info(skb->dev, "pkt[%s]: skb_len=%d, head_len=%d\n", + (direct ? "tx" : "rx"), skb->len, headlen); + + if (skb) { + char *buf = skb->data; + int i, j; + int pos; + + for (i = 0; i < headlen; i++) { + if (i % 16 == 0) + pr_info("%#4.4x ", i); + pr_info("%2.2x ", ((unsigned char *)buf)[i]); + } + + pr_info("\n"); + + pos = headlen; + for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; + int fsz = skb_frag_size(frag); + + buf = (char *)(page_address(frag->bv_page) + frag->bv_offset); + for (i = 0; i < fsz; i++) { + if (i % 16 == 0) + pr_info("%#4.4x ", i); + pr_info("%2.2x ", ((unsigned char *)buf)[i]); + } + + pos += frag->bv_len; + } + pr_info("\n"); + } +} + +#define ETH_SQ_STATE(sq) \ + do { \ + if (test_bit(__QUEUE_STATE_STACK_XOFF, &(sq)->txq->state)) \ + ETH_DEBUG_LOG("sq is __QUEUE_STATE_STACK_XOFF\n"); \ + else if (test_bit(__QUEUE_STATE_DRV_XOFF, &(sq)->txq->state)) \ + ETH_DEBUG_LOG("sq is __QUEUE_STATE_DRV_XOFF\n"); \ + else \ + ETH_DEBUG_LOG("sq is %ld\n", (sq)->txq->state); \ + } while (0) + +static inline void xsc_pkt_pph_dump(char *data, int len) +{ + int i; + + if (!debug) + return; + + for (i = 0; i < len; i++) { + if (i % 16 == 0) + pr_info("%#4.4x ", i); + pr_info("%2.2x ", ((unsigned char *)data)[i]); + } +} + +#endif /* XSC_ETH_DEBUG_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c new file mode 100644 index 0000000000000000000000000000000000000000..3a29fa03e92ad9bd1d9def35ef7265ba0e9dce4b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "xsc_eth_dim.h" +#include "xsc_queue.h" +#include "xsc_eth_stats.h" + +xsc_dim_cq_moder_t xsc_get_def_tx_moderation(u8 cq_period_mode) +{ + xsc_dim_cq_moder_t moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + moder.usec = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + if (cq_period_mode == XSC_CQ_PERIOD_MODE_START_FROM_CQE) + moder.usec = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} + +xsc_dim_cq_moder_t xsc_get_def_rx_moderation(u8 cq_period_mode) +{ + xsc_dim_cq_moder_t moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + moder.usec = XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + return moder; +} + +void xsc_set_tx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode) +{ + if (params->tx_dim_enabled) + params->tx_cq_moderation = net_dim_get_tx_moderation(cq_period_mode, + XSC_DEF_TX_DIM_PROFILE_IDX); + else + params->tx_cq_moderation = xsc_get_def_tx_moderation(cq_period_mode); + + XSC_SET_PFLAG(params, XSC_PFLAG_TX_CQE_BASED_MODER, + params->tx_cq_moderation.cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); +} + +void xsc_set_rx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode) +{ + if (params->rx_dim_enabled) { + params->rx_cq_moderation = net_dim_get_rx_moderation(cq_period_mode, + XSC_DEF_RX_DIM_PROFILE_IDX); + if (cq_period_mode == XSC_CQ_PERIOD_MODE_START_FROM_EQE) + params->rx_cq_moderation.usec = + XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_EQE; + } else { + params->rx_cq_moderation = xsc_get_def_rx_moderation(cq_period_mode); + } + + params->rx_dim_usecs_low = XSC_PARAMS_RX_DIM_USECS_LOW; + params->rx_dim_frames_low = XSC_PARAMS_RX_DIM_FRAMES_LOW; + + XSC_SET_PFLAG(params, XSC_PFLAG_RX_CQE_BASED_MODER, + params->rx_cq_moderation.cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); +} + +void xsc_handle_tx_dim(struct xsc_sq *sq) +{ + xsc_dim_sample_t *sample = &sq->dim_obj.sample; + + if (unlikely(!test_bit(XSC_ETH_SQ_STATE_AM, &sq->state))) + return; + + dim_update_sample(sq->cq.event_ctr, sample->pkt_ctr, sample->byte_ctr, sample); + net_dim(&sq->dim_obj.dim, *sample); +} + +void xsc_handle_rx_dim(struct xsc_rq *rq) +{ + xsc_dim_sample_t *sample = &rq->dim_obj.sample; + + if (unlikely(!test_bit(XSC_ETH_RQ_STATE_AM, &rq->state))) + return; + + dim_update_sample(rq->cq.event_ctr, sample->pkt_ctr, sample->byte_ctr, sample); + net_dim(&rq->dim_obj.dim, *sample); +} + +static void xsc_complete_dim_work(xsc_dim_t *dim, xsc_dim_cq_moder_t moder, + struct xsc_core_device *dev, struct xsc_core_cq *xcq) +{ + xcq->dim_us = moder.usec; + xcq->dim_pkts = moder.pkts; + dim->state = XSC_DIM_START_MEASURE; +} + +void xsc_rx_dim_work(struct work_struct *work) +{ + xsc_dim_t *dim = container_of(work, xsc_dim_t, work); + struct xsc_dim *dim_obj = container_of(dim, struct xsc_dim, dim); + struct xsc_rq *rq = container_of(dim_obj, struct xsc_rq, dim_obj); + xsc_dim_cq_moder_t cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + xsc_complete_dim_work(dim, cur_moder, rq->cq.xdev, &rq->cq.xcq); + rq->stats->dim_pkts = cur_moder.pkts; +} + +void xsc_tx_dim_work(struct work_struct *work) +{ + xsc_dim_t *dim = container_of(work, xsc_dim_t, work); + struct xsc_dim *dim_obj = container_of(dim, struct xsc_dim, dim); + struct xsc_sq *sq = container_of(dim_obj, struct xsc_sq, dim_obj); + xsc_dim_cq_moder_t cur_moder = + net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + + 
xsc_complete_dim_work(dim, cur_moder, sq->cq.xdev, &sq->cq.xcq); + sq->stats->dim_pkts = cur_moder.pkts; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h new file mode 100644 index 0000000000000000000000000000000000000000..1e3515db5eef996ca76a07f17ec7422f3349a970 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_DIM_H +#define XSC_ETH_DIM_H + +#include "xsc_eth_common.h" + +#define XSC_DEF_RX_DIM_PROFILE_IDX 4 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x1 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x40 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x2 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_EQE 0x40 + +#define XSC_PARAMS_RX_DIM_USECS_LOW 8 +#define XSC_PARAMS_RX_DIM_FRAMES_LOW 2 + +#define XSC_DEF_TX_DIM_PROFILE_IDX 4 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x1 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x2 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x80 +#define XSC_MAX_COAL_TIME 512 +#define XSC_MAX_COAL_FRAMES 1024 + +#define XSC_DIM_START_MEASURE DIM_START_MEASURE + +enum { + XSC_CQ_PERIOD_MODE_START_FROM_EQE = DIM_CQ_PERIOD_MODE_START_FROM_EQE, + XSC_CQ_PERIOD_MODE_START_FROM_CQE = DIM_CQ_PERIOD_MODE_START_FROM_CQE, + XSC_CQ_PERIOD_NUM_MODES +}; + +xsc_dim_cq_moder_t xsc_get_def_tx_moderation(u8 cq_period_mode); +xsc_dim_cq_moder_t xsc_get_def_rx_moderation(u8 cq_period_mode); +u8 xsc_to_net_dim_cq_period_mode(u8 cq_period_mode); +void xsc_set_tx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode); +void xsc_set_rx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode); + +void xsc_tx_dim_work(struct work_struct *work); +void xsc_rx_dim_work(struct work_struct *work); + +void xsc_handle_tx_dim(struct xsc_sq *sq); +void xsc_handle_rx_dim(struct xsc_rq *rq); + +#endif /* XSC_ETH_DIM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..01c055372003dcd3c77ef65bfc82542420a9c88c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c @@ -0,0 +1,1279 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "xsc_eth_stats.h" +#include "xsc_eth_debug.h" +#include "xsc_eth_ethtool.h" +#include "xsc_eth.h" +#include "common/xsc_cmd.h" +#include "common/xsc_pp.h" +#include "common/port.h" +#include "xsc_eth_dim.h" + +typedef int (*xsc_pflag_handler)(struct net_device *dev, bool enable); + +struct pflag_desc { + char name[ETH_GSTRING_LEN]; + xsc_pflag_handler handler; +}; + +static const char * const fpga_type_name[] = {"S", "L"}; +static const char * const hps_ddr_name[] = {"1", "2", "4", "unknown"}; +static const char * const onchip_ft_name[] = {"N", "O" }; +static const char * const rdma_icrc_name[] = {"N", "C" }; +static const char * const ma_xbar_name[] = {"N", "X" }; +static const char * const anlt_fec_name[] = {"N", "A" }; +static const char * const pp_tbl_dma_name[] = {"N", "D" }; +static const char * const pct_exp_name[] = {"N", "E" }; + +enum { + XSC_ST_LINK_STATE, + XSC_ST_LINK_SPEED, + XSC_ST_HEALTH_INFO, +#ifdef CONFIG_INET + XSC_ST_LOOPBACK, +#endif + XSC_ST_NUM, +}; + +const char xsc_self_tests[XSC_ST_NUM][ETH_GSTRING_LEN] = { + "Link Test", + "Speed Test", + "Health Test", +#ifdef CONFIG_INET + "Loopback Test", +#endif +}; + +static int xsc_test_loopback(struct xsc_adapter *adapter) +{ + if (adapter->status != XSCALE_ETH_DRIVER_OK) { + netdev_err(adapter->netdev, + "\tCan't perform loopback test while device is down\n"); + return -ENODEV; + } + return 0; +} + +static int xsc_test_health_info(struct xsc_adapter *adapter) +{ + struct xsc_core_health *health = &adapter->xdev->priv.health; + + return health->sick ? 1 : 0; +} + +static int xsc_test_link_state(struct xsc_adapter *adapter) +{ + u8 port_state; + + if (!netif_carrier_ok(adapter->netdev)) + return 1; + + port_state = xsc_eth_get_link_status(adapter); + return port_state == 0 ? 1 : 0; +} + +static int xsc_test_link_speed(struct xsc_adapter *adapter) +{ + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) + return 1; + + return 0; +} + +static int set_pflag_rx_no_csum_complete(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_NO_CSUM_COMPLETE, enable); + + return 0; +} + +static int set_pflag_sniffer(struct net_device *dev, bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_SNIFFER, enable); + + return 0; +} + +static int set_pflag_dropless_rq(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_DROPLESS_RQ, enable); + + return 0; +} + +static int set_pflag_rx_copy_break(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_COPY_BREAK, enable); + + return 0; +} + +static int cqe_mode_to_period_mode(bool val) +{ + return val ? XSC_CQ_PERIOD_MODE_START_FROM_CQE : XSC_CQ_PERIOD_MODE_START_FROM_EQE; +} + +static int set_pflag_cqe_based_moder(struct net_device *dev, bool enable, + bool is_rx_cq) +{ + struct xsc_adapter *priv = netdev_priv(dev); + u8 cq_period_mode, current_cq_period_mode; + struct xsc_eth_params new_params; + int err; + + cq_period_mode = cqe_mode_to_period_mode(enable); + + current_cq_period_mode = is_rx_cq ? 
+ priv->nic_param.rx_cq_moderation.cq_period_mode : + priv->nic_param.tx_cq_moderation.cq_period_mode; + + if (cq_period_mode == current_cq_period_mode) + return 0; + + new_params = priv->nic_param; + if (is_rx_cq) + xsc_set_rx_cq_mode_params(&new_params, cq_period_mode); + else + xsc_set_tx_cq_mode_params(&new_params, cq_period_mode); + + priv->nic_param = new_params; + + err = xsc_safe_switch_channels(priv, NULL, NULL); + return err; +} + +static int set_pflag_rx_cqe_moder(struct net_device *dev, bool enable) +{ + return set_pflag_cqe_based_moder(dev, enable, true); +} + +static int set_pflag_tx_cqe_moder(struct net_device *dev, bool enable) +{ + return set_pflag_cqe_based_moder(dev, enable, false); +} + +static const struct pflag_desc xsc_priv_flags[XSC_NUM_PFLAGS] = { + { "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, + { "sniffer", set_pflag_sniffer }, + { "dropless_rq", set_pflag_dropless_rq}, + { "rx_copy_break", set_pflag_rx_copy_break}, + { "rx_cqe_moder", set_pflag_rx_cqe_moder}, + { "tx_cqe_moder", set_pflag_tx_cqe_moder}, +}; + +int xsc_priv_flags_num(void) +{ + return ARRAY_SIZE(xsc_priv_flags); +} + +const char *xsc_priv_flags_name(int flag) +{ + return xsc_priv_flags[flag].name; +} + +static int xsc_handle_pflag(struct net_device *dev, + u32 wanted_flags, + enum xsc_eth_priv_flag flag) +{ + struct xsc_adapter *priv = netdev_priv(dev); + bool enable = !!(wanted_flags & BIT(flag)); + u32 changes = wanted_flags ^ priv->nic_param.pflags; + int err; + + if (!(changes & BIT(flag))) + return 0; + + err = xsc_priv_flags[flag].handler(dev, enable); + if (err) + netdev_err(dev, "%s private flag '%s' failed err %d\n", + enable ? "Enable" : "Disable", + xsc_priv_flags[flag].name, err); + + return err; +} + +int xsc_set_priv_flags(struct net_device *dev, u32 pflags) +{ + struct xsc_adapter *priv = netdev_priv(dev); + enum xsc_eth_priv_flag pflag; + int err; + + mutex_lock(&priv->state_lock); + + for (pflag = 0; pflag < XSC_NUM_PFLAGS; pflag++) { + err = xsc_handle_pflag(dev, pflags, pflag); + if (err) + break; + } + + mutex_unlock(&priv->state_lock); + + /* Need to fix some features.. */ + netdev_update_features(dev); + + return err; +} + +static int xsc_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int size_read = 0; + u8 data[4] = {0}; + + size_read = xsc_query_module_eeprom(xdev, 0, 3, data); + if (size_read < 3) + return -EIO; + + /* data[0] = identifier byte */ + switch (data[0]) { + case XSC_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP28: + /* data[1] = revision id */ + if (data[0] == XSC_MODULE_ID_QSFP28 || data[1] >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } + break; + case XSC_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_DSFP: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + modinfo->type = ETH_MODULE_SFF_8636; + /* Verify if module EEPROM is a flat memory. In case of flat + * memory only page 00h (0-255 bytes) can be read. Otherwise + * upper pages 01h and 02h can also be read. 
Upper pages 10h + * and 11h are currently not supported by the driver. + */ + if (data[2] & 0x80) + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + else + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", + __func__, data[0]); + return -EINVAL; + } + + return 0; +} + +static int xsc_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int offset = ee->offset; + int size_read; + int i = 0; + + if (!ee->len) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + size_read = xsc_query_module_eeprom(xdev, offset, ee->len - i, data + i); + + if (!size_read) + /* Done reading */ + return 0; + + if (size_read < 0) { + netdev_err(priv->netdev, "%s: xsc_query_eeprom failed:0x%x\n", + __func__, size_read); + return 0; + } + + i += size_read; + offset += size_read; + } + + return 0; +} + +static int xsc_get_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_module_eeprom_query_params query; + u8 *data = page_data->data; + int size_read; + int i = 0; + + if (!page_data->length) + return -EINVAL; + + memset(data, 0, page_data->length); + + query.offset = page_data->offset; + query.i2c_address = page_data->i2c_address; + query.bank = page_data->bank; + query.page = page_data->page; + while (i < page_data->length) { + query.size = page_data->length - i; + size_read = xsc_query_module_eeprom_by_page(xdev, &query, data + i); + + // Done reading, return how many bytes was read + if (!size_read) + return i; + + if (size_read == -EINVAL) + return -EINVAL; + if (size_read < 0) { + netdev_err(priv->netdev, "%s: xsc_query_module_eeprom_by_page failed:0x%x\n", + __func__, size_read); + return i; + } + + i += size_read; + query.offset += size_read; + } + + return i; +} + +u32 xsc_get_priv_flags(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + return priv->nic_param.pflags; +} + +static void xsc_set_drv_fw_version(struct ethtool_drvinfo *info, struct xsc_core_device *xdev) +{ + u8 fw_ver_major = xdev->fw_version_major; + u8 fw_ver_minor = xdev->fw_version_minor; + u16 fw_ver_patch = xdev->fw_version_patch; + u32 fw_ver_tweak = xdev->fw_version_tweak; + u8 fw_ver_extra_flag = xdev->fw_version_extra_flag; + + if (fw_ver_tweak == 0) { + if (fw_ver_extra_flag == 0) { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u", + fw_ver_major, fw_ver_minor, fw_ver_patch); + } else { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u-dirty", + fw_ver_major, fw_ver_minor, fw_ver_patch); + } + } else { + if (fw_ver_extra_flag == 0) { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u+%u", + fw_ver_major, fw_ver_minor, fw_ver_patch, fw_ver_tweak); + } else { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u+%u-dirty", + fw_ver_major, fw_ver_minor, fw_ver_patch, fw_ver_tweak); + } + } +} + +static void xsc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + snprintf(info->driver, sizeof(info->driver), "%s", XSCALE_DRIVER_NAME); + + if (HOTFIX_NUM == 0) + snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d", + BRANCH_VERSION, MAJOR_VERSION, 
MINOR_VERSION, BUILD_VERSION); + else + snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d.H%d", + BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION, HOTFIX_NUM); + + xsc_set_drv_fw_version(info, adapter->xdev); + strscpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); +} + +static void xsc_fill_stats_strings(struct xsc_adapter *adapter, u8 *data) +{ + int i, idx = 0; + + for (i = 0; i < xsc_num_stats_grps; i++) + idx = xsc_stats_grps[i].fill_strings(adapter, data, idx); +} + +static int xsc_self_test_num(struct xsc_adapter *adapter) +{ + return ARRAY_SIZE(xsc_self_tests); +} + +static void xsc_ethtool_get_strings(struct xsc_adapter *adapter, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + xsc_fill_stats_strings(adapter, data); + break; + + case ETH_SS_TEST: + for (i = 0; i < xsc_self_test_num(adapter); i++) + strscpy(data + i * ETH_GSTRING_LEN, + xsc_self_tests[i], + ETH_GSTRING_LEN); + break; + + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < XSC_NUM_PFLAGS; i++) + strscpy(data + i * ETH_GSTRING_LEN, + xsc_priv_flags[i].name, + ETH_GSTRING_LEN); + break; + + default: + ETH_DEBUG_LOG("wrong stringset\n"); + break; + } +} + +static void xsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + xsc_ethtool_get_strings(adapter, stringset, data); +} + +static int xsc_ethtool_get_sset_count(struct xsc_adapter *adapter, int sset) +{ + int i, num_stats = 0; + + switch (sset) { + case ETH_SS_STATS: + for (i = 0; i < xsc_num_stats_grps; i++) + num_stats += xsc_stats_grps[i].get_num_stats(adapter); + return num_stats; + case ETH_SS_PRIV_FLAGS: + return XSC_NUM_PFLAGS; + case ETH_SS_TEST: + return xsc_self_test_num(adapter); + default: + return -EOPNOTSUPP; + } +} + +static int xsc_get_sset_count(struct net_device *dev, int sset) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + return xsc_ethtool_get_sset_count(adapter, sset); +} + +static int (*xsc_st_func[XSC_ST_NUM])(struct xsc_adapter *) = { + xsc_test_link_state, + xsc_test_link_speed, + xsc_test_health_info, +#ifdef CONFIG_INET + xsc_test_loopback, +#endif +}; + +static void xsc_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) +{ + struct xsc_adapter *priv = netdev_priv(ndev); + int i; + + memset(buf, 0, sizeof(u64) * XSC_ST_NUM); + + mutex_lock(&priv->state_lock); + netdev_info(ndev, "Self test begin..\n"); + + for (i = 0; i < XSC_ST_NUM; i++) { + netdev_info(ndev, "\t[%d] %s start..\n", + i, xsc_self_tests[i]); + buf[i] = xsc_st_func[i](priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", + i, xsc_self_tests[i], buf[i]); + } + + mutex_unlock(&priv->state_lock); + + for (i = 0; i < XSC_ST_NUM; i++) { + if (buf[i]) { + etest->flags |= ETH_TEST_FL_FAILED; + break; + } + } + netdev_info(ndev, "Self test out: status flags(0x%x)\n", + etest->flags); +} + +static void xsc_update_stats(struct xsc_adapter *adapter) +{ + int i; + + for (i = xsc_num_stats_grps - 1; i >= 0; i--) + if (xsc_stats_grps[i].update_stats) + xsc_stats_grps[i].update_stats(adapter); +} + +static void xsc_ethtool_get_ethtool_stats(struct xsc_adapter *adapter, + struct ethtool_stats *stats, u64 *data) +{ + int i, idx = 0; + + mutex_lock(&adapter->state_lock); + xsc_update_stats(adapter); + mutex_unlock(&adapter->state_lock); + + for (i = 0; i < xsc_num_stats_grps; i++) + idx = xsc_stats_grps[i].fill_stats(adapter, data, idx); +} + +static void xsc_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats 
*stats, u64 *data) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + xsc_ethtool_get_ethtool_stats(adapter, stats, data); +} + +static u32 xsc_get_msglevel(struct net_device *dev) +{ + return ((struct xsc_adapter *)netdev_priv(dev))->msglevel; +} + +static void xsc_set_msglevel(struct net_device *dev, u32 val) +{ + ((struct xsc_adapter *)netdev_priv(dev))->msglevel = val; +} + +static void xsc_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + param->rx_max_pending = 8192; //hack for H3C + param->rx_pending = priv->nic_param.rq_size; + param->tx_max_pending = 8192; //hack for H3C + param->tx_pending = priv->nic_param.sq_size; +} + +static int xsc_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(dev); + u32 old_rq_size, old_sq_size; + int err = 0; + + if (param->rx_jumbo_pending) { + netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_mini_pending) { + netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n", + __func__); + return -EINVAL; + } + + if (param->rx_pending < BIT(XSC_MIN_LOG_RQ_SZ)) { + netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%ld)\n", + __func__, param->rx_pending, BIT(XSC_MIN_LOG_RQ_SZ)); + return -EINVAL; + } + if (param->rx_pending > priv->nic_param.rq_max_size) { + netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n", + __func__, param->rx_pending, priv->nic_param.rq_max_size); + param->rx_pending = priv->nic_param.rq_max_size; + } + + if (param->tx_pending < BIT(XSC_MIN_LOG_SQ_SZ)) { + netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%ld)\n", + __func__, param->tx_pending, BIT(XSC_MIN_LOG_SQ_SZ)); + return -EINVAL; + } + if (param->tx_pending > priv->nic_param.sq_max_size) { + netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n", + __func__, param->tx_pending, priv->nic_param.sq_max_size); + param->tx_pending = priv->nic_param.sq_max_size; + } + + if (param->rx_pending == priv->nic_param.rq_size && + param->tx_pending == priv->nic_param.sq_size) + return 0; + + mutex_lock(&priv->state_lock); + + if (priv->status != XSCALE_ETH_DRIVER_OK) + goto unlock; + + old_rq_size = priv->nic_param.rq_size; + old_sq_size = priv->nic_param.sq_size; + priv->nic_param.rq_size = param->rx_pending; + priv->nic_param.sq_size = param->tx_pending; + + netdev_info(priv->netdev, "%s: tx_pending(%d->%d), rx_pending(%d->%d)\n", + __func__, old_sq_size, param->tx_pending, + old_rq_size, priv->nic_param.rq_size); + err = xsc_safe_switch_channels(priv, NULL, NULL); + if (err) { + priv->nic_param.rq_size = old_rq_size; + priv->nic_param.sq_size = old_sq_size; + netdev_err(priv->netdev, "%s: set ringparams failed, err=%d\n", + __func__, err); + } + +unlock: + mutex_unlock(&priv->state_lock); + + return err; +} + +static void xsc_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + mutex_lock(&priv->state_lock); + + ch->max_combined = priv->nic_param.max_num_ch; + ch->combined_count = priv->nic_param.num_channels; + + mutex_unlock(&priv->state_lock); +} + +static int xsc_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_eth_params *params = 
&priv->nic_param; + unsigned int ch_max = params->max_num_ch; + unsigned int ch_num_old = params->num_channels; + unsigned int count = ch->combined_count; + int err = 0; + + if (!count) { + netdev_info(priv->netdev, "%s: combined_count=0 not supported\n", __func__); + return -EINVAL; + } + + if (ch->rx_count || ch->tx_count) { + netdev_info(priv->netdev, "%s: separate rx/tx count not supported\n", __func__); + return -EINVAL; + } + + if (count > ch_max) { + netdev_info(priv->netdev, "%s: count (%d) > max (%d)\n", + __func__, count, ch_max); + return -EINVAL; + } + + if (ch_num_old == count) + return 0; + + mutex_lock(&priv->state_lock); + + params->num_channels = count; + + if (priv->status != XSCALE_ETH_DRIVER_OK) { + err = xsc_eth_num_channels_changed(priv); + if (err) + params->num_channels = ch_num_old; + goto out; + } + + /* Switch to new channels, set new parameters and close old ones */ + err = xsc_safe_switch_channels(priv, NULL, xsc_eth_num_channels_changed); + +out: + mutex_unlock(&priv->state_lock); + netdev_info(priv->netdev, "set combined_cnt=%d, err=%d\n", count, err); + + return err; +} + +static int flow_type_to_traffic_type(u32 flow_type) +{ + switch (flow_type) { + case IPV4_FLOW: + return XSC_TT_IPV4; + case TCP_V4_FLOW: + return XSC_TT_IPV4_TCP; + case UDP_V4_FLOW: + return XSC_TT_IPV4_TCP; + case IPV6_FLOW: + return XSC_TT_IPV6; + case TCP_V6_FLOW: + return XSC_TT_IPV6_TCP; + case UDP_V6_FLOW: + return XSC_TT_IPV6_TCP; + case AH_V4_FLOW: + return XSC_TT_IPV4_IPSEC_AH; + case AH_V6_FLOW: + return XSC_TT_IPV6_IPSEC_AH; + case ESP_V4_FLOW: + return XSC_TT_IPV4_IPSEC_ESP; + case ESP_V6_FLOW: + return XSC_TT_IPV6_IPSEC_ESP; + default: + return -EINVAL; + } +} + +static int xsc_get_rss_hash_opt(struct xsc_adapter *priv, + struct ethtool_rxnfc *nfc) +{ + u32 hash_field = 0; + int tt; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt < 0) + return -EINVAL; + + hash_field = priv->rss_params.rx_hash_fields[tt]; + nfc->data = 0; + + if (hash_field & XSC_HASH_FIELD_SEL_PROTO) + nfc->data |= RXH_L3_PROTO; + if (tt == XSC_TT_IPV4_TCP) { + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP) + nfc->data |= RXH_IP_SRC; + if (hash_field & XSC_HASH_FIELD_SEL_DST_IP) + nfc->data |= RXH_IP_DST; + if (hash_field & XSC_HASH_FIELD_SEL_SPORT) + nfc->data |= RXH_L4_B_0_1; + if (hash_field & XSC_HASH_FIELD_SEL_DPORT) + nfc->data |= RXH_L4_B_2_3; + } else if (tt == XSC_TT_IPV6_TCP) { + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6) + nfc->data |= RXH_IP_SRC; + if (hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + nfc->data |= RXH_IP_DST; + if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6) + nfc->data |= RXH_L4_B_0_1; + if (hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + nfc->data |= RXH_L4_B_2_3; + } + + return 0; +} + +static int xsc_set_rss_hash_opt(struct xsc_adapter *priv, + struct ethtool_rxnfc *nfc) +{ + u32 rx_hash_field = XSC_HASH_FIELD_SEL_PROTO; + u32 change = 0; + int ret = 0; + int tt; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt < 0) + return -EINVAL; + + /* RSS does not support anything other than hashing to queues + * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest + * port. 
+ */ + if (nfc->flow_type != TCP_V4_FLOW && + nfc->flow_type != TCP_V6_FLOW && + nfc->flow_type != UDP_V4_FLOW && + nfc->flow_type != UDP_V6_FLOW) + return -EOPNOTSUPP; + + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EOPNOTSUPP; + + if (nfc->flow_type == TCP_V4_FLOW) { + if (nfc->data & RXH_IP_SRC) + rx_hash_field |= XSC_HASH_FIELD_SEL_SRC_IP; + if (nfc->data & RXH_IP_DST) + rx_hash_field |= XSC_HASH_FIELD_SEL_DST_IP; + if (nfc->data & RXH_L4_B_0_1) + rx_hash_field |= XSC_HASH_FIELD_SEL_SPORT; + if (nfc->data & RXH_L4_B_2_3) + rx_hash_field |= XSC_HASH_FIELD_SEL_DPORT; + } else if (nfc->flow_type == TCP_V6_FLOW) { + if (nfc->data & RXH_IP_SRC) + rx_hash_field |= XSC_HASH_FIELD_SEL_SRC_IPV6; + if (nfc->data & RXH_IP_DST) + rx_hash_field |= XSC_HASH_FIELD_SEL_DST_IPV6; + if (nfc->data & RXH_L4_B_0_1) + rx_hash_field |= XSC_HASH_FIELD_SEL_SPORT_V6; + if (nfc->data & RXH_L4_B_2_3) + rx_hash_field |= XSC_HASH_FIELD_SEL_DPORT_V6; + } else { + return 0; + } + + mutex_lock(&priv->state_lock); + if (rx_hash_field != priv->rss_params.rx_hash_fields[tt]) { + change |= BIT(XSC_RSS_HASH_TEMP_UPDATE); + priv->rss_params.rx_hash_fields[tt] = rx_hash_field; + } + + xsc_core_info(priv->xdev, "flow_type=%d, change=0x%x, hash_tmpl=0x%x\n", + nfc->flow_type, change, rx_hash_field); + if (change) + ret = xsc_eth_modify_nic_hca(priv, change); + + mutex_unlock(&priv->state_lock); + return ret; +} + +int xsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_eth_params *params = &priv->nic_param; + int err = 0; + + if (info->cmd == ETHTOOL_GRXRINGS) { + info->data = params->num_channels; + return 0; + } + + switch (info->cmd) { + case ETHTOOL_GRXFH: + err = xsc_get_rss_hash_opt(priv, info); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int xsc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = xsc_set_rss_hash_opt(priv, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u32 xsc_get_rxfh_key_size(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + return sizeof(priv->rss_params.toeplitz_hash_key); +} + +static u32 xsc_get_rxfh_indir_size(struct net_device *netdev) +{ + return XSC_INDIR_RQT_SIZE; +} + +int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_rss_params *rss = &priv->rss_params; + + if (indir) + memcpy(indir, rss->indirection_rqt, + sizeof(rss->indirection_rqt)); + + if (key) + memcpy(key, rss->toeplitz_hash_key, + sizeof(rss->toeplitz_hash_key)); + + if (hfunc) + *hfunc = rss->hfunc; + + return 0; +} + +int xsc_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_rss_params *rss = &priv->rss_params; + u32 refresh = 0; + int err = 0; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_XOR && + hfunc != ETH_RSS_HASH_TOP) + return -EINVAL; + + mutex_lock(&priv->state_lock); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) { + rss->hfunc = hfunc; + refresh |= BIT(XSC_RSS_HASH_FUNC_UPDATE); + } + + if (key) { + memcpy(rss->toeplitz_hash_key, key, sizeof(rss->toeplitz_hash_key)); + if (rss->hfunc == ETH_RSS_HASH_TOP) + refresh |= 
BIT(XSC_RSS_HASH_KEY_UPDATE); + } + + if (refresh > 0 && priv->status == XSCALE_ETH_DRIVER_OK) + err = xsc_eth_modify_nic_hca(priv, refresh); + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int xsc_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) + return -EINVAL; + + cmd->base.port = linkinfo.port; + cmd->base.duplex = linkinfo.duplex; + cmd->base.autoneg = linkinfo.autoneg; + switch (linkinfo.linkspeed) { + case MODULE_SPEED_UNKNOWN: + cmd->base.speed = LINKSPEED_MODE_UNKNOWN; + break; + case MODULE_SPEED_10G: + cmd->base.speed = LINKSPEED_MODE_10G; + break; + case MODULE_SPEED_25G: + cmd->base.speed = LINKSPEED_MODE_25G; + break; + case MODULE_SPEED_40G_R4: + cmd->base.speed = LINKSPEED_MODE_40G; + break; + case MODULE_SPEED_50G_R: + case MODULE_SPEED_50G_R2: + cmd->base.speed = LINKSPEED_MODE_50G; + break; + case MODULE_SPEED_100G_R2: + case MODULE_SPEED_100G_R4: + cmd->base.speed = LINKSPEED_MODE_100G; + break; + case MODULE_SPEED_200G_R4: + case MODULE_SPEED_200G_R8: + cmd->base.speed = LINKSPEED_MODE_200G; + break; + case MODULE_SPEED_400G_R8: + cmd->base.speed = LINKSPEED_MODE_400G; + break; + default: + cmd->base.speed = LINKSPEED_MODE_25G; + break; + } + + //when link down, show speed && duplex as unknown + if (!linkinfo.linkstatus) { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = LINKSPEED_MODE_UNKNOWN; + } + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + bitmap_copy(cmd->link_modes.supported, (unsigned long *)linkinfo.supported_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(cmd->link_modes.advertising, (unsigned long *)linkinfo.advertising_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + bitmap_or(cmd->link_modes.supported, cmd->link_modes.supported, + (unsigned long *)&linkinfo.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_or(cmd->link_modes.advertising, cmd->link_modes.advertising, + (unsigned long *)&linkinfo.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + + return 0; +} + +static int xsc_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_linkinfo linkinfo; + int err = 0, i; + + if (!adapter) { + pr_err("%s fail to find adapter\n", __func__); + return -EINVAL; + } + + memset(&linkinfo, 0, sizeof(struct xsc_event_linkinfo)); + + linkinfo.port = cmd->base.port; + linkinfo.duplex = cmd->base.duplex; + linkinfo.autoneg = cmd->base.autoneg; + linkinfo.linkspeed = cpu_to_be32(cmd->base.speed); + + bitmap_copy((unsigned long *)linkinfo.supported_speed, + cmd->link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy((unsigned long *)linkinfo.advertising_speed, + cmd->link_modes.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + + for (i = 0; i < ARRAY_SIZE(linkinfo.supported_speed); i++) { + linkinfo.supported_speed[i] = be64_to_cpu(linkinfo.supported_speed[i]); + linkinfo.advertising_speed[i] = be64_to_cpu(linkinfo.advertising_speed[i]); + } + + err = xsc_eth_set_link_info(adapter, &linkinfo); + if (err) + xsc_core_err(adapter->xdev, "fail to set link info err %d\n", err); + + return err; +} + +static int xsc_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = 
adapter->xdev; + int ret = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + xsc_eth_set_led_status(xdev->pf_id, adapter); + break; + case ETHTOOL_ID_INACTIVE: + xsc_eth_set_led_status(LED_ACT_ON_HW, adapter); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int xsc_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_modify_fecparam_mbox_in in; + struct xsc_event_modify_fecparam_mbox_out out; + u32 new_fec = fec->fec; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_FEC_PARAM); + in.fec = cpu_to_be32(new_fec); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set fec param, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + return err; +} + +static int xsc_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_query_fecparam_mbox_in in; + struct xsc_event_query_fecparam_mbox_out out; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_FEC_PARAM); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get fec param, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + fec->active_fec = be32_to_cpu(out.active_fec); + fec->fec = be32_to_cpu(out.fec_cfg); + + return err; +} + +static int xsc_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + xsc_dim_cq_moder_t *rx_moder, *tx_moder; + + rx_moder = &priv->nic_param.rx_cq_moderation; + coal->rx_coalesce_usecs = rx_moder->usec; + coal->rx_max_coalesced_frames = rx_moder->pkts; + coal->use_adaptive_rx_coalesce = priv->nic_param.rx_dim_enabled; + + tx_moder = &priv->nic_param.tx_cq_moderation; + coal->tx_coalesce_usecs = tx_moder->usec; + coal->tx_max_coalesced_frames = tx_moder->pkts; + coal->use_adaptive_tx_coalesce = priv->nic_param.tx_dim_enabled; + coal->rx_coalesce_usecs_low = priv->nic_param.rx_dim_usecs_low; + coal->rx_max_coalesced_frames_low = priv->nic_param.rx_dim_frames_low; + + kernel_coal->use_cqe_mode_rx = + XSC_GET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_CQE_BASED_MODER); + kernel_coal->use_cqe_mode_tx = + XSC_GET_PFLAG(&priv->nic_param, XSC_PFLAG_TX_CQE_BASED_MODER); + + return 0; +} + +static int xsc_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + xsc_dim_cq_moder_t *rx_moder, *tx_moder; + struct xsc_eth_params new_params = {}; + int err = 0; + bool reset_rx, reset_tx; + u8 mode; + + if (coal->tx_coalesce_usecs > XSC_MAX_COAL_TIME || + coal->rx_coalesce_usecs > XSC_MAX_COAL_TIME || + coal->rx_coalesce_usecs_low > XSC_MAX_COAL_TIME) { + netdev_info(priv->netdev, "%s: maximum coalesce time supported is %u usecs\n", + __func__, XSC_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > XSC_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames > XSC_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames_low > XSC_MAX_COAL_FRAMES) { + netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %u\n", + __func__, XSC_MAX_COAL_FRAMES); + 
return -ERANGE; + } + + mutex_lock(&priv->state_lock); + new_params = priv->nic_param; + + rx_moder = &new_params.rx_cq_moderation; + rx_moder->usec = coal->rx_coalesce_usecs; + rx_moder->pkts = coal->rx_max_coalesced_frames; + new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + new_params.rx_dim_usecs_low = coal->rx_coalesce_usecs_low; + new_params.rx_dim_frames_low = coal->rx_max_coalesced_frames_low; + + tx_moder = &new_params.tx_cq_moderation; + tx_moder->usec = coal->tx_coalesce_usecs; + tx_moder->pkts = coal->tx_max_coalesced_frames; + new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + + if (priv->status != XSCALE_ETH_DRIVER_OK) { + priv->nic_param = new_params; + goto out; + } + + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->nic_param.rx_dim_enabled; + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->nic_param.tx_dim_enabled; + + if (rx_moder->cq_period_mode != kernel_coal->use_cqe_mode_rx) { + rx_moder->cq_period_mode = kernel_coal->use_cqe_mode_rx; + XSC_SET_PFLAG(&new_params, XSC_PFLAG_RX_CQE_BASED_MODER, + rx_moder->cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); + reset_rx = true; + } + if (tx_moder->cq_period_mode != kernel_coal->use_cqe_mode_tx) { + tx_moder->cq_period_mode = kernel_coal->use_cqe_mode_tx; + XSC_SET_PFLAG(&new_params, XSC_PFLAG_TX_CQE_BASED_MODER, + tx_moder->cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); + reset_tx = true; + } + + if (reset_rx) { + mode = XSC_GET_PFLAG(&new_params, XSC_PFLAG_RX_CQE_BASED_MODER); + + xsc_set_rx_cq_mode_params(&new_params, mode); + } + if (reset_tx) { + mode = XSC_GET_PFLAG(&new_params, XSC_PFLAG_TX_CQE_BASED_MODER); + + xsc_set_tx_cq_mode_params(&new_params, mode); + } + + priv->nic_param = new_params; + if (!reset_rx && !reset_tx) + goto out; + + err = xsc_safe_switch_channels(priv, NULL, NULL); + +out: + mutex_unlock(&priv->state_lock); + return err; +} + +static const struct ethtool_ops xsc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_LOW_HIGH | + ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_drvinfo = xsc_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = xsc_get_strings, + .get_sset_count = xsc_get_sset_count, + .get_ethtool_stats = xsc_get_ethtool_stats, + .get_ringparam = xsc_get_ringparam, + .set_ringparam = xsc_set_ringparam, + .set_channels = xsc_set_channels, + .get_channels = xsc_get_channels, + .get_coalesce = xsc_get_coalesce, + .set_coalesce = xsc_set_coalesce, + .get_ts_info = NULL, + .get_link_ksettings = xsc_get_link_ksettings, + .set_link_ksettings = xsc_set_link_ksettings, + .get_rxfh_key_size = xsc_get_rxfh_key_size, + .get_rxfh_indir_size = xsc_get_rxfh_indir_size, + .get_rxfh = xsc_get_rxfh, + .set_rxfh = xsc_set_rxfh, + .get_rxnfc = xsc_get_rxnfc, + .set_rxnfc = xsc_set_rxnfc, + .get_module_info = xsc_get_module_info, + .get_module_eeprom = xsc_get_module_eeprom, + .get_module_eeprom_by_page = xsc_get_module_eeprom_by_page, + .get_priv_flags = xsc_get_priv_flags, + .set_priv_flags = xsc_set_priv_flags, + .get_msglevel = xsc_get_msglevel, + .set_msglevel = xsc_set_msglevel, + .self_test = xsc_self_test, + .set_phys_id = xsc_set_phys_id, + .get_fecparam = xsc_get_fecparam, + .set_fecparam = xsc_set_fecparam, +}; + +void eth_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &xsc_ethtool_ops; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h 
b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..eb2eb3491c148560ef9f108b6099b73a91f1f5b9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_ETHTOOL_H +#define XSC_ETH_ETHTOOL_H + +void eth_set_ethtool_ops(struct net_device *dev); + +/* EEPROM Standards for plug in modules */ +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif + +#define LED_ACT_ON_HW 0xff + +#endif /* XSC_ETH_ETHTOOL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..547556aa536b99c37d2d2af13b746a1969d09045 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c @@ -0,0 +1,804 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include +#include "common/xsc_pp.h" + +#define PAGE_REF_ELEV (U16_MAX) +/* Upper bound on number of packets that share a single page */ +#define PAGE_REF_THRSD (PAGE_SIZE / 64) + +static inline void xsc_rq_notify_hw(struct xsc_rq *rq) +{ + struct xsc_core_device *xdev = rq->cq.xdev; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + union xsc_recv_doorbell doorbell_value; + u64 rqwqe_id = wq->wqe_ctr << (ilog2(xdev->caps.recv_ds_num)); + + ETH_DEBUG_LOG("rq%d_db_val=0x%x, recv_ds=%d\n", + rq->rqn, doorbell_value.recv_data, + xdev->caps.recv_ds_num); + /*reverse wqe index to ds index*/ + doorbell_value.next_pid = rqwqe_id; + doorbell_value.qp_num = rq->rqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + writel(doorbell_value.recv_data, REG_ADDR(xdev, xdev->regs.rx_db)); +} + +static inline void xsc_skb_set_hash(struct xsc_adapter *adapter, + struct xsc_cqe *cqe, + struct sk_buff *skb) +{ + struct xsc_rss_params *rss = &adapter->rss_params; + u32 hash_field; + bool l3_hash = false; + bool l4_hash = false; + int ht = 0; + + if (adapter->netdev->features & NETIF_F_RXHASH) { + if (skb->protocol == htons(ETH_P_IP)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP || + hash_field & XSC_HASH_FIELD_SEL_DST_IP) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT || + hash_field & XSC_HASH_FIELD_SEL_DPORT) + l4_hash = true; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV6_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6 || + hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6 || + hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + l4_hash = true; + } + + if (l3_hash && l4_hash) + ht = PKT_HASH_TYPE_L4; + else if (l3_hash) + ht = PKT_HASH_TYPE_L3; + if (ht) + skb_set_hash(skb, be32_to_cpu(cqe->vni), ht); + } +} + +static inline unsigned short from32to16(unsigned int x) +{ + /* add up 16-bit and 16-bit for 16+c bit */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. 
*/ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static inline bool handle_udp_frag_csum(struct sk_buff *skb, struct epp_pph *pph) +{ +#ifdef XSC_UDP_FRAG_CSUM + char *head = (char *)pph; + struct iphdr *iph; + u8 l3_proto = PPH_OUTER_IP_TYPE(head); + u8 l4_proto = PPH_OUTER_TP_TYPE(head); + u16 csum_off = (u16)PPH_CSUM_OFST(head); + u16 csum_plen = (u16)PPH_CSUM_PLEN(head); + u8 payload_off = PPH_PAYLOAD_OFST(head); + u32 hw_csum = PPH_CSUM_VAL(head); + u16 udp_check = 0; + u16 udp_len = 0; + u32 off = 64; + __wsum csum1, csum2, csum3, csum; + +#ifdef CUM_SKB_DATA + head = (char *)skb->data; + off = 0; +#endif + + if (l4_proto != L4_PROTO_UDP && l4_proto != L4_PROTO_NONE) + return false; + + off += ETH_HLEN; + if (l3_proto == L3_PROTO_IP) { + iph = (struct iphdr *)(head + off); + if (!ip_is_fragment(iph)) + return false; + +#ifdef UDP_CSUM_DEBUG + netdev_dbg("ip_id=%d frag_off=0x%x l4_prt=%d l3_prt=%d iph_off=%d ip_len=%d csum_off=%d pload_off=%d\n", + ntohs(iph->id), ntohs(iph->frag_off), + l4_proto, l3_proto, PPH_OUTER_IP_OFST(head), PPH_OUTER_IP_LEN(pph), + csum_off, payload_off); +#endif + + off += iph->ihl * 4; + if (l4_proto == L4_PROTO_UDP) { + struct udphdr *uh = (struct udphdr *)(head + off); + + udp_check = uh->check; + udp_len = ntohs(uh->len); + } + + if (csum_off == 0) + csum_off = 256; + + netdev_dbg("%s: ip_id=%d frag_off=0x%x skb_len=%d data_len=%d csum_off=%d csum_plen=%d payload_off=%d udp_off=%d udp_len=%d udp_check=0x%x\n", + __func__, ntohs(iph->id), ntohs(iph->frag_off), + skb->len, skb->data_len, + csum_off, csum_plen, payload_off, off, udp_len, udp_check); +#ifdef CUM_RAW_DATA_DUMP + xsc_pkt_pph_dump((char *)head, 272); +#endif + + if (csum_off < off) { + csum1 = csum_partial((char *)(head + csum_off), (off - csum_off), 0); + csum2 = htons(from32to16(hw_csum)); + csum = csum_sub(csum2, csum1); + } else if (csum_off > off) { + csum2 = csum_partial((char *)(head + csum_off), csum_plen, 0); + csum1 = csum_partial((char *)(head + off), (csum_off - off), 0); + csum = htons(from32to16(hw_csum)); + csum = csum_partial((char *)(head + off), (csum_off - off), csum); + csum3 = csum_partial((char *)(head + off), (skb->len - off + 64), 0); + } else { + csum = htons(from32to16(hw_csum)); + } + skb->csum = csum_unfold(from32to16(csum)); + + ETH_DEBUG_LOG("%s: sw_cal_csum[%d:%d]=0x%x -> 0x%x\n", + __func__, off, csum_off, csum1, from32to16(csum1)); + ETH_DEBUG_LOG("%s: sw_cal_hw_csum[%d:%d]=0x%x -> 0x%x, hw_csum=0x%x -> 0x%x\n", + __func__, csum_off, csum_plen, csum2, from32to16(csum2), + hw_csum, from32to16(hw_csum)); + ETH_DEBUG_LOG("%s: sw_cal_tot_csum[%d:%d]=0x%x -> 0x%x, skb_csum=0x%x -> 0x%x\n", + __func__, off, skb->len, csum3, from32to16(csum3), csum, skb->csum); + + skb->ip_summed = CHECKSUM_COMPLETE; + + return true; + } +#endif + + return false; +} + +static inline void xsc_handle_csum(struct xsc_cqe *cqe, struct xsc_rq *rq, + struct sk_buff *skb, struct xsc_wqe_frag_info *wi) +{ + struct xsc_rq_stats *stats = rq->stats; + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->adapter->netdev; + struct xsc_dma_info *dma_info = wi->di; + int offset_from = wi->offset; + struct epp_pph *hw_pph = page_address(dma_info->page) + offset_from; + + if (unlikely((netdev->features & NETIF_F_RXCSUM) == 0)) + goto csum_none; + + if (unlikely(XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(hw_pph) & PACKET_UNKNOWN)) + goto csum_none; + + if (handle_udp_frag_csum(skb, hw_pph)) { + stats->csum_succ++; + goto out; + } + + if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + 
(!(cqe->csum_err & OUTER_AND_INNER))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + skb->encapsulation = 1; + + stats->csum_unnecessary++; + } else if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(cqe->csum_err & OUTER_BIT) && (cqe->csum_err & INNER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + skb->encapsulation = 1; + + stats->csum_unnecessary++; + } else if (!XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(cqe->csum_err & OUTER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + + stats->csum_unnecessary++; + } else { + stats->csum_err++; + } + + goto out; + +csum_none: + skb->csum = 0; + skb->ip_summed = CHECKSUM_NONE; + stats->csum_none++; +out: + return; +} + +static inline void xsc_build_rx_skb(struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->netdev; + struct xsc_adapter *adapter = c->adapter; + + skb->mac_len = ETH_HLEN; + + skb_record_rx_queue(skb, rq->ix); + xsc_handle_csum(cqe, rq, skb, wi); + + skb->protocol = eth_type_trans(skb, netdev); + xsc_skb_set_hash(adapter, cqe, skb); +} + +static inline void xsc_complete_rx_cqe(struct xsc_rq *rq, + struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct xsc_rq_stats *stats = rq->stats; + + stats->packets++; + stats->bytes += cqe_bcnt; + xsc_build_rx_skb(cqe, cqe_bcnt, rq, skb, wi); + + rq->dim_obj.sample.pkt_ctr = rq->stats->packets; + rq->dim_obj.sample.byte_ctr = rq->stats->bytes; +} + +static inline void xsc_add_skb_frag(struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_dma_info *di, + u32 frag_offset, u32 len, + unsigned int truesize) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_sync_single_for_cpu(dev, di->addr + frag_offset, len, DMA_FROM_DEVICE); + page_ref_inc(di->page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + di->page, frag_offset, len, truesize); +} + +static inline void xsc_copy_skb_header(struct device *dev, + struct sk_buff *skb, + struct xsc_dma_info *dma_info, + int offset_from, u32 headlen) +{ + void *from = page_address(dma_info->page) + offset_from; + /* Aligning len to sizeof(long) optimizes memcpy performance */ + unsigned int len = ALIGN(headlen, sizeof(long)); + + dma_sync_single_for_cpu(dev, dma_info->addr + offset_from, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, from, len); +} + +static inline struct sk_buff *xsc_build_linear_skb(struct xsc_rq *rq, void *va, + u32 frag_size, u16 headroom, + u32 cqe_bcnt) +{ + struct sk_buff *skb = build_skb(va, frag_size); + + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + skb_reserve(skb, headroom); + skb_put(skb, cqe_bcnt); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_dma_info *di = wi->di; + u16 rx_headroom = rq->buff.headroom; + int pph_len = has_pph ? 
XSC_PPH_HEAD_LEN : 0; + struct sk_buff *skb; + void *va, *data; + u32 frag_size; + + va = page_address(di->page) + wi->offset; + data = va + rx_headroom + pph_len; + frag_size = XSC_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + + dma_sync_single_range_for_cpu(rq->cq.xdev->device, di->addr, wi->offset, + frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ + prefetch(data); + + skb = xsc_build_linear_skb(rq, va, frag_size, (rx_headroom + pph_len), + (cqe_bcnt - pph_len)); + if (unlikely(!skb)) + return NULL; + + /* queue up for recycling/reuse */ + page_ref_inc(di->page); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct xsc_wqe_frag_info *head_wi = wi; + struct xsc_wqe_frag_info *rx_wi = wi; + u16 headlen = min_t(u32, XSC_RX_MAX_HEAD, cqe_bcnt); + u16 frag_headlen = headlen; + u16 byte_cnt = cqe_bcnt - headlen; + struct sk_buff *skb; + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + struct net_device *netdev = c->adapter->netdev; + u8 fragcnt = 0; + u16 head_offset = head_wi->offset; + u16 frag_consumed_bytes = 0; + int i = 0; + +#ifndef NEED_CREATE_RX_THREAD + skb = napi_alloc_skb(rq->cq.napi, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#else + skb = netdev_alloc_skb(netdev, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#endif + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); + + if (likely(has_pph)) { + headlen = min_t(u32, XSC_RX_MAX_HEAD, (cqe_bcnt - XSC_PPH_HEAD_LEN)); + frag_headlen = headlen + XSC_PPH_HEAD_LEN; + byte_cnt = cqe_bcnt - headlen - XSC_PPH_HEAD_LEN; + head_offset += XSC_PPH_HEAD_LEN; + } + + if (byte_cnt == 0 && (XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_RX_COPY_BREAK))) { + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + wi->is_available = 1; + goto ret; + } + + for (i = 0; i < rq->wqe.info.num_frags; i++, rx_wi++) + rx_wi->is_available = 0; + + while (byte_cnt) { + /*figure out whether the first fragment can be a page ?*/ + frag_consumed_bytes = + min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); + + xsc_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, + frag_consumed_bytes, frag_info->frag_stride); + byte_cnt -= frag_consumed_bytes; + ETH_DEBUG_LOG("consumed=%d, frag_size=%d, byte_cnt=%d, cqe_bcnt=%d, addr=0x%llx\n", + frag_consumed_bytes, frag_info->frag_size, byte_cnt, + cqe_bcnt, (u64)wi->di->addr); + + /*to protect extend wqe read, drop exceed bytes*/ + frag_headlen = 0; + fragcnt++; + if (fragcnt == rq->wqe.info.num_frags) { + if (byte_cnt) { + rq->stats->oversize_pkts_sw_drop += byte_cnt; + netdev_warn(netdev, + "large packet reach the maximum rev-wqe num.\n"); + netdev_warn(netdev, + "%u bytes dropped: frag_num=%d, headlen=%d, cqe_cnt=%d, frag0_bytes=%d, frag_size=%d\n", + byte_cnt, fragcnt, headlen, cqe_bcnt, + frag_consumed_bytes, frag_info->frag_size); + } + break; + } + + frag_info++; + wi++; + } + +ret: + /* copy header */ + xsc_copy_skb_header(dev, skb, head_wi->di, head_offset, headlen); + + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + skbdata_debug_dump(skb, headlen, 0); + + return skb; +} + +static inline bool xsc_rx_cache_is_empty(struct xsc_page_cache *cache) +{ + return cache->head == cache->tail; +} + +static inline bool xsc_page_is_reserved(struct page *page) +{ + return page_is_pfmemalloc(page) 
|| page_to_nid(page) != numa_mem_id(); +} + +static inline bool xsc_rx_cache_get(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_page_cache *cache = &rq->page_cache; + struct xsc_rq_stats *stats = rq->stats; + struct xsc_core_device *xdev = rq->cq.xdev; + + if (unlikely(xsc_rx_cache_is_empty(cache))) { + stats->cache_empty++; + return false; + } + + if (page_ref_count(cache->page_cache[cache->head].page) != 1) { + stats->cache_busy++; + return false; + } + + stats->cache_reuse++; + *dma_info = cache->page_cache[cache->head]; + cache->head = (cache->head + 1) & (cache->sz - 1); + + dma_sync_single_for_device(&xdev->pdev->dev, dma_info->addr, + PAGE_SIZE, DMA_FROM_DEVICE); + + return true; +} + +static inline bool xsc_rx_cache_put(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_page_cache *cache = &rq->page_cache; + struct xsc_rq_stats *stats = rq->stats; + u32 tail_next = (cache->tail + 1) & (cache->sz - 1); + + if (tail_next == cache->head) { + stats->cache_full++; + return false; + } + + if (unlikely(xsc_page_is_reserved(dma_info->page))) { + stats->cache_waive++; + return false; + } + + cache->page_cache[cache->tail] = *dma_info; + cache->tail = tail_next; + return true; +} + +void xsc_page_dma_unmap(struct xsc_rq *rq, struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_unmap_page(dev, dma_info->addr, XSC_RX_FRAG_SZ, rq->buff.map_dir); +} + +static inline void xsc_put_page(struct xsc_dma_info *dma_info) +{ + put_page(dma_info->page); +} + +void xsc_page_release_dynamic(struct xsc_rq *rq, + struct xsc_dma_info *dma_info, bool recycle) +{ + if (likely(recycle)) { +#ifdef XSC_PAGE_CACHE + if (xsc_rx_cache_put(rq, dma_info)) + return; +#endif + + xsc_page_dma_unmap(rq, dma_info); + page_pool_recycle_direct(rq->page_pool, dma_info->page); + } else { + xsc_page_dma_unmap(rq, dma_info); + page_pool_put_defragged_page(rq->page_pool, + dma_info->page, + -1, true); + } +} + +static inline void xsc_put_rx_frag(struct xsc_rq *rq, + struct xsc_wqe_frag_info *frag, bool recycle) +{ + if (frag->last_in_page) + xsc_page_release_dynamic(rq, frag->di, recycle); +} + +static inline struct xsc_wqe_frag_info *get_frag(struct xsc_rq *rq, u16 ix) +{ + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; +} + +static inline void xsc_free_rx_wqe(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, bool recycle) +{ + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) { + if (wi->is_available && recycle) + continue; + xsc_put_rx_frag(rq, wi, recycle); + } +} + +static void xsc_dump_error_rqcqe(struct xsc_rq *rq, + struct xsc_cqe *cqe) +{ + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->adapter->netdev; + u32 ci = xsc_cqwq_get_ci(&rq->cq.wq); + + net_err_ratelimited("Error cqe on dev=%s, cqn=%d, ci=%d, rqn=%d, qpn=%d, error_code=0x%x\n", + netdev->name, rq->cq.xcq.cqn, ci, + rq->rqn, cqe->qp_id, get_cqe_opcode(cqe)); +} + +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + struct xsc_channel *c = rq->cq.channel; + u8 cqe_opcode = get_cqe_opcode(cqe); + struct xsc_wqe_frag_info *wi; + struct sk_buff *skb; + u32 cqe_bcnt; + u16 ci; + + ci = xsc_wq_cyc_ctr2ix(wq, cqwq->cc); + wi = get_frag(rq, ci); + if (unlikely(cqe_opcode & BIT(7))) { + xsc_dump_error_rqcqe(rq, cqe); + rq->stats->cqe_err++; + goto free_wqe; + } + + cqe_bcnt = le32_to_cpu(cqe->msg_len); + if (cqe->has_pph && cqe_bcnt 
<= XSC_PPH_HEAD_LEN) { + rq->stats->wqe_err++; + goto free_wqe; + } + + if (unlikely(cqe_bcnt > rq->frags_sz)) { + if (!XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_DROPLESS_RQ)) { + rq->stats->oversize_pkts_sw_drop += cqe_bcnt; + goto free_wqe; + } else { + rq->stats->oversize_pkts_err++; + } + } + + cqe_bcnt = min_t(u32, cqe_bcnt, rq->frags_sz); + skb = rq->wqe.skb_from_cqe(rq, wi, cqe_bcnt, cqe->has_pph); + if (!skb) + goto free_wqe; + + xsc_complete_rx_cqe(rq, cqe, + cqe->has_pph == 1 ? cqe_bcnt - XSC_PPH_HEAD_LEN : cqe_bcnt, + skb, wi); + +#ifdef NEED_CREATE_RX_THREAD + netif_rx_ni(skb); +#else + napi_gro_receive(rq->cq.napi, skb); +#endif + +free_wqe: + xsc_free_rx_wqe(rq, wi, true); + xsc_wq_cyc_pop(wq); +} + +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget) +{ + struct xsc_rq *rq = container_of(cq, struct xsc_rq, cq); + struct xsc_cqwq *cqwq = &cq->wq; + struct xsc_cqe *cqe; + int work_done = 0; + struct xsc_ch_stats *ch_stats = cq->channel->stats; + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + return 0; + + while ((work_done < budget) && (cqe = xsc_cqwq_get_cqe(cqwq))) { + rq->stats->cqes++; + + rq->handle_rx_cqe(cqwq, rq, cqe); + ++work_done; + + xsc_cqwq_pop(cqwq); + } + + if (!work_done) + goto out; + + xsc_cq_notify_hw(cq); + /* ensure cq space is freed before enabling more cqes */ + wmb(); + +out: + ch_stats->poll += work_done; + if (work_done < budget) { + if (ch_stats->poll == 0) + ch_stats->poll_0++; + else if (ch_stats->poll < 64) + ch_stats->poll_1_63++; + else if (ch_stats->poll < 512) + ch_stats->poll_64_511++; + else if (ch_stats->poll < 1024) + ch_stats->poll_512_1023++; + else if (ch_stats->poll >= 1024) + cq->channel->stats->poll_1024++; + } + + return work_done; +} + +static inline int xsc_page_alloc_mapped(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + +#ifdef XSC_PAGE_CACHE + if (xsc_rx_cache_get(rq, dma_info)) + return 0; + + rq->stats->cache_alloc++; +#endif + + dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); + if (unlikely(!dma_info->page)) + return -ENOMEM; + + dma_info->addr = dma_map_page(dev, dma_info->page, 0, + XSC_RX_FRAG_SZ, rq->buff.map_dir); + if (unlikely(dma_mapping_error(dev, dma_info->addr))) { + page_pool_recycle_direct(rq->page_pool, dma_info->page); + dma_info->page = NULL; + return -ENOMEM; + } + + return 0; +} + +static inline int xsc_get_rx_frag(struct xsc_rq *rq, + struct xsc_wqe_frag_info *frag) +{ + int err = 0; + + if (!frag->offset && !frag->is_available) + /* On first frag (offset == 0), replenish page (dma_info actually). + * Other frags that point to the same dma_info (with a different + * offset) should just use the new one without replenishing again + * by themselves. 
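+ * A frag already flagged is_available kept its page from a previous
+ * copy-break completion (see xsc_skb_from_cqe_nonlinear), so it is
+ * reused here without allocating a new page.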
+ */ + err = xsc_page_alloc_mapped(rq, frag->di); + + return err; +} + +static int xsc_alloc_rx_wqe(struct xsc_rq *rq, struct xsc_eth_rx_wqe_cyc *wqe, u16 ix) +{ + struct xsc_wqe_frag_info *frag = get_frag(rq, ix); + u64 addr; + int i; + int err; + + for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { + err = xsc_get_rx_frag(rq, frag); + if (unlikely(err)) + goto free_frags; + + addr = cpu_to_le64(frag->di->addr + frag->offset + rq->buff.headroom); + wqe->data[i].va = addr; + if (frag->offset == 0) + ETH_DEBUG_LOG("rq%d_wqe%d_frag%d off=%d last=%d refcnt=%d addr=0x%llx\n", + rq->rqn, ix, i, frag->offset, frag->last_in_page, + page_ref_count(frag->di->page), addr); + } + + return 0; + +free_frags: + while (--i >= 0) + xsc_put_rx_frag(rq, --frag, true); + + return err; +} + +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix) +{ + struct xsc_wqe_frag_info *wi = get_frag(rq, ix); + + xsc_free_rx_wqe(rq, wi, false); +} + +static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + struct xsc_eth_rx_wqe_cyc *wqe; + int err; + int i; + int idx; + + for (i = 0; i < wqe_bulk; i++) { + idx = xsc_wq_cyc_ctr2ix(wq, (ix + i)); + wqe = xsc_wq_cyc_get_wqe(wq, idx); + + err = xsc_alloc_rx_wqe(rq, wqe, idx); + if (unlikely(err)) { + rq->stats->buff_alloc_err++; + goto free_wqes; + } + } + + return 0; + +free_wqes: + while (--i >= 0) + xsc_eth_dealloc_rx_wqe(rq, ix + i); + + return err; +} + +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + u8 wqe_bulk, wqe_bulk_min; + int alloc; + u16 head; + int err; + + wqe_bulk = rq->wqe.info.wqe_bulk; + wqe_bulk_min = rq->wqe.info.wqe_bulk_min; + if (xsc_wq_cyc_missing(wq) < wqe_bulk) + return false; + + do { + head = xsc_wq_cyc_get_head(wq); + + alloc = min_t(int, wqe_bulk, xsc_wq_cyc_missing(wq)); + if (alloc < wqe_bulk && alloc >= wqe_bulk_min) + alloc = alloc & 0xfffffffe; + + if (alloc > 0) { + err = xsc_alloc_rx_wqes(rq, head, alloc); + if (unlikely(err)) + break; + + xsc_wq_cyc_push_n(wq, alloc); + rq->stats->wqes += alloc; + } + } while (xsc_wq_cyc_missing(wq) >= wqe_bulk_min); + + dma_wmb(); + + /* ensure wqes are visible to device before updating doorbell record */ + xsc_rq_notify_hw(rq); + + return !!err; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..8b75ce05afb132ff7a9080ec72a23e950af3d3fc --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "common/xsc_cmd.h" +#include "common/xsc_core.h" + +#include "xsc_eth_stats.h" +#include "xsc_eth.h" + +static const struct counter_desc sw_stats_desc[] = { + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_inner_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_inner_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_unnecessary) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_none) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_succ) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_csum_partial) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_csum_partial_inner) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_stopped) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_dropped) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_xmit_more) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_cqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_wake) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_cqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_oversize_pkts_sw_drop) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_dim_us) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_dim_pkts) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, txdone_skb_null) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, txdone_skb_refcnt_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_dim_us) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_dim_pkts) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_sw_drop) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_buff_alloc_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_reuse) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_full) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_empty) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_busy) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_alloc) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_waive) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_ext) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_rdc) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_events) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_0) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1_63) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_64_511) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_512_1023) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1024) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_tx) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_arm) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_noarm) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_aff_change) }, +}; + +#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) + +static int xsc_grp_sw_get_num_stats(struct xsc_adapter *adapter) +{ + return NUM_SW_COUNTERS; +} + +static int 
xsc_grp_sw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + sw_stats_desc[i].format, + ETH_GSTRING_LEN); + return idx; +} + +static int xsc_grp_sw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + data[idx++] = XSC_READ_CTR64_CPU(&adapter->stats->sw, sw_stats_desc, i); + return idx; +} + +void xsc_grp_sw_update_stats(struct xsc_adapter *adapter) +{ + struct xsc_sw_stats *s = &adapter->stats->sw; + int max_tc = xsc_get_netdev_max_tc(adapter); + int i; + + memset(s, 0, sizeof(*s)); + + for (i = 0; i < xsc_get_netdev_max_channels(adapter); i++) { + struct xsc_channel_stats *channel_stats = + &adapter->stats->channel_stats[i]; + + struct xsc_rq_stats *rq_stats = &channel_stats->rq; + struct xsc_ch_stats *ch_stats = &channel_stats->ch; + int j; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; + s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_err += rq_stats->csum_err; + s->rx_csum_succ += rq_stats->csum_succ; + s->rx_cqes += rq_stats->cqes; + s->rx_cqe_err += rq_stats->cqe_err; + s->rx_wqes += rq_stats->wqes; + s->rx_wqe_err += rq_stats->wqe_err; + s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; + s->rx_oversize_pkts_err += rq_stats->oversize_pkts_err; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cache_reuse += rq_stats->cache_reuse; + s->rx_cache_full += rq_stats->cache_full; + s->rx_cache_empty += rq_stats->cache_empty; + s->rx_cache_busy += rq_stats->cache_busy; + s->rx_cache_alloc += rq_stats->cache_alloc; + s->rx_cache_waive += rq_stats->cache_waive; + s->rx_cache_ext += rq_stats->cache_ext; + s->rx_cache_rdc += rq_stats->cache_rdc; + s->rx_dim_us += rq_stats->dim_us; + s->rx_dim_pkts += rq_stats->dim_pkts; + + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_poll_0 += ch_stats->poll_0; + s->ch_poll_1_63 += ch_stats->poll_1_63; + s->ch_poll_64_511 += ch_stats->poll_64_511; + s->ch_poll_512_1023 += ch_stats->poll_512_1023; + s->ch_poll_1024 += ch_stats->poll_1024; + s->ch_poll_tx += ch_stats->poll_tx; + s->ch_arm += ch_stats->arm; + s->ch_noarm += ch_stats->noarm; + s->ch_aff_change += ch_stats->aff_change; + + for (j = 0; j < max_tc; j++) { + struct xsc_sq_stats *sq_stats = &channel_stats->sq[j]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_csum_partial += sq_stats->csum_partial; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + s->tx_csum_none += sq_stats->csum_none; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_dropped += sq_stats->dropped; + s->tx_xmit_more += sq_stats->xmit_more; + s->tx_cqes += sq_stats->cqes; + s->tx_queue_wake += sq_stats->wake; + s->tx_cqe_err += sq_stats->cqe_err; + s->tx_oversize_pkts_sw_drop += sq_stats->oversize_pkts_sw_drop; + s->txdone_skb_null += sq_stats->txdone_skb_null; + s->txdone_skb_refcnt_err += sq_stats->txdone_skb_refcnt_err; + s->skb_linear += sq_stats->skb_linear; + s->tx_dim_us += sq_stats->dim_us; + s->tx_dim_pkts += sq_stats->dim_pkts; + } + } +} + +static const struct counter_desc rq_stats_desc[] = { + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, packets) }, + { 
XSC_DECLARE_RX_STAT(struct xsc_rq_stats, bytes) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_unnecessary) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_none) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_succ) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cqes) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, dim_us) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, dim_pkts) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, wqe_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_sw_drop) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, buff_alloc_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_reuse) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_full) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_empty) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_busy) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_alloc) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_waive) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_ext) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_rdc) }, +}; + +static const struct counter_desc sq_stats_desc[] = { + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_inner_packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_inner_bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_partial) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_partial_inner) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_none) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, stopped) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dropped) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, xmit_more) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, cqes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, wake) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dim_us) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dim_pkts) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, cqe_err) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, oversize_pkts_sw_drop) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, txdone_skb_null) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, txdone_skb_refcnt_err) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, skb_linear) }, +}; + +static const struct counter_desc ch_stats_desc[] = { + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, events) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_0) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1_63) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_64_511) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_512_1023) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1024) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_tx) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, arm) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, noarm) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, aff_change) }, +}; + +#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) +#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) +#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) + +static int xsc_grp_channels_get_num_stats(struct xsc_adapter *adapter) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); 
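+ /* one RQ and one CH counter set per channel, plus one SQ counter set
+ * per channel per traffic class
+ */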
+ + return (NUM_RQ_STATS * max_nch) + + (NUM_CH_STATS * max_nch) + + (NUM_SQ_STATS * max_nch * max_tc); +} + +static int xsc_grp_channels_fill_strings(struct xsc_adapter *adapter, u8 *data, + int idx) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); + int i, j, tc; + + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_CH_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ch_stats_desc[j].format, i); + + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_stats_desc[j].format, i); + } + + for (tc = 0; tc < max_tc; tc++) + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + sq_stats_desc[j].format, + i + tc * max_nch); + + return idx; +} + +static int xsc_grp_channels_fill_stats(struct xsc_adapter *adapter, u64 *data, + int idx) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); + int i, j, tc; + struct xsc_stats *stats = adapter->stats; + + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_CH_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].ch, + ch_stats_desc, j); + + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_RQ_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].rq, + rq_stats_desc, j); + } + + for (tc = 0; tc < max_tc; tc++) + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].sq[tc], + sq_stats_desc, j); + + return idx; +} + +static const struct counter_desc hw_prio_stats_desc[] = { + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 0), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 1), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 2), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 3), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 4), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 5), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 6), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 7), + 
XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 7), + +}; + +static const struct counter_desc hw_pfc_prio_stats_desc[] = { + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 0), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 1), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 2), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 3), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 4), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 5), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 6), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 7), +}; + +static const struct counter_desc hw_eth_stats_pf_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_pause) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_pause) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_fcs_errors) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_discards) }, + + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_multicast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_broadcast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_multicast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_broadcast_phy) }, + + /*by global*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, 
rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_loopback_bytes) }, +}; + +static const struct counter_desc hw_eth_stats_vf_desc[] = { + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_rx_bytes) }, +}; + +static const struct counter_desc pfc_stall_stats_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_pfc_stall_stats, tx_pause_storm_triggered) }, +}; + +static int get_hw_stats_eth(struct xsc_core_device *dev, struct xsc_hw_stats_eth *stats_eth) +{ + int ret; + struct xsc_hw_stats_mbox_in in; + struct xsc_hw_stats_eth_mbox_out out; + + memset(stats_eth, 0, sizeof(*stats_eth)); + + if (!dev) + return -1; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_STATS_ETH); + in.mac_port = dev->mac_port; + + ret = xsc_cmd_exec(dev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); + if (ret || out.hdr.status) + return -1; + + memcpy(stats_eth, &out.hw_stats, sizeof(*stats_eth)); + return 0; +} + +static int xsc_hw_get_num_stats(struct xsc_adapter *adapter) +{ + int ret = 0; + + if (is_support_hw_pf_stats(adapter->xdev)) { + ret = ARRAY_SIZE(hw_prio_stats_desc) + ARRAY_SIZE(hw_eth_stats_pf_desc) + + (is_support_pfc_prio_statistic(adapter->xdev) ? + ARRAY_SIZE(hw_pfc_prio_stats_desc) : 0) + + (is_support_pfc_stall_stats(adapter->xdev) ? + ARRAY_SIZE(pfc_stall_stats_desc) : 0); + } else { + ret = ARRAY_SIZE(hw_eth_stats_vf_desc); + } + + return ret; +} + +static int xsc_hw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) +{ + int i; + struct xsc_core_device *xdev; + + xdev = adapter->xdev; + + if (is_support_hw_pf_stats(xdev)) { + for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_prio_stats_desc[i].format, + ETH_GSTRING_LEN); + + if (is_support_pfc_prio_statistic(xdev)) + for (i = 0; i < ARRAY_SIZE(hw_pfc_prio_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_pfc_prio_stats_desc[i].format, + ETH_GSTRING_LEN); + + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_eth_stats_pf_desc[i].format, + ETH_GSTRING_LEN); + + if (is_support_pfc_stall_stats(xdev)) + for (i = 0; i < ARRAY_SIZE(pfc_stall_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + pfc_stall_stats_desc[i].format, + ETH_GSTRING_LEN); + } else { + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_eth_stats_vf_desc[i].format, + ETH_GSTRING_LEN); + } + + return idx; +} + +static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) +{ + struct xsc_prio_stats_mbox_in in; + struct xsc_prio_stats_mbox_out out; + struct xsc_pfc_prio_stats_mbox_in pfc_prio_in; + struct xsc_pfc_prio_stats_mbox_out pfc_prio_out; + struct xsc_pfc_stall_stats_mbox_in pfc_stall_in; + struct xsc_pfc_stall_stats_mbox_out pfc_stall_out; + struct xsc_core_device *xdev; + int ret; + u32 i; + u64 val; + u8 *stats; + struct xsc_hw_stats_eth stats_eth; + int ret_s; + + xdev = adapter->xdev; + ret_s = get_hw_stats_eth(xdev, &stats_eth); + + if (is_support_hw_pf_stats(xdev)) { + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_QUERY_PRIO_STATS); + in.pport = xdev->mac_port; + + ret = 
xsc_cmd_exec(adapter->xdev, (void *)&in, + sizeof(struct xsc_prio_stats_mbox_in), + (void *)&out, sizeof(struct xsc_prio_stats_mbox_out)); + if (ret == 0 && out.hdr.status == 0) { + for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) { + val = XSC_READ_CTR64_CPU(&out.prio_stats, + hw_prio_stats_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + + if (is_support_pfc_prio_statistic(xdev)) { + memset(&pfc_prio_in, 0, sizeof(pfc_prio_in)); + memset(&pfc_prio_out, 0, sizeof(pfc_prio_out)); + pfc_prio_in.hdr.opcode = + __cpu_to_be16(XSC_CMD_OP_QUERY_PFC_PRIO_STATS); + pfc_prio_in.pport = xdev->mac_port; + + ret = xsc_cmd_exec(adapter->xdev, (void *)&pfc_prio_in, + sizeof(struct xsc_pfc_prio_stats_mbox_in), + (void *)&pfc_prio_out, + sizeof(struct xsc_pfc_prio_stats_mbox_out)); + if (ret == 0 && pfc_prio_out.hdr.status == 0) { + for (i = 0; i < ARRAY_SIZE(hw_pfc_prio_stats_desc); i++) { + val = XSC_READ_CTR64_CPU(&pfc_prio_out.prio_stats, + hw_pfc_prio_stats_desc, + i); + data[idx++] = __be64_to_cpu(val); + } + } + } + + if (!ret_s && stats_eth.is_pf) { + stats = (u8 *)&stats_eth.stats.pf_stats; + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) { + val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_pf_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + + if (is_support_pfc_stall_stats(xdev)) { + memset(&pfc_stall_in, 0, sizeof(pfc_stall_in)); + memset(&pfc_stall_out, 0, sizeof(pfc_stall_out)); + pfc_stall_in.hdr.opcode = + __cpu_to_be16(XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS); + pfc_stall_in.mac_port = xdev->mac_port; + + ret = xsc_cmd_exec(adapter->xdev, + (void *)&pfc_stall_in, + sizeof(struct xsc_pfc_stall_stats_mbox_in), + (void *)&pfc_stall_out, + sizeof(struct xsc_pfc_stall_stats_mbox_out)); + if (ret == 0 && pfc_stall_out.hdr.status == 0) { + for (i = 0; i < ARRAY_SIZE(pfc_stall_stats_desc); i++) { + val = XSC_READ_CTR64_CPU(&pfc_stall_out.pfc_stall_stats, + pfc_stall_stats_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + } + } else { + if (!ret_s && !stats_eth.is_pf) { + stats = (u8 *)&stats_eth.stats.vf_stats; + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) { + val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_vf_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + } + + return idx; +} + +/* The stats groups order is opposite to the update_stats() order calls */ +const struct xsc_stats_grp xsc_stats_grps[] = { + { + .get_num_stats = xsc_grp_sw_get_num_stats, + .fill_strings = xsc_grp_sw_fill_strings, + .fill_stats = xsc_grp_sw_fill_stats, + .update_stats = xsc_grp_sw_update_stats, + }, + + { + .get_num_stats = xsc_grp_channels_get_num_stats, + .fill_strings = xsc_grp_channels_fill_strings, + .fill_stats = xsc_grp_channels_fill_stats, + }, + + { + .get_num_stats = xsc_hw_get_num_stats, + .fill_strings = xsc_hw_fill_strings, + .fill_stats = xsc_hw_fill_stats, + }, +}; + +const int xsc_num_stats_grps = ARRAY_SIZE(xsc_stats_grps); + +void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s) +{ + int i, j; + + for (i = 0; i < xsc_get_netdev_max_channels(adapter); i++) { + struct xsc_channel_stats *channel_stats = &adapter->stats->channel_stats[i]; + struct xsc_rq_stats *rq_stats = &channel_stats->rq; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + + for (j = 0; j < xsc_get_netdev_max_tc(adapter); j++) { + struct xsc_sq_stats *sq_stats = &channel_stats->sq[j]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_dropped += sq_stats->dropped; + } + } +} diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..069c5d8ad0dbbda55f1a56bbf768ae2738554290 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_EN_STATS_H +#define XSC_EN_STATS_H + +#include "xsc_eth_common.h" + +#define XSC_READ_CTR64_CPU(ptr, dsc, i) \ + (*(u64 *)((char *)(ptr) + (dsc)[i].offset)) + +#define ETH_GSTRING_LEN 32 + +#define XSC_DECLARE_STAT(type, fld) ""#fld, offsetof(type, fld) +#define XSC_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) +#define XSC_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) +#define XSC_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) + +#define XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio) (#fld "_prio"#prio) +#define XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio) \ + (offsetof(type, fld) + (sizeof(type) * (prio))) +#define XSC_DECLARE_HW_PRIO_STAT(type, fld, prio) \ + {XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio), \ + XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio)} + +struct xsc_rq_stats { + u64 packets; + u64 bytes; + u64 csum_unnecessary; + u64 csum_none; + u64 csum_err; + u64 csum_succ; + u64 cqes; + u64 cqe_err; + u64 wqes; + u64 wqe_err; + u64 oversize_pkts_sw_drop; + u64 oversize_pkts_err; + u64 buff_alloc_err; + u64 cache_reuse; + u64 cache_full; + u64 cache_empty; + u64 cache_busy; + u64 cache_alloc; + u64 cache_waive; + u64 cache_ext; + u64 cache_rdc; + u64 dim_us; + u64 dim_pkts; +}; + +struct xsc_sq_stats { + /* commonly accessed in data path */ + u64 packets; + u64 bytes; + u64 tso_packets; + u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 csum_partial; + u64 csum_partial_inner; + /* less likely accessed in data path */ + u64 csum_none; + u64 stopped; + u64 dropped; + u64 xmit_more; + /* dirtied @completion */ + u64 cqes; + u64 wake; + u64 cqe_err; + u64 oversize_pkts_sw_drop; + u64 txdone_skb_null; + u64 txdone_skb_refcnt_err; + u64 skb_linear; + u64 dim_us; + u64 dim_pkts; +}; + +struct xsc_ch_stats { + u64 events; + u64 poll; + u64 poll_0; + u64 poll_1_63; + u64 poll_64_511; + u64 poll_512_1023; + u64 poll_1024; + u64 poll_tx; + u64 arm; + u64 noarm; + u64 aff_change; +} ____cacheline_aligned_in_smp; + +struct xsc_adapter; +struct xsc_stats_grp { + u16 update_stats_mask; + int (*get_num_stats)(struct xsc_adapter *adapter); + int (*fill_strings)(struct xsc_adapter *adapter, u8 *data, int idx); + int (*fill_stats)(struct xsc_adapter *adapter, u64 *data, int idx); + void (*update_stats)(struct xsc_adapter *adapter); +}; + +struct counter_desc { + char format[ETH_GSTRING_LEN]; + size_t offset; /* Byte offset */ +}; + +struct xsc_sw_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tx_tso_packets; + u64 tx_tso_bytes; + u64 tx_tso_inner_packets; + u64 tx_tso_inner_bytes; + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_err; + u64 rx_csum_succ; + u64 tx_csum_none; + u64 tx_csum_partial; + u64 tx_csum_partial_inner; + u64 tx_queue_stopped; + u64 tx_queue_dropped; + u64 tx_xmit_more; + u64 tx_cqes; + u64 tx_queue_wake; + u64 tx_cqe_err; + u64 tx_oversize_pkts_sw_drop; + u64 tx_dim_us; + u64 tx_dim_pkts; + u64 txdone_skb_null; + u64 txdone_skb_refcnt_err; + u64 skb_linear; + u64 rx_cqes; + u64 rx_cqe_err; + u64 rx_wqes; + 
u64 rx_wqe_err; + u64 rx_oversize_pkts_sw_drop; + u64 rx_oversize_pkts_err; + u64 rx_buff_alloc_err; + u64 rx_cache_reuse; + u64 rx_cache_full; + u64 rx_cache_empty; + u64 rx_cache_busy; + u64 rx_cache_alloc; + u64 rx_cache_waive; + u64 rx_cache_ext; + u64 rx_cache_rdc; + u64 rx_dim_us; + u64 rx_dim_pkts; + u64 ch_events; + u64 ch_poll; + u64 ch_poll_0; + u64 ch_poll_1_63; + u64 ch_poll_64_511; + u64 ch_poll_512_1023; + u64 ch_poll_1024; + u64 ch_poll_tx; + u64 ch_arm; + u64 ch_noarm; + u64 ch_aff_change; +}; + +struct xsc_channel_stats { + struct xsc_ch_stats ch; + struct xsc_sq_stats sq[XSC_MAX_NUM_TC]; + struct xsc_rq_stats rq; +} ____cacheline_aligned_in_smp; + +struct xsc_stats { + struct xsc_sw_stats sw; + struct xsc_channel_stats channel_stats[XSC_ETH_MAX_NUM_CHANNELS]; +}; + +extern const struct xsc_stats_grp xsc_stats_grps[]; +extern const int xsc_num_stats_grps; + +void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s); + +#endif /* XSC_EN_STATS_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..8709b22c3b879e766f14de9fe2400b92637f3354 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" + +#include "xsc_eth.h" + +static void pcie_lat_hw_work(struct work_struct *work) +{ + int err; + struct delayed_work *dwork = to_delayed_work(work); + struct xsc_pcie_lat_work *pcie_lat = container_of(dwork, struct xsc_pcie_lat_work, work); + struct xsc_core_device *xdev = pcie_lat->xdev; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } + schedule_delayed_work_on(smp_processor_id(), dwork, + msecs_to_jiffies(pcie_lat->period * 1000)); +} + +static void pcie_lat_hw_init(struct xsc_core_device *xdev) +{ + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW_INIT); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } +} + +static ssize_t pcie_lat_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = 
__cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_EN); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%hhu\n", out.pcie_lat.pcie_lat_enable); +} + +static ssize_t pcie_lat_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *pcie_lat = adapter->xdev->pcie_lat; + int err; + u16 pcie_lat_enable; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + err = kstrtou16(buf, 0, &pcie_lat_enable); + if (err != 0) + return -EINVAL; + + if (pcie_lat_enable != XSC_PCIE_LAT_EN_DISABLE && + pcie_lat_enable != XSC_PCIE_LAT_EN_ENABLE) { + xsc_core_err(adapter->xdev, + "pcie_lat_enable should be set as %d or %d, cannot be %d\n", + XSC_PCIE_LAT_EN_DISABLE, XSC_PCIE_LAT_EN_ENABLE, + pcie_lat_enable); + return -EPERM; + } + + if (pcie_lat_enable == XSC_PCIE_LAT_EN_ENABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_DISABLE) { + pcie_lat_hw_init(adapter->xdev); + pcie_lat->adapter = adapter; + INIT_DELAYED_WORK(&pcie_lat->work, pcie_lat_hw_work); + schedule_delayed_work_on(smp_processor_id(), &pcie_lat->work, + msecs_to_jiffies(pcie_lat->period * 1000)); + } else if (pcie_lat_enable == XSC_PCIE_LAT_EN_DISABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_ENABLE) { + cancel_delayed_work_sync(&pcie_lat->work); + } + + pcie_lat->enable = pcie_lat_enable; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN); + in.pcie_lat.pcie_lat_enable = pcie_lat_enable; + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to set pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_enable); + +static ssize_t pcie_lat_interval_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err, i; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_INTERVAL); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat interval, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_PCIE_LAT_CFG_INTERVAL_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + return count; +} + +static DEVICE_ATTR_RO(pcie_lat_interval); + +static ssize_t 
pcie_lat_period_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + + return sprintf(buf, "%u\n", tmp->period); +} + +static ssize_t pcie_lat_period_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + int err; + u32 pcie_lat_period; + + err = kstrtouint(buf, 0, &pcie_lat_period); + if (err != 0) + return -EINVAL; + + if (pcie_lat_period < XSC_PCIE_LAT_PERIOD_MIN || + pcie_lat_period > XSC_PCIE_LAT_PERIOD_MAX) { + xsc_core_err(adapter->xdev, "pcie_lat_period should be set between [%d-%d], cannot be %d\n", + XSC_PCIE_LAT_PERIOD_MIN, XSC_PCIE_LAT_PERIOD_MAX, + pcie_lat_period); + return -EPERM; + } + + tmp->period = pcie_lat_period; + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_period); + +static ssize_t pcie_lat_histogram_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int i, err; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_HISTOGRAM); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to get pcie_lat histogram, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_PCIE_LAT_CFG_HISTOGRAM_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + return count; +} + +static DEVICE_ATTR_RO(pcie_lat_histogram); + +static ssize_t pcie_lat_peak_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_PEAK); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat peak, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_peak)); +} + +static DEVICE_ATTR_RO(pcie_lat_peak); + +static struct attribute *pcie_lat_attrs[] = { + &dev_attr_pcie_lat_enable.attr, + &dev_attr_pcie_lat_interval.attr, + &dev_attr_pcie_lat_period.attr, + &dev_attr_pcie_lat_histogram.attr, + &dev_attr_pcie_lat_peak.attr, + NULL, +}; + +static struct attribute_group pcie_lat_group = { + .name = "pcie_lat", + .attrs = pcie_lat_attrs, +}; + +static int xsc_pcie_lat_sysfs_init(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + struct xsc_pcie_lat_work *tmp; + + tmp = 
kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + xdev->pcie_lat = tmp; + tmp->xdev = xdev; + + tmp->enable = XSC_PCIE_LAT_EN_DISABLE; + tmp->period = XSC_PCIE_LAT_PERIOD_MIN; + + err = sysfs_create_group(&dev->dev.kobj, &pcie_lat_group); + if (err) + goto err_free; + + return 0; + +err_free: + /* the attribute group was never created, so only undo the allocation */ + xdev->pcie_lat = NULL; + kfree(tmp); + + return err; +} + +static void xsc_pcie_lat_sysfs_fini(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err; + struct xsc_pcie_lat_work *tmp; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + tmp = xdev->pcie_lat; + if (!tmp) + return; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN); + in.pcie_lat.pcie_lat_enable = XSC_PCIE_LAT_EN_DISABLE; + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) + xsc_core_err(xdev, "Failed to set pcie_lat disable, err(%u), status(%u)\n", + err, out.hdr.status); + + if (tmp->enable == XSC_PCIE_LAT_EN_ENABLE) + cancel_delayed_work_sync(&tmp->work); + + sysfs_remove_group(&dev->dev.kobj, &pcie_lat_group); + + kfree(tmp); + xdev->pcie_lat = NULL; +} + +int xsc_eth_sysfs_create(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + err = xsc_pcie_lat_sysfs_init(dev, xdev); + + return err; +} + +void xsc_eth_sysfs_remove(struct net_device *dev, struct xsc_core_device *xdev) +{ + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + xsc_pcie_lat_sysfs_fini(dev, xdev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c new file mode 100644 index 0000000000000000000000000000000000000000..8f5b4ecd9ed9de6e0d15e775c874f1feba8c245c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved.
+ */ + +#include +#include +#include "xsc_eth_stats.h" +#include "xsc_eth_common.h" +#include "common/xsc_hsi.h" +#include "common/qp.h" +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" + +#define XSC_OPCODE_RAW 0x7 + +static inline void *xsc_sq_fetch_wqe(struct xsc_sq *sq, size_t size, u16 *pi) +{ + struct xsc_wq_cyc *wq = &sq->wq; + void *wqe; + + /*caution, sp->pc is default to be zero*/ + *pi = xsc_wq_cyc_ctr2ix(wq, sq->pc); + wqe = xsc_wq_cyc_get_wqe(wq, *pi); + memset(wqe, 0, size); + + return wqe; +} + +u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) +{ + struct xsc_sq_stats *stats = sq->stats; + u16 ihs; + + if (skb->encapsulation) { + ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + stats->tso_inner_packets++; + stats->tso_inner_bytes += skb->len - ihs; + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + stats->tso_packets++; + stats->tso_bytes += skb->len - ihs; + } + + return ihs; +} + +void xsc_txwqe_build_cseg_csum(struct xsc_sq *sq, + struct sk_buff *skb, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + if (skb->encapsulation) { + cseg->csum_en = XSC_ETH_WQE_INNER_AND_OUTER_CSUM; + sq->stats->csum_partial_inner++; + } else { + cseg->csum_en = XSC_ETH_WQE_OUTER_CSUM; + sq->stats->csum_partial++; + } + } else { + cseg->csum_en = XSC_ETH_WQE_NONE_CSUM; + sq->stats->csum_none++; + } +} + +static inline struct xsc_sq_dma *xsc_dma_get(struct xsc_sq *sq, u32 i) +{ + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; +} + +static inline void xsc_dma_push(struct xsc_sq *sq, dma_addr_t addr, u32 size, + enum xsc_dma_map_type map_type) +{ + struct xsc_sq_dma *dma = xsc_dma_get(sq, sq->dma_fifo_pc++); + + dma->addr = addr; + dma->size = size; + dma->type = map_type; + ETH_DEBUG_LOG("dma = %p, dma->addr = %#llx\n", dma, dma->addr); +} + +static inline void xsc_tx_dma_unmap(struct device *dev, struct xsc_sq_dma *dma) +{ + switch (dma->type) { + case XSC_DMA_MAP_SINGLE: + dma_unmap_single(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + case XSC_DMA_MAP_PAGE: + dma_unmap_page(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + default: + ETH_DEBUG_LOG("%s\n", "xsc_tx_dma_unmap unknown DMA type!\n"); + } +} + +static void xsc_dma_unmap_wqe_err(struct xsc_sq *sq, u8 num_dma) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + + int i; + + for (i = 0; i < num_dma; i++) { + struct xsc_sq_dma *last_pushed_dma = xsc_dma_get(sq, --sq->dma_fifo_pc); + + xsc_tx_dma_unmap(dev, last_pushed_dma); + } +} + +static void xsc_txwqe_build_csegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 mss, u16 ihs, u16 headlen, + u8 opcode, u16 ds_cnt, u32 num_bytes, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + struct xsc_core_device *xdev = sq->cq.xdev; + int send_wqe_ds_num_log = ilog2(xdev->caps.send_ds_num); + + xsc_txwqe_build_cseg_csum(sq, skb, cseg); + + if (mss != 0) { + cseg->has_pph = 0; + cseg->so_type = 1; + cseg->so_hdr_len = ihs; + cseg->so_data_size = cpu_to_le16(mss); + } + + cseg->msg_opcode = opcode; + cseg->wqe_id = cpu_to_le16(sq->pc << send_wqe_ds_num_log); + cseg->ds_data_num = ds_cnt - XSC_SEND_WQEBB_CTRL_NUM_DS; + cseg->msg_len = cpu_to_le32(num_bytes); + + cseg->ce = 1; + + WQE_CSEG_DUMP("cseg", cseg); +} + +static int xsc_txwqe_build_dsegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 ihs, u16 headlen, + struct xsc_wqe_data_seg *dseg) +{ + 
dma_addr_t dma_addr = 0; + u8 num_dma = 0; + int i; + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + + if (headlen) { + dma_addr = dma_map_single(dev, skb->data, headlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->va = cpu_to_le64(dma_addr); + dseg->mkey = cpu_to_le32(sq->mkey_be); + dseg->seg_len = cpu_to_le32(headlen); + + WQE_DSEG_DUMP("dseg-headlen", dseg); + + xsc_dma_push(sq, dma_addr, headlen, XSC_DMA_MAP_SINGLE); + num_dma++; + dseg++; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int fsz = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(dev, frag, 0, fsz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->va = cpu_to_le64(dma_addr); + dseg->mkey = cpu_to_le32(sq->mkey_be); + dseg->seg_len = cpu_to_le32(fsz); + + WQE_DSEG_DUMP("dseg-frag", dseg); + + xsc_dma_push(sq, dma_addr, fsz, XSC_DMA_MAP_PAGE); + num_dma++; + dseg++; + } + + return num_dma; + +dma_unmap_wqe_err: + xsc_dma_unmap_wqe_err(sq, num_dma); + return -ENOMEM; +} + +static inline bool xsc_wqc_has_room_for(struct xsc_wq_cyc *wq, + u16 cc, u16 pc, u16 n) +{ + return (xsc_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); +} + +static inline void xsc_sq_notify_hw(struct xsc_wq_cyc *wq, u16 pc, + struct xsc_sq *sq) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct xsc_core_device *xdev = adapter->xdev; + union xsc_send_doorbell doorbell_value; + int send_ds_num_log = ilog2(xdev->caps.send_ds_num); + + /*reverse wqe index to ds index*/ + doorbell_value.next_pid = pc << send_ds_num_log; + doorbell_value.qp_num = sq->sqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + ETH_DEBUG_LOG("pc = %d sqn = %d\n", pc, sq->sqn); + ETH_DEBUG_LOG("doorbell_value = %#x\n", doorbell_value.send_data); + writel(doorbell_value.send_data, REG_ADDR(xdev, xdev->regs.tx_db)); +} + +void xsc_txwqe_complete(struct xsc_sq *sq, struct sk_buff *skb, + u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, + struct xsc_tx_wqe_info *wi) +{ + struct xsc_wq_cyc *wq = &sq->wq; + + wi->num_bytes = num_bytes; + wi->num_dma = num_dma; + wi->num_wqebbs = num_wqebbs; + wi->skb = skb; + +#ifdef XSC_BQL_SUPPORT + ETH_SQ_STATE(sq); + netdev_tx_sent_queue(sq->txq, num_bytes); + ETH_SQ_STATE(sq); +#endif + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + ETH_DEBUG_LOG("%s\n", "hw tstamp\n"); + } + + /*1*/ + sq->pc += wi->num_wqebbs; + ETH_DEBUG_LOG("%d\n", sq->pc); + + if (unlikely(!xsc_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) { + netif_tx_stop_queue(sq->txq); + sq->stats->stopped++; + ETH_DEBUG_LOG("%p %d %d %d\n", wq, sq->cc, sq->pc, sq->stop_room); + } + + ETH_DEBUG_LOG("%d %d\n", xsc_netdev_xmit_more(skb), netif_xmit_stopped(sq->txq)); + + if (!xsc_netdev_xmit_more(skb) || netif_xmit_stopped(sq->txq)) + xsc_sq_notify_hw(wq, sq->pc, sq); +} + +static void xsc_dump_error_sqcqe(struct xsc_sq *sq, + struct xsc_cqe *cqe) +{ + u32 ci = xsc_cqwq_get_ci(&sq->cq.wq); + struct net_device *netdev = sq->channel->netdev; + + net_err_ratelimited("Err cqe on dev %s cqn=0x%x ci=0x%x sqn=0x%x err_code=0x%x qpid=0x%x\n", + netdev->name, sq->cq.xcq.cqn, ci, + sq->sqn, get_cqe_opcode(cqe), cqe->qp_id); +} + +void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq) +{ + struct 
xsc_tx_wqe_info *wi; + struct sk_buff *skb; + u16 ci, npkts = 0; + u32 nbytes = 0; + int i; + + while (sq->cc != sq->pc) { + ci = xsc_wq_cyc_ctr2ix(&sq->wq, sq->cc); + wi = &sq->db.wqe_info[ci]; + skb = wi->skb; + + if (!skb) { /* nop */ + sq->cc++; + continue; + } + + for (i = 0; i < wi->num_dma; i++) { + struct xsc_sq_dma *dma = + xsc_dma_get(sq, sq->dma_fifo_cc++); + + xsc_tx_dma_unmap(dev, dma); + } + + dev_kfree_skb_any(skb); + npkts++; + nbytes += wi->num_bytes; + sq->cc += wi->num_wqebbs; + } + +#ifdef XSC_BQL_SUPPORT + netdev_tx_completed_queue(sq->txq, npkts, nbytes); +#endif +} + +#ifdef NEED_CREATE_RX_THREAD + DECLARE_PER_CPU(bool, txcqe_get); +#endif + +bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) +{ + struct xsc_adapter *adapter; + struct device *dev; + struct xsc_sq_stats *stats; + struct xsc_sq *sq; + struct xsc_cqe *cqe; + u32 dma_fifo_cc; + u32 nbytes = 0; + u16 npkts = 0; + u16 sqcc; + int i = 0; + + sq = container_of(cq, struct xsc_sq, cq); + if (!test_bit(XSC_ETH_SQ_STATE_ENABLED, &sq->state)) + return false; + + adapter = sq->channel->adapter; + dev = adapter->dev; + + cqe = xsc_cqwq_get_cqe(&cq->wq); + if (!cqe) + goto out; + + stats = sq->stats; + + if (unlikely(get_cqe_opcode(cqe) & BIT(7))) { + xsc_dump_error_sqcqe(sq, cqe); + stats->cqe_err++; + return false; + } + +#ifdef NEED_CREATE_RX_THREAD + __this_cpu_write(txcqe_get, true); +#endif + + sqcc = sq->cc; + + /* avoid dirtying sq cache line every cqe */ + dma_fifo_cc = sq->dma_fifo_cc; + i = 0; + do { + struct xsc_tx_wqe_info *wi; + struct sk_buff *skb; + int j; + u16 ci; + + xsc_cqwq_pop(&cq->wq); + + ci = xsc_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.wqe_info[ci]; + skb = wi->skb; + + /* a CQE may complete a slot that holds no skb (not only for nop WQEs); count it and skip */ + if (unlikely(!skb)) { + stats->txdone_skb_null++; + continue; + } + + for (j = 0; j < wi->num_dma; j++) { + struct xsc_sq_dma *dma = xsc_dma_get(sq, dma_fifo_cc++); + + xsc_tx_dma_unmap(dev, dma); + } + +#ifndef NEED_CREATE_RX_THREAD + npkts++; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + napi_consume_skb(skb, napi_budget); +#else + npkts++; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + if (refcount_read(&skb->users) < 1) + stats->txdone_skb_refcnt_err++; + napi_consume_skb(skb, 0); +#endif + ETH_DEBUG_LOG("ci=%d, sqcc=%d, pkts=%d\n", ci, sqcc, npkts); + + } while ((++i <= napi_budget) && (cqe = xsc_cqwq_get_cqe(&cq->wq))); + + stats->cqes += i; + + xsc_cq_notify_hw(cq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; + ETH_DEBUG_LOG("dma_fifo_cc=%d, sqcc=%d\n", dma_fifo_cc, sqcc); + +#ifdef XSC_BQL_SUPPORT + ETH_SQ_STATE(sq); + netdev_tx_completed_queue(sq->txq, npkts, nbytes); + ETH_SQ_STATE(sq); +#endif + + if (netif_tx_queue_stopped(sq->txq) && + xsc_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room)) { + netif_tx_wake_queue(sq->txq); + stats->wake++; + } + +out: + return (i == napi_budget); +} + +static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb, + struct xsc_sq *sq, + struct xsc_tx_wqe *wqe, + u16 pi) +{ + struct xsc_send_wqe_ctrl_seg *cseg; + struct xsc_wqe_data_seg *dseg; + struct xsc_tx_wqe_info *wi; + struct xsc_sq_stats *stats = sq->stats; + struct xsc_core_device *xdev = sq->cq.xdev; + u16 ds_cnt; + u16 mss, ihs, headlen; + u8 opcode; + u32 num_bytes; + int num_dma; /* signed: may carry a negative errno from xsc_txwqe_build_dsegs() */ + u8 num_wqebbs; + +retry_send: + /* Calc ihs and ds cnt, no writes to wqe yet */ + /* the ctrl DS counted here is subtracted again when ds_data_num is set */ + ds_cnt = XSC_SEND_WQEBB_CTRL_NUM_DS; + + /*in 
andes inline is bonding with gso*/ + if (skb_is_gso(skb)) { + opcode = XSC_OPCODE_RAW; + mss = skb_shinfo(skb)->gso_size; + ihs = xsc_tx_get_gso_ihs(sq, skb); + num_bytes = skb->len; + stats->packets += skb_shinfo(skb)->gso_segs; + } else { + opcode = XSC_OPCODE_RAW; + mss = 0; + ihs = 0; + num_bytes = skb->len; + stats->packets++; + } + + /*linear data in skb*/ + headlen = skb->len - skb->data_len; + ds_cnt += !!headlen; + ds_cnt += skb_shinfo(skb)->nr_frags; + ETH_DEBUG_LOG("skb_len=%d data_len=%d nr_frags=%d mss=%d ihs=%d headlen=%d ds_cnt=%d\n", + skb->len, skb->data_len, skb_shinfo(skb)->nr_frags, + mss, ihs, headlen, ds_cnt); + + /*to make the connection, only linear data is present*/ + skbdata_debug_dump(skb, headlen, 1); + + /* Check packet size. */ + if (unlikely(mss == 0 && num_bytes > sq->hw_mtu)) { + sq->stats->oversize_pkts_sw_drop++; + goto err_drop; + } + + num_wqebbs = DIV_ROUND_UP(ds_cnt, xdev->caps.send_ds_num); + /*if ds_cnt exceed one wqe, drop it*/ + if (num_wqebbs != 1) { + sq->stats->skb_linear++; + if (skb_linearize(skb)) + goto err_drop; + goto retry_send; + } + + /* fill wqe */ + wi = (struct xsc_tx_wqe_info *)&sq->db.wqe_info[pi]; + cseg = &wqe->ctrl; + dseg = &wqe->data[0]; + + if (unlikely(num_bytes == 0)) + goto err_drop; + + xsc_txwqe_build_csegs(sq, skb, mss, ihs, headlen, + opcode, ds_cnt, num_bytes, cseg); + + /*inline header is also use dma to transport*/ + num_dma = xsc_txwqe_build_dsegs(sq, skb, ihs, headlen, dseg); + if (unlikely(num_dma < 0)) + goto err_drop; + + xsc_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, + num_dma, wi); + + stats->bytes += num_bytes; + stats->xmit_more += xsc_netdev_xmit_more(skb); + + sq->dim_obj.sample.pkt_ctr = sq->stats->packets; + sq->dim_obj.sample.byte_ctr = sq->stats->bytes; + + return NETDEV_TX_OK; + +err_drop: + ETH_DEBUG_LOG("%s: drop skb, ds_cnt=%d, num_wqebbs=%d, num_dma=%d\n", + __func__, ds_cnt, num_wqebbs, num_dma); + stats->dropped++; + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev) +{ + u32 ret; + u32 queue_id; + struct xsc_sq *sq; + struct xsc_tx_wqe *wqe; + u16 pi; + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_device *xdev = adapter->xdev; + + if (!skb) { + ETH_DEBUG_LOG("skb == NULL\n"); + return NETDEV_TX_OK; + } + + if (!adapter) { + ETH_DEBUG_LOG("adapter == NULL\n"); + return NETDEV_TX_BUSY; + } + + if (adapter->status != XSCALE_ETH_DRIVER_OK) { + ETH_DEBUG_LOG("adapter->status = %d\n", adapter->status); + return NETDEV_TX_BUSY; + } + + queue_id = skb_get_queue_mapping(skb); + ETH_DEBUG_LOG("queue_id = %d\n", queue_id); + assert(adapter->xdev, queue_id < XSC_ETH_MAX_TC_TOTAL); + + sq = adapter->txq2sq[queue_id]; + if (!sq) { + ETH_DEBUG_LOG("sq = NULL\n"); + return NETDEV_TX_BUSY; + } + ETH_DEBUG_LOG("sqn = %d\n", sq->sqn); + + wqe = xsc_sq_fetch_wqe(sq, xdev->caps.send_ds_num * XSC_SEND_WQE_DS, &pi); + ETH_DEBUG_LOG("wqe = %p pi = %d\n", wqe, pi); + assert(adapter->xdev, wqe); + +#ifndef ANDES_DRIVER + skb = xsc_accel_handle_tx(skb); +#endif + + ret = xsc_eth_xmit_frame(skb, sq, wqe, pi); + + ETH_DEBUG_LOG("ret = %d\n", ret); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..13699c6dd0dc99ce015e787fc8d6855b3c7551cf --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c @@ -0,0 +1,173 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_dim.h" + +void xsc_cq_notify_hw_rearm(struct xsc_cq *cq) +{ + union xsc_cq_doorbell db; + + ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); + + db.val = 0; + db.cq_next_cid = cpu_to_le32(cq->wq.cc); + db.cq_id = cpu_to_le32(cq->xcq.cqn); + db.arm = 0; + + /* ensure doorbell record is visible to device before ringing the doorbell */ + wmb(); + writel(db.val, REG_ADDR(cq->xdev, cq->xdev->regs.complete_db)); + if (cq->channel && cq->channel->stats) + cq->channel->stats->arm++; +} + +void xsc_cq_notify_hw(struct xsc_cq *cq) +{ + struct xsc_core_device *xdev = cq->xdev; + union xsc_cq_doorbell db; + + ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); + + dma_wmb(); + + db.val = 0; + db.cq_next_cid = cpu_to_le32(cq->wq.cc); + db.cq_id = cpu_to_le32(cq->xcq.cqn); + + writel(db.val, REG_ADDR(xdev, xdev->regs.complete_reg)); + if (cq->channel && cq->channel->stats) + cq->channel->stats->noarm++; +} + +static inline bool xsc_channel_no_affinity_change(struct xsc_channel *c) +{ + int current_cpu = smp_processor_id(); + + return cpumask_test_cpu(current_cpu, c->aff_mask); +} + +enum hrtimer_restart xsc_dim_reduce_timer_fn(struct hrtimer *timer) +{ + struct xsc_dim_reduce_work *reduce = (struct xsc_dim_reduce_work *)timer; + struct xsc_cq *cq = container_of(reduce, struct xsc_cq, cq_reduce); + + xsc_cq_notify_hw_rearm(cq); + + return HRTIMER_NORESTART; +} + +int xsc_eth_napi_poll(struct napi_struct *napi, int budget) +{ + struct xsc_channel *c = container_of(napi, struct xsc_channel, napi); + struct xsc_eth_params *params = &c->adapter->nic_param; + struct xsc_rq *rq = &c->qp.rq[0]; + struct xsc_sq *sq = NULL; + bool busy = false; + int work_done = 0; + int tx_budget = 0; + int i; + + rcu_read_lock(); + + clear_bit(XSC_CHANNEL_NAPI_SCHED, &c->flags); + + tx_budget = params->sq_size >> 2; + for (i = 0; i < c->num_tc; i++) + busy |= xsc_poll_tx_cq(&c->qp.sq[i].cq, tx_budget); + + /* budget=0 means: don't poll rx rings */ + if (likely(budget)) { + work_done = xsc_poll_rx_cq(&rq->cq, budget); + busy |= work_done == budget; + } + + busy |= rq->post_wqes(rq); + + if (busy) { + if (likely(xsc_channel_no_affinity_change(c))) { + rcu_read_unlock(); + return budget; + } + c->stats->aff_change++; + if (budget && work_done == budget) + work_done--; + } + +#ifdef NETDEV_NAPI_COMP_DONE_RETURN_VOID + napi_complete_done(napi, work_done); +#else + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; +#endif + + for (i = 0; i < c->num_tc; i++) { + sq = &c->qp.sq[i]; + + if (test_bit(XSC_ETH_SQ_STATE_AM, &sq->state)) { + struct xsc_dim_reduce_work *reduce_sq = NULL; + u32 dim_us_tx = params->tx_cq_moderation.usec; + + xsc_handle_tx_dim(sq); + + reduce_sq = &sq->cq.cq_reduce; + if (hrtimer_is_queued(&reduce_sq->timer)) + continue; + + dim_us_tx = min_t(u32, sq->cq.xcq.dim_us, dim_us_tx); + sq->stats->dim_us = dim_us_tx; + if (dim_us_tx) { + hrtimer_start(&reduce_sq->timer, + ns_to_ktime(dim_us_tx * NSEC_PER_USEC), + HRTIMER_MODE_REL_PINNED); + continue; + } + } + xsc_cq_notify_hw_rearm(&sq->cq); + } + + if (test_bit(XSC_ETH_RQ_STATE_AM, &rq->state)) { + struct xsc_dim_reduce_work *reduce = &rq->cq.cq_reduce; + u32 dim_us = params->rx_cq_moderation.usec; + + xsc_handle_rx_dim(rq); + + if (c->stats->poll <= params->rx_dim_frames_low) { + dim_us = 0; + if 
(c->stats->poll == 0 && hrtimer_is_queued(&reduce->timer)) + goto out; + } else { + dim_us = min_t(u32, rq->cq.xcq.dim_us, dim_us); + } + rq->stats->dim_us = dim_us; + + if (dim_us) { + if (hrtimer_is_queued(&reduce->timer)) + goto out; + + reduce->dim_us = dim_us; + + if (dim_us <= params->rx_dim_usecs_low) { + udelay(dim_us); + xsc_cq_notify_hw_rearm(&rq->cq); + } else { + hrtimer_start(&reduce->timer, + ns_to_ktime(dim_us * NSEC_PER_USEC), + HRTIMER_MODE_REL_PINNED); + } + goto out; + } + } + + xsc_cq_notify_hw_rearm(&rq->cq); + +#ifndef NETDEV_NAPI_COMP_DONE_RETURN_VOID +out: +#endif + rcu_read_unlock(); + return work_done; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..005f1ae4a55a38b880604941475d6d525e818dba --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_RXTX_H +#define XSC_RXTX_H + +#include "xsc_eth.h" +#include "common/qp.h" +#include "xsc_eth_debug.h" + +enum { + XSC_ETH_WQE_NONE_CSUM, + XSC_ETH_WQE_INNER_CSUM, + XSC_ETH_WQE_OUTER_CSUM, + XSC_ETH_WQE_INNER_AND_OUTER_CSUM, +}; + +#define ANDES_DRIVER + +static inline u32 xsc_cqwq_get_size(struct xsc_cqwq *wq) +{ + return wq->fbc.sz_m1 + 1; +} + +static inline struct xsc_cqe *xsc_cqwq_get_wqe(struct xsc_cqwq *wq, u32 ix) +{ + struct xsc_cqe *cqe = xsc_frag_buf_get_wqe(&wq->fbc, ix); + + ETH_DEBUG_LOG("cqe = %p\n", cqe); + + return cqe; +} + +static inline struct xsc_cqe *xsc_cqwq_get_cqe(struct xsc_cqwq *wq) +{ + struct xsc_cqe *cqe; + u8 cqe_ownership_bit; + u8 sw_ownership_val; + u32 ci = xsc_cqwq_get_ci(wq); + + cqe = xsc_cqwq_get_wqe(wq, ci); + + cqe_ownership_bit = cqe->owner & XSC_CQE_OWNER_MASK; + sw_ownership_val = xsc_cqwq_get_wrap_cnt(wq) & 1; + ETH_DEBUG_LOG("ci=%d, cqe_owner=%d, sw_owner=%d\n", + ci, cqe_ownership_bit, sw_ownership_val); + + if (cqe_ownership_bit != sw_ownership_val) + return NULL; + + /* ensure cqe content is read after cqe ownership bit */ + dma_rmb(); + + return cqe; +} + +void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq); +int xsc_eth_napi_poll(struct napi_struct *napi, int budget); +bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget); +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget); +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe); +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph); +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq); +void xsc_cq_notify_hw(struct xsc_cq *cq); +void xsc_cq_notify_hw_rearm(struct xsc_cq *cq); +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix); +netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev); + +void xsc_page_release_dynamic(struct xsc_rq *rq, + struct xsc_dma_info *dma_info, + bool recycle); + +enum hrtimer_restart xsc_dim_reduce_timer_fn(struct hrtimer *timer); + +#endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c new file mode 100644 index 0000000000000000000000000000000000000000..7379574f1a7e3f032fc60b44cce3f52eedb34bc8 --- /dev/null +++ 
b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "xsc_eth.h" +#include "common/vport.h" +#include "common/xsc_fs.h" + +static int xsc_vport_context_update_vlans(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, + u16 vid, bool add) +{ + struct net_device *ndev = adapter->netdev; + struct xsc_core_device *xdev = adapter->xdev; + int err; + + err = xsc_modify_nic_vport_vlans(xdev, vid, add); + if (err) + netdev_err(ndev, "Failed to modify vport vid:%d rule_type:%d err:%d\n", + vid, rule_type, err); + return err; +} + +static int xsc_add_vlan_rule(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, u16 vid) +{ + return xsc_vport_context_update_vlans(adapter, rule_type, vid, true); +} + +static void xsc_del_vlan_rule(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, u16 vid) +{ + xsc_vport_context_update_vlans(adapter, rule_type, vid, false); +} + +static int xsc_vlan_rx_add_cvid(struct xsc_adapter *adapter, u16 vid) +{ + int err; + + set_bit(vid, adapter->fs.vlan.active_cvlans); + + err = xsc_add_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + if (err) + clear_bit(vid, adapter->vlan_params.active_cvlans); + + return err; +} + +static int xsc_vlan_rx_add_svid(struct xsc_adapter *adapter, u16 vid) +{ + struct net_device *netdev = adapter->netdev; + int err; + + set_bit(vid, adapter->fs.vlan.active_svlans); + + err = xsc_add_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + if (err) { + clear_bit(vid, adapter->fs.vlan.active_svlans); + return err; + } + + /* Need to fix some features.. */ + netdev_update_features(netdev); + return err; +} + +int xsc_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!vid) + return 0; + + if (be16_to_cpu(proto) == ETH_P_8021Q) + return xsc_vlan_rx_add_cvid(adapter, vid); + else if (be16_to_cpu(proto) == ETH_P_8021AD) + return xsc_vlan_rx_add_svid(adapter, vid); + + return -EOPNOTSUPP; +} + +int xsc_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!vid) + return 0; + + if (be16_to_cpu(proto) == ETH_P_8021Q) { + clear_bit(vid, adapter->fs.vlan.active_cvlans); + xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + } else if (be16_to_cpu(proto) == ETH_P_8021AD) { + clear_bit(vid, adapter->fs.vlan.active_svlans); + xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_update_features(dev); + } + + return 0; +} + +void xsc_set_rx_mode_work(struct work_struct *work) +{ + int err = 0; + struct xsc_adapter *adapter = container_of(work, struct xsc_adapter, + set_rx_mode_work); + struct net_device *dev = adapter->netdev; + struct xsc_l2_table *l2 = &adapter->fs.l2; + + bool rx_mode_enable = (adapter->status == XSCALE_ETH_DRIVER_OK); + bool promisc_enabled = rx_mode_enable && (dev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (dev->flags & IFF_ALLMULTI); + + bool enable_promisc = !l2->promisc_enabled && promisc_enabled; + bool disable_promisc = l2->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !l2->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = l2->allmulti_enabled && !allmulti_enabled; + bool change = enable_promisc | disable_promisc | enable_allmulti | disable_allmulti; + + if (change) + err = 
xsc_modify_nic_vport_promisc(adapter->xdev, + (enable_allmulti | disable_allmulti), + (enable_promisc | disable_promisc), + allmulti_enabled, promisc_enabled); + if (err) { + xsc_core_err(adapter->xdev, "failed to set rx mode, err = %d\n", err); + + return; + } + + l2->promisc_enabled = promisc_enabled; + l2->allmulti_enabled = allmulti_enabled; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c new file mode 100644 index 0000000000000000000000000000000000000000..32eb74563e4be442ef9e2956118250c9b39e8406 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "common/xsc_cmd.h" +#include "xsc_eth.h" +#include "xsc_eth_debug.h" + +static void precmd_rlimit_set(void *data, u32 mac_port) +{ + struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *)data; + + req->rate_cir = __cpu_to_be32(req->rate_cir); + req->limit_id = __cpu_to_be32(req->limit_id); +} + +static void postcmd_rlimit_get(void *data) +{ + struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *)data; + int i; + + for (i = 0; i <= QOS_PRIO_MAX; i++) + resp->rate_cir[i] = __be32_to_cpu(resp->rate_cir[i]); + + resp->max_limit_id = __be32_to_cpu(resp->max_limit_id); +} + +static int xsc_dcbx_hw_qos_cmdq(struct xsc_core_device *xdev, u16 opcode, + void *inupt, + void *output, + u16 expect_req_size, + u16 expect_resp_size, + void (*precmdq)(void *, u32), + void (*postcmdq)(void *)) +{ + struct xsc_qos_mbox_in *in; + struct xsc_qos_mbox_out *out; + int err; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + if (inupt) + memcpy(&in->data, inupt, expect_req_size); + + in->hdr.opcode = __cpu_to_be16(opcode); + in->req_prfx.mac_port = xdev->mac_port; + + if (precmdq) + precmdq((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + if (postcmdq) + postcmdq((void *)out->data); + + if (output) + memcpy(output, out->data, expect_resp_size); + + kvfree(in); + kvfree(out); + return 0; + +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int xsc_dcbx_hw_common(struct xsc_core_device *xdev, u16 opcode, + void *input, + void *output, + u16 expect_req_size, + u16 expect_resp_size, + void (*precmdq)(void *, u32), + void (*postcmdq)(void *)) +{ + int ret; + struct xsc_inbox_hdr *hdr; + + hdr = (struct xsc_inbox_hdr *)input; + hdr->opcode = __cpu_to_be16(opcode); + + ret = xsc_cmd_exec(xdev, (void *)input, expect_req_size, + (void *)output, expect_resp_size); + + return ret; +} + +int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp) +{ + switch (opcode) { + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_rate_limit_get), + sizeof(struct xsc_rate_limit_get), + NULL, postcmd_rlimit_get); + fallthrough; + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_rate_limit_set), + 0, precmd_rlimit_set, NULL); + fallthrough; + case 
XSC_CMD_OP_IOCTL_GET_PFC: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + 0, sizeof(struct xsc_pfc_get), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_SET_PFC: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_pfc_set), + sizeof(struct xsc_pfc_set), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0, + sizeof(struct xsc_trust_mode_get), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_trust_mode_set), 0, + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + 0, sizeof(struct xsc_dscp_pmt_get), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_dscp_pmt_set), + 0, NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_GET_SP: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + 0, sizeof(struct xsc_sp_get), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_SET_SP: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_sp_set), + 0, NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_GET_WEIGHT: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + 0, sizeof(struct xsc_weight_get), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, + sizeof(struct xsc_weight_set), + 0, NULL, NULL); + fallthrough; + case XSC_CMD_OP_QUERY_PFC_PRIO_STATS: + return xsc_dcbx_hw_common(xdev, opcode, req, rsp, + sizeof(struct xsc_pfc_prio_stats_mbox_in), + sizeof(struct xsc_pfc_prio_stats_mbox_out), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_GET_LLDP_STATUS: + case XSC_CMD_OP_SET_LLDP_STATUS: + return xsc_dcbx_hw_common(xdev, opcode, req, rsp, + sizeof(struct xsc_lldp_status_mbox_in), + sizeof(struct xsc_lldp_status_mbox_out), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH: + return xsc_dcbx_hw_common(xdev, opcode, req, rsp, + sizeof(struct xsc_pfc_set_drop_th_mbox_in), + sizeof(struct xsc_pfc_set_drop_th_mbox_out), + NULL, NULL); + fallthrough; + case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS: + return xsc_dcbx_hw_common(xdev, opcode, req, rsp, + sizeof(struct xsc_pfc_get_cfg_status_mbox_in), + sizeof(struct xsc_pfc_get_cfg_status_mbox_out), + NULL, NULL); + fallthrough; + default: + xsc_core_dbg(xdev, "unknown type=%d\n", opcode); + } + + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h new file mode 100644 index 0000000000000000000000000000000000000000..a9043f85fa057cd159e9877da8e861c985c3e5c7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_HW_COMMON_H +#define XSC_HW_COMMON_H + +int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..bbf05a26c7407b9cadfe3dc2b87fd0761a06ffff --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_QUEUE_H +#define XSC_QUEUE_H + +#include +#include + +#include + +#include "../pci/wq.h" + +enum { + XSC_SEND_WQE_DS = 16, + XSC_SEND_WQE_BB = 64, +}; + +enum { + XSC_RECV_WQE_DS = 16, + XSC_RECV_WQE_BB = 16, +}; + +#define XSC_SEND_WQEBB_NUM_DS (XSC_SEND_WQE_BB / XSC_SEND_WQE_DS) +#define XSC_LOG_SEND_WQEBB_NUM_DS ilog2(XSC_SEND_WQEBB_NUM_DS) + +#define XSC_RECV_WQEBB_NUM_DS (XSC_RECV_WQE_BB / XSC_RECV_WQE_DS) +#define XSC_LOG_RECV_WQEBB_NUM_DS ilog2(XSC_RECV_WQEBB_NUM_DS) + +#define XSC_SEND_WQEBB_CTRL_NUM_DS 1 + +enum { + XSC_ETH_RQ_STATE_ENABLED, + XSC_ETH_RQ_STATE_AM, + XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, +}; + +enum { + XSC_ETH_SQ_STATE_ENABLED, + XSC_ETH_SQ_STATE_AM, +}; + +struct xsc_dma_info { + struct page *page; + dma_addr_t addr; +}; + +struct xsc_wqe_frag_info { + struct xsc_dma_info *di; + u32 offset; + u8 last_in_page; + u8 is_available; +}; + +struct xsc_rq_frag_info { + int frag_size; + int frag_stride; +}; + +struct xsc_rq_frags_info { + struct xsc_rq_frag_info arr[XSC_MAX_RX_FRAGS]; + u8 num_frags; + u8 log_num_frags; + u8 wqe_bulk; + u8 wqe_bulk_min; + u8 frags_max_num; +}; + +#define xsc_dim_t struct dim +#define xsc_dim_sample_t struct dim_sample +#define xsc_dim_cq_moder_t struct dim_cq_moder + +struct xsc_dim { + xsc_dim_t dim; + xsc_dim_sample_t sample; +}; + +struct xsc_dim_reduce_work { + struct hrtimer timer; + u32 dim_us; +}; + +struct xsc_cq { + /* data path - accessed per cqe */ + struct xsc_cqwq wq; + + /* data path - accessed per napi poll */ + u16 event_ctr; + struct napi_struct *napi; + struct xsc_core_cq xcq; + struct xsc_channel *channel; + + /* control */ + struct xsc_core_device *xdev; + struct xsc_wq_ctrl wq_ctrl; + u8 rx; + struct xsc_dim_reduce_work cq_reduce; +} ____cacheline_aligned_in_smp; + +struct xsc_pcie_lat_work { + struct xsc_core_device *xdev; + struct xsc_adapter *adapter; + struct delayed_work work; + u16 enable; + u32 period; +}; + +#define XSC_PAGE_CACHE_LOG_MAX_RQ_MULT 6 +#define XSC_PAGE_CACHE_REDUCE_WORK_INTERVAL 200 /* msecs */ +#define XSC_PAGE_CACHE_REDUCE_GRACE_PERIOD 1000 /* msecs */ +#define XSC_PAGE_CACHE_REDUCE_SUCCESS_CNT 4 + +struct xsc_page_cache_reduce { + struct delayed_work reduce_work; + u32 success; + unsigned long next_ts; + unsigned long grace_period; + unsigned long delay; + struct xsc_dma_info *pending; + u32 npages; +}; + +struct xsc_page_cache { + struct xsc_dma_info *page_cache; + u32 head; + u32 tail; + u32 sz; + u32 resv; +}; + +struct xsc_rq; +struct xsc_cqe; +typedef void (*xsc_fp_handle_rx_cqe)(struct xsc_cqwq *cqwq, struct xsc_rq *rq, + struct xsc_cqe *cqe); +typedef bool (*xsc_fp_post_rx_wqes)(struct xsc_rq *rq); +typedef void (*xsc_fp_dealloc_wqe)(struct xsc_rq *rq, u16 ix); +typedef struct sk_buff * (*xsc_fp_skb_from_cqe)(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); + +struct xsc_rq { + struct xsc_core_qp cqp; + struct { + struct xsc_wq_cyc 
wq; + struct xsc_wqe_frag_info *frags; + struct xsc_dma_info *di; + struct xsc_rq_frags_info info; + xsc_fp_skb_from_cqe skb_from_cqe; + } wqe; + + struct { + u16 headroom; + u8 map_dir; /* dma map direction */ + } buff; + + struct page_pool *page_pool; + struct xsc_wq_ctrl wq_ctrl; + struct xsc_cq cq; + u32 rqn; + int ix; + + unsigned long state; + struct work_struct recover_work; + struct xsc_rq_stats *stats; + struct xsc_dim dim_obj; + + u32 hw_mtu; + u32 frags_sz; + + xsc_fp_handle_rx_cqe handle_rx_cqe; + xsc_fp_post_rx_wqes post_wqes; + xsc_fp_dealloc_wqe dealloc_wqe; + struct xsc_page_cache page_cache; +} ____cacheline_aligned_in_smp; + +struct xsc_tx_wqe_info { + struct sk_buff *skb; + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +enum xsc_dma_map_type { + XSC_DMA_MAP_SINGLE, + XSC_DMA_MAP_PAGE +}; + +struct xsc_sq_dma { + dma_addr_t addr; + u32 size; + enum xsc_dma_map_type type; +}; + +struct xsc_sq { + struct xsc_core_qp cqp; + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + struct xsc_dim dim_obj; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + + struct xsc_cq cq; + + /* read only */ + struct xsc_wq_cyc wq; + u32 dma_fifo_mask; + struct xsc_sq_stats *stats; + struct { + struct xsc_sq_dma *dma_fifo; + struct xsc_tx_wqe_info *wqe_info; + } db; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u16 stop_room; + + __be32 mkey_be; + unsigned long state; + unsigned int hw_mtu; + + /* control path */ + struct xsc_wq_ctrl wq_ctrl; + struct xsc_channel *channel; + int ch_ix; + int txq_ix; + struct work_struct recover_work; +} ____cacheline_aligned_in_smp; + +struct rdma_opcode_data { + u32 immdt_value; +} __packed __aligned(4); + +struct raw_opcode_data { + u16 has_pph : 1; + u16 so_type : 1; + u16 so_data_size : 14; + u8 rsv; + u8 so_hdr_len; +} __packed __aligned(4); + +struct rawtype_opcode_data { + u16 desc_id; + u16 is_last_wqe : 1; + u16 dst_qp_id : 15; +} __packed __aligned(4); + +struct xsc_wqe_ctrl_seg { + u8 msg_opcode; + u8 with_immdt : 1; + u8 csum_en : 2; + u8 ds_data_num : 5; + u16 wqe_id; + u32 msg_len; + union { + struct rdma_opcode_data _rdma_opcode_data; + struct raw_opcode_data _raw_opcode_data; + struct rawtype_opcode_data _rawtype_opcode_data; + } opcode_data; + u32 se : 1; + u32 ce : 1; + u32 rsv : 30; +}; + +static inline u8 get_cqe_opcode(struct xsc_cqe *cqe) +{ + return cqe->msg_opcode; +} + +static inline void xsc_dump_err_cqe(struct xsc_core_device *dev, + struct xsc_cqe *cqe) +{ + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, cqe, + sizeof(*cqe), false); +} + +#endif /* XSC_QUEUE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..fafa69b8a478699e4ea9e06d4880e3986467bc6a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon PCI configuration +# + +config YUNSILICON_XSC_PCI + tristate "Yunsilicon XSC PCI driver" + default n + select NET_DEVLINK + select PAGE_POOL + help + This driver is common for Yunsilicon XSC + ethernet and RDMA drivers. + + To compile this driver as a module, choose M here. The module + will be called xsc_pci. 
diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..45a7d473cac795fc063c5c39df596772455031dd --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o + +xsc_pci-y := main.o eq.o intf.o debugfs.o alloc.o wq.o cq.o qp.o \ + cmd2.o fw.o port.o mr.o pd.o xsc_lag.o xsc_pci_ctrl.o\ + pci_irq.o vport.o sriov.o sriov_sysfs.o devlink.o eswitch.o xsc_port_ctrl.o res_obj.o qpts.o\ + fw/cmd.o \ + fw/xsc_flow.o \ + fw/xsc_res.o \ + fw/osdep.o \ + fw/xsc_mem.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c new file mode 100644 index 0000000000000000000000000000000000000000..cdef1b996fdfb13b7ac07b2d62fa61f18df96f0a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "common/driver.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, + struct xsc_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_alloc_coherent(&xdev->pdev->dev, + size, &t, GFP_KERNEL | __GFP_ZERO); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_alloc_coherent(&xdev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL | __GFP_ZERO); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + + pages = kmalloc_array(buf->nbufs, sizeof(*pages), GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) { + if (is_vmalloc_addr(buf->page_list[i].buf)) + pages[i] = vmalloc_to_page(buf->page_list[i].buf); + else + pages[i] = virt_to_page(buf->page_list[i].buf); + } + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + xsc_buf_free(xdev, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(xsc_buf_alloc); + +void xsc_buf_free(struct xsc_core_device *xdev, struct xsc_buf *buf) +{ + int i; + + if (buf->nbufs == 1) { + dma_free_coherent(&xdev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + } else { + if (BITS_PER_LONG == 64 && buf->direct.buf) + 
vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(xsc_buf_free); + +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, int npages) +{ + u64 addr; + int i; + int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + int mask = (1 << shift) - 1; + + for (i = 0; i < npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << PAGE_SHIFT_4K); + else + addr = buf->page_list[i >> shift].map + ((i & mask) << PAGE_SHIFT_4K); + + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(xsc_fill_page_array); + +void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages) +{ + int i; + dma_addr_t addr; + int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + int mask = (1 << shift) - 1; + + for (i = 0; i < npages; i++) { + addr = buf->frags[i >> shift].map + ((i & mask) << PAGE_SHIFT_4K); + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(xsc_fill_page_frag_array); + +static void *xsc_dma_zalloc_coherent_node(struct xsc_core_device *xdev, + size_t size, dma_addr_t *dma_handle, + int node) +{ + struct xsc_dev_resource *dev_res = xdev->dev_res; + struct device *device = &xdev->pdev->dev; + int original_node; + void *cpu_handle; + + /* WA for kernels that don't use numa_mem_id in alloc_pages_node */ + if (node == NUMA_NO_NODE) +#ifdef HAVE_NUMA_MEM_ID + node = numa_mem_id(); +#else + node = first_memory_node; +#endif + + mutex_lock(&dev_res->alloc_mutex); + original_node = dev_to_node(device); + set_dev_node(device, node); + cpu_handle = dma_alloc_coherent(device, size, dma_handle, + GFP_KERNEL); + set_dev_node(device, original_node); + mutex_unlock(&dev_res->alloc_mutex); + return cpu_handle; +} + +int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, + struct xsc_frag_buf *buf, int node) +{ + int i; + + buf->size = size; + buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); + buf->page_shift = PAGE_SHIFT; + buf->frags = kcalloc(buf->npages, sizeof(struct xsc_buf_list), + GFP_KERNEL); + if (!buf->frags) + goto err_out; + + for (i = 0; i < buf->npages; i++) { + struct xsc_buf_list *frag = &buf->frags[i]; + int frag_sz = min_t(int, size, PAGE_SIZE); + + frag->buf = xsc_dma_zalloc_coherent_node(xdev, frag_sz, + &frag->map, node); + if (!frag->buf) + goto err_free_buf; + if (frag->map & ((1 << buf->page_shift) - 1)) { + dma_free_coherent(&xdev->pdev->dev, frag_sz, + buf->frags[i].buf, buf->frags[i].map); + xsc_core_warn(xdev, "unexpected map alignment: %pad, page_shift=%d\n", + &frag->map, buf->page_shift); + goto err_free_buf; + } + size -= frag_sz; + } + + return 0; + +err_free_buf: + while (i--) + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, buf->frags[i].buf, + buf->frags[i].map); + kfree(buf->frags); +err_out: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(xsc_frag_buf_alloc_node); + +void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf) +{ + int size = buf->size; + int i; + + for (i = 0; i < buf->npages; i++) { + int frag_sz = min_t(int, size, PAGE_SIZE); + + dma_free_coherent(&xdev->pdev->dev, frag_sz, buf->frags[i].buf, + buf->frags[i].map); + size -= frag_sz; + } + kfree(buf->frags); +} +EXPORT_SYMBOL_GPL(xsc_frag_buf_free); + +static struct xsc_db_pgdir *xsc_alloc_db_pgdir(struct xsc_core_device *xdev, + int node) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + struct xsc_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + if (!pgdir) + return 
NULL; + + pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL); + if (!pgdir->bitmap) { + kfree(pgdir); + return NULL; + } + + bitmap_fill(pgdir->bitmap, db_per_page); + + pgdir->db_page = xsc_dma_zalloc_coherent_node(xdev, PAGE_SIZE, + &pgdir->db_dma, node); + if (!pgdir->db_page) { + bitmap_free(pgdir->bitmap); + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int xsc_alloc_db_from_pgdir(struct xsc_db_pgdir *pgdir, + struct xsc_db *db) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + int offset; + int i; + + i = find_first_bit(pgdir->bitmap, db_per_page); + if (i >= db_per_page) + return -ENOMEM; + + __clear_bit(i, pgdir->bitmap); + + db->u.pgdir = pgdir; + db->index = i; + offset = db->index * cache_line_size(); + db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); + db->dma = pgdir->db_dma + offset; + + db->db[0] = 0; + db->db[1] = 0; + + return 0; +} + +int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node) +{ + struct xsc_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&xdev->dev_res->pgdir_mutex); + + list_for_each_entry(pgdir, &xdev->dev_res->pgdir_list, list) + if (!xsc_alloc_db_from_pgdir(pgdir, db)) + goto out; + + pgdir = xsc_alloc_db_pgdir(xdev, node); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + list_add(&pgdir->list, &xdev->dev_res->pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(xsc_alloc_db_from_pgdir(pgdir, db)); + +out: + mutex_unlock(&xdev->dev_res->pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(xsc_db_alloc_node); + +int xsc_db_alloc(struct xsc_core_device *xdev, struct xsc_db *db) +{ + return xsc_db_alloc_node(xdev, db, xdev->priv.numa_node); +} +EXPORT_SYMBOL_GPL(xsc_db_alloc); + +void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + + mutex_lock(&xdev->dev_res->pgdir_mutex); + + __set_bit(db->index, db->u.pgdir->bitmap); + + if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) { + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + bitmap_free(db->u.pgdir->bitmap); + kfree(db->u.pgdir); + } + + mutex_unlock(&xdev->dev_res->pgdir_mutex); +} +EXPORT_SYMBOL_GPL(xsc_db_free); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c new file mode 100644 index 0000000000000000000000000000000000000000..9f7966169b182a08a5de7e292bdbf18f34e0a303 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c @@ -0,0 +1,2148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifdef HAVE_GENERIC_KMAP_TYPE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/driver.h" +#include +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "tmp_cmdq_defines.h" + +enum { + CMD_IF_REV = 3, +}; + +enum { + CMD_MODE_POLLING, + CMD_MODE_EVENTS +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + XSC_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + XSC_CMD_DATA_BLOCK_SIZE, +}; + +enum { + XSC_CMD_DELIVERY_STAT_OK = 0x0, + XSC_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + XSC_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + XSC_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + XSC_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + XSC_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + XSC_CMD_DELIVERY_STAT_FW_ERR = 0x6, + XSC_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + XSC_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + XSC_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + XSC_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +static struct xsc_cmd_work_ent *alloc_cmd(struct xsc_cmd *cmd, + struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out) +{ + struct xsc_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->cmd = cmd; + + return ent; +} + +static u8 alloc_token(struct xsc_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + token = cmd->token++ % 255 + 1; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int alloc_ent(struct xsc_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return ret < cmd->max_reg_cmds ? 
ret : -ENOMEM; +} + +static void free_ent(struct xsc_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct xsc_cmd_layout *get_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static struct xsc_rsp_layout *get_cq_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cq_buf + (idx << cmd->log_stride); +} + +static u8 xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int verify_block_sig(struct xsc_cmd_prot_block *block) +{ + if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void calc_block_sig(struct xsc_cmd_prot_block *block, u8 token) +{ + block->token = token; + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); +} + +static void calc_chain_sig(struct xsc_cmd_mailbox *head, u8 token) +{ + struct xsc_cmd_mailbox *next = head; + + while (next) { + calc_block_sig(next->buf, token); + next = next->next; + } +} + +static void set_signature(struct xsc_cmd_work_ent *ent) +{ + ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); + calc_chain_sig(ent->in->next, ent->token); + calc_chain_sig(ent->out->next, ent->token); +} + +static void free_cmd(struct xsc_cmd_work_ent *ent) +{ + kfree(ent); +} + +static int verify_signature(struct xsc_cmd_work_ent *ent) +{ + struct xsc_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xor8_buf(ent->rsp_lay, sizeof(*ent->rsp_lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; +} + +static void dump_buf(void *buf, int size, int offset) +{ + __be32 *p = buf; + int i; + + for (i = 0; i < size; i += 16) { + xsc_pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3])); + p += 4; + offset += 16; + } + xsc_pr_debug("\n"); +} + +const char *xsc_command_str(int command) +{ + switch (command) { + case XSC_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case XSC_CMD_OP_ENABLE_HCA: + return "ENABLE_HCA"; + + case XSC_CMD_OP_DISABLE_HCA: + return "DISABLE_HCA"; + + case XSC_CMD_OP_MODIFY_HCA: + return "MODIFY_HCA"; + + case XSC_CMD_OP_QUERY_CMDQ_VERSION: + return "QUERY_CMDQ_VERSION"; + + case XSC_CMD_OP_QUERY_MSIX_TBL_INFO: + return "QUERY_MSIX_TBL_INFO"; + + case XSC_CMD_OP_FUNCTION_RESET: + return "FUNCTION_RESET"; + + case XSC_CMD_OP_ALLOC_IA_LOCK: + return "ALLOC_IA_LOCK"; + + case XSC_CMD_OP_RELEASE_IA_LOCK: + return "RELEASE_IA_LOCK"; + + case XSC_CMD_OP_DUMMY: + return "DUMMY_CMD"; + + case XSC_CMD_OP_SET_DEBUG_INFO: + return "SET_DEBUG_INFO"; + + case XSC_CMD_OP_CREATE_MKEY: + return "CREATE_MKEY"; + + case XSC_CMD_OP_QUERY_MKEY: + return "QUERY_MKEY"; + + case XSC_CMD_OP_DESTROY_MKEY: + return "DESTROY_MKEY"; + + case XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS: + return "QUERY_SPECIAL_CONTEXTS"; + + case XSC_CMD_OP_SET_MPT: + return "SET_MPT"; + + case XSC_CMD_OP_SET_MTT: + return "SET_MTT"; + + case XSC_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case XSC_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case XSC_CMD_OP_QUERY_EQ: + return "QUERY_EQ"; + + case 
XSC_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case XSC_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case XSC_CMD_OP_QUERY_CQ: + return "QUERY_CQ"; + + case XSC_CMD_OP_MODIFY_CQ: + return "MODIFY_CQ"; + + case XSC_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case XSC_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case XSC_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case XSC_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + case XSC_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case XSC_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case XSC_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case XSC_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case XSC_CMD_OP_RTS2SQD_QP: + return "RTS2SQD_QP"; + + case XSC_CMD_OP_SQD2RTS_QP: + return "SQD2RTS_QP"; + + case XSC_CMD_OP_2RST_QP: + return "2RST_QP"; + + case XSC_CMD_OP_QUERY_QP: + return "QUERY_QP"; + + case XSC_CMD_OP_CONF_SQP: + return "CONF_SQP"; + + case XSC_CMD_OP_MAD_IFC: + return "MAD_IFC"; + + case XSC_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case XSC_CMD_OP_SQD2SQD_QP: + return "SQD2SQD_QP"; + + case XSC_CMD_OP_QUERY_QP_FLUSH_STATUS: + return "QUERY_QP_FLUSH_STATUS"; + + case XSC_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; + + case XSC_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; + + case XSC_CMD_OP_ACCESS_REG: + return "ACCESS_REG"; + + case XSC_CMD_OP_MODIFY_RAW_QP: + return "MODIFY_RAW_QP"; + + case XSC_CMD_OP_ENABLE_NIC_HCA: + return "ENABLE_NIC_HCA"; + + case XSC_CMD_OP_DISABLE_NIC_HCA: + return "DISABLE_NIC_HCA"; + + case XSC_CMD_OP_MODIFY_NIC_HCA: + return "MODIFY_NIC_HCA"; + + case XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT: + return "QUERY_NIC_VPORT_CONTEXT"; + + case XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: + return "MODIFY_NIC_VPORT_CONTEXT"; + + case XSC_CMD_OP_QUERY_VPORT_STATE: + return "QUERY_VPORT_STATE"; + + case XSC_CMD_OP_MODIFY_VPORT_STATE: + return "MODIFY_VPORT_STATE"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT: + return "QUERY_HCA_VPORT_CONTEXT"; + + case XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: + return "MODIFY_HCA_VPORT_CONTEXT"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_GID: + return "QUERY_HCA_VPORT_GID"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_PKEY: + return "QUERY_HCA_VPORT_PKEY"; + + case XSC_CMD_OP_QUERY_VPORT_COUNTER: + return "QUERY_VPORT_COUNTER"; + + case XSC_CMD_OP_QUERY_PRIO_STATS: + return "QUERY_PRIO_STATS"; + + case XSC_CMD_OP_QUERY_PHYPORT_STATE: + return "QUERY_PHYPORT_STATE"; + + case XSC_CMD_OP_QUERY_EVENT_TYPE: + return "QUERY_EVENT_TYPE"; + + case XSC_CMD_OP_QUERY_LINK_INFO: + return "QUERY_LINK_INFO"; + + case XSC_CMD_OP_MODIFY_LINK_INFO: + return "MODIFY_LINK_INFO"; + + case XSC_CMD_OP_MODIFY_FEC_PARAM: + return "MODIFY_FEC_PARAM"; + + case XSC_CMD_OP_QUERY_FEC_PARAM: + return "QUERY_FEC_PARAM"; + + case XSC_CMD_OP_LAG_CREATE: + return "LAG_CREATE"; + + case XSC_CMD_OP_LAG_ADD_MEMBER: + return "LAG ADD MEMBER"; + + case XSC_CMD_OP_LAG_REMOVE_MEMBER: + return "LAG REMOVE MEMBER"; + + case XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS: + return "LAG UPDATE MEMBER STATUS"; + + case XSC_CMD_OP_LAG_UPDATE_HASH_TYPE: + return "LAG UPDATE HASH TYPE"; + + case XSC_CMD_OP_LAG_DESTROY: + return "LAG_DESTROY"; + + case XSC_CMD_OP_LAG_SET_QOS: + return "LAG_SET_QOS"; + + case XSC_CMD_OP_ENABLE_MSIX: + return "ENABLE_MSIX"; + + case XSC_CMD_OP_IOCTL_FLOW: + return "CFG_FLOW_TABLE"; + + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return "SET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return "GET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return "SET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return 
"GET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_SET_PCP_PMT: + return "SET_PCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_PCP_PMT: + return "GET_PCP_PMT"; + + case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: + return "SET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: + return "GET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_SET_PFC: + return "SET_PFC"; + + case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH: + return "SET_PFC_DROP_TH"; + + case XSC_CMD_OP_IOCTL_GET_PFC: + return "GET_PFC"; + + case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS: + return "GET_PFC_CFG_STATUS"; + + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + return "SET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + return "GET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_SET_SP: + return "SET_SP"; + + case XSC_CMD_OP_IOCTL_GET_SP: + return "GET_SP"; + + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + return "SET_WEIGHT"; + + case XSC_CMD_OP_IOCTL_GET_WEIGHT: + return "GET_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + return "DPU_SET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + return "DPU_GET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + return "DPU_SET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + return "DPU_GET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN: + return "SET_WATCHDOG_EN"; + + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN: + return "GET_WATCHDOG_EN"; + + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD: + return "SET_WATCHDOG_PERIOD"; + + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD: + return "GET_WATCHDOG_PERIOD"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: + return "ENABLE_RP"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: + return "ENABLE_NP"; + + case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: + return "SET_INIT_ALPHA"; + + case XSC_CMD_OP_IOCTL_SET_G: + return "SET_G"; + + case XSC_CMD_OP_IOCTL_SET_AI: + return "SET_AI"; + + case XSC_CMD_OP_IOCTL_SET_HAI: + return "SET_HAI"; + + case XSC_CMD_OP_IOCTL_SET_TH: + return "SET_TH"; + + case XSC_CMD_OP_IOCTL_SET_BC_TH: + return "SET_BC_TH"; + + case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: + return "SET_CNP_OPCODE"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: + return "SET_CNP_BTH_B"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: + return "SET_CNP_BTH_F"; + + case XSC_CMD_OP_IOCTL_SET_CNP_ECN: + return "SET_CNP_ECN"; + + case XSC_CMD_OP_IOCTL_SET_DATA_ECN: + return "SET_DATA_ECN"; + + case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: + return "SET_CNP_TX_INTERVAL"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: + return "SET_EVT_PERIOD_RSTTIME"; + + case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: + return "SET_CNP_DSCP"; + + case XSC_CMD_OP_IOCTL_SET_CNP_PCP: + return "SET_CNP_PCP"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: + return "SET_EVT_PERIOD_ALPHA"; + + case XSC_CMD_OP_IOCTL_GET_CC_CFG: + return "GET_CC_CFG"; + + case XSC_CMD_OP_IOCTL_GET_CC_STAT: + return "GET_CC_STAT"; + + case XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE: + return "SET_CLAMP_TGT_RATE"; + + case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR: + return "SET_MAX_HAI_FACTOR"; + + case XSC_CMD_OP_IOCTL_SET_HWC: + return "SET_HWCONFIG"; + + case XSC_CMD_OP_IOCTL_GET_HWC: + return "GET_HWCONFIG"; + + case XSC_CMD_OP_SET_MTU: + return "SET_MTU"; + + case XSC_CMD_OP_QUERY_ETH_MAC: + return "QUERY_ETH_MAC"; + + case XSC_CMD_OP_QUERY_HW_STATS: + return "QUERY_HW_STATS"; + + case XSC_CMD_OP_QUERY_PAUSE_CNT: + return "QUERY_PAUSE_CNT"; + + case XSC_CMD_OP_SET_RTT_EN: + return "SET_RTT_EN"; + + case XSC_CMD_OP_GET_RTT_EN: + return "GET_RTT_EN"; + + case XSC_CMD_OP_SET_RTT_QPN: + return "SET_RTT_QPN"; + + case XSC_CMD_OP_GET_RTT_QPN: + return 
"GET_RTT_QPN"; + + case XSC_CMD_OP_SET_RTT_PERIOD: + return "SET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_PERIOD: + return "GET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_RESULT: + return "GET_RTT_RESULT"; + + case XSC_CMD_OP_GET_RTT_STATS: + return "ET_RTT_STATS"; + + case XSC_CMD_OP_SET_LED_STATUS: + return "SET_LED_STATUS"; + + case XSC_CMD_OP_AP_FEAT: + return "AP_FEAT"; + + case XSC_CMD_OP_PCIE_LAT_FEAT: + return "PCIE_LAT_FEAT"; + + case XSC_CMD_OP_USER_EMU_CMD: + return "USER_EMU_CMD"; + + case XSC_CMD_OP_QUERY_PFC_PRIO_STATS: + return "QUERY_PFC_PRIO_STATS"; + + case XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS: + return "QUERY_PFC_STALL_STATS"; + + case XSC_CMD_OP_QUERY_HW_STATS_RDMA: + return "QUERY_HW_STATS_RDMA"; + + case XSC_CMD_OP_QUERY_HW_STATS_ETH: + return "QUERY_HW_STATS_ETH"; + + case XSC_CMD_OP_SET_VPORT_RATE_LIMIT: + return "SET_VPORT_RATE_LIMIT"; + + default: return "unknown command opcode"; + } +} + +static void dump_command(struct xsc_core_device *xdev, struct xsc_cmd_mailbox *next, + struct xsc_cmd_work_ent *ent, int input, int len) +{ + u16 op = be16_to_cpu(((struct xsc_inbox_hdr *)(ent->lay->in))->opcode); + int offset = 0; + + if (!(xsc_debug_mask & (1 << XSC_CMD_DATA))) + return; + + xsc_core_dbg(xdev, "dump command %s(0x%x) %s\n", xsc_command_str(op), op, + input ? "INPUT" : "OUTPUT"); + + if (input) { + dump_buf(ent->lay, sizeof(*ent->lay), offset); + offset += sizeof(*ent->lay); + } else { + dump_buf(ent->rsp_lay, sizeof(*ent->rsp_lay), offset); + offset += sizeof(*ent->rsp_lay); + } + + while (next && offset < len) { + xsc_core_dbg(xdev, "command block:\n"); + dump_buf(next->buf, sizeof(struct xsc_cmd_prot_block), offset); + offset += sizeof(struct xsc_cmd_prot_block); + next = next->next; + } +} + +static void cmd_work_handler(struct work_struct *work) +{ + struct xsc_cmd_work_ent *ent = container_of(work, struct xsc_cmd_work_ent, work); + struct xsc_cmd *cmd = ent->cmd; + struct xsc_core_device *xdev = container_of(cmd, struct xsc_core_device, cmd); + struct xsc_cmd_layout *lay; + struct semaphore *sem; + unsigned long flags; + + sem = &cmd->sem; + down(sem); + ent->idx = alloc_ent(cmd); + if (ent->idx < 0) { + xsc_core_err(xdev, "failed to allocate command entry\n"); + up(sem); + return; + } + + ent->token = alloc_token(cmd); + cmd->ent_arr[ent->idx] = ent; + + spin_lock_irqsave(&cmd->doorbell_lock, flags); + lay = get_inst(cmd, cmd->cmd_pid); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + if (ent->in->next) + lay->in_ptr = cpu_to_be64(ent->in->next->dma); + lay->inlen = cpu_to_be32(ent->in->len); + if (ent->out->next) + lay->out_ptr = cpu_to_be64(ent->out->next->dma); + lay->outlen = cpu_to_be32(ent->out->len); + lay->type = XSC_PCI_CMD_XPORT; + lay->token = ent->token; + lay->idx = ent->idx; + if (!cmd->checksum_disabled) + set_signature(ent); + else + lay->sig = 0xff; + dump_command(xdev, ent->in->next, ent, 1, ent->in->len); + + ktime_get_ts64(&ent->ts1); + + /* ring doorbell after the descriptor is valid */ + wmb(); + + cmd->cmd_pid = (cmd->cmd_pid + 1) % (1 << cmd->log_sz); + writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); + mmiowb(); + spin_unlock_irqrestore(&cmd->doorbell_lock, flags); +} + +static const char *deliv_status_to_str(u8 status) +{ + switch (status) { + case XSC_CMD_DELIVERY_STAT_OK: + return "no errors"; + case XSC_CMD_DELIVERY_STAT_SIGNAT_ERR: + return "signature error"; + case XSC_CMD_DELIVERY_STAT_TOK_ERR: + return "token error"; + case 
XSC_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + return "bad block number"; + case XSC_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + return "output pointer not aligned to block size"; + case XSC_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return "input pointer not aligned to block size"; + case XSC_CMD_DELIVERY_STAT_FW_ERR: + return "firmware internal error"; + case XSC_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + return "command input length error"; + case XSC_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + return "command output length error"; + case XSC_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return "reserved fields not cleared"; + case XSC_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + return "bad command descriptor type"; + default: + return "unknown status code"; + } +} + +static u16 msg_to_opcode(struct xsc_cmd_msg *in) +{ + struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static int wait_func(struct xsc_core_device *xdev, struct xsc_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(XSC_CMD_TIMEOUT_MSEC); + int err; + struct xsc_cmd *cmd = &xdev->cmd; + + if (!wait_for_completion_timeout(&ent->done, timeout)) + err = -ETIMEDOUT; + else + err = ent->ret; + + if (err == -ETIMEDOUT) { + cmd->cmd_status = XSC_CMD_STATUS_TIMEDOUT; + xsc_core_warn(xdev, "wait for %s(0x%x) response timeout!\n", + xsc_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + } else if (err) { + xsc_core_dbg(xdev, "err %d, delivery status %s(%d)\n", err, + deliv_status_to_str(ent->status), ent->status); + } + + return err; +} + +/* Notes: + * 1. Callback functions may not sleep + * 2. page queue commands do not support asynchronous completion + */ +static int xsc_cmd_invoke(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out, u8 *status) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent *ent; + ktime_t t1, t2, delta; + struct xsc_cmd_stats *stats; + int err = 0; + s64 ds; + u16 op; + struct semaphore *sem; + + ent = alloc_cmd(cmd, in, out); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + init_completion(&ent->done); + INIT_WORK(&ent->work, cmd_work_handler); + if (!queue_work(cmd->wq, &ent->work)) { + xsc_core_warn(xdev, "failed to queue work\n"); + err = -ENOMEM; + goto out_free; + } + + err = wait_func(xdev, ent); + if (err == -ETIMEDOUT) + goto out; + t1 = timespec64_to_ktime(ent->ts1); + t2 = timespec64_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + op = be16_to_cpu(((struct xsc_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock(&stats->lock); + } + xsc_core_dbg_mask(xdev, 1 << XSC_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + xsc_command_str(op), ds); + *status = ent->status; + free_cmd(ent); + + return err; + +out: + sem = &cmd->sem; + up(sem); +out_free: + free_cmd(ent); + return err; +} + +static ssize_t dbg_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char lbuf[3]; + int err; + + if (!dbg->in_msg || !dbg->out_msg) + return -ENOMEM; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EPERM; + + lbuf[sizeof(lbuf) - 1] = 0; + + if (strcmp(lbuf, "go")) + return -EINVAL; + + err = xsc_cmd_exec(xdev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); + + return err ? 
err : count; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dbg_write, +}; + +static int xsc_copy_to_cmd_msg(struct xsc_cmd_msg *to, void *from, int size) +{ + struct xsc_cmd_prot_block *block; + struct xsc_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(to->first.data)); + memcpy(to->first.data, from, copy); + size -= copy; + from += copy; + + next = to->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, XSC_CMD_DATA_BLOCK_SIZE); + block = next->buf; + memcpy(block->data, from, copy); + block->owner_status = 0; + from += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static int xsc_copy_from_rsp_msg(void *to, struct xsc_rsp_msg *from, int size) +{ + struct xsc_cmd_prot_block *block; + struct xsc_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(from->first.data)); + memcpy(to, from->first.data, copy); + size -= copy; + to += copy; + + next = from->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, XSC_CMD_DATA_BLOCK_SIZE); + block = next->buf; + if (!block->owner_status) + pr_err("block ownership check failed\n"); + + memcpy(to, block->data, copy); + to += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static struct xsc_cmd_mailbox *alloc_cmd_box(struct xsc_core_device *xdev, + gfp_t flags) +{ + struct xsc_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof(*mailbox), flags); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = dma_pool_alloc(xdev->cmd.pool, flags, + &mailbox->dma); + if (!mailbox->buf) { + xsc_core_dbg(xdev, "failed allocation\n"); + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + memset(mailbox->buf, 0, sizeof(struct xsc_cmd_prot_block)); + mailbox->next = NULL; + + return mailbox; +} + +static void free_cmd_box(struct xsc_core_device *xdev, + struct xsc_cmd_mailbox *mailbox) +{ + dma_pool_free(xdev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +static struct xsc_cmd_msg *xsc_alloc_cmd_msg(struct xsc_core_device *xdev, + gfp_t flags, int size) +{ + struct xsc_cmd_mailbox *tmp, *head = NULL; + struct xsc_cmd_prot_block *block; + struct xsc_cmd_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + XSC_CMD_DATA_BLOCK_SIZE - 1) / XSC_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(xdev, flags); + if (IS_ERR(tmp)) { + xsc_core_warn(xdev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(xdev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void xsc_free_cmd_msg(struct xsc_core_device *xdev, + struct xsc_cmd_msg *msg) +{ + struct xsc_cmd_mailbox *head = msg->next; + struct xsc_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(xdev, head); + head = next; + } + kfree(msg); +} + +static struct xsc_rsp_msg *xsc_alloc_rsp_msg(struct xsc_core_device *xdev, + gfp_t flags, int size) +{ + struct xsc_cmd_mailbox *tmp, *head = NULL; + struct xsc_cmd_prot_block *block; + struct xsc_rsp_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + XSC_CMD_DATA_BLOCK_SIZE - 1) / XSC_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(xdev, flags); + if (IS_ERR(tmp)) { + xsc_core_warn(xdev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(xdev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void xsc_free_rsp_msg(struct xsc_core_device *xdev, + struct xsc_rsp_msg *msg) +{ + struct xsc_cmd_mailbox *head = msg->next; + struct xsc_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(xdev, head); + head = next; + } + kfree(msg); +} + +static ssize_t data_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + void *ptr; + int err; + + if (*pos != 0) + return -EINVAL; + + kfree(dbg->in_msg); + dbg->in_msg = NULL; + dbg->inlen = 0; + + ptr = kzalloc(count, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (copy_from_user(ptr, buf, count)) { + err = -EPERM; + goto out; + } + dbg->in_msg = ptr; + dbg->inlen = count; + + *pos = count; + + return count; + +out: + kfree(ptr); + return err; +} + +static ssize_t data_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + int copy; + + if (*pos) + return 0; + + if (!dbg->out_msg) + return -ENOMEM; + + copy = min_t(int, count, dbg->outlen); + if (copy_to_user(buf, dbg->out_msg, copy)) + return -EPERM; + + *pos += copy; + + return copy; +} + +static const struct file_operations dfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, +}; + +static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char outlen[8]; + int err; + + if (*pos) + return 0; + + err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); + if (err < 0) + return err; + + if (copy_to_user(buf, &outlen, err)) + return -EPERM; + + *pos += err; + + return err; +} + +static ssize_t outlen_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device 
*xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char outlen_str[8]; + int outlen; + void *ptr; + int err; + + if (*pos != 0 || count > 6) + return -EINVAL; + + kfree(dbg->out_msg); + dbg->out_msg = NULL; + dbg->outlen = 0; + + if (copy_from_user(outlen_str, buf, count)) + return -EPERM; + + outlen_str[7] = 0; + + err = kstrtoint(outlen_str, 10, &outlen); + if (err < 0) + return err; + + ptr = kzalloc(outlen, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + dbg->out_msg = ptr; + dbg->outlen = outlen; + + *pos = count; + + return count; +} + +static const struct file_operations olfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = outlen_write, + .read = outlen_read, +}; + +static void set_wqname(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "xsc_cmd_%s", + dev_name(&xdev->pdev->dev)); +} + +static void clean_debug_files(struct xsc_core_device *xdev) +{ + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + + if (!xsc_debugfs_root) + return; + + xsc_cmdif_debugfs_cleanup(xdev); + debugfs_remove_recursive(dbg->dbg_root); +} + +static int create_debugfs_files(struct xsc_core_device *xdev) +{ + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + int err = -ENOMEM; + + if (!xsc_debugfs_root) + return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", xdev->dev_res->dbg_root); + if (!dbg->dbg_root) + return err; + + dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, + xdev, &dfops); + if (!dbg->dbg_in) + goto err_dbg; + + dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, + xdev, &dfops); + if (!dbg->dbg_out) + goto err_dbg; + + dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, + xdev, &olfops); + if (!dbg->dbg_outlen) + goto err_dbg; + + debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); + + dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, xdev, &fops); + if (!dbg->dbg_run) + goto err_dbg; + + xsc_cmdif_debugfs_init(xdev); + + return 0; + +err_dbg: + clean_debug_files(xdev); + return err; +} + +void xsc_cmd_use_events(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + + cmd->mode = CMD_MODE_EVENTS; + + while (cmd->cmd_pid != cmd->cq_cid) + msleep(20); + kthread_stop(cmd->cq_task); + cmd->cq_task = NULL; + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int cmd_cq_polling(void *data); +void xsc_cmd_use_polling(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + cmd->mode = CMD_MODE_POLLING; + cmd->cq_task = kthread_create(cmd_cq_polling, (void *)xdev, "xsc_cmd_cq_polling"); + if (cmd->cq_task) + wake_up_process(cmd->cq_task); + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int status_to_err(u8 status) +{ + return status ? 
-1 : 0; /* TBD more meaningful codes */ +} + +static struct xsc_cmd_msg *alloc_msg(struct xsc_core_device *xdev, int in_size) +{ + struct xsc_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct xsc_cmd *cmd = &xdev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock(&ent->lock); + } + + if (IS_ERR(msg)) + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, in_size); + + return msg; +} + +static void free_msg(struct xsc_core_device *xdev, struct xsc_cmd_msg *msg) +{ + if (msg->cache) { + spin_lock(&msg->cache->lock); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock(&msg->cache->lock); + } else { + xsc_free_cmd_msg(xdev, msg); + } +} + +static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out, u16 dummy_cnt, u16 dummy_start_pid) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent **dummy_ent_arr; + struct xsc_cmd_layout *lay; + struct semaphore *sem; + int err = 0; + u16 i; + u16 free_cnt = 0; + u16 temp_pid = dummy_start_pid; + + sem = &cmd->sem; + + dummy_ent_arr = kcalloc(dummy_cnt, sizeof(struct xsc_cmd_work_ent *), GFP_KERNEL); + if (!dummy_ent_arr) { + err = -ENOMEM; + goto alloc_ent_arr_err; + } + + for (i = 0; i < dummy_cnt; i++) { + dummy_ent_arr[i] = alloc_cmd(cmd, in, out); + if (IS_ERR(dummy_ent_arr[i])) { + xsc_core_err(xdev, "failed to alloc cmd buffer\n"); + err = -ENOMEM; + free_cnt = i; + goto alloc_ent_err; + } + + down(sem); + + dummy_ent_arr[i]->idx = alloc_ent(cmd); + if (dummy_ent_arr[i]->idx < 0) { + xsc_core_err(xdev, "failed to allocate command entry\n"); + err = -1; + free_cnt = i; + goto get_cmd_ent_idx_err; + } + dummy_ent_arr[i]->token = alloc_token(cmd); + cmd->ent_arr[dummy_ent_arr[i]->idx] = dummy_ent_arr[i]; + init_completion(&dummy_ent_arr[i]->done); + + lay = get_inst(cmd, temp_pid); + dummy_ent_arr[i]->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, dummy_ent_arr[i]->in->first.data, sizeof(dummy_ent_arr[i]->in)); + lay->inlen = cpu_to_be32(dummy_ent_arr[i]->in->len); + lay->outlen = cpu_to_be32(dummy_ent_arr[i]->out->len); + lay->type = XSC_PCI_CMD_XPORT; + lay->token = dummy_ent_arr[i]->token; + lay->idx = dummy_ent_arr[i]->idx; + if (!cmd->checksum_disabled) + set_signature(dummy_ent_arr[i]); + else + lay->sig = 0xff; + temp_pid = (temp_pid + 1) % (1 << cmd->log_sz); + } + + /* ring doorbell after the descriptor is valid */ + wmb(); + writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); + if (readl(REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)) != 0) + writel(0xF, REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)); + + mmiowb(); + xsc_core_dbg(xdev, "write 0x%x to command doorbell, idx %u ~ %u\n", cmd->cmd_pid, + dummy_ent_arr[0]->idx, dummy_ent_arr[dummy_cnt - 1]->idx); + + if (wait_for_completion_timeout(&dummy_ent_arr[dummy_cnt - 1]->done, + msecs_to_jiffies(3000)) == 0) { + xsc_core_err(xdev, "dummy_cmd %d ent timeout, cmdq fail\n", dummy_cnt - 1); + err = -ETIMEDOUT; + } else { + xsc_core_dbg(xdev, "%d ent done\n", dummy_cnt); + } + + for (i = 0; i < dummy_cnt; i++) + free_cmd(dummy_ent_arr[i]); + + kfree(dummy_ent_arr); + return err; + +get_cmd_ent_idx_err: + 
free_cmd(dummy_ent_arr[free_cnt]); + up(sem); +alloc_ent_err: + for (i = 0; i < free_cnt; i++) { + free_ent(cmd, dummy_ent_arr[i]->idx); + up(sem); + free_cmd(dummy_ent_arr[i]); + } + kfree(dummy_ent_arr); +alloc_ent_arr_err: + return err; +} + +static int xsc_dummy_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size, u16 dmmy_cnt, u16 dummy_start) +{ + struct xsc_cmd_msg *inb; + struct xsc_rsp_msg *outb; + int err; + + inb = alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + xsc_core_warn(xdev, "err %d\n", err); + goto out_in; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = dummy_work(xdev, inb, outb, dmmy_cnt, dummy_start); + + if (err) + goto out_out; + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +out_out: + xsc_free_rsp_msg(xdev, outb); + +out_in: + free_msg(xdev, inb); + return err; +} + +static int xsc_send_dummy_cmd(struct xsc_core_device *xdev, u16 gap, u16 dummy_start) +{ + struct xsc_cmd_dummy_mbox_out *out; + struct xsc_cmd_dummy_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto no_mem_out; + } + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DUMMY); + + err = xsc_dummy_cmd_exec(xdev, &in, sizeof(in), out, sizeof(*out), gap, dummy_start); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + +out_out: + kfree(out); +no_mem_out: + return err; +} + +static int request_pid_cid_mismatch_restore(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + u16 req_pid, req_cid; + u16 gap; + + int err; + + req_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); + req_cid = readl(REG_ADDR(xdev, cmd->reg.req_cid_addr)); + if (req_pid >= (1 << cmd->log_sz) || req_cid >= (1 << cmd->log_sz)) { + xsc_core_err(xdev, "req_pid %d, req_cid %d, out of normal range!!! max value is %d\n", + req_pid, req_cid, (1 << cmd->log_sz)); + return -1; + } + + if (req_pid == req_cid) + return 0; + + gap = (req_pid > req_cid) ? 
(req_pid - req_cid) : ((1 << cmd->log_sz) + req_pid - req_cid); + xsc_core_info(xdev, "Cmdq req_pid %d, req_cid %d, send %d dummy cmds\n", + req_pid, req_cid, gap); + + err = xsc_send_dummy_cmd(xdev, gap, req_cid); + if (err) { + xsc_core_err(xdev, "Send dummy cmd failed\n"); + goto send_dummy_fail; + } + +send_dummy_fail: + return err; +} + +int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + struct xsc_cmd_msg *inb; + struct xsc_rsp_msg *outb; + int err; + u8 status = 0; + struct xsc_cmd *cmd = &xdev->cmd; + + if (cmd->cmd_status == XSC_CMD_STATUS_TIMEDOUT) + return -ETIMEDOUT; + + inb = alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + xsc_core_warn(xdev, "err %d\n", err); + goto out_in; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = xsc_cmd_invoke(xdev, inb, outb, &status); + if (err) + goto out_out; + + if (status) { + xsc_core_err(xdev, "opcode:%#x, err %d, status %d\n", + msg_to_opcode(inb), err, status); + err = status_to_err(status); + goto out_out; + } + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +out_out: + xsc_free_rsp_msg(xdev, outb); + +out_in: + free_msg(xdev, inb); + return err; +} +EXPORT_SYMBOL(_xsc_cmd_exec); + +static void destroy_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + struct xsc_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } +} + +static int create_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); + INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +ex_err: + destroy_msg_cache(xdev); + return err; +} + +static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, struct xsc_rsp_layout *rsp) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent *ent; + struct xsc_inbox_hdr *hdr; + + if (idx > cmd->max_reg_cmds || (cmd->bitmask & (1 << idx))) { + xsc_core_err(xdev, "idx[%d] exceed max cmds, or has no relative request.\n", idx); + return; + } + ent = cmd->ent_arr[idx]; + ent->rsp_lay = rsp; + ktime_get_ts64(&ent->ts2); + + memcpy(ent->out->first.data, ent->rsp_lay->out, sizeof(ent->rsp_lay->out)); + dump_command(xdev, ent->out->next, ent, 0, ent->out->len); + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); + else + ent->ret = 0; + ent->status = 0; + + hdr = (struct xsc_inbox_hdr *)ent->in->first.data; + xsc_core_dbg(xdev, "delivery status:%s(%d), rsp status=%d, opcode %#x, idx:%d,%d, ret=%d\n", + 
deliv_status_to_str(ent->status), ent->status, + ((struct xsc_outbox_hdr *)ent->rsp_lay->out)->status, + __be16_to_cpu(hdr->opcode), idx, ent->lay->idx, ent->ret); + free_ent(cmd, ent->idx); + complete(&ent->done); + up(&cmd->sem); +} + +static int cmd_cq_polling(void *data) +{ + struct xsc_core_device *xdev = data; + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_layout *rsp; + u32 cq_pid; + + while (!kthread_should_stop()) { + if (need_resched()) + schedule(); + cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cmd->cq_cid == cq_pid) { + mdelay(3); + continue; + } + + //get cqe + rsp = get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + //hw update cq doorbell but buf may not ready + xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + continue; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + } + return 0; +} + +int xsc_cmd_err_handler(struct xsc_core_device *xdev) +{ + union interrupt_stat { + struct { + u32 hw_read_req_err:1; + u32 hw_write_req_err:1; + u32 req_pid_err:1; + u32 rsp_cid_err:1; + }; + u32 raw; + } stat; + int err = 0; + int retry = 0; + + stat.raw = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + while (stat.raw != 0) { + err++; + if (stat.hw_read_req_err) { + retry = 1; + stat.hw_read_req_err = 0; + xsc_core_err(xdev, "hw report read req from host failed!\n"); + } else if (stat.hw_write_req_err) { + retry = 1; + stat.hw_write_req_err = 0; + xsc_core_err(xdev, "hw report write req to fw failed!\n"); + } else if (stat.req_pid_err) { + stat.req_pid_err = 0; + xsc_core_err(xdev, "hw report unexpected req pid!\n"); + } else if (stat.rsp_cid_err) { + stat.rsp_cid_err = 0; + xsc_core_err(xdev, "hw report unexpected rsp cid!\n"); + } else { + stat.raw = 0; + xsc_core_err(xdev, "ignore unknown interrupt!\n"); + } + } + + if (retry) + writel(xdev->cmd.cmd_pid, REG_ADDR(xdev, xdev->cmd.reg.req_pid_addr)); + + if (err) + writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + + return err; +} + +void xsc_cmd_resp_handler(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_layout *rsp; + u32 cq_pid; + const int budget = 32; + int count = 0; + + while (count < budget) { + cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cq_pid == cmd->cq_cid) + return; + + rsp = get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + return; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + + count++; + } +} + +static void xsc_cmd_handle_rsp_before_reload +(struct xsc_cmd *cmd, struct xsc_core_device *xdev) +{ + u32 rsp_pid, rsp_cid; + + rsp_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + rsp_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (rsp_pid == rsp_cid) + return; + + cmd->cq_cid = rsp_pid; + + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); +} + 
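+/* Command interface bring-up:
+ * xsc_cmd_init() selects the PF or VF cmdq register map, allocates one page
+ * each for the request ring (cmd_buf) and the response ring (cq_buf) plus a
+ * DMA pool for mailbox blocks, programs the ring base addresses and depth
+ * into hardware, checks the element size reported by firmware, and starts
+ * the response-polling kthread and the single-threaded work queue. If the
+ * hardware req_pid and req_cid disagree (e.g. after an unclean reload), the
+ * gap is filled with dummy commands via request_pid_cid_mismatch_restore().
+ */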
+int xsc_cmd_init(struct xsc_core_device *xdev) +{ + int size = sizeof(struct xsc_cmd_prot_block); + int align = roundup_pow_of_two(size); + struct xsc_cmd *cmd = &xdev->cmd; + u32 cmd_h, cmd_l; + u32 err_stat; + int err; + int i; + + //sriov need adapt for this process. + //now there is 544 cmdq resource, soc using from id 514 + if (xsc_core_is_pf(xdev)) { + cmd->reg.req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR; + cmd->reg.req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR; + cmd->reg.rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR; + cmd->reg.rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR; + cmd->reg.req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR; + cmd->reg.element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR; + cmd->reg.q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR; + cmd->reg.interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR; + } else { + cmd->reg.req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR; + cmd->reg.req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR; + cmd->reg.rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR; + cmd->reg.rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR; + cmd->reg.req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR; + cmd->reg.element_sz_addr = CMDQM_Q_ELEMENT_SZ_REG_ADDR; + cmd->reg.q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR; + cmd->reg.interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR; + } + + cmd->pool = dma_pool_create("xsc_cmd", &xdev->pdev->dev, size, align, 0); + if (!cmd->pool) + return -ENOMEM; + + cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); + if (!cmd->cmd_buf) { + err = -ENOMEM; + goto err_free_pool; + } + cmd->cq_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); + if (!cmd->cq_buf) { + err = -ENOMEM; + goto err_free_cmd; + } + + cmd->dma = dma_map_single(&xdev->pdev->dev, cmd->cmd_buf, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&xdev->pdev->dev, cmd->dma)) { + err = -ENOMEM; + goto err_free; + } + + cmd->cq_dma = dma_map_single(&xdev->pdev->dev, cmd->cq_buf, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&xdev->pdev->dev, cmd->cq_dma)) { + err = -ENOMEM; + goto err_map_cmd; + } + + cmd->cmd_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); + cmd->cq_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + cmd->ownerbit_learned = 0; + + xsc_cmd_handle_rsp_before_reload(cmd, xdev); + +#define ELEMENT_SIZE_LOG 6 //64B +#define Q_DEPTH_LOG 5 //32 + + cmd->log_sz = Q_DEPTH_LOG; + cmd->log_stride = readl(REG_ADDR(xdev, cmd->reg.element_sz_addr)); + writel(1 << cmd->log_sz, REG_ADDR(xdev, cmd->reg.q_depth_addr)); + if (cmd->log_stride != ELEMENT_SIZE_LOG) { + dev_err(&xdev->pdev->dev, "firmware failed to init cmdq, log_stride=(%d, %d)\n", + cmd->log_stride, ELEMENT_SIZE_LOG); + err = -ENODEV; + goto err_map; + } + + if (1 << cmd->log_sz > XSC_MAX_COMMANDS) { + dev_err(&xdev->pdev->dev, "firmware reports too many outstanding commands %d\n", + 1 << cmd->log_sz); + err = -EINVAL; + goto err_map; + } + + if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) { + dev_err(&xdev->pdev->dev, "command queue size 
overflow\n"); + err = -EINVAL; + goto err_map; + } + + cmd->checksum_disabled = 1; + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; + cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + + spin_lock_init(&cmd->alloc_lock); + spin_lock_init(&cmd->token_lock); + spin_lock_init(&cmd->doorbell_lock); + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) + spin_lock_init(&cmd->stats[i].lock); + + sema_init(&cmd->sem, cmd->max_reg_cmds); + + cmd_h = (u32)((u64)(cmd->dma) >> 32); + cmd_l = (u32)(cmd->dma); + if (cmd_l & 0xfff) { + dev_err(&xdev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_map; + } + + writel(cmd_h, REG_ADDR(xdev, cmd->reg.req_buf_h_addr)); + writel(cmd_l, REG_ADDR(xdev, cmd->reg.req_buf_l_addr)); + + cmd_h = (u32)((u64)(cmd->cq_dma) >> 32); + cmd_l = (u32)(cmd->cq_dma); + if (cmd_l & 0xfff) { + dev_err(&xdev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_map; + } + writel(cmd_h, REG_ADDR(xdev, cmd->reg.rsp_buf_h_addr)); + writel(cmd_l, REG_ADDR(xdev, cmd->reg.rsp_buf_l_addr)); + + /* Make sure firmware sees the complete address before we proceed */ + wmb(); + + xsc_core_dbg(xdev, "descriptor at dma 0x%llx 0x%llx\n", + (unsigned long long)(cmd->dma), (unsigned long long)(cmd->cq_dma)); + + cmd->mode = CMD_MODE_POLLING; + cmd->cmd_status = XSC_CMD_STATUS_NORMAL; + + err = create_msg_cache(xdev); + if (err) { + dev_err(&xdev->pdev->dev, "failed to create command cache\n"); + goto err_map; + } + + set_wqname(xdev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + dev_err(&xdev->pdev->dev, "failed to create command workqueue\n"); + err = -ENOMEM; + goto err_cache; + } + + cmd->cq_task = kthread_create(cmd_cq_polling, (void *)xdev, "xsc_cmd_cq_polling"); + if (!cmd->cq_task) { + dev_err(&xdev->pdev->dev, "failed to create cq task\n"); + err = -ENOMEM; + goto err_wq; + } + wake_up_process(cmd->cq_task); + + err = create_debugfs_files(xdev); + if (err) { + err = -ENOMEM; + goto err_task; + } + + err = request_pid_cid_mismatch_restore(xdev); + if (err) { + dev_err(&xdev->pdev->dev, "request pid,cid wrong, restore failed\n"); + goto err_req_restore; + } + + // clear abnormal state to avoid the impact of previous error + err_stat = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + if (err_stat) { + xsc_core_warn(xdev, "err_stat 0x%x when initializing, clear it\n", err_stat); + writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + } + + return 0; + +err_req_restore: +err_task: + kthread_stop(cmd->cq_task); + +err_wq: + destroy_workqueue(cmd->wq); + +err_cache: + destroy_msg_cache(xdev); + +err_map: + dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + +err_map_cmd: + dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); +err_free: + free_pages((unsigned long)cmd->cq_buf, 0); + +err_free_cmd: + free_pages((unsigned long)cmd->cmd_buf, 0); + +err_free_pool: + dma_pool_destroy(cmd->pool); + + return err; +} +EXPORT_SYMBOL(xsc_cmd_init); + +void xsc_cmd_cleanup(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + + clean_debug_files(xdev); + destroy_workqueue(cmd->wq); + if (cmd->cq_task) + kthread_stop(cmd->cq_task); + destroy_msg_cache(xdev); + dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cq_buf, 0); + dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cmd_buf, 0); + dma_pool_destroy(cmd->pool); +} 
+EXPORT_SYMBOL(xsc_cmd_cleanup); + +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case XSC_CMD_STAT_OK: + return "OK"; + case XSC_CMD_STAT_INT_ERR: + return "internal error"; + case XSC_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case XSC_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case XSC_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case XSC_CMD_STAT_RES_BUSY: + return "resource busy"; + case XSC_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case XSC_CMD_STAT_IX_ERR: + return "bad index"; + case XSC_CMD_STAT_NO_RES_ERR: + return "no resources"; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case XSC_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x)\n", + cmd_status_str(hdr->status), hdr->status); + + switch (hdr->status) { + case XSC_CMD_STAT_OK: return 0; + case XSC_CMD_STAT_INT_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OP_ERR: return -EOPNOTSUPP; + case XSC_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case XSC_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case XSC_CMD_STAT_RES_BUSY: return -EBUSY; + case XSC_CMD_STAT_LIM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_IX_ERR: return -EINVAL; + case XSC_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c new file mode 100644 index 0000000000000000000000000000000000000000..49a00f759b5fdecf9ac82c70c64f92d18081b6b4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "common/driver.h" +#include "common/cq.h" +#include + +void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *cq; + + spin_lock(&table->lock); + + cq = radix_tree_lookup(&table->tree, cqn); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&table->lock); + + if (!cq) { + xsc_core_warn(xdev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_create_cq_mbox_in *in, int inlen) +{ + int err; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + struct xsc_create_cq_mbox_out out; + struct xsc_destroy_cq_mbox_in din; + struct xsc_destroy_cq_mbox_out dout; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + cq->cqn = be32_to_cpu(out.cqn); + cq->cons_index = 0; + cq->arm_sn = 0; + cq->arm_db = dev->regs.complete_db; + cq->ci_db = dev->regs.complete_reg; + cq->dev = dev; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + if (err) + goto err_cmd; + + cq->pid = current->pid; + err = xsc_debug_cq_add(dev, cq); + if (err) + xsc_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n", cq->cqn); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + xsc_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(xsc_core_create_cq); + +int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq) +{ + struct xsc_cq_table *table = &dev->dev_res->cq_table; + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + struct xsc_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + xsc_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); + return -EINVAL; + } + if (tmp != cq) { + xsc_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + xsc_debug_cq_remove(dev, cq); + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + return 0; +} +EXPORT_SYMBOL(xsc_core_destroy_cq); + +int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_query_cq_mbox_out *out) +{ + struct xsc_query_cq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(xsc_core_query_cq); + +void xsc_init_cq_table(struct 
xsc_core_device *dev) +{ + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + xsc_cq_debugfs_init(dev); +} + +void xsc_cleanup_cq_table(struct xsc_core_device *dev) +{ + xsc_cq_debugfs_cleanup(dev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..5ea8d8a29107272059704a9041130f17e647818c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/qp.h" +#include "common/cq.h" + +enum { + QP_PID, + QP_STATE, + QP_XPORT, + QP_MTU, + QP_N_RECV, + QP_RECV_SZ, + QP_N_SEND, + QP_LOG_PG_SZ, + QP_RQPN, +}; + +static char *qp_fields[] = { + [QP_PID] = "pid", + [QP_STATE] = "state", + [QP_XPORT] = "transport", + [QP_MTU] = "mtu", + [QP_N_RECV] = "num_recv", + [QP_RECV_SZ] = "rcv_wqe_sz", + [QP_N_SEND] = "num_send", + [QP_LOG_PG_SZ] = "log2_page_sz", + [QP_RQPN] = "remote_qpn", +}; + +enum { + EQ_NUM_EQES, + EQ_INTR, + EQ_LOG_PG_SZ, +}; + +static char *eq_fields[] = { + [EQ_NUM_EQES] = "num_eqes", + [EQ_INTR] = "intr", + [EQ_LOG_PG_SZ] = "log_page_size", +}; + +enum { + CQ_PID, + CQ_NUM_CQES, + CQ_LOG_PG_SZ, +}; + +static char *cq_fields[] = { + [CQ_PID] = "pid", + [CQ_NUM_CQES] = "num_cqes", + [CQ_LOG_PG_SZ] = "log_page_size", +}; + +struct dentry *xsc_debugfs_root; +EXPORT_SYMBOL(xsc_debugfs_root); + +static ssize_t xsc_debugfs_reg_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + char *buf; + int len; + char xsc_debugfs_reg_buf[256] = ""; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + "xsc debugfs", + xsc_debugfs_reg_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + + return len; +} + +static ssize_t xsc_debugfs_reg_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xsc_core_device *xdev = filp->private_data; + u64 reg; + int cnt, len; + int num; + int offset; + char xsc_debugfs_reg_buf[256] = ""; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + if (count >= sizeof(xsc_debugfs_reg_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(xsc_debugfs_reg_buf, + sizeof(xsc_debugfs_reg_buf) - 1, + ppos, buffer, count); + if (len < 0) + return len; + + xsc_debugfs_reg_buf[len] = '\0'; + + if (strncmp(xsc_debugfs_reg_buf, "write", 5) == 0) { + cnt = sscanf(&xsc_debugfs_reg_buf[5], "%llx %n", + ®, &offset); + if (cnt == 1) { + int tmp; + int value; + int buf[8]; + int *ptr; + + offset += 5; + num = 0; + while (1) { + cnt = sscanf(&xsc_debugfs_reg_buf[offset], "%x %n", &value, &tmp); + if (cnt < 2) + break; + xsc_core_info(xdev, "write: 0x%llx = 0x%x\n", + (reg + sizeof(int) * num), value); + offset += tmp; + buf[num++] = value; + if (num == 8) + break; + } + if (num > 1) { + ptr = &buf[0]; + IA_WRITE(xdev, reg, ptr, num); + } else if (num == 1) { + REG_WR32(xdev, reg, buf[0]); + } + } else { + xsc_core_err(xdev, "write \n"); + } + } else if (strncmp(xsc_debugfs_reg_buf, "read", 4) == 0) { + cnt = 
sscanf(&xsc_debugfs_reg_buf[4], "%llx %d %n", ®, &num, &offset); + if (cnt == 2) { + int *buf; + int i; + int *ptr; + + buf = kcalloc(num, sizeof(int), GFP_KERNEL); + if (!buf) + return -ENOMEM; + ptr = buf; + IA_READ(xdev, reg, ptr, num); + xsc_core_info(xdev, "read: 0x%llx num:%d\n", reg, num); + for (i = 0; i < num; i++) + xsc_core_info(xdev, "read:0x%llx = %#x\n", + (reg + sizeof(int) * i), buf[i]); + } else if (cnt == 1) { + int value = REG_RD32(xdev, reg); + + xsc_core_info(xdev, "read: 0x%llx = %#x\n", reg, value); + } else { + xsc_core_err(xdev, "read \n"); + } + } else { + xsc_core_err(xdev, "Unknown command %s\n", xsc_debugfs_reg_buf); + xsc_core_err(xdev, "Available commands:\n"); + xsc_core_err(xdev, "read \n"); + xsc_core_err(xdev, "write \n"); + } + return count; +} + +static const struct file_operations xsc_debugfs_reg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = xsc_debugfs_reg_read, + .write = xsc_debugfs_reg_write, +}; + +int xsc_debugfs_init(struct xsc_core_device *dev) +{ + const char *name = pci_name(dev->pdev); + struct dentry *pfile; + + if (!xsc_debugfs_root) + return -ENOMEM; + + dev->dev_res->dbg_root = debugfs_create_dir(name, xsc_debugfs_root); + if (dev->dev_res->dbg_root) { + pfile = debugfs_create_file("reg_ops", 0600, + dev->dev_res->dbg_root, dev, + &xsc_debugfs_reg_fops); + if (!pfile) + xsc_core_err(dev, "failed to create debugfs ops for %s\n", name); + } else { + xsc_core_err(dev, "failed to create debugfs dir for %s\n", name); + return -ENOMEM; + } + + return 0; +} + +void xsc_debugfs_fini(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->dbg_root); +} + +void xsc_register_debugfs(void) +{ + xsc_debugfs_root = debugfs_create_dir("xsc_pci", NULL); +} + +void xsc_unregister_debugfs(void) +{ + debugfs_remove(xsc_debugfs_root); +} + +int xsc_qp_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + atomic_set(&dev->num_qps, 0); + + dev->dev_res->qp_debugfs = debugfs_create_dir("QPs", dev->dev_res->dbg_root); + if (!dev->dev_res->qp_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_qp_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->qp_debugfs); +} + +int xsc_eq_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->eq_debugfs = debugfs_create_dir("EQs", dev->dev_res->dbg_root); + if (!dev->dev_res->eq_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_eq_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->eq_debugfs); +} + +static ssize_t average_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_cmd_stats *stats; + u64 field = 0; + int ret; + int err; + char tbuf[22]; + + if (*pos) + return 0; + + stats = filp->private_data; + spin_lock(&stats->lock); + if (stats->n) + field = stats->sum / stats->n; + spin_unlock(&stats->lock); + ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); + if (ret > 0) { + err = copy_to_user(buf, tbuf, ret); + if (err) + return err; + } + + *pos += ret; + return ret; +} + +static ssize_t average_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_cmd_stats *stats; + + stats = filp->private_data; + spin_lock(&stats->lock); + stats->sum = 0; + stats->n = 0; + spin_unlock(&stats->lock); + + *pos += count; + + return count; +} + +static const 
struct file_operations stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = average_read, + .write = average_write, +}; + +int xsc_cmdif_debugfs_init(struct xsc_core_device *xdev) +{ + struct xsc_cmd_stats *stats; + struct xsc_cmd *cmd; + struct dentry **cmdif_debugfs; + const char *namep; + int err; + int i; + + if (!xsc_debugfs_root) + return 0; + + cmd = &xdev->cmd; + cmdif_debugfs = &xdev->dev_res->cmdif_debugfs; + *cmdif_debugfs = debugfs_create_dir("commands", xdev->dev_res->dbg_root); + if (!*cmdif_debugfs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) { + stats = &cmd->stats[i]; + namep = xsc_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root = debugfs_create_dir(namep, *cmdif_debugfs); + if (!stats->root) { + xsc_core_warn(xdev, "failed adding command %d\n", i); + err = -ENOMEM; + goto out; + } + + stats->avg = debugfs_create_file("average", 0400, + stats->root, stats, + &stats_fops); + if (!stats->avg) { + xsc_core_warn(xdev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + + debugfs_create_u64("n", 0400, stats->root, &stats->n); + } + } + + return 0; +out: + debugfs_remove_recursive(xdev->dev_res->cmdif_debugfs); + return err; +} + +void xsc_cmdif_debugfs_cleanup(struct xsc_core_device *xdev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(xdev->dev_res->cmdif_debugfs); +} + +int xsc_cq_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->cq_debugfs = debugfs_create_dir("CQs", dev->dev_res->dbg_root); + if (!dev->dev_res->cq_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_cq_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->cq_debugfs); +} + +int xsc_qptrace_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->qptrace_debugfs = + debugfs_create_dir("QPTrace", dev->dev_res->dbg_root); + if (!dev->dev_res->qptrace_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->qptrace_debugfs); +} + +static u64 qp_read_field(struct xsc_core_device *dev, struct xsc_core_qp *qp, + int index) +{ + struct xsc_query_qp_mbox_out *out; + struct xsc_qp_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + err = xsc_core_qp_query(dev, qp, out, sizeof(*out)); + if (err) { + xsc_core_warn(dev, "failed to query qp\n"); + goto out; + } + + ctx = &out->ctx; + switch (index) { + case QP_PID: + param = qp->pid; + break; + case QP_MTU: + param = ctx->mtu_mode ? 
IB_MTU_1024 : IB_MTU_4096; + break; + case QP_RQPN: + param = cpu_to_be32(ctx->remote_qpn) & 0xffffff; + break; + } + +out: + kfree(out); + return param; +} + +static u64 eq_read_field(struct xsc_core_device *dev, struct xsc_eq *eq, + int index) +{ + struct xsc_query_eq_mbox_out *out; + struct xsc_eq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = xsc_core_eq_query(dev, eq, out, sizeof(*out)); + if (err) { + xsc_core_warn(dev, "failed to query eq\n"); + goto out; + } + + switch (index) { + case EQ_NUM_EQES: + break; + case EQ_INTR: + break; + case EQ_LOG_PG_SZ: + break; + } + +out: + kfree(out); + return param; +} + +static u64 cq_read_field(struct xsc_core_device *dev, struct xsc_core_cq *cq, + int index) +{ + struct xsc_query_cq_mbox_out *out; + struct xsc_cq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = xsc_core_query_cq(dev, cq, out); + if (err) { + xsc_core_warn(dev, "failed to query cq\n"); + goto out; + } + + switch (index) { + case CQ_PID: + break; + case CQ_NUM_CQES: + break; + case CQ_LOG_PG_SZ: + break; + } + +out: + kfree(out); + return param; +} + +static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_field_desc *desc; + struct xsc_rsc_debug *d; + char tbuf[18]; + u64 field; + int ret; + int err; + + if (*pos) + return 0; + + desc = filp->private_data; + d = (void *)(desc - desc->i) - sizeof(*d); + switch (d->type) { + case XSC_DBG_RSC_QP: + field = qp_read_field(d->xdev, d->object, desc->i); + break; + + case XSC_DBG_RSC_EQ: + field = eq_read_field(d->xdev, d->object, desc->i); + break; + + case XSC_DBG_RSC_CQ: + field = cq_read_field(d->xdev, d->object, desc->i); + break; + + default: + xsc_core_warn(d->xdev, "invalid resource type %d\n", d->type); + return -EINVAL; + } + + ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + if (ret > 0) { + err = copy_to_user(buf, tbuf, ret); + if (err) + return err; + } + + *pos += ret; + return ret; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dbg_read, +}; + +static int add_res_tree(struct xsc_core_device *dev, enum dbg_rsc_type type, + struct dentry *root, struct xsc_rsc_debug **dbg, + int rsn, char **field, int nfile, void *data) +{ + struct xsc_rsc_debug *d; + char resn[32]; + int err; + int i; + + d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->xdev = dev; + d->object = data; + d->type = type; + sprintf(resn, "0x%x", rsn); + d->root = debugfs_create_dir(resn, root); + if (!d->root) { + err = -ENOMEM; + goto out_free; + } + + for (i = 0; i < nfile; i++) { + d->fields[i].i = i; + d->fields[i].dent = debugfs_create_file(field[i], 0400, + d->root, &d->fields[i], + &fops); + if (!d->fields[i].dent) { + err = -ENOMEM; + goto out_rem; + } + } + *dbg = d; + + return 0; +out_rem: + debugfs_remove_recursive(d->root); + +out_free: + kfree(d); + return err; +} + +static void rem_res_tree(struct xsc_rsc_debug *d) +{ + debugfs_remove_recursive(d->root); + kfree(d); +} + +int xsc_debug_qp_add(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_QP, dev->dev_res->qp_debugfs, + &qp->dbg, qp->qpn, qp_fields, + ARRAY_SIZE(qp_fields), qp); + if (err) + qp->dbg = NULL; + + return err; +} + +void 
xsc_debug_qp_remove(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + if (!xsc_debugfs_root) + return; + + if (qp->dbg) + rem_res_tree(qp->dbg); +} + +static int set_udp_sport(u32 qpn, u32 sport, struct xsc_core_device *xdev, struct xsc_qp_trace *t) +{ + int err; + struct xsc_ap_feat_mbox_in in; + struct xsc_ap_feat_mbox_out out; + struct timespec64 ts; + struct xsc_qpt_update_msg msg; + + ktime_get_boottime_ts64(&ts); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_AP_FEAT); + in.xsc_ap_feature_opcode = __cpu_to_be16(XSC_AP_FEAT_SET_UDP_SPORT); + in.ap.set_udp_sport.qpn = __cpu_to_be32(qpn); + in.ap.set_udp_sport.udp_sport = __cpu_to_be32(sport); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to set udp_sport, err(%u), status(%u)\n", err, + out.hdr.status); + return -EINVAL; + } + + msg.main_ver = YS_QPTRACE_VER_MAJOR; + msg.sub_ver = YS_QPTRACE_VER_MINOR; + msg.type = YS_QPTRACE_UPDATE_TYPE_SPORT; + msg.data.timestamp = (u64)(u32)ts.tv_sec * MSEC_PER_SEC + + ts.tv_nsec / NSEC_PER_MSEC; + msg.data.qpn = qpn; + msg.data.bus = xdev->pdev->bus->number; + msg.data.dev = PCI_SLOT(xdev->pdev->devfn); + msg.data.fun = PCI_FUNC(xdev->pdev->devfn); + msg.data.update.sport.port_old = t->s_port; + msg.data.update.sport.port_new = __cpu_to_be16(sport); + t->s_port = msg.data.update.sport.port_new; + + qpts_write_one_msg(&msg); + + xsc_core_info(xdev, "Set qpn(%u) udp_sport(%u)\n", qpn, sport); + + return 0; +} + +static ssize_t trace_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) +{ + struct xsc_core_qp *qp = filp->private_data; + struct xsc_qp_trace *trace_info; + int err; + int len; + + if (*pos) + return 0; + + if (!qp || !qp->trace_info) + return -EIO; + + trace_info = qp->trace_info; + + len = sizeof(struct xsc_qp_trace); + err = copy_to_user(buf, trace_info, len); + if (err) + return err; + + *pos += len; + return len; +} + +static ssize_t trace_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) +{ + struct xsc_core_qp *qp = filp->private_data; + struct xsc_qp_trace *trace_info; + struct xsc_core_device *xdev; + int ret = 0, len; + u32 sport; + char tmp_buf[256] = ""; + + ret = -EIO; + if (!qp || !qp->dbg || !qp->dbg->xdev || !qp->trace_info) { + pr_err("%s error null pointer!\n", __func__); + goto trace_write_out; + } + + trace_info = qp->trace_info; + xdev = qp->dbg->xdev; + + ret = 0; + /* don't allow partial writes */ + if (*pos != 0) { + xsc_core_err(xdev, "Don't allow partial writes!\n"); + goto trace_write_out; + } + + ret = -ENOSPC; + if (count >= sizeof(tmp_buf)) { + xsc_core_err(xdev, "Count out of size of buffer!\n"); + goto trace_write_out; + } + + len = simple_write_to_buffer(tmp_buf, sizeof(tmp_buf) - 1, + pos, buf, count); + ret = len; + if (len < 0) { + xsc_core_err(xdev, "Write to buffer error(%d)!\n", len); + goto trace_write_out; + } + + tmp_buf[len] = '\0'; + + // + // sport 10000 + if (strncmp(tmp_buf, "sport", 5) == 0) { + ret = kstrtouint(&tmp_buf[6], 0, &sport); + if (ret != 0) { + xsc_core_err(xdev, "error arguments: \n"); + ret = -EINVAL; + goto trace_write_out; + } + ret = set_udp_sport(trace_info->lqpn, sport, xdev, trace_info); + if (ret) { + ret = -EIO; + goto trace_write_out; + } + } else { + xsc_core_err(xdev, "invalid arguments: %s\n", tmp_buf); + ret = -EOPNOTSUPP; + goto trace_write_out; + } + + return count; + +trace_write_out: + return ret; +} + 
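For reference, a minimal userspace sketch (not part of the patch) of how the per-QP trace debugfs file handled by trace_write() above is driven. The debugfs path and QP number are hypothetical, inferred from xsc_qptrace_debugfs_init() (directory "QPTrace" under the device dbg_root) and xsc_create_qptrace() (one file named after the QPN); only the "sport <value>" command is accepted, and partial writes are rejected.

/*
 * Illustrative only: exercises trace_write() from userspace.
 * Path and QPN are assumptions; adjust to the real dbg_root of the device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical: <debugfs>/<xsc dbg_root>/QPTrace/<qpn> */
	const char *path = "/sys/kernel/debug/xsc/0000:3b:00.0/QPTrace/17";
	const char *cmd = "sport 10000";	/* format parsed by trace_write(): "sport <udp sport>" */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* trace_write() requires the whole command in a single write at offset 0 */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Reading the same file goes through trace_read(), which returns the raw binary struct xsc_qp_trace record rather than formatted text.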
+static const struct file_operations fops_trace = { + .owner = THIS_MODULE, + .open = simple_open, + .read = trace_read, + .write = trace_write, +}; + +int xsc_create_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + char name[16]; + + if (!xsc_debugfs_root) + return 0; + + snprintf(name, sizeof(name), "%d", qp->qpn); + + qp->trace = debugfs_create_file(name, 0644, dev->dev_res->qptrace_debugfs, + (void *)qp, &fops_trace); + if (!qp->trace) + return -1; + + return 0; +} + +void xsc_remove_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove(qp->trace); +} + +int xsc_debug_eq_add(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_EQ, dev->dev_res->eq_debugfs, + &eq->dbg, eq->eqn, eq_fields, + ARRAY_SIZE(eq_fields), eq); + if (err) + eq->dbg = NULL; + + return err; +} + +void xsc_debug_eq_remove(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + if (!xsc_debugfs_root) + return; + + if (eq->dbg) + rem_res_tree(eq->dbg); +} + +int xsc_debug_cq_add(struct xsc_core_device *dev, struct xsc_core_cq *cq) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_CQ, dev->dev_res->cq_debugfs, + &cq->dbg, cq->cqn, cq_fields, + ARRAY_SIZE(cq_fields), cq); + if (err) + cq->dbg = NULL; + + return err; +} + +void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq) +{ + if (!xsc_debugfs_root) + return; + + if (cq->dbg) + rem_res_tree(cq->dbg); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c new file mode 100644 index 0000000000000000000000000000000000000000..7ea5e1c78230948f67e0e289d5b3d522c4455878 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" +#include "devlink.h" +#include "eswitch.h" + +static const struct devlink_ops xsc_devlink_ops = { + .eswitch_mode_set = xsc_devlink_eswitch_mode_set, + .eswitch_mode_get = xsc_devlink_eswitch_mode_get, +}; + +struct devlink *xsc_devlink_alloc(struct device *dev) +{ + return devlink_alloc(&xsc_devlink_ops, sizeof(struct xsc_core_device), dev); +} + +void xsc_devlink_free(struct devlink *devlink) +{ + devlink_free(devlink); +} + +int xsc_devlink_register(struct devlink *devlink, struct device *dev) +{ + int err = 0; + + devlink_register(devlink); + return err; +} + +void xsc_devlink_unregister(struct devlink *devlink) +{ + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h new file mode 100644 index 0000000000000000000000000000000000000000..c08d04bfa989c30554857578543b06049c37aa43 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_DEVLINK_H +#define XSC_DEVLINK_H + +#include + +struct devlink *xsc_devlink_alloc(struct device *dev); +void xsc_devlink_free(struct devlink *devlink); +int xsc_devlink_register(struct devlink *devlink, struct device *dev); +void xsc_devlink_unregister(struct devlink *devlink); + +#endif /* XSC_DEVLINK_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c new file mode 100644 index 0000000000000000000000000000000000000000..1ce0123fcdd2ee66582bdf557a6a97dcc15b8b7a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include "common/driver.h" +#include "common/cq.h" +#include "fw/xsc_fw.h" +#include "wq.h" +#include "common/xsc_core.h" + +enum { + XSC_EQE_SIZE = sizeof(struct xsc_eqe), + XSC_EQE_OWNER_INIT_VAL = 0x1, +}; + +enum { + XSC_NUM_SPARE_EQE = 0x80, + XSC_NUM_ASYNC_EQE = 0x100, +}; + +struct map_eq_in { + u64 mask; + u32 reserved; + u32 unmap_eqn; +}; + +struct cre_des_eq { + u8 reserved[15]; + u8 eqn; +}; + +static int xsc_cmd_destroy_eq(struct xsc_core_device *dev, u32 eqn) +{ + struct xsc_destroy_eq_mbox_in in; + struct xsc_destroy_eq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_EQ); + in.eqn = cpu_to_be32(eqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (!err) + goto ex; + + if (out.hdr.status) + err = xsc_cmd_status_to_err(&out.hdr); + +ex: + return err; +} + +static struct xsc_eqe *get_eqe(struct xsc_eq *eq, u32 entry) +{ + return xsc_buf_offset(&eq->buf, entry * XSC_EQE_SIZE); +} + +static struct xsc_eqe *next_eqe_sw(struct xsc_eq *eq) +{ + struct xsc_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? 
NULL : eqe; +} + +static void eq_update_ci(struct xsc_eq *eq, int arm) +{ + union xsc_eq_doorbell db; + + db.val = 0; + db.arm = !!arm; + db.eq_next_cid = eq->cons_index; + db.eq_id = eq->eqn; + writel(db.val, REG_ADDR(eq->dev, eq->doorbell)); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +void xsc_cq_completion(struct xsc_core_device *dev, u32 cqn) +{ + struct xsc_core_cq *cq; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + rcu_read_lock(); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + rcu_read_unlock(); + + if (!cq) { + xsc_core_err(dev, "Completion event for bogus CQ, cqn=%d\n", cqn); + return; + } + + ++cq->arm_sn; + + if (!cq->comp) + xsc_core_err(dev, "cq->comp is NULL\n"); + else + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +void xsc_eq_cq_event(struct xsc_core_device *dev, u32 cqn, int event_type) +{ + struct xsc_core_cq *cq; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (unlikely(!cq)) { + xsc_core_err(dev, "Async event for bogus CQ, cqn=%d\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + struct xsc_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + u32 cqn, qpn, queue_id; + + while ((eqe = next_eqe_sw(eq))) { + /* Make sure we read EQ entry contents after we've + * checked the ownership bit. + */ + rmb(); + switch (eqe->type) { + case XSC_EVENT_TYPE_COMP: + case XSC_EVENT_TYPE_INTERNAL_ERROR: + /* eqe is changing */ + queue_id = eqe->queue_id; + cqn = queue_id; + xsc_cq_completion(dev, cqn); + break; + + case XSC_EVENT_TYPE_CQ_ERROR: + queue_id = eqe->queue_id; + cqn = queue_id; + xsc_eq_cq_event(dev, cqn, eqe->type); + break; + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + queue_id = eqe->queue_id; + qpn = queue_id; + xsc_qp_event(dev, qpn, eqe->type); + break; + default: + xsc_core_warn(dev, "Unhandle event %d on EQ %d\n", eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with XSC_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. 
+ */ + if (unlikely(set_ci >= XSC_NUM_SPARE_EQE)) { + xsc_core_dbg(dev, "EQ%d eq_num=%d qpn=%d, db_noarm\n", + eq->eqn, set_ci, eqe->queue_id); + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t xsc_msix_handler(int irq, void *eq_ptr) +{ + struct xsc_eq *eq = eq_ptr; + struct xsc_core_device *dev = eq->dev; + + xsc_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct xsc_eq *eq) +{ + struct xsc_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner = XSC_EQE_OWNER_INIT_VAL; + } +} + +int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, + int nent, const char *name) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + u16 msix_vec_offset = dev->msix_vec_base + vecidx; + struct xsc_create_eq_mbox_in *in; + struct xsc_create_eq_mbox_out out; + int err; + int inlen; + int hw_npages; + + eq->nent = roundup_pow_of_two(roundup(nent, XSC_NUM_SPARE_EQE)); + err = xsc_buf_alloc(dev, eq->nent * XSC_EQE_SIZE, PAGE_SIZE, &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + hw_npages = DIV_ROUND_UP(eq->nent * XSC_EQE_SIZE, PAGE_SIZE_4K); + inlen = sizeof(*in) + sizeof(in->pas[0]) * hw_npages; + in = xsc_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_buf; + } + memset(&out, 0, sizeof(out)); + + xsc_fill_page_array(&eq->buf, in->pas, hw_npages); + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_EQ); + in->ctx.log_eq_sz = ilog2(eq->nent); + in->ctx.vecidx = cpu_to_be16(msix_vec_offset); + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(dev->glb_func_id); + in->ctx.is_async_eq = (vecidx == XSC_EQ_VEC_ASYNC ? 1 : 0); + + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + goto err_in; + + if (out.hdr.status) { + err = -ENOSPC; + goto err_in; + } + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + name, pci_name(dev->pdev)); + + eq->eqn = be32_to_cpu(out.eqn); + eq->irqn = pci_irq_vector(dev->pdev, vecidx); + eq->dev = dev; + eq->doorbell = dev->regs.event_db; + eq->index = vecidx; + xsc_core_dbg(dev, "msix%d request vector%d eq%d irq%d\n", + vecidx, msix_vec_offset, eq->eqn, eq->irqn); + + err = request_irq(eq->irqn, xsc_msix_handler, 0, + dev_res->irq_info[vecidx].name, eq); + if (err) + goto err_eq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + xsc_vfree(in); + return 0; + +err_eq: + xsc_cmd_destroy_eq(dev, eq->eqn); + +err_in: + xsc_vfree(in); + +err_buf: + xsc_buf_free(dev, &eq->buf); + return err; +} +EXPORT_SYMBOL_GPL(xsc_create_map_eq); + +int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + int err; + + if (!xsc_fw_is_available(dev)) + return 0; + + free_irq(eq->irqn, eq); + err = xsc_cmd_destroy_eq(dev, eq->eqn); + if (err) + xsc_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + xsc_buf_free(dev, &eq->buf); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_destroy_unmap_eq); + +int xsc_eq_init(struct xsc_core_device *dev) +{ + int err; + + spin_lock_init(&dev->dev_res->eq_table.lock); + + err = xsc_eq_debugfs_init(dev); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eq_init); + +void xsc_eq_cleanup(struct xsc_core_device *dev) +{ + xsc_eq_debugfs_cleanup(dev); +} +EXPORT_SYMBOL_GPL(xsc_eq_cleanup); + +int xsc_start_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int err; + + err = 
xsc_create_map_eq(dev, &table->async_eq, XSC_EQ_VEC_ASYNC, + XSC_NUM_ASYNC_EQE, "xsc_async_eq"); + if (err) + xsc_core_warn(dev, "failed to create async EQ %d\n", err); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_start_eqs); + +void xsc_stop_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + + xsc_destroy_unmap_eq(dev, &table->async_eq); +} + +int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, + struct xsc_query_eq_mbox_out *out, int outlen) +{ + struct xsc_query_eq_mbox_in in; + int err = 0; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EQ); + in.eqn = eq->eqn; + + if (out->hdr.status) + err = xsc_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_eq_query); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c new file mode 100644 index 0000000000000000000000000000000000000000..005c8aa93d72075d9818976c1e1e86189fee0849 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c @@ -0,0 +1,812 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "common/vport.h" +#include "eswitch.h" +#include "common/xsc_lag.h" + +static int xsc_eswitch_check(const struct xsc_core_device *dev) +{ + if (!ESW_ALLOWED(dev->priv.eswitch)) + return -EPERM; + if (!dev->priv.eswitch->num_vfs) + return -EOPNOTSUPP; + + return 0; +} + +struct xsc_vport *__must_check +xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num) +{ + u16 idx; + + if (!esw || !xsc_core_is_vport_manager(esw->dev)) + return ERR_PTR(-EPERM); + + idx = xsc_eswitch_vport_num_to_index(esw, vport_num); + if (idx > esw->total_vports - 1) { + xsc_core_dbg(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n", + vport_num, idx); + return ERR_PTR(-EINVAL); + } + + return &esw->vports[idx]; +} +EXPORT_SYMBOL(xsc_eswitch_get_vport); + +static int eswitch_devlink_pf_support_check(const struct xsc_eswitch *esw) +{ + return 0; +} + +static int esw_mode_from_devlink(u16 mode, u16 *xsc_mode) +{ + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + *xsc_mode = XSC_ESWITCH_LEGACY; + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + *xsc_mode = XSC_ESWITCH_OFFLOADS; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int esw_mode_to_devlink(u16 xsc_mode, u16 *mode) +{ + switch (xsc_mode) { + case XSC_ESWITCH_LEGACY: + *mode = DEVLINK_ESWITCH_MODE_LEGACY; + break; + case XSC_ESWITCH_OFFLOADS: + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + break; + default: + return -EINVAL; + } + + return 0; +} + +int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) +{ + struct xsc_core_device *dev = devlink_priv(devlink); + struct xsc_eswitch *esw = dev->priv.eswitch; + u16 cur_xsc_mode, xsc_mode = 0; + int err = 0; + + err = xsc_eswitch_check(dev); + if (err) + return err; + + if (esw_mode_from_devlink(mode, &xsc_mode)) + return -EINVAL; + + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_pf_support_check(esw); + if (err) + goto done; + + cur_xsc_mode = esw->mode; + + if (cur_xsc_mode == xsc_mode) + goto done; + + if (xsc_host_is_dpu_mode(dev) || + (cur_xsc_mode != XSC_ESWITCH_LEGACY && xsc_mode == XSC_ESWITCH_OFFLOADS) || + (cur_xsc_mode == XSC_ESWITCH_OFFLOADS && xsc_mode == XSC_ESWITCH_LEGACY)) { + xsc_core_err(dev, "%s failed: do not set mode %d to mode 
%d\n", + __func__, cur_xsc_mode, xsc_mode); + mutex_unlock(&esw->mode_lock); + return -EOPNOTSUPP; + } + + xsc_lag_disable(dev); + + esw->mode = xsc_mode; + if (esw->mode == XSC_ESWITCH_OFFLOADS) + xsc_cmd_modify_hca(dev); + + xsc_lag_enable(dev); + +done: + mutex_unlock(&esw->mode_lock); + return err; +} + +int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct xsc_core_device *dev = devlink_priv(devlink); + struct xsc_eswitch *esw = dev->priv.eswitch; + int err = 0; + + err = xsc_eswitch_check(dev); + if (err) + return err; + + mutex_lock(&esw->mode_lock); + if (xsc_host_is_dpu_mode(dev)) + err = -EOPNOTSUPP; + else + err = esw_mode_to_devlink(esw->mode, mode); + mutex_unlock(&esw->mode_lock); + + return err; +} + +static void esw_vport_change_handle_locked(struct xsc_vport *vport) +{ + struct xsc_core_device *dev = vport->dev; + u8 mac[ETH_ALEN]; + + xsc_query_other_nic_vport_mac_address(dev, vport->vport, mac); +} + +static void esw_vport_change_handler(struct work_struct *work) +{ + struct xsc_vport *vport = + container_of(work, struct xsc_vport, vport_change_handler); + struct xsc_eswitch *esw = vport->dev->priv.eswitch; + + mutex_lock(&esw->state_lock); + esw_vport_change_handle_locked(vport); + mutex_unlock(&esw->state_lock); +} + +void xsc_eswitch_enable_vport(struct xsc_eswitch *esw, + struct xsc_vport *vport, + enum xsc_eswitch_vport_event enabled_events) +{ + mutex_lock(&esw->state_lock); + if (vport->enabled) + goto unlock_out; + + bitmap_zero(vport->req_vlan_bitmap, VLAN_N_VID); + bitmap_zero(vport->acl_vlan_8021q_bitmap, VLAN_N_VID); + bitmap_zero(vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID); + + /* Sync with current vport context */ + vport->enabled_events = enabled_events; + vport->enabled = true; + + esw->enabled_vports++; +unlock_out: + mutex_unlock(&esw->state_lock); +} + +void xsc_eswitch_disable_vport(struct xsc_eswitch *esw, + struct xsc_vport *vport) +{ + u16 vport_num = vport->vport; + + mutex_lock(&esw->state_lock); + if (!vport->enabled) + goto done; + + xsc_core_dbg(esw->dev, "Disabling vport(%d)\n", vport_num); + /* Mark this vport as disabled to discard new events */ + vport->enabled = false; + vport->enabled_events = 0; + esw->enabled_vports--; +done: + mutex_unlock(&esw->state_lock); +} + +void xsc_eswitch_enable_pf_vf_vports(struct xsc_eswitch *esw, + enum xsc_eswitch_vport_event enabled_events) +{ + struct xsc_vport *vport; + int i; + + vport = xsc_eswitch_get_vport(esw, XSC_VPORT_PF); + xsc_eswitch_enable_vport(esw, vport, enabled_events); + + xsc_esw_for_each_vf_vport(esw, i, vport, esw->num_vfs) + xsc_eswitch_enable_vport(esw, vport, enabled_events); +} + +#define XSC_LEGACY_SRIOV_VPORT_EVENTS (XSC_VPORT_UC_ADDR_CHANGE | \ + XSC_VPORT_MC_ADDR_CHANGE | \ + XSC_VPORT_PROMISC_CHANGE | \ + XSC_VPORT_VLAN_CHANGE) + +static int esw_legacy_enable(struct xsc_eswitch *esw) +{ + struct xsc_vport *vport; + unsigned long i; + + xsc_esw_for_each_vf_vport(esw, i, vport, esw->num_vfs) { + vport->info.link_state = XSC_VPORT_ADMIN_STATE_AUTO; + } + xsc_eswitch_enable_pf_vf_vports(esw, XSC_LEGACY_SRIOV_VPORT_EVENTS); + return 0; +} + +int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs) +{ + int err; + + lockdep_assert_held(&esw->mode_lock); + + esw->num_vfs = num_vfs; + + if (esw->mode == XSC_ESWITCH_NONE) + err = esw_legacy_enable(esw); + else + err = -EOPNOTSUPP; + + if (err) + goto ret; + + esw->mode = mode; + + xsc_core_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", + mode == 
XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", + num_vfs, esw->enabled_vports); + + return 0; + +ret: + return err; +} + +int xsc_eswitch_enable(struct xsc_eswitch *esw, int mode, int num_vfs) +{ + int ret; + + mutex_lock(&esw->mode_lock); + ret = xsc_eswitch_enable_locked(esw, mode, num_vfs); + mutex_unlock(&esw->mode_lock); + return ret; +} + +void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf) +{ + int old_mode; + + lockdep_assert_held(&esw->mode_lock); + + if (esw->mode == XSC_ESWITCH_NONE) + return; + + xsc_core_info(esw->dev, "Disable: mode(%s)\n", + esw->mode == XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS"); + + old_mode = esw->mode; + esw->mode = XSC_ESWITCH_NONE; + + esw->num_vfs = 0; +} + +void xsc_eswitch_disable(struct xsc_eswitch *esw, bool clear_vf) +{ + if (!ESW_ALLOWED(esw)) + return; + + mutex_lock(&esw->mode_lock); + xsc_eswitch_disable_locked(esw, clear_vf); + mutex_unlock(&esw->mode_lock); +} + +int xsc_eswitch_init(struct xsc_core_device *dev) +{ + struct xsc_eswitch *esw; + struct xsc_vport *vport; + int i, total_vports, err; + + if (!XSC_VPORT_MANAGER(dev)) { + if (xsc_core_is_pf(dev)) + xsc_core_err(dev, "%s XSC_VPORT_MANAGER check fail\n", __func__); + return 0; + } + + total_vports = xsc_eswitch_get_total_vports(dev); + + xsc_core_info(dev, "Total vports %d\n", total_vports); + + esw = kzalloc(sizeof(*esw), GFP_KERNEL); + if (!esw) + return -ENOMEM; + + esw->dev = dev; + esw->manager_vport = xsc_eswitch_manager_vport(dev); + esw->first_host_vport = xsc_eswitch_first_host_vport_num(dev); + esw->work_queue = create_singlethread_workqueue("xsc_esw_wq"); + if (!esw->work_queue) { + err = -ENOMEM; + goto abort; + } + esw->vports = kcalloc(total_vports, sizeof(struct xsc_vport), + GFP_KERNEL); + if (!esw->vports) { + err = -ENOMEM; + goto abort; + } + esw->total_vports = total_vports; + + mutex_init(&esw->state_lock); + mutex_init(&esw->mode_lock); + + xsc_esw_for_all_vports(esw, i, vport) { + vport->vport = xsc_eswitch_index_to_vport_num(esw, i); + vport->info.link_state = XSC_VPORT_ADMIN_STATE_AUTO; + vport->info.vlan_proto = htons(ETH_P_8021Q); + vport->info.roce = true; + + vport->dev = dev; + INIT_WORK(&vport->vport_change_handler, + esw_vport_change_handler); + } + esw->enabled_vports = 0; + esw->mode = XSC_ESWITCH_NONE; + + dev->priv.eswitch = esw; + return 0; + +abort: + if (esw->work_queue) + destroy_workqueue(esw->work_queue); + kfree(esw->vports); + kfree(esw); + return 0; +} + +void xsc_eswitch_cleanup(struct xsc_core_device *dev) +{ + if (!dev->priv.eswitch || !XSC_VPORT_MANAGER(dev)) + return; + + xsc_core_dbg(dev, "cleanup\n"); + + destroy_workqueue(dev->priv.eswitch->work_queue); + kfree(dev->priv.eswitch->vports); + kfree(dev->priv.eswitch); +} + +#ifdef XSC_ESW_GUID_ENABLE +static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) +{ + ((u8 *)node_guid)[7] = mac[0]; + ((u8 *)node_guid)[6] = mac[1]; + ((u8 *)node_guid)[5] = mac[2]; + ((u8 *)node_guid)[4] = 0xff; + ((u8 *)node_guid)[3] = 0xfe; + ((u8 *)node_guid)[2] = mac[3]; + ((u8 *)node_guid)[1] = mac[4]; + ((u8 *)node_guid)[0] = mac[5]; +} +#endif + +int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 mac[ETH_ALEN]) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + +#ifdef XSC_ESW_GUID_ENABLE + u64 node_guid; +#endif + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + if (is_multicast_ether_addr(mac)) + return -EINVAL; + + mutex_lock(&esw->state_lock); + + if (evport->info.spoofchk && !is_valid_ether_addr(mac)) + 
xsc_core_warn(esw->dev, + "Set invalid MAC while spoofchk is on, vport(%d)\n", + vport); + + err = xsc_modify_other_nic_vport_mac_address(esw->dev, vport, mac, false); + if (err) { + xsc_core_err(esw->dev, + "Failed to xsc_modify_nic_vport_mac vport(%d) err=(%d)\n", + vport, err); + goto unlock; + } + + ether_addr_copy(evport->info.mac, mac); + +#ifdef XSC_ESW_GUID_ENABLE + node_guid_gen_from_mac(&node_guid, mac); + err = xsc_modify_other_nic_vport_node_guid(esw->dev, vport, node_guid); + if (err) + xsc_core_err(esw->dev, + "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", + vport, err); + evport->info.node_guid = node_guid; +#endif + +#ifdef XSC_ESW_FDB_ENABLE + if (evport->enabled && esw->mode == XSC_ESWITCH_LEGACY) + err = esw_vport_ingress_config(esw, evport); +#endif + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_mac); + +int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 *mac) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + ether_addr_copy(mac, evport->info.mac); + mutex_unlock(&esw->state_lock); + return 0; +} + +int __xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, u16 vlan, + u8 qos, __be16 proto, u8 set_flags) +{ + struct xsc_modify_nic_vport_context_in *in; + int err, in_sz; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; + + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select.addresses_list = 1; + if ((set_flags & SET_VLAN_STRIP) || (set_flags & SET_VLAN_INSERT)) + in->nic_vport_ctx.vlan_allowed = 1; + else + in->nic_vport_ctx.vlan_allowed = 0; + in->vport_number = cpu_to_be16(vport); + in->other_vport = 1; + in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN_OFFLOAD; + in->nic_vport_ctx.vlan_proto = cpu_to_be16(ntohs(proto)); + in->nic_vport_ctx.qos = qos; + in->nic_vport_ctx.vlan = cpu_to_be16(vlan); + + err = xsc_modify_nic_vport_context(esw->dev, in, in_sz); + + kfree(in); + return err; +} + +int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + u8 set_flags = 0; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + + if (vlan || qos) + set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; + else + set_flags = CLR_VLAN_STRIP | CLR_VLAN_INSERT; + + mutex_lock(&esw->state_lock); + if (esw->mode != XSC_ESWITCH_LEGACY) { + if (!vlan) + goto unlock; /* compatibility with libvirt */ + + err = -EOPNOTSUPP; + goto unlock; + } + + err = __xsc_eswitch_set_vport_vlan(esw, vport, vlan, qos, vlan_proto, set_flags); + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL_GPL(xsc_eswitch_set_vport_vlan); + +static int xsc_vport_link2ifla(u8 esw_link) +{ + switch (esw_link) { + case XSC_VPORT_ADMIN_STATE_DOWN: + return IFLA_VF_LINK_STATE_DISABLE; + case XSC_VPORT_ADMIN_STATE_UP: + return IFLA_VF_LINK_STATE_ENABLE; + } + return IFLA_VF_LINK_STATE_AUTO; +} + +static int xsc_ifla_link2vport(u8 ifla_link) +{ + switch (ifla_link) { + case IFLA_VF_LINK_STATE_DISABLE: + return XSC_VPORT_ADMIN_STATE_DOWN; + case IFLA_VF_LINK_STATE_ENABLE: + return XSC_VPORT_ADMIN_STATE_UP; + } + return XSC_VPORT_ADMIN_STATE_AUTO; +} + +int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, + u16 vport, int link_state) +{ + u8 xsc_link = xsc_ifla_link2vport((u8)link_state); + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int 
err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = xsc_modify_vport_admin_state(esw->dev, XSC_CMD_OP_MODIFY_VPORT_STATE, + vport, 1, xsc_link); + if (err) { + xsc_core_warn(esw->dev, + "Failed to set vport %d link state %d, err = %d", + vport, xsc_link, err); + goto unlock; + } + + evport->info.link_state = xsc_link; + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_state); + +int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, + u16 vport, u8 spoofchk) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + bool pschk; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != XSC_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + + pschk = evport->info.spoofchk; + evport->info.spoofchk = spoofchk; + if (spoofchk && !is_valid_ether_addr(evport->info.mac)) + xsc_core_warn(esw->dev, "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); + + if (pschk != spoofchk) { + err = xsc_modify_nic_vport_spoofchk(esw->dev, vport, spoofchk); + if (err) + evport->info.spoofchk = pschk; + } + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_spoofchk); + +static int xsc_eswitch_update_vport_trunk(struct xsc_eswitch *esw, + struct xsc_vport *evport, + unsigned long *old_trunk) +{ + DECLARE_BITMAP(diff_vlan_bm, VLAN_N_VID); + int err = 0; + + bitmap_xor(diff_vlan_bm, old_trunk, + evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID); + if (!bitmap_weight(diff_vlan_bm, VLAN_N_VID)) + return err; + + if (err) + bitmap_copy(evport->info.vlan_trunk_8021q_bitmap, old_trunk, VLAN_N_VID); + + return err; +} + +int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan) +{ + DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID); + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + if (end_vlan > VLAN_N_VID || start_vlan > end_vlan) + return -EINVAL; + + mutex_lock(&esw->state_lock); + + if (evport->info.vlan || evport->info.qos) { + err = -EPERM; + xsc_core_warn(esw->dev, + "VGT+ is not allowed when operating in VST mode vport(%d)\n", + vport); + goto unlock; + } + + bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap, + VLAN_N_VID); + bitmap_set(evport->info.vlan_trunk_8021q_bitmap, start_vlan, + end_vlan - start_vlan + 1); + err = xsc_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap); + +unlock: + mutex_unlock(&esw->state_lock); + + return err; +} + +int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan) +{ + DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID); + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + if (end_vlan > VLAN_N_VID || start_vlan > end_vlan) + return -EINVAL; + + mutex_lock(&esw->state_lock); + bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap, + VLAN_N_VID); + bitmap_clear(evport->info.vlan_trunk_8021q_bitmap, start_vlan, + end_vlan - start_vlan + 1); + err = xsc_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap); + mutex_unlock(&esw->state_lock); + + return err; +} + +int 
xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, + u16 vport_num, bool setting) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport_num); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != XSC_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + if (setting != evport->info.trusted) { + err = xsc_modify_nic_vport_trust(esw->dev, vport_num, setting); + if (err) + goto unlock; + + evport->info.trusted = setting; + } + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_trust); + +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = xsc_modify_vport_max_rate(evport->dev, vport, max_rate); + if (!err) { + evport->info.max_rate = max_rate; + evport->info.min_rate = min_rate; + } + mutex_unlock(&esw->state_lock); + + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_rate); + +int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_info *ivi) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + memset(ivi, 0, sizeof(*ivi)); + ivi->vf = vport - 1; + + mutex_lock(&esw->state_lock); + ether_addr_copy(ivi->mac, evport->info.mac); + + ivi->linkstate = xsc_vport_link2ifla(evport->info.link_state); + ivi->spoofchk = evport->info.spoofchk; + ivi->trusted = evport->info.trusted; + ivi->min_tx_rate = evport->info.min_rate; + ivi->max_tx_rate = evport->info.max_rate; + ivi->vlan = evport->vlan_id; + ivi->vlan_proto = evport->vlan_proto; + + mutex_unlock(&esw->state_lock); + + return 0; +} +EXPORT_SYMBOL(xsc_eswitch_get_vport_config); + +int xsc_eswitch_vport_update_group(struct xsc_eswitch *esw, int vport_num, + u32 group_id) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_min_rate(struct xsc_eswitch *esw, int group_id, + u32 min_rate) +{ + return 0; +} + +int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, void *in, int inlen) +{ + return 0; +} + +int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, void *out, int outlen) +{ + return 0; +} + +int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_stats *vf_stats) +{ + return 0; +} + +int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats) +{ + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h new file mode 100644 index 0000000000000000000000000000000000000000..711e698cc0cc885ad10e78c7c2e74b54c76cea6e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef ESWITCH_H +#define ESWITCH_H + +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/vport.h" + +struct xsc_vport_drop_stats { + u64 rx_dropped; + u64 tx_dropped; +}; + +int xsc_eswitch_init(struct xsc_core_device *dev); +void xsc_eswitch_cleanup(struct xsc_core_device *dev); +int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs); +int xsc_eswitch_enable(struct xsc_eswitch *esw, int mode, int num_vfs); +void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf); +void xsc_eswitch_disable(struct xsc_eswitch *esw, bool clear_vf); + +int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mod, struct netlink_ext_ack *extack); +int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); + +struct xsc_vport *__must_check +xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num); +int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_info *ivi); +int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 mac[ETH_ALEN]); +int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 *mac); +int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, + u16 vlan, u8 qos, __be16 vlan_proto); +int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, + u16 vport, int link_state); +int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, + u16 vport, u8 spoofchk); +int xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, + u16 vport_num, bool setting); +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate); +int xsc_eswitch_vport_update_group(struct xsc_eswitch *esw, int vport_num, + u32 group_id); +int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate); +int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate); +int xsc_eswitch_set_vgroup_min_rate(struct xsc_eswitch *esw, int group_id, + u32 min_rate); +int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan); +int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan); +int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, + void *in, int inlen); +int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, + void *out, int outlen); +int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, + u16 vport, + struct ifla_vf_stats *vf_stats); +int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats); +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate); + +#define xsc_esw_for_all_vports(esw, i, vport) \ + for ((i) = XSC_VPORT_PF; \ + (vport) = &(esw)->vports[(i)], \ + (i) < (esw)->total_vports; (i)++) + +#define xsc_esw_for_each_vf_vport(esw, i, vport, nvfs) \ + for ((i) = XSC_VPORT_FIRST_VF; \ + (vport) = &(esw)->vports[(i)], \ + (i) <= (nvfs); (i)++) + +static inline int xsc_eswitch_uplink_idx(struct xsc_eswitch *esw) +{ + /* Uplink always locate at the last element of the array.*/ + return esw->total_vports - 1; +} + +static inline int xsc_eswitch_ecpf_idx(struct xsc_eswitch *esw) +{ + return esw->total_vports - 2; +} + +static inline int xsc_eswitch_vport_num_to_index(struct xsc_eswitch *esw, + u16 vport_num) +{ + if (vport_num == XSC_VPORT_ECPF) { + if 
(!xsc_ecpf_vport_exists(esw->dev) && + !xsc_core_is_ecpf_esw_manager(esw->dev)) + xsc_core_warn(esw->dev, "ECPF vport doesn't exist!\n"); + return xsc_eswitch_ecpf_idx(esw); + } + + if (vport_num == XSC_VPORT_UPLINK) + return xsc_eswitch_uplink_idx(esw); + + /* PF and VF vports start from 0 to max_vfs */ + return vport_num; +} + +static inline u16 xsc_eswitch_index_to_vport_num(struct xsc_eswitch *esw, + int index) +{ + if (index == xsc_eswitch_uplink_idx(esw)) + return XSC_VPORT_UPLINK; + return index; +} + +static inline u16 xsc_eswitch_manager_vport(struct xsc_core_device *dev) +{ + return xsc_core_is_ecpf_esw_manager(dev) ? + XSC_VPORT_ECPF : XSC_VPORT_PF; +} + +static inline u16 xsc_eswitch_first_host_vport_num(struct xsc_core_device *dev) +{ + return xsc_core_is_ecpf_esw_manager(dev) ? + XSC_VPORT_PF : XSC_VPORT_FIRST_VF; +} + +static inline u8 xsc_get_eswitch_mode(struct xsc_core_device *dev) +{ + struct xsc_eswitch *esw = dev->priv.eswitch; + + return ESW_ALLOWED(esw) ? esw->mode : XSC_ESWITCH_NONE; +} + +static inline bool xsc_host_is_dpu_mode(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MF_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_PF_DEV_ID); +} + +static inline bool xsc_pf_vf_is_dpu_mode(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MF_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MF_HOST_VF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_VF_DEV_ID); +} + +static inline bool xsc_get_pp_bypass_res(struct xsc_core_device *dev, bool esw_set) +{ + return esw_set || xsc_pf_vf_is_dpu_mode(dev); +} + +static inline bool xsc_get_pct_drop_config(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MC_PF_DEV_ID) || + (dev->pdev->device == XSC_MF_SOC_PF_DEV_ID) || + (dev->pdev->device == XSC_MS_PF_DEV_ID) || + (dev->pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + +#endif /* ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c new file mode 100644 index 0000000000000000000000000000000000000000..91827fd56b0079c6ad30c63b20e1c7f05ca2a8ed --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/driver.h" +#include +#include "eswitch.h" + +static struct xsc_board_info *board_info[MAX_BOARD_NUM]; + +static struct xsc_board_info *xsc_get_board_info(char *board_sn) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + continue; + if (!strncmp(board_info[i]->board_sn, board_sn, XSC_BOARD_SN_LEN)) + return board_info[i]; + } + return NULL; +} + +static struct xsc_board_info *xsc_alloc_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + break; + } + if (i == MAX_BOARD_NUM) + return NULL; + board_info[i] = vmalloc(sizeof(*board_info[i])); + if (!board_info[i]) + return NULL; + memset(board_info[i], 0, sizeof(*board_info[i])); + board_info[i]->board_id = i; + return board_info[i]; +} + +void xsc_free_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) + vfree(board_info[i]); +} + +int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, + struct xsc_caps *caps) +{ + struct xsc_cmd_query_hca_cap_mbox_out *out; + struct xsc_cmd_query_hca_cap_mbox_in in; + int err; + u16 t16; + struct xsc_board_info *board_info = NULL; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HCA_CAP); + in.cpu_num = cpu_to_be16(num_online_cpus()); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + + dev->glb_func_id = be32_to_cpu(out->hca_cap.glb_func_id); + caps->pf0_vf_funcid_base = be16_to_cpu(out->hca_cap.pf0_vf_funcid_base); + caps->pf0_vf_funcid_top = be16_to_cpu(out->hca_cap.pf0_vf_funcid_top); + caps->pf1_vf_funcid_base = be16_to_cpu(out->hca_cap.pf1_vf_funcid_base); + caps->pf1_vf_funcid_top = be16_to_cpu(out->hca_cap.pf1_vf_funcid_top); + caps->pcie0_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_base); + caps->pcie0_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_top); + caps->pcie1_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_base); + caps->pcie1_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_top); + caps->funcid_to_logic_port = be16_to_cpu(out->hca_cap.funcid_to_logic_port); + if (xsc_core_is_pf(dev)) { + xsc_core_dbg(dev, "pf0_vf_range(%4u, %4u), pf1_vf_range(%4u, %4u)\n", + caps->pf0_vf_funcid_base, caps->pf0_vf_funcid_top, + caps->pf1_vf_funcid_base, caps->pf1_vf_funcid_top); + xsc_core_dbg(dev, "pcie0_pf_range=(%4u, %4u), pcie1_pf_range=(%4u, %4u)\n", + caps->pcie0_pf_funcid_base, caps->pcie0_pf_funcid_top, + caps->pcie1_pf_funcid_base, caps->pcie1_pf_funcid_top); + } + caps->pcie_host = out->hca_cap.pcie_host; + caps->nif_port_num = out->hca_cap.nif_port_num; + caps->hw_feature_flag = be32_to_cpu(out->hca_cap.hw_feature_flag); + + caps->raweth_qp_id_base = be16_to_cpu(out->hca_cap.raweth_qp_id_base); + caps->raweth_qp_id_end = be16_to_cpu(out->hca_cap.raweth_qp_id_end); + caps->raweth_rss_qp_id_base = be16_to_cpu(out->hca_cap.raweth_rss_qp_id_base); + caps->raw_tpe_qp_num = be16_to_cpu(out->hca_cap.raw_tpe_qp_num); + caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz; + caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz; + caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); + caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); + caps->flags = be64_to_cpu(out->hca_cap.flags); + caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); + caps->log_max_msg = out->hca_cap.log_max_msg & 
0x1f; + caps->num_ports = out->hca_cap.num_ports & 0xf; + caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f; + caps->log_max_eq = out->hca_cap.log_max_eq & 0xf; + caps->log_max_msix = out->hca_cap.log_max_msix & 0xf; + caps->mac_port = out->hca_cap.mac_port & 0xff; + dev->mac_port = caps->mac_port; + if (caps->num_ports > XSC_MAX_FW_PORTS) { + xsc_core_err(dev, "device has %d ports while the driver supports max %d ports\n", + caps->num_ports, XSC_MAX_FW_PORTS); + err = -EINVAL; + goto out_out; + } + caps->send_ds_num = out->hca_cap.send_seg_num; + caps->send_wqe_shift = out->hca_cap.send_wqe_shift; + caps->recv_ds_num = out->hca_cap.recv_seg_num; + caps->recv_wqe_shift = out->hca_cap.recv_wqe_shift; + + caps->embedded_cpu = 0; + caps->ecpf_vport_exists = 0; + caps->eswitch_manager = 1; + caps->vport_group_manager = 1; + caps->log_max_current_uc_list = 0; + caps->log_max_current_mc_list = 0; + caps->log_max_vlan_list = 8; + caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f; + caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f; + caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f; + caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; + caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; + caps->log_max_mcg = out->hca_cap.log_max_mcg; + caps->log_max_mtt = out->hca_cap.log_max_mtt; + caps->log_max_tso = out->hca_cap.log_max_tso; + caps->hca_core_clock = be32_to_cpu(out->hca_cap.hca_core_clock); + caps->max_rwq_indirection_tables = + be32_to_cpu(out->hca_cap.max_rwq_indirection_tables); + caps->max_rwq_indirection_table_size = + be32_to_cpu(out->hca_cap.max_rwq_indirection_table_size); + caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); + caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); + caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; + caps->rx_pkt_len_max = be32_to_cpu(out->hca_cap.rx_pkt_len_max); + caps->max_vfs = be16_to_cpu(out->hca_cap.max_vfs); + caps->qp_rate_limit_min = be32_to_cpu(out->hca_cap.qp_rate_limit_min); + caps->qp_rate_limit_max = be32_to_cpu(out->hca_cap.qp_rate_limit_max); + +#ifdef MSIX_SUPPORT + caps->msix_enable = 1; +#else + caps->msix_enable = 0; +#endif + + caps->msix_base = be16_to_cpu(out->hca_cap.msix_base); + caps->msix_num = be16_to_cpu(out->hca_cap.msix_num); + + t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size); + if (t16 & 0x8000) { + caps->bf_reg_size = 1 << (t16 & 0x1f); + caps->bf_regs_per_page = XSC_BF_REGS_PER_PAGE; + } else { + caps->bf_reg_size = 0; + caps->bf_regs_per_page = 0; + } + caps->min_page_sz = ~(u32)((1 << PAGE_SHIFT) - 1); + + caps->dcbx = 1; + caps->qos = 1; + caps->ets = 1; + caps->dscp = 1; + caps->max_tc = out->hca_cap.max_tc; + caps->log_max_qp_depth = out->hca_cap.log_max_qp_depth & 0xff; + caps->mac_bit = out->hca_cap.mac_bit; + caps->lag_logic_port_ofst = out->hca_cap.lag_logic_port_ofst; + + dev->chip_ver_h = be32_to_cpu(out->hca_cap.chip_ver_h); + dev->chip_ver_m = be32_to_cpu(out->hca_cap.chip_ver_m); + dev->chip_ver_l = be32_to_cpu(out->hca_cap.chip_ver_l); + dev->hotfix_num = be32_to_cpu(out->hca_cap.hotfix_num); + dev->feature_flag = be32_to_cpu(out->hca_cap.feature_flag); + + board_info = xsc_get_board_info(out->hca_cap.board_sn); + if (!board_info) { + board_info = xsc_alloc_board_info(); + if (!board_info) + return -ENOMEM; + + memcpy(board_info->board_sn, out->hca_cap.board_sn, sizeof(out->hca_cap.board_sn)); + } + dev->board_info = board_info; + + if (xsc_core_is_pf(dev)) { + dev->regs.tx_db = 
be64_to_cpu(out->hca_cap.tx_db); + dev->regs.rx_db = be64_to_cpu(out->hca_cap.rx_db); + dev->regs.complete_db = be64_to_cpu(out->hca_cap.complete_db); + dev->regs.complete_reg = be64_to_cpu(out->hca_cap.complete_reg); + dev->regs.event_db = be64_to_cpu(out->hca_cap.event_db); + } + + dev->fw_version_major = out->hca_cap.fw_ver.fw_version_major; + dev->fw_version_minor = out->hca_cap.fw_ver.fw_version_minor; + dev->fw_version_patch = be16_to_cpu(out->hca_cap.fw_ver.fw_version_patch); + dev->fw_version_tweak = be32_to_cpu(out->hca_cap.fw_ver.fw_version_tweak); + dev->fw_version_extra_flag = out->hca_cap.fw_ver.fw_version_extra_flag; + dev->reg_mr_via_cmdq = out->hca_cap.reg_mr_via_cmdq; +out_out: + kfree(out); + + return err; +} + +int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix) +{ + struct xsc_cmd_enable_hca_mbox_in in; + struct xsc_cmd_enable_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_HCA); + + in.vf_num = cpu_to_be16(vf_num); + in.max_msix_vec = cpu_to_be16(max_msix); + in.cpu_num = cpu_to_be16(num_online_cpus()); + in.pp_bypass = xsc_get_pp_bypass_res(dev, false); + in.esw_mode = XSC_ESWITCH_LEGACY; + + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, + "cpu's msix vec(%u) not enough for all %u vfs, err=%d, status=%d\n", + max_msix, vf_num, err, out.hdr.status); + return -EINVAL; + } + + return err; +} + +int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num) +{ + struct xsc_cmd_disable_hca_mbox_in in; + struct xsc_cmd_disable_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_HCA); + in.vf_num = cpu_to_be16(vf_num); + in.pp_bypass = xsc_get_pp_bypass_res(dev, false); + in.esw_mode = XSC_ESWITCH_NONE; + + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to disable hca, err=%d, status=%d\n", + err, out.hdr.status); + return -EINVAL; + } + + return err; +} + +int xsc_cmd_modify_hca(struct xsc_core_device *dev) +{ + struct xsc_cmd_modify_hca_mbox_in in; + struct xsc_cmd_modify_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_HCA); + in.pp_bypass = xsc_get_pp_bypass_res(dev, true); + in.esw_mode = xsc_get_eswitch_mode(dev); + + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = xsc_cmd_status_to_err(&out.hdr); + + return err; +} + +static int xsc_cmd_query_guid(struct xsc_core_device *dev) +{ + struct xsc_cmd_query_guid_mbox_in in; + struct xsc_cmd_query_guid_mbox_out out; + int err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_GUID); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + dev->board_info->guid = out.guid; + dev->board_info->guid_valid = 1; + return 0; +} + +int xsc_query_guid(struct xsc_core_device *dev) +{ + if (dev->board_info->guid_valid) + return 0; + + return xsc_cmd_query_guid(dev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h new file mode 100644 index 0000000000000000000000000000000000000000..94d8438010309c64a42a1abb2c8fb1a8ca036a90 --- /dev/null +++ 
b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef BITOPS_H +#define BITOPS_H + +#include +#include + +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) +#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#define clear_bit(bit, bitmap) __clear_bit(bit, bitmap) + +static inline void xsc_clear_bit(int bit, long *bitmap) +{ + clear_bit(bit, bitmap); +} + +static inline int xsc_test_bit(int bit, long *bitmap) +{ + return test_bit(bit, bitmap); +} + +static inline int xsc_test_and_set_bit(int bit, long *bitmap) +{ + return test_and_set_bit(bit, bitmap); +} + +static inline void xsc_set_bit(int bit, long *bitmap) +{ + set_bit(bit, bitmap); +} + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c new file mode 100644 index 0000000000000000000000000000000000000000..ca5e889050b3c2f896452cd404522549d5d41a22 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" + +#include "xsc_reg_struct.h" +#include "xsc_fw.h" +#include "xsc_flow.h" + +#include + +static inline void xsc_iae_lock(struct xsc_core_device *dev, int grp) +{ + spin_lock_bh(&get_xsc_res(dev)->iae_lock[grp]); +} + +static inline void xsc_iae_unlock(struct xsc_core_device *dev, int grp) +{ + spin_unlock_bh(&get_xsc_res(dev)->iae_lock[grp]); +} + +static inline int xsc_iae_idx_get(struct xsc_core_device *dev, int grp) +{ + return get_xsc_res(dev)->iae_idx[grp]; +} + +static inline int xsc_iae_grp_get(struct xsc_core_device *dev) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + return atomic_inc_return(&xres->iae_grp) & XSC_RES_IAE_GRP_MASK; +} + +static int xsc_cmd_exec_create_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_create_mkey_mbox_out *resp = out; + u32 mpt_idx = 0; + + if (alloc_mpt_entry(xdev, &mpt_idx)) + return -EINVAL; + + resp->mkey = cpu_to_be32(mpt_idx & 0xffffff); + resp->hdr.status = 0; + + return 0; +} + +int xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + unsigned long flags; + struct xsc_resources *xres = get_xsc_res(xdev); + int ret = 0; + + xsc_acquire_lock(&xres->lock, &flags); + ret = xsc_cmd_exec_create_mkey(xdev, in, out); + xsc_release_lock(&xres->lock, flags); + return ret; +} + +static int xsc_cmd_exec_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_destroy_mkey_mbox_in *req = in; + struct xsc_destroy_mkey_mbox_out *resp = out; + u32 mkey = be32_to_cpu(req->mkey); + u32 mpt_idx = xsc_mkey_to_idx(mkey); + + dealloc_mpt_entry(xdev, &mpt_idx); + + resp->hdr.status = 0; + + return 0; +} + +int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + unsigned long flags; + struct xsc_resources *xres = get_xsc_res(xdev); + int ret = 0; + + xsc_acquire_lock(&xres->lock, &flags); + ret = xsc_cmd_exec_destroy_mkey(xdev, in, out); + 
xsc_release_lock(&xres->lock, flags); + return ret; +} + +static int xsc_cmd_exec_reg_mr(struct xsc_core_device *dev, void *in, void *out) +{ + struct xsc_register_mr_mbox_in *req = in; + struct xsc_register_mr_mbox_out *resp = out; + struct xsc_mpt_entry mpt_ent; + u32 mpt_idx = 0; + u32 mtt_base; + u64 va = be64_to_cpu(req->req.va_base); + u32 mem_size = be32_to_cpu(req->req.len); + u32 pdn = be32_to_cpu(req->req.pdn); + u32 key = be32_to_cpu(req->req.mkey); + int pa_num = be32_to_cpu(req->req.pa_num); + u32 *ptr; + u64 reg_addr; + int i; + int reg_stride; + int iae_idx, iae_grp; + + if (pa_num && alloc_mtt_entry(dev, pa_num, &mtt_base)) + return -EINVAL; + + mpt_idx = xsc_mkey_to_idx(key); + mpt_ent.va_l = va & 0xFFFFFFFF; + mpt_ent.va_h = va >> 32; + mpt_ent.mem_size = mem_size; + mpt_ent.pdn = pdn; + mpt_ent.key = key & 0xFF; + mpt_ent.mtt_base = mtt_base; + mpt_ent.acc = req->req.acc; + mpt_ent.page_mode = req->req.page_mode; + mpt_ent.mem_map_en = req->req.map_en; + mpt_ent.rsv = 0; + + get_xsc_res(dev)->mpt_entry[mpt_idx].va = va; + get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base = mtt_base; + get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = pa_num; + + ptr = (u32 *)&mpt_ent; + reg_stride = REG_WIDTH_TO_STRIDE(MMC_MPT_TBL_MEM_WIDTH); + reg_addr = MMC_MPT_TBL_MEM_ADDR + + mpt_idx * roundup_pow_of_two(reg_stride); + + iae_grp = xsc_iae_grp_get(dev); + iae_idx = xsc_iae_idx_get(dev, iae_grp); + + xsc_iae_lock(dev, iae_grp); + + IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(mpt_ent) / sizeof(u32), iae_idx); + + xsc_core_info(dev, "reg mr, write mpt[%u]: va=%llx, mem_size=%u, pdn=%u\n", + mpt_idx, va, mpt_ent.mem_size, mpt_ent.pdn); + xsc_core_info(dev, "key=%u, mtt_base=%u, acc=%u, page_mode=%u, mem_map_en=%u\n", + mpt_ent.key, mpt_ent.mtt_base, mpt_ent.acc, + mpt_ent.page_mode, mpt_ent.mem_map_en); + + for (i = 0; i < pa_num; i++) { + u64 pa = req->req.pas[i]; + + pa = be64_to_cpu(pa); + pa = pa >> PAGE_SHIFT_4K; + ptr = (u32 *)&pa; + reg_addr = MMC_MTT_TBL_MEM_ADDR + + (mtt_base + i) * REG_WIDTH_TO_STRIDE(MMC_MTT_TBL_MEM_WIDTH); + + IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(pa) / sizeof(u32), iae_idx); + + xsc_core_info(dev, "reg mr, write mtt: pa[%u]=%llx\n", i, pa); + } + + xsc_iae_unlock(dev, iae_grp); + + resp->hdr.status = 0; + return 0; +} + +int xsc_reg_mr(struct xsc_core_device *xdev, void *in, void *out) +{ + return xsc_cmd_exec_reg_mr(xdev, in, out); +} + +static int xsc_cmd_exec_dereg_mr(struct xsc_core_device *dev, void *in, void *out) +{ + struct xsc_unregister_mr_mbox_in *req; + struct xsc_unregister_mr_mbox_out *resp; + u32 mpt_idx; + u32 mtt_base; + int pages_num; + + req = in; + resp = out; + resp->hdr.status = -EINVAL; + + mpt_idx = be32_to_cpu(req->mkey); + xsc_core_info(dev, "mpt idx:%u\n", mpt_idx); + + pages_num = get_xsc_res(dev)->mpt_entry[mpt_idx].page_num; + mtt_base = get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base; + if (pages_num > 0) { + dealloc_mtt_entry(dev, pages_num, mtt_base); + get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = 0; + } else { + xsc_core_dbg(dev, "no mtt entries to be freed, mpt_idx=%d\n", mpt_idx); + } + + resp->hdr.status = 0; + return 0; +} + +int xsc_dereg_mr(struct xsc_core_device *xdev, void *in, void *out) +{ + return xsc_cmd_exec_dereg_mr(xdev, in, out); +} + +static int xsc_cmd_exec_ioctl_flow(struct xsc_core_device *dev, + void *in, void *out) +{ + struct xsc_ioctl_mbox_in *req; + struct xsc_ioctl_mbox_out *resp; + struct xsc_ioctl_data_tl *tl; + char *data; + u16 datalen; + u16 tllen = sizeof(struct xsc_ioctl_data_tl); + int opmod; 
+ int table; + int length; + int ret = -EINVAL; + + req = in; + resp = out; + resp->hdr.status = -EINVAL; + + data = (char *)req->data; + datalen = be16_to_cpu(req->len); + + if (datalen < tllen) + goto out; + + tl = (struct xsc_ioctl_data_tl *)data; + opmod = tl->opmod; + table = tl->table; + length = tl->length; + + switch (opmod) { + case XSC_IOCTL_OP_ADD: + ret = xsc_flow_add(dev, table, length, tl + 1); + break; + default: + ret = -EINVAL; + break; + } + + xsc_core_dbg(dev, "table=%d, opcode=0x%x, ret=%d\n", table, opmod, ret); + +out: + resp->hdr.status = 0; + resp->error = cpu_to_be32(ret); + return ret; +} + +int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, + int out_size, int func_id) +{ + int opcode, ret = 0; + unsigned long flags; + struct xsc_inbox_hdr *hdr; + + hdr = (struct xsc_inbox_hdr *)in; + opcode = be16_to_cpu(hdr->opcode); + xsc_core_dbg(dev, "opcode: %x\n", opcode); + + xsc_acquire_lock(&dev->reg_access_lock, &flags); + switch (opcode) { + case XSC_CMD_OP_IOCTL_FLOW: + ret = xsc_cmd_exec_ioctl_flow(dev, in, out); + break; + default: + ret = -EINVAL; + break; + } + + /* ensure pci sequence */ + xsc_mmiowb(); + + xsc_release_lock(&dev->reg_access_lock, flags); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h new file mode 100644 index 0000000000000000000000000000000000000000..fbc6c7699f7f7cbe9876cb724b8a22e94a5fd517 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef CMDQ_H +#define CMDQ_H + +//hw will use this for some records(e.g. vf_id) +struct cmdq_rsv { + u16 func_id; + u8 rsv[2]; +}; + +//related with hw, won't change +#define CMDQ_ENTRY_SIZE 64 +#define CMD_FIRST_SIZE 8 +#define RSP_FIRST_SIZE 14 + +struct xsc_cmd_layout { + struct cmdq_rsv rsv0; + __be32 inlen; + __be64 in_ptr; + __be32 in[CMD_FIRST_SIZE]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, arm will check this bit to make sure mem written +}; + +struct xsc_rsp_layout { + struct cmdq_rsv rsv0; + __be32 out[RSP_FIRST_SIZE]; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, driver will check this bit to make sure mem written +}; + +struct xsc_cmd_prot_block { + u8 data[512]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 owner_status; //fw should change this val to 1 + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +#endif // XSC_CMD_H diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c new file mode 100644 index 0000000000000000000000000000000000000000..9c63cdae414be98db4d1154c1e5fd7fad9323c71 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "common/xsc_core.h" + +void xsc_lock_init(struct xsc_lock *lock) +{ + spin_lock_init(&lock->lock); +} + +void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *oflags) +{ + unsigned long flags; + + spin_lock_irqsave(&lock->lock, flags); + *oflags = flags; +} + +void xsc_release_lock(struct xsc_lock *lock, unsigned long flags) +{ + spin_unlock_irqrestore(&lock->lock, flags); +} + +void xsc_mmiowb(void) +{ + mmiowb(); +} + +void xsc_wmb(void) +{ + /* mem barrier for xsc operation */ + wmb(); +} + +void xsc_msleep(int timeout) +{ + msleep(timeout); +} + +void xsc_udelay(int timeout) +{ + udelay(timeout); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h new file mode 100644 index 0000000000000000000000000000000000000000..9d858175963324a6fa2a38e045f6a84cf7848202 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef OSDEP_H +#define OSDEP_H + +#include "common/xsc_core.h" + +#define xsc_print printk + +void xsc_msleep(int timeout); + +void xsc_udelay(int timeout); + +void xsc_lock_init(struct xsc_lock *lock); + +void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *flags); + +void xsc_release_lock(struct xsc_lock *lock, unsigned long flags); + +void xsc_mmiowb(void); + +void xsc_wmb(void); + +void *xsc_malloc(unsigned int size); + +void xsc_free(void *addr); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h new file mode 100644 index 0000000000000000000000000000000000000000..44a1b78489024369aad1e090de06435018374f07 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_COUNTERS_H__ +#define __XSC_COUNTERS_H__ + +/* From E-tile Hard User Guide */ +#define NIF_ETH_TX_PFC_LOW 0x83c +#define NIF_ETH_TX_PFC_HIGH 0x83d +#define NIF_ETH_RX_PFC_LOW 0x93c +#define NIF_ETH_RX_PFC_HIGH 0x93d +#define NIF_ETH_TX_CNTR_CONFIG 0x845 +#define NIF_ETH_RX_CNTR_CONFIG 0x945 +#define NIF_ETH_RX_FCSERR_LOW 0x904 +#define NIF_ETH_RX_FCSERR_HIGH 0x905 + +#define XSC_CNT_WIDTH_32_BIT 32 +#define XSC_CNT_WIDTH_64_BIT 64 +#define XSC_CNT_MASK_32 0xffffffff +#define XSC_CNT_MASK_64 0xffffffffffffffff + +struct cnt_value_64 { + u32 va_l; + u32 va_h; +}; + +struct cnt_value_96 { + u32 va_l; + u32 va_m; + u32 va_h; +}; + +enum { + XSC_CNT_TYPE_TX_PAUSE = 0, + XSC_CNT_TYPE_RX_PAUSE, +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c new file mode 100644 index 0000000000000000000000000000000000000000..0623b0f7d4ecc8fd523bde50cb80145179eb7ab4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" + +#include "xsc_flow.h" + +static DECLARE_COMPLETION(dma_read_done); + +static inline int xsc_dma_wr_isbusy(struct xsc_core_device *xdev) +{ + u32 busy = 0; + + do { + busy = REG_RD32(xdev, HIF_TBL_TBL_DL_BUSY_REG_ADDR); + } while (busy != 0x0); + + return busy; +} + +static inline int xsc_dma_rd_isbusy(struct xsc_core_device *xdev) +{ + u32 busy = 0; + + do { + busy = REG_RD32(xdev, CLSF_DMA_DMA_UL_BUSY_REG_ADDR); + } while (busy != 0x0); + + return busy; +} + +static inline int xsc_dma_done(struct xsc_core_device *xdev) +{ + u32 done = 0; + + do { + done = REG_RD32(xdev, CLSF_DMA_DMA_DL_DONE_REG_ADDR); + } while ((done & 0x1) != 0x1); + + return done; +} + +static inline void xsc_dma_wr_success_get(struct xsc_core_device *xdev, u32 *success, u32 size) +{ + u32 *ptr = NULL; + + ptr = success; + IA_READ(xdev, CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR, ptr, (size / sizeof(u32))); +} + +int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, + const struct tdi_dma_write_key_bits *key, + const struct tdi_dma_write_action_bits *action) +{ + u32 i = 0; + u32 busy = 0; + u32 dma_wr_num = 0; + u32 value = 0; + u32 done = 0; + u64 success[2]; + u32 data_len = 0; + u64 dma_wr_addr = 0; + + if (!xdev || !key || !action) + return -1; + + if (!action->entry_num) + return -1; + + dma_wr_num = ((action->entry_num + (XSC_DMA_WR_MAX - 1)) / XSC_DMA_WR_MAX); + + for (i = 0; i < dma_wr_num; i++) { + if ((action->entry_num % XSC_DMA_WR_MAX) && (i == (dma_wr_num - 1))) + data_len = ((action->entry_num % XSC_DMA_WR_MAX) * XSC_DMA_LEN); + else + data_len = (XSC_DMA_WR_MAX * XSC_DMA_LEN); + + busy = xsc_dma_wr_isbusy(xdev); + if (busy) + return -1; + + REG_WR32(xdev, CLSF_DMA_ERR_CODE_CLR_REG_ADDR, 1); + + value = ((data_len << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT) | + (key->host_id << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT) | + key->func_id); + + REG_WR32(xdev, HIF_TBL_TBL_DL_REQ_REG_ADDR, value); + + dma_wr_addr = (action->data_addr + ((i * XSC_DMA_WR_MAX) * XSC_DMA_LEN)); + value = (dma_wr_addr & HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK); + REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_L_REG_ADDR, value); + + value = ((dma_wr_addr >> 32) & HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK); + REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_H_REG_ADDR, value); + + REG_WR32(xdev, HIF_TBL_TBL_DL_START_REG_ADDR, 1); + + done = xsc_dma_done(xdev); + if (done != XSC_DMA_WR_SUCCESS) { + memset(success, 0, sizeof(success)); + xsc_dma_wr_success_get(xdev, (u32 *)&success, sizeof(success)); + xsc_core_err(xdev, "DMA write time %d status 0x%lx%lx fail.\n", i, + (unsigned long)success[1], (unsigned long)success[0]); + return -1; + } + } + + return 0; +} + +void xsc_dma_read_done_complete(void) +{ + complete(&dma_read_done); +} + +int xsc_flow_table_dma_read_add(struct xsc_core_device *xdev, + const struct tdi_dma_read_key_bits *key, + const struct tdi_dma_read_action_bits *action) +{ + u32 busy = 0; + u32 value = 0; + + if (!xdev || !key || !action) + return -1; + + if (!action->burst_num) + return -1; + + busy = xsc_dma_rd_isbusy(xdev); + if (busy) + return -1; + + value = ((key->host_id << HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT) | + key->func_id); + + REG_WR32(xdev, HIF_TBL_TBL_UL_REQ_REG_ADDR, value); + + value = (action->data_addr & HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK); + REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_L_REG_ADDR, value); + + value = ((action->data_addr >> 32) & HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK); + 
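/* program the upper 32 bits of the host buffer address; the writes that follow select the table, burst length and start address before the read is kicked off */ +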
REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_H_REG_ADDR, value); + + REG_WR32(xdev, HIF_TBL_TBL_UL_START_REG_ADDR, 1); + + value = (key->tbl_id & CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK); + REG_WR32(xdev, CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR, value); + + value = ((action->burst_num << CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT) | + key->tbl_start_addr); + REG_WR32(xdev, CLSF_DMA_DMA_RD_ADDR_REG_ADDR, value); + + REG_WR32(xdev, CLSF_DMA_INDRW_RD_START_REG_ADDR, 1); + + /*wait msix interrupt */ + if (!wait_for_completion_timeout(&dma_read_done, msecs_to_jiffies(5000))) { + xsc_core_err(xdev, "wait for dma read done completion timeout.\n"); + return -ETIMEDOUT; + } + + REG_WR32(xdev, HIF_TBL_MSG_RDY_REG_ADDR, 1); + + return 0; +} + +int xsc_flow_add(struct xsc_core_device *xdev, + int table, int length, void *data) +{ + int ret = -EINVAL; + struct xsc_flow_dma_write_add *dma_wr; + struct xsc_flow_dma_read_add *dma_rd; + + switch (table) { + case XSC_FLOW_DMA_WR: + if (length == sizeof(struct xsc_flow_dma_write_add)) { + dma_wr = (struct xsc_flow_dma_write_add *)data; + ret = xsc_flow_table_dma_write_add(xdev, &dma_wr->key, &dma_wr->action); + } + break; + case XSC_FLOW_DMA_RD: + if (length == sizeof(struct xsc_flow_dma_read_add)) { + dma_rd = (struct xsc_flow_dma_read_add *)data; + ret = xsc_flow_table_dma_read_add(xdev, &dma_rd->key, &dma_rd->action); + } + break; + default: + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..ec7c7a2c39597be0378975e823cf10d53c84e21f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_FLOW_H +#define XSC_FLOW_H + +#include "osdep.h" + +#define XSC_DMA_LEN 64 +#define XSC_DMA_WR_MAX 128 +#define XSC_DMA_WR_SUCCESS 0x3 + +/* key */ +struct tdi_dma_write_key_bits { + uint8_t host_id:1; + uint16_t func_id:11; +} __packed; + +struct tdi_dma_read_key_bits { + uint16_t tbl_start_addr:16; + uint8_t tbl_id:7; + uint8_t host_id:1; + uint16_t func_id:11; +} __packed; + +/* action */ +struct tdi_dma_write_action_bits { + uint32_t entry_num:32; + uint64_t data_addr:64; +} __packed; + +struct tdi_dma_read_action_bits { + uint16_t burst_num:16; + uint64_t data_addr:64; +} __packed; + +/* ioctl data - add */ +struct xsc_flow_dma_write_add { + struct tdi_dma_write_key_bits key; + struct tdi_dma_write_action_bits action; +}; + +struct xsc_flow_dma_read_add { + struct tdi_dma_read_key_bits key; + struct tdi_dma_read_action_bits action; +}; + +struct xsc_logic_in_port_cfg_reg { + u32 phy_port_offset:11; + u32 resv0:5; + u32 func_id_offset:11; + u32 resv1:5; + u32 aps_port_offset:11; + u32 resv2:1; + u32 aps_port_rec_flg:1; + u32 resv3:19; +}; + +int xsc_flow_add(struct xsc_core_device *xdev, + int table, int length, void *data); + +void xsc_dma_read_done_complete(void); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h new file mode 100644 index 0000000000000000000000000000000000000000..a949bb0f4a2c2f1cb08b03e4577c385b06568529 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. 
+ * All rights reserved. + */ + +#ifndef XSC_FW_H +#define XSC_FW_H + +#include "osdep.h" + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" + +struct xsc_free_list { + struct list_head list; + int start; + int end; +}; + +struct xsc_free_list_wl { + struct xsc_free_list head; + struct xsc_lock lock; +}; + +struct xsc_mpt_info { + u64 va; + u32 mtt_base; + u32 page_num; +}; + +#define XSC_RES_IAE_GRP_MASK (XSC_RES_NUM_IAE_GRP - 1) +struct xsc_resources { + int refcnt; + atomic_t iae_grp; + int iae_idx[XSC_RES_NUM_IAE_GRP]; + spinlock_t iae_lock[XSC_RES_NUM_IAE_GRP]; /* iae group lock */ +#define XSC_MAX_MPT_NUM MMC_MPT_TBL_MEM_DEPTH + struct xsc_mpt_info mpt_entry[XSC_MAX_MPT_NUM]; + int max_mpt_num; + u64 mpt_tbl[XSC_MAX_MPT_NUM >> 6]; +#define XSC_MAX_MTT_NUM MMC_MTT_TBL_MEM_DEPTH + int max_mtt_num; + struct xsc_free_list_wl mtt_list; + struct xsc_lock lock; +}; + +struct xsc_resources *get_xsc_res(struct xsc_core_device *dev); + +int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max); + +int xsc_dealloc_res(u32 *res, u64 *res_tbl); + +int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, + u32 base_align); + +int release_to_free_list(struct xsc_free_list_wl *list, u32 release, + u32 num_released); + +int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); + +int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); + +int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base); + +int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..758b5c77a263219e627c5c5cc0ed2ef7b2c5ef6b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" + +void *xsc_malloc(unsigned int size) +{ + return kmalloc(size, GFP_ATOMIC); +} + +void xsc_free(void *addr) +{ + kfree(addr); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h new file mode 100644 index 0000000000000000000000000000000000000000..8eab3e6803a3272e5be79218e920a10018a00e61 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_REG_DEFINE_H +#define XSC_REG_DEFINE_H + +struct xsc_mpt_entry { + u32 va_l; + u32 va_h; + u32 mem_size; + u32 pdn:24; + u32 key:8; + u32 mtt_base:18; + u32 acc:4; + u32 page_mode:2; + u32 mem_map_en:1; + u32 rsv:7; +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c new file mode 100644 index 0000000000000000000000000000000000000000..8bd6916e21035a009f0fd39a47bf49816588e090 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "xsc_fw.h" + +struct xsc_resources *g_xres[MAX_BOARD_NUM]; + +static int xsc_alloc_free_list_res(struct xsc_free_list_wl *list, int max_num) +{ + struct xsc_free_list *free_node; + + xsc_lock_init(&list->lock); + INIT_LIST_HEAD(&list->head.list); + + free_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!free_node) + return -ENOMEM; + + free_node->start = 0; + free_node->end = free_node->start + max_num - 1; + list_add(&free_node->list, &list->head.list); + + return 0; +} + +static void xsc_destroy_free_list_res(struct xsc_free_list_wl *list) +{ + struct xsc_free_list *pos; + struct xsc_free_list *next; + + list_for_each_entry_safe(pos, next, &list->head.list, list) { + list_del(&pos->list); + xsc_free(pos); + } +} + +static int xsc_res_iae_init(struct xsc_core_device *dev) +{ + int i = 0; + int ret = 0; + struct xsc_resources *res = get_xsc_res(dev); + struct xsc_alloc_ia_lock_mbox_in in; + struct xsc_alloc_ia_lock_mbox_out out; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_IA_LOCK); + in.lock_num = XSC_RES_NUM_IAE_GRP; + + ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to alloc ia lock from fw, ret = %d\n", ret); + return -EINVAL; + } + + for (i = 0; i < XSC_RES_NUM_IAE_GRP; i++) { + res->iae_idx[i] = out.lock_idx[i]; + spin_lock_init(&res->iae_lock[i]); + } + + atomic_set(&res->iae_grp, 0); + + xsc_core_info(dev, "allocated %d iae groups", i); + + return 0; +} + +static void xsc_res_iae_release(struct xsc_core_device *dev) +{ + int ret = 0; + int i = 0; + struct xsc_resources *res = get_xsc_res(dev); + struct xsc_release_ia_lock_mbox_in in; + struct xsc_release_ia_lock_mbox_out out; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_RELEASE_IA_LOCK); + for (i = 0; i < XSC_RES_NUM_IAE_GRP; i++) + in.lock_idx[i] = res->iae_idx[i]; + + ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (ret) + xsc_core_err(dev, "failed to release ia lock, ret = %d\n", ret); +} + +int xsc_create_res(struct xsc_core_device *dev) +{ + int ret = 0; + u32 board_id = dev->board_info->board_id; + struct xsc_resources *xres = get_xsc_res(dev); + + if (xres) { + xres->refcnt++; + if (xres->refcnt > 1) + return 0; + } else { + g_xres[board_id] = vmalloc(sizeof(*g_xres[board_id])); + if (!g_xres[board_id]) + return -ENOMEM; + xres = g_xres[board_id]; + xres->refcnt = 1; + } + + xsc_lock_init(&xres->lock); + xres->max_mpt_num = XSC_MAX_MPT_NUM; + memset(xres->mpt_tbl, 0xFF, XSC_MAX_MPT_NUM >> 3); + /* reserved for local dma lkey */ + clear_bit(0, (unsigned long *)xres->mpt_tbl); + + ret = xsc_res_iae_init(dev); + if (ret) { + vfree(g_xres[board_id]); + g_xres[board_id] = NULL; + return -EINVAL; + } + + xres->max_mtt_num = XSC_MAX_MTT_NUM; + ret = xsc_alloc_free_list_res(&xres->mtt_list, xres->max_mtt_num); + if (ret) + goto err_mtt; + + return ret; + +err_mtt: + xsc_res_iae_release(dev); + vfree(g_xres[board_id]); + g_xres[board_id] = NULL; + return ret; +} + +void xsc_destroy_res(struct xsc_core_device *dev) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xres) { + xres->refcnt--; + if (xres->refcnt) + return; + + xsc_destroy_free_list_res(&xres->mtt_list); + xsc_res_iae_release(dev); + vfree(g_xres[dev->board_info->board_id]); + g_xres[dev->board_info->board_id] = NULL; + } +} + +struct xsc_resources *get_xsc_res(struct xsc_core_device *dev) +{ + return g_xres[dev->board_info->board_id]; +} + +int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max) +{ + u32 bit_num; + + bit_num = find_first_bit((unsigned 
long *)res_tbl, max); + if (bit_num == max) + return -ENOMEM; + clear_bit(bit_num, (unsigned long *)res_tbl); + *res = bit_num; + return 0; +} + +int xsc_dealloc_res(u32 *res, u64 *res_tbl) +{ + if (test_and_set_bit(*res, (unsigned long *)res_tbl)) + return -EINVAL; + + *res = 0; + return 0; +} + +int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, + u32 base_align) +{ + struct xsc_free_list *free_node; + struct xsc_free_list *next; + struct xsc_free_list *new_node; + unsigned long flags; + + *alloc = -1; + xsc_acquire_lock(&list->lock, &flags); + list_for_each_entry_safe(free_node, next, &list->head.list, list) { + int start = round_up(free_node->start, base_align); + int avail_num = free_node->end - start + 1; + + if (required < avail_num) { + if (start > free_node->start) { + new_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!new_node) { + xsc_release_lock(&list->lock, flags); + return -ENOMEM; + } + new_node->start = free_node->start; + new_node->end = start - 1; + __list_add(&new_node->list, free_node->list.prev, + &free_node->list); + } + *alloc = start; + free_node->start = start + required; + break; + } else if (required == avail_num) { + *alloc = start; + if (start > free_node->start) { + free_node->end = start - 1; + } else { + list_del(&free_node->list); + xsc_free(free_node); + } + break; + } + } + xsc_release_lock(&list->lock, flags); + + if (*alloc == -1) + return -EINVAL; + + return 0; +} + +int release_to_free_list(struct xsc_free_list_wl *list, uint32_t release, + uint32_t num_released) +{ + struct xsc_free_list *free_node = NULL; + struct xsc_free_list *next, *prev; + struct xsc_free_list *new_node; + unsigned long flags; + bool new_flag = false; + bool end_merge = false; + int ret = 0; + + xsc_acquire_lock(&list->lock, &flags); + list_for_each_entry_safe(free_node, next, &list->head.list, list) { + if (release + num_released < free_node->start) { + new_flag = true; + } else if (release + num_released == free_node->start) { + /* backward merge */ + end_merge = true; + free_node->start = release; + } + + if (new_flag || end_merge) { + /* forward merge, and backward merge if possible */ + if (free_node->list.prev == &list->head.list) + goto create_node; + + prev = list_entry(free_node->list.prev, struct xsc_free_list, list); + if (release == prev->end + 1) { + if (end_merge) { + prev->end = free_node->end; + list_del(&free_node->list); + xsc_free(free_node); + free_node = NULL; + } else { + prev->end = release + num_released - 1; + new_flag = false; + } + } + + break; + } + } + + if (list_empty(&list->head.list)) { + new_flag = true; + free_node = &list->head; + } + +create_node: + if (new_flag && free_node) { + new_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!new_node) { + ret = -ENOMEM; + goto ret; + } + new_node->start = release; + new_node->end = release + num_released - 1; + __list_add(&new_node->list, free_node->list.prev, + &free_node->list); + } +ret: + xsc_release_lock(&list->lock, flags); + return ret; +} + +int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xsc_alloc_res(mpt_idx, xres->mpt_tbl, xres->max_mpt_num)) + return -EINVAL; + + return 0; +} + +int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xsc_dealloc_res(mpt_idx, xres->mpt_tbl)) + return -EINVAL; + + return 0; +} + +int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base) +{ + struct 
xsc_resources *xres = get_xsc_res(dev); + int ret = alloc_from_free_list(&xres->mtt_list, pages_num, mtt_base, 1); + + xsc_core_dbg(dev, "alloc mtt for %d pages start from %d\n", + pages_num, *mtt_base); + + return ret; +} + +int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base) +{ + struct xsc_resources *xres = get_xsc_res(dev); + int ret = release_to_free_list(&xres->mtt_list, mtt_base, pages_num); + + xsc_core_dbg(dev, "mtt release %d pages start from %d\n", + pages_num, mtt_base); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c new file mode 100644 index 0000000000000000000000000000000000000000..da4761565f1aab6ca6ae12f11eb81cbcd2ade204 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" + +LIST_HEAD(intf_list); +LIST_HEAD(xsc_dev_list); +DEFINE_MUTEX(xsc_intf_mutex); // protect intf_list and xsc_dev_list + +static void xsc_add_device(struct xsc_interface *intf, struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev; + + dev = container_of(priv, struct xsc_core_device, priv); + dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); + if (!dev_ctx) + return; + + dev_ctx->intf = intf; + + dev_ctx->context = intf->add(dev); + if (dev_ctx->context) { + set_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + if (intf->attach) + set_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else { + kfree(dev_ctx); + } +} + +static struct xsc_device_context *xsc_get_device(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + + /* caller of this function has mutex protection */ + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) + return dev_ctx; + + return NULL; +} + +static void xsc_remove_device(struct xsc_interface *intf, struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + if (test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + intf->remove(dev, dev_ctx->context); + + kfree(dev_ctx); +} + +int xsc_register_interface(struct xsc_interface *intf) +{ + struct xsc_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&xsc_intf_mutex); + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &xsc_dev_list, dev_list) { + xsc_add_device(intf, priv); + } + mutex_unlock(&xsc_intf_mutex); + + return 0; +} +EXPORT_SYMBOL(xsc_register_interface); + +void xsc_unregister_interface(struct xsc_interface *intf) +{ + struct xsc_priv *priv; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(priv, &xsc_dev_list, dev_list) + xsc_remove_device(intf, priv); + list_del(&intf->list); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_unregister_interface); + +static void xsc_attach_interface(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = 
xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + if (intf->attach) { + if (test_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state)) + return; + if (intf->attach(dev, dev_ctx->context)) + return; + set_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + } else { + if (test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + return; + dev_ctx->context = intf->add(dev); + if (!dev_ctx->context) + return; + set_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + } +} + +static void xsc_detach_interface(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + if (intf->detach) { + if (!test_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state)) + return; + intf->detach(dev, dev_ctx->context); + clear_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + } else { + if (!test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + return; + intf->remove(dev, dev_ctx->context); + clear_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + } +} + +void xsc_attach_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) { + xsc_attach_interface(intf, priv); + } + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_attach_device); + +void xsc_attach_device_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) + xsc_attach_interface(intf, priv); + mutex_unlock(&xsc_intf_mutex); +} + +void xsc_detach_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) + xsc_detach_interface(intf, priv); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_detach_device); + +bool xsc_device_registered(struct xsc_core_device *dev) +{ + struct xsc_priv *priv; + bool found = false; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(priv, &xsc_dev_list, dev_list) + if (priv == &dev->priv) + found = true; + mutex_unlock(&xsc_intf_mutex); + + return found; +} + +int xsc_register_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_add_tail(&priv->dev_list, &xsc_dev_list); + list_for_each_entry(intf, &intf_list, list) + xsc_add_device(intf, priv); + mutex_unlock(&xsc_intf_mutex); + + return 0; +} +EXPORT_SYMBOL(xsc_register_device); + +void xsc_unregister_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry_reverse(intf, &intf_list, list) + xsc_remove_device(intf, priv); + list_del(&priv->dev_list); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_unregister_device); + +void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + xsc_add_device(intf, &dev->priv); + break; + } +} +EXPORT_SYMBOL(xsc_add_dev_by_protocol); + +void xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { 
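+ /* only the first interface registered for this protocol is removed */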
+ xsc_remove_device(intf, &dev->priv); + break; + } +} +EXPORT_SYMBOL(xsc_remove_dev_by_protocol); + +void xsc_dev_list_lock(void) +{ + mutex_lock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_lock); + +void xsc_dev_list_unlock(void) +{ + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_unlock); + +int xsc_dev_list_trylock(void) +{ + return mutex_trylock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_trylock); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c new file mode 100644 index 0000000000000000000000000000000000000000..0c9ba75b2d3703ddab13b7e4abb91851524fa261 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c @@ -0,0 +1,937 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" +#include "devlink.h" +#include "eswitch.h" +#include "fw/xsc_counters.h" +#include "xsc_pci_ctrl.h" + +unsigned int xsc_debug_mask; +module_param_named(debug_mask, xsc_debug_mask, uint, 0644); +MODULE_PARM_DESC(debug_mask, + "debug mask: 1=dump cmd data, 2=dump cmd exec time, 3=both. Default=0"); + +unsigned int xsc_log_level = XSC_LOG_LEVEL_WARN; +module_param_named(log_level, xsc_log_level, uint, 0644); +MODULE_PARM_DESC(log_level, + "lowest log level to print: 0=debug, 1=info, 2=warning, 3=error. Default=1"); +EXPORT_SYMBOL(xsc_log_level); + +static bool probe_vf = 1; +module_param_named(probe_vf, probe_vf, bool, 0644); +MODULE_PARM_DESC(probe_vf, "probe VFs or not, 0 = not probe, 1 = probe. Default = 1"); + +static bool xsc_hw_reset; + +#define DRIVER_NAME "xsc_pci" +#define DRIVER_VERSION "0.1.0" +#define ETH_DRIVER_NAME "xsc_eth" + +static const struct pci_device_id xsc_pci_id_table[] = { + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_SOC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_SOC_PF_DEV_ID) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, xsc_pci_id_table); + +static const struct xsc_device_product_info xsc_product_list[] = { + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_50, "metaConnect-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_100, "metaConnect-100")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_200, "metaConnect-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_400S, "metaConnect-400S")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MF_50, "metaFusion-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MF_200, "metaFusion-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + 
XSC_SUB_DEV_ID_MS_50, "metaScale-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_100Q, "metaScale-100Q")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_200, "metaScale-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_200S, "metaScale-200S")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_400M, "metaScale-400M")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_200_OCP, "metaScale-200-OCP")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MV_100, "metaVisor-100")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MV_200, "metaVisor-200")}, + {0} +}; + +#define IS_VIRT_FUNCTION(id) ((id)->driver_data == XSC_PCI_DEV_IS_VF) + +static bool need_write_reg_directly(void *in) +{ + struct xsc_inbox_hdr *hdr; + struct xsc_ioctl_mbox_in *req; + struct xsc_ioctl_data_tl *tl; + char *data; + + hdr = (struct xsc_inbox_hdr *)in; + if (unlikely(be16_to_cpu(hdr->opcode) == XSC_CMD_OP_IOCTL_FLOW)) { + req = (struct xsc_ioctl_mbox_in *)in; + data = (char *)req->data; + tl = (struct xsc_ioctl_data_tl *)data; + if (tl->opmod == XSC_IOCTL_OP_ADD) { + if (unlikely(tl->table == XSC_FLOW_DMA_WR || tl->table == XSC_FLOW_DMA_RD)) + return true; + } + } + return false; +} + +int xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, void *out, + int out_size) +{ + struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)in; + + hdr->ver = 0; + if (hdr->ver != 0) { + xsc_core_warn(dev, "recv an unexpected cmd ver = %d, opcode = %d\n", + be16_to_cpu(hdr->ver), be16_to_cpu(hdr->opcode)); + WARN_ON(hdr->ver != 0); + } + + if (need_write_reg_directly(in)) + return xsc_cmd_write_reg_directly(dev, in, in_size, out, + out_size, dev->glb_func_id); + return _xsc_cmd_exec(dev, in, in_size, out, out_size); +} +EXPORT_SYMBOL(xsc_cmd_exec); + +static int set_dma_caps(struct pci_dev *pdev) +{ + int err = 0; + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + else + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + + if (!err) + dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); + + return err; +} + +static int xsc_pci_enable_device(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + int err = 0; + + mutex_lock(&dev->pci_status_mutex); + if (dev->pci_status == XSC_PCI_STATUS_DISABLED) { + err = pci_enable_device(pdev); + if (!err) + dev->pci_status = XSC_PCI_STATUS_ENABLED; + } + mutex_unlock(&dev->pci_status_mutex); + + return err; +} + +static void xsc_pci_disable_device(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + mutex_lock(&dev->pci_status_mutex); + if (dev->pci_status == XSC_PCI_STATUS_ENABLED) { + pci_disable_device(pdev); + dev->pci_status = XSC_PCI_STATUS_DISABLED; + } + mutex_unlock(&dev->pci_status_mutex); +} + +int xsc_priv_init(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + + strscpy(priv->name, dev_name(&dev->pdev->dev), XSC_MAX_NAME_LEN); + priv->name[XSC_MAX_NAME_LEN - 1] = 0; + + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); + mutex_init(&dev->intf_state_mutex); + + return 0; +} + +int xsc_dev_res_init(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = NULL; + + dev_res = kvzalloc(sizeof(*dev_res), GFP_KERNEL); + if (!dev_res) + return 
-ENOMEM; + + dev->dev_res = dev_res; + /* init access lock */ + spin_lock_init(&dev->reg_access_lock.lock); + mutex_init(&dev_res->alloc_mutex); + mutex_init(&dev_res->pgdir_mutex); + INIT_LIST_HEAD(&dev_res->pgdir_list); + spin_lock_init(&dev_res->mkey_lock); + + return 0; +} + +void xsc_dev_res_cleanup(struct xsc_core_device *dev) +{ + kfree(dev->dev_res); + dev->dev_res = NULL; +} + +void xsc_init_reg_addr(struct xsc_core_device *dev) +{ + if (xsc_core_is_pf(dev)) { + dev->regs.cpm_get_lock = HIF_CPM_LOCK_GET_REG_ADDR; + dev->regs.cpm_put_lock = HIF_CPM_LOCK_PUT_REG_ADDR; + dev->regs.cpm_lock_avail = HIF_CPM_LOCK_AVAIL_REG_ADDR; + dev->regs.cpm_data_mem = HIF_CPM_IDA_DATA_MEM_ADDR; + dev->regs.cpm_cmd = HIF_CPM_IDA_CMD_REG_ADDR; + dev->regs.cpm_addr = HIF_CPM_IDA_ADDR_REG_ADDR; + dev->regs.cpm_busy = HIF_CPM_IDA_BUSY_REG_ADDR; + } else { + dev->regs.tx_db = TX_DB_FUNC_MEM_ADDR; + dev->regs.rx_db = RX_DB_FUNC_MEM_ADDR; + dev->regs.complete_db = DB_CQ_FUNC_MEM_ADDR; + dev->regs.complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR; + dev->regs.event_db = DB_EQ_FUNC_MEM_ADDR; + dev->regs.cpm_get_lock = CPM_LOCK_GET_REG_ADDR; + dev->regs.cpm_put_lock = CPM_LOCK_PUT_REG_ADDR; + dev->regs.cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR; + dev->regs.cpm_data_mem = CPM_IDA_DATA_MEM_ADDR; + dev->regs.cpm_cmd = CPM_IDA_CMD_REG_ADDR; + dev->regs.cpm_addr = CPM_IDA_ADDR_REG_ADDR; + dev->regs.cpm_busy = CPM_IDA_BUSY_REG_ADDR; + } +} + +int xsc_dev_init(struct xsc_core_device *dev) +{ + int err = 0; + + xsc_priv_init(dev); + + err = xsc_dev_res_init(dev); + if (err) { + xsc_core_err(dev, "xsc dev res init failed %d\n", err); + goto err_res_init; + } + + /* create debugfs */ + err = xsc_debugfs_init(dev); + if (err) { + xsc_core_err(dev, "xsc_debugfs_init failed %d\n", err); + goto err_debugfs_init; + } + + return 0; + +err_debugfs_init: + xsc_dev_res_cleanup(dev); +err_res_init: + return err; +} + +void xsc_dev_cleanup(struct xsc_core_device *dev) +{ +// iounmap(dev->iseg); + xsc_debugfs_fini(dev); + xsc_dev_res_cleanup(dev); +} + +static void xsc_product_info(struct pci_dev *pdev) +{ + const struct xsc_device_product_info *p_info = xsc_product_list; + + while (p_info->vendor) { + if (pdev->device == p_info->device && pdev->subsystem_device == p_info->subdevice) { + pr_info("Product: %s, Vendor: Yunsilicon\n", p_info->product_name); + break; + } + p_info++; + } +} + +static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id *id) +{ + struct pci_dev *pdev = dev->pdev; + int err = 0; + int bar_num = 0; + void __iomem *bar_base = NULL; + + mutex_init(&dev->pci_status_mutex); + dev->priv.numa_node = dev_to_node(&pdev->dev); + if (dev->priv.numa_node == -1) + dev->priv.numa_node = 0; + + /* enable the device */ + err = xsc_pci_enable_device(dev); + if (err) { + xsc_core_err(dev, "failed to enable PCI device: err=%d\n", err); + goto err_ret; + } + + err = pci_request_region(pdev, bar_num, KBUILD_MODNAME); + if (err) { + xsc_core_err(dev, "failed to request %s pci_region=%d: err=%d\n", + KBUILD_MODNAME, bar_num, err); + goto err_disable; + } + + pci_set_master(pdev); + + err = set_dma_caps(pdev); + if (err) { + xsc_core_err(dev, "failed to set DMA capabilities mask: err=%d\n", err); + goto err_clr_master; + } + + bar_base = pci_ioremap_bar(pdev, bar_num); + if (!bar_base) { + xsc_core_err(dev, "failed to ioremap %s bar%d\n", KBUILD_MODNAME, bar_num); + goto err_clr_master; + } + + err = pci_save_state(pdev); + if (err) { + xsc_core_err(dev, "pci_save_state failed: err=%d\n", err); + goto err_io_unmap; + } 
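+ /* remember the BAR index and mapping; xsc_pci_fini() unmaps and releases them */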
+ + dev->bar_num = bar_num; + dev->bar = bar_base; + + xsc_init_reg_addr(dev); + + return 0; + +err_io_unmap: + pci_iounmap(pdev, bar_base); +err_clr_master: + pci_clear_master(pdev); + pci_release_region(pdev, bar_num); +err_disable: + xsc_pci_disable_device(dev); +err_ret: + return err; +} + +static void xsc_pci_fini(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (dev->bar) + pci_iounmap(pdev, dev->bar); + pci_clear_master(pdev); + pci_release_region(pdev, dev->bar_num); + xsc_pci_disable_device(dev); +} + +static int xsc_check_cmdq_version(struct xsc_core_device *dev) +{ + struct xsc_cmd_query_cmdq_ver_mbox_out *out; + struct xsc_cmd_query_cmdq_ver_mbox_in in; + + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto no_mem_out; + } + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CMDQ_VERSION); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + + if (be16_to_cpu(out->cmdq_ver) != CMDQ_VERSION) { + xsc_core_err(dev, "cmdq version check failed, expecting version %d, actual version %d\n", + CMDQ_VERSION, be16_to_cpu(out->cmdq_ver)); + err = -EINVAL; + goto out_out; + } + dev->cmdq_ver = CMDQ_VERSION; + +out_out: + kfree(out); +no_mem_out: + return err; +} + +int xsc_reset_function_resource(struct xsc_core_device *dev) +{ + struct xsc_function_reset_mbox_in in; + struct xsc_function_reset_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_FUNCTION_RESET); + in.glb_func_id = cpu_to_be16(dev->glb_func_id); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) + return -EINVAL; + + return 0; +} + +static int xsc_fpga_not_supported(struct xsc_core_device *dev) +{ +#define FPGA_VERSION_H 0x100 +#define ASIC_VERSION_H 0x20230423 + u32 ver_h; + + if (!xsc_core_is_pf(dev)) + return 0; + + ver_h = REG_RD32(dev, HIF_CPM_CHIP_VERSION_H_REG_ADDR); + if (ver_h != FPGA_VERSION_H && ver_h != ASIC_VERSION_H) { + xsc_core_err(dev, "fpga version 0x%x not supported\n", ver_h); + return 1; + } + + return 0; +} + +int xsc_chip_type(struct xsc_core_device *dev) +{ + switch (dev->pdev->device) { + case XSC_MC_PF_DEV_ID: + case XSC_MC_VF_DEV_ID: + return XSC_CHIP_MC; + case XSC_MF_HOST_PF_DEV_ID: + case XSC_MF_HOST_VF_DEV_ID: + case XSC_MF_SOC_PF_DEV_ID: + return XSC_CHIP_MF; + case XSC_MS_PF_DEV_ID: + case XSC_MS_VF_DEV_ID: + return XSC_CHIP_MS; + case XSC_MV_HOST_PF_DEV_ID: + case XSC_MV_HOST_VF_DEV_ID: + case XSC_MV_SOC_PF_DEV_ID: + return XSC_CHIP_MV; + default: + return XSC_CHIP_UNKNOWN; + } +} +EXPORT_SYMBOL(xsc_chip_type); + +#if defined(__sw_64__) +static void xsc_enable_relaxed_order(struct xsc_core_device *dev) +{ + struct xsc_cmd_enable_relaxed_order_in in; + struct xsc_cmd_enable_relaxed_order_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_RELAXED_ORDER); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto err_out; + + if (out.hdr.status) { + err = xsc_cmd_status_to_err(&out.hdr); + goto err_out; + } + + return; +err_out: + xsc_core_warn(dev, "Failed to enable relaxed order %d\n", err); +} +#endif + +static int xsc_cmd_activate_hw_config(struct xsc_core_device *dev) +{ + struct xsc_cmd_activate_hw_config_mbox_in in; + struct xsc_cmd_activate_hw_config_mbox_out out; + int err = 0; + 
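+ /* a single mailbox command asks the firmware to activate the hardware
+  * configuration; success is cached in board_info->hw_config_activated
+  */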
+ in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACTIVATE_HW_CONFIG); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + dev->board_info->hw_config_activated = 1; + return 0; +} + +static int xsc_activate_hw_config(struct xsc_core_device *dev) +{ + if (dev->board_info->hw_config_activated) + return 0; + + return xsc_cmd_activate_hw_config(dev); +} + +static int xsc_init_once(struct xsc_core_device *dev) +{ + int err; + + err = xsc_cmd_init(dev); + if (err) { + xsc_core_err(dev, "Failed initializing command interface, aborting\n"); + goto err_cmd_init; + } + + err = xsc_check_cmdq_version(dev); + if (err) { + xsc_core_err(dev, "Failed to check cmdq version\n"); + goto err_cmdq_ver_chk; + } + + err = xsc_cmd_query_hca_cap(dev, &dev->caps); + if (err) { + xsc_core_err(dev, "Failed to query hca, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_query_guid(dev); + if (err) { + xsc_core_err(dev, "failed to query guid, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_activate_hw_config(dev); + if (err) { + xsc_core_err(dev, "failed to activate hw config, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_reset_function_resource(dev); + if (err) { + xsc_core_err(dev, "Failed to reset function resource\n"); + goto err_cmdq_ver_chk; + } + + funcid_to_pf_vf_index(&dev->caps, dev->glb_func_id, &dev->pcie_no, + &dev->pf_id, &dev->vf_id); + xsc_init_cq_table(dev); + xsc_init_qp_table(dev); + xsc_eq_init(dev); + +#ifdef CONFIG_XSC_SRIOV + err = xsc_sriov_init(dev); + if (err) { + xsc_core_err(dev, "Failed to init sriov %d\n", err); + goto err_sriov_init; + } + err = xsc_eswitch_init(dev); + if (err) { + xsc_core_err(dev, "Failed to init eswitch %d\n", err); + goto err_eswitch_init; + } +#endif + +#if defined(__sw_64__) + xsc_enable_relaxed_order(dev); +#endif + return 0; + +#ifdef CONFIG_XSC_SRIOV +err_eswitch_init: + xsc_sriov_cleanup(dev); +err_sriov_init: + xsc_eq_cleanup(dev); + xsc_cleanup_qp_table(dev); + xsc_cleanup_cq_table(dev); +#endif +err_cmdq_ver_chk: + xsc_cmd_cleanup(dev); +err_cmd_init: + return err; +} + +static int xsc_cleanup_once(struct xsc_core_device *dev) +{ +#ifdef CONFIG_XSC_SRIOV + xsc_eswitch_cleanup(dev); + xsc_sriov_cleanup(dev); +#endif + xsc_eq_cleanup(dev); + xsc_cleanup_qp_table(dev); + xsc_cleanup_cq_table(dev); + xsc_cmd_cleanup(dev); + return 0; +} + +static int xsc_load(struct xsc_core_device *dev) +{ + int err; + + err = xsc_irq_eq_create(dev); + if (err) { + xsc_core_err(dev, "xsc_irq_eq_create failed %d\n", err); + goto err_irq_eq_create; + } + +#ifdef CONFIG_XSC_SRIOV + err = xsc_sriov_attach(dev); + if (err) { + xsc_core_err(dev, "sriov init failed %d\n", err); + goto err_sriov; + } +#endif + return 0; + +#ifdef CONFIG_XSC_SRIOV +err_sriov: + xsc_irq_eq_destroy(dev); +#endif +err_irq_eq_create: + return err; +} + +static int xsc_unload(struct xsc_core_device *dev) +{ +#ifdef CONFIG_XSC_SRIOV + xsc_sriov_detach(dev); +#endif + if (xsc_fw_is_available(dev)) + xsc_irq_eq_destroy(dev); + + return 0; +} + +int xsc_load_one(struct xsc_core_device *dev, bool boot) +{ + int err = 0; + + mutex_lock(&dev->intf_state_mutex); + if (test_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state)) { + xsc_core_warn(dev, "interface is up, NOP\n"); + goto out; + } + + if (test_bit(XSC_INTERFACE_STATE_TEARDOWN, &dev->intf_state)) { + xsc_core_warn(dev, "device is being removed, stop load\n"); + err = -ENODEV; + goto out; + } + + if (boot) { + err = xsc_init_once(dev); + 
if (err) { + xsc_core_err(dev, "xsc_init_once failed %d\n", err); + goto err_dev_init; + } + } + + err = xsc_load(dev); + if (err) { + xsc_core_err(dev, "xsc_load failed %d\n", err); + goto err_load; + } + + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) { + err = xsc_create_res(dev); + if (err) { + xsc_core_err(dev, "Failed to create resource, err=%d\n", err); + goto err_create_res; + } + } + + if (boot) { + err = xsc_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + } + + if (xsc_core_is_pf(dev)) + xsc_lag_add_xdev(dev); + + if (xsc_device_registered(dev)) { + xsc_attach_device(dev); + } else { + err = xsc_register_device(dev); + if (err) { + xsc_core_err(dev, "register device failed %d\n", err); + goto err_reg_dev; + } + } + + err = xsc_port_ctrl_probe(dev); + if (err) { + xsc_core_err(dev, "failed to probe port control node\n"); + goto err_port_ctrl; + } + + set_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state); + mutex_unlock(&dev->intf_state_mutex); + + return err; + +err_port_ctrl: + xsc_unregister_device(dev); +err_reg_dev: + if (xsc_core_is_pf(dev)) + xsc_lag_remove_xdev(dev); + if (boot) + xsc_devlink_unregister(priv_to_devlink(dev)); +err_devlink_reg: + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) + xsc_destroy_res(dev); + +err_create_res: + xsc_unload(dev); + +err_load: + if (boot) + xsc_cleanup_once(dev); +err_dev_init: +out: + mutex_unlock(&dev->intf_state_mutex); + return err; +} + +int xsc_unload_one(struct xsc_core_device *dev, bool cleanup) +{ + xsc_port_ctrl_remove(dev); + xsc_devlink_unregister(priv_to_devlink(dev)); + if (cleanup) + xsc_unregister_device(dev); + mutex_lock(&dev->intf_state_mutex); + if (!test_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state)) { + xsc_core_warn(dev, "%s: interface is down, NOP\n", + __func__); + if (cleanup) + xsc_cleanup_once(dev); + goto out; + } + + clear_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state); + if (xsc_device_registered(dev)) + xsc_detach_device(dev); + + if (xsc_core_is_pf(dev)) + xsc_lag_remove_xdev(dev); + + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) + xsc_destroy_res(dev); + + xsc_unload(dev); + + if (cleanup) + xsc_cleanup_once(dev); + +out: + mutex_unlock(&dev->intf_state_mutex); + + return 0; +} + +static int xsc_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct xsc_core_device *xdev; + struct xsc_priv *priv; + int err; + struct devlink *devlink; + + devlink = xsc_devlink_alloc(&pci_dev->dev); + if (!devlink) { + dev_err(&pci_dev->dev, "devlink alloc failed\n"); + return -ENOMEM; + } + xdev = devlink_priv(devlink); + + xsc_product_info(pci_dev); + xdev->pdev = pci_dev; + xdev->device = &pci_dev->dev; + priv = &xdev->priv; + xdev->coredev_type = (IS_VIRT_FUNCTION(id)) ? 
+ XSC_COREDEV_VF : XSC_COREDEV_PF; + xsc_core_info(xdev, "dev_type=%d is_vf=%d\n", + xdev->coredev_type, pci_dev->is_virtfn); + +#ifdef CONFIG_XSC_SRIOV + priv->sriov.probe_vf = probe_vf; + if ((IS_VIRT_FUNCTION(id)) && !probe_vf) { + xsc_core_err(xdev, "VFs are not binded to xsc driver\n"); + return 0; + } +#endif + + /* init pcie device */ + pci_set_drvdata(pci_dev, xdev); + err = xsc_pci_init(xdev, id); + if (err) { + xsc_core_err(xdev, "xsc_pci_init failed %d\n", err); + goto err_pci_init; + } + + err = xsc_dev_init(xdev); + if (err) { + xsc_core_err(xdev, "xsc_dev_init failed %d\n", err); + goto err_dev_init; + } + + if (xsc_fpga_not_supported(xdev)) { + err = -EOPNOTSUPP; + goto err_version_check; + } + + err = xsc_load_one(xdev, true); + if (err) { + xsc_core_err(xdev, "xsc_load_one failed %d\n", err); + goto err_load; + } + + request_module_nowait(ETH_DRIVER_NAME); + + return 0; + +err_load: +err_version_check: + xsc_dev_cleanup(xdev); +err_dev_init: + xsc_pci_fini(xdev); +err_pci_init: + pci_set_drvdata(pci_dev, NULL); + xsc_devlink_free(devlink); + return err; +} + +static void xsc_pci_remove(struct pci_dev *pci_dev) +{ + struct xsc_core_device *xdev = pci_get_drvdata(pci_dev); + + set_bit(XSC_INTERFACE_STATE_TEARDOWN, &xdev->intf_state); + xsc_unload_one(xdev, true); + xsc_dev_cleanup(xdev); + + xsc_pci_fini(xdev); + pci_set_drvdata(pci_dev, NULL); + xsc_devlink_free(priv_to_devlink(xdev)); +} + +static struct pci_driver xsc_pci_driver = { + .name = "xsc-pci", + .id_table = xsc_pci_id_table, + .probe = xsc_pci_probe, + .remove = xsc_pci_remove, + +#ifdef CONFIG_XSC_SRIOV + .sriov_configure = xsc_core_sriov_configure, +#endif +}; + +int xsc_pci_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc pci driver recv %lu event\n", action); + if (xsc_get_exit_flag()) + return NOTIFY_OK; + xsc_pci_exit(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_pci_nb = { + .notifier_call = xsc_pci_reboot_event_handler, + .next = NULL, + .priority = 0, +}; + +void xsc_pci_exit(void) +{ + xsc_stop_delayed_release(); + pci_unregister_driver(&xsc_pci_driver); + xsc_pci_ctrl_fini(); + xsc_port_ctrl_fini(); + xsc_unregister_debugfs(); + qpts_fini(); + xsc_free_board_info(); +} + +static int __init xsc_init(void) +{ + int err; + + xsc_register_debugfs(); + + qpts_init(); + + err = xsc_port_ctrl_init(); + if (err) { + pr_err("failed to initialize port control\n"); + goto err_port_ctrl; + } + + err = xsc_pci_ctrl_init(); + if (err) { + pr_err("failed to initialize dpdk ctrl\n"); + goto err_pci_ctrl; + } + + xsc_hw_reset = false; + err = pci_register_driver(&xsc_pci_driver); + if (err) { + pr_err("failed to register pci driver\n"); + goto err_register; + } + + xsc_init_delayed_release(); + register_reboot_notifier(&xsc_pci_nb); + + return 0; + +err_register: + xsc_pci_ctrl_fini(); +err_pci_ctrl: + xsc_port_ctrl_fini(); +err_port_ctrl: + xsc_unregister_debugfs(); + qpts_fini(); + return err; +} + +static void __exit xsc_fini(void) +{ + unregister_reboot_notifier(&xsc_pci_nb); + xsc_pci_exit(); +} + +module_init(xsc_init); +module_exit(xsc_fini); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c new file mode 100644 index 0000000000000000000000000000000000000000..a834a09d23da6727f9851fed71adbaa63586e020 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon 
Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/driver.h" +#include "common/xsc_cmd.h" + +int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) +{ + struct xsc_create_mkey_mbox_in in; + struct xsc_create_mkey_mbox_out out; + int err; + u8 key; + + memset(&out, 0, sizeof(out)); + spin_lock(&dev->dev_res->mkey_lock); + key = 0x80 + dev->dev_res->mkey_key++; + spin_unlock(&dev->dev_res->mkey_lock); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MKEY); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + else + err = xsc_create_mkey(dev, &in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec faile %d\n", err); + return err; + } + + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + mr->key = xsc_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; + xsc_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); + + return err; +} +EXPORT_SYMBOL(xsc_core_create_mkey); + +int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) +{ + struct xsc_destroy_mkey_mbox_in in; + struct xsc_destroy_mkey_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mr->key); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + else + err = xsc_destroy_mkey(dev, &in, &out); + + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(xsc_core_destroy_mkey); + +int xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, + u32 *mtt_base) +{ + struct xsc_set_mpt_mbox_in *in; + struct xsc_set_mpt_mbox_out out; + struct xsc_register_mr_request *req = &in_cmd->req; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + return err; + } + in->mpt_item.pdn = req->pdn; + in->mpt_item.pa_num = req->pa_num; + in->mpt_item.len = req->len; + in->mpt_item.mkey = req->mkey; + in->mpt_item.acc = req->acc; + in->mpt_item.page_mode = req->page_mode; + in->mpt_item.map_en = req->map_en; + in->mpt_item.va_base = req->va_base; + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MPT); + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "set mpt failed\n"); + kfree(in); + return -EINVAL; + } + + *mtt_base = be32_to_cpu(out.mtt_base); + kfree(in); + return 0; +} + +int xsc_set_mtt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, + u32 mtt_base) +{ +#define PA_NUM_PER_CMD 1024 + struct xsc_set_mtt_mbox_in *seg_in; + struct xsc_set_mtt_mbox_out seg_out; + struct xsc_register_mr_request *req = &in_cmd->req; + int tot_pg_num = be32_to_cpu(req->pa_num); + int seg_idx, tot_seg_num, seg_pa_num; + int pa_idx_base = 0; + int i; + int in_len; + int err; + + tot_seg_num = (tot_pg_num & 0x7FF) ? ((tot_pg_num >> 10) + 1) : + (tot_pg_num >> 10); + for (seg_idx = 0; seg_idx < tot_seg_num; seg_idx++) { + seg_pa_num = (seg_idx != tot_seg_num - 1) ? 
PA_NUM_PER_CMD : + (tot_pg_num - ((tot_seg_num - 1) << 10)); + in_len = (seg_pa_num << 3) + sizeof(*seg_in); + seg_in = kzalloc(in_len, GFP_KERNEL); + if (!seg_in) { + err = -ENOMEM; + return err; + } + seg_in->mtt_setting.mtt_base = cpu_to_be32(mtt_base); + seg_in->mtt_setting.pa_num = cpu_to_be32(seg_pa_num); + for (i = 0; i < seg_pa_num; i++) + seg_in->mtt_setting.pas[i] = req->pas[pa_idx_base + i]; + seg_in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTT); + + memset(&seg_out, 0, sizeof(seg_out)); + xsc_core_dbg(dev, "set mtt seg %d, pa_num %d, pa_idx_base %d, tot_seg %d\n", + seg_idx, seg_pa_num, pa_idx_base, tot_seg_num); + err = xsc_cmd_exec(dev, seg_in, in_len, &seg_out, sizeof(seg_out)); + if (err || seg_out.hdr.status) { + xsc_core_err(dev, "set mtt seg %d failed\n", seg_idx); + kfree(seg_in); + return -EINVAL; + } + kfree(seg_in); + pa_idx_base += seg_pa_num; + mtt_base += seg_pa_num; + } + return 0; +} + +int xsc_dereg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = in_cmd->req.mkey; + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return -EINVAL; + } + return 0; +} + +int xsc_reg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in) +{ + u32 mtt_base; + int err; + + err = xsc_set_mpt_via_cmdq(dev, in, &mtt_base); + if (err) { + xsc_core_err(dev, "set mpt via cmdq failed\n"); + return err; + } + + err = xsc_set_mtt_via_cmdq(dev, in, mtt_base); + if (err) { + xsc_core_err(dev, "set mtt via cmdq failed\n"); + goto set_mtt_err; + } + return 0; + +set_mtt_err: + err = xsc_dereg_mr_via_cmdq(dev, in); + if (err) + xsc_core_err(dev, "dereg error mr failed\n"); + return err; +} + +int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, + struct xsc_register_mr_mbox_in *in, int inlen) +{ + struct xsc_register_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_REG_MR); + if (dev->reg_mr_via_cmdq) + err = xsc_reg_mr_via_cmdq(dev, in); + else + err = xsc_reg_mr(dev, in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_register_mr); + +int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = cpu_to_be32(xsc_mkey_to_idx(mr->key)); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + else + err = xsc_dereg_mr(dev, &in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_dereg_mr); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..7138c281ed203a38ce0454b840887acad4d77bcb --- /dev/null +++ 
b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#ifdef CONFIG_RFS_ACCEL +#include +#endif +#include "fw/xsc_flow.h" +#include "fw/xsc_fw.h" + +enum xsc_eq_type { + XSC_EQ_TYPE_COMP, + XSC_EQ_TYPE_ASYNC, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + XSC_EQ_TYPE_PF, +#endif +}; + +struct xsc_irq { + struct atomic_notifier_head nh; + cpumask_var_t mask; + char name[XSC_MAX_IRQ_NAME]; +}; + +struct xsc_irq_table { + struct xsc_irq *irq; + int nvec; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif +}; + +struct xsc_msix_resource *g_msix_xres; + +static irqreturn_t xsc_dma_read_msix_handler(int irq, void *dev_id) +{ + xsc_dma_read_done_complete(); + return IRQ_HANDLED; +} + +static int xsc_dma_read_msix_init(struct xsc_core_device *xdev) +{ + int err; + char *name = "xsc_dma_read_done"; + struct xsc_dev_resource *dev_res = xdev->dev_res; + int irqn; + u32 value = 0; + int vecid = 0; + + snprintf(dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + name, pci_name(xdev->pdev)); + irqn = pci_irq_vector(xdev->pdev, XSC_DMA_READ_DONE_VEC); + err = request_irq(irqn, xsc_dma_read_msix_handler, 0, + dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, (void *)xdev); + + vecid = (xdev->msix_vec_base + XSC_DMA_READ_DONE_VEC); + value = ((1 << 12) | (vecid & 0xfff)); + REG_WR32(xdev, HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR, value); + + return err; +} + +static void xsc_free_irq(struct xsc_core_device *xdev, unsigned int vector) +{ + unsigned int irqn = 0; + + irqn = pci_irq_vector(xdev->pdev, vector); + disable_irq(irqn); + + if (xsc_fw_is_available(xdev)) + free_irq(irqn, xdev); +} + +static void xsc_dma_read_msix_fini(struct xsc_core_device *xdev) +{ + if (xdev->caps.msix_enable && xsc_core_is_pf(xdev)) + xsc_free_irq(xdev, XSC_DMA_READ_DONE_VEC); +} + +struct xsc_eq *xsc_eq_get(struct xsc_core_device *dev, int i) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + struct xsc_eq *eq_ret = NULL; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == i) { + eq_ret = eq; + break; + } + } + spin_unlock(&table->lock); + + return eq_ret; +} +EXPORT_SYMBOL(xsc_eq_get); + +void mask_cpu_by_node(int node, struct cpumask *dstp) +{ + int i; + + for (i = 0; i < nr_cpu_ids; i++) { + if (node == cpu_to_node(i)) + cpumask_set_cpu(i, dstp); + } +} +EXPORT_SYMBOL(mask_cpu_by_node); + +static int set_comp_irq_affinity_hint(struct xsc_core_device *dev, int i) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int vecidx = table->eq_vec_comp_base + i; + struct xsc_eq *eq = xsc_eq_get(dev, i); + unsigned int irqn; + int ret; + + irqn = pci_irq_vector(dev->pdev, vecidx); + if (!zalloc_cpumask_var(&eq->mask, GFP_KERNEL)) { + xsc_core_err(dev, "zalloc_cpumask_var rx cpumask failed"); + return -ENOMEM; + } + + if (!zalloc_cpumask_var(&dev->xps_cpumask, GFP_KERNEL)) { + xsc_core_err(dev, "zalloc_cpumask_var tx cpumask failed"); + return -ENOMEM; + } + + mask_cpu_by_node(dev->priv.numa_node, eq->mask); + ret = irq_set_affinity_hint(irqn, eq->mask); + + return ret; +} + +static void clear_comp_irq_affinity_hint(struct xsc_core_device *dev, int i) +{ + struct xsc_eq_table *table = 
&dev->dev_res->eq_table; + int vecidx = table->eq_vec_comp_base + i; + struct xsc_eq *eq = xsc_eq_get(dev, i); + int irqn; + + irqn = pci_irq_vector(dev->pdev, vecidx); + irq_set_affinity_hint(irqn, NULL); + free_cpumask_var(eq->mask); +} + +static int set_comp_irq_affinity_hints(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int nvec = table->num_comp_vectors; + int err; + int i; + + for (i = 0; i < nvec; i++) { + err = set_comp_irq_affinity_hint(dev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + clear_comp_irq_affinity_hint(dev, i); + free_cpumask_var(dev->xps_cpumask); + + return err; +} + +static void clear_comp_irq_affinity_hints(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int nvec = table->num_comp_vectors; + int i; + + for (i = 0; i < nvec; i++) + clear_comp_irq_affinity_hint(dev, i); + free_cpumask_var(dev->xps_cpumask); +} + +struct cpumask * +xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector) +{ + struct xsc_eq *eq = xsc_eq_get(dev, vector); + + if (unlikely(!eq)) + return NULL; + + return eq->mask; +} +EXPORT_SYMBOL(xsc_comp_irq_get_affinity_mask); + +static int xsc_alloc_irq_vectors(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + struct xsc_eq_table *table = &dev_res->eq_table; + int nvec = dev->caps.msix_num; + int nvec_base; + int err; + + if (xsc_core_is_pf(dev)) + nvec_base = XSC_EQ_VEC_COMP_BASE; + else + /*VF device not need dma read done vector.*/ + nvec_base = (XSC_EQ_VEC_COMP_BASE - 1); + + if (nvec <= nvec_base) { + xsc_core_warn(dev, "failed to alloc irq vector(%d)\n", nvec); + return -ENOMEM; + } + + dev_res->irq_info = kcalloc(nvec, sizeof(*dev_res->irq_info), GFP_KERNEL); + if (!dev_res->irq_info) + return -ENOMEM; + + nvec = pci_alloc_irq_vectors(dev->pdev, nvec_base + 1, nvec, PCI_IRQ_MSIX); + if (nvec < 0) { + err = nvec; + goto err_free_irq_info; + } + + table->eq_vec_comp_base = nvec_base; + table->num_comp_vectors = nvec - nvec_base; + dev->msix_vec_base = dev->caps.msix_base; + xsc_core_info(dev, + "alloc msix_vec_num=%d, comp_num=%d, max_msix_num=%d, msix_vec_base=%d\n", + nvec, table->num_comp_vectors, dev->caps.msix_num, dev->msix_vec_base); + + return 0; + +err_free_irq_info: + pci_free_irq_vectors(dev->pdev); + kfree(dev_res->irq_info); + return err; +} + +static void xsc_free_irq_vectors(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + if (!xsc_fw_is_available(dev)) + return; + + pci_free_irq_vectors(dev->pdev); + kfree(dev_res->irq_info); +} + +int xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, + unsigned int *irqn) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + int err = -ENOENT; + + if (!dev->caps.msix_enable) + return 0; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} +EXPORT_SYMBOL(xsc_vector2eqn); + +static void free_comp_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (xsc_destroy_unmap_eq(dev, eq)) + xsc_core_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); + 
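+		/* eq has already been unlinked from comp_eqs_list above,
+		 * so it can be destroyed and freed here without holding
+		 * the table lock.
+		 */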
kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int alloc_comp_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + char name[XSC_MAX_IRQ_NAME]; + struct xsc_eq *eq; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = XSC_COMP_EQ_SIZE; + + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(name, XSC_MAX_IRQ_NAME, "xsc_comp%d", i); + err = xsc_create_map_eq(dev, eq, + i + table->eq_vec_comp_base, nent, name); + if (err) { + kfree(eq); + goto clean; + } + + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &table->comp_eqs_list); + spin_unlock(&table->lock); + } + + return 0; + +clean: + free_comp_eqs(dev); + return err; +} + +static irqreturn_t xsc_cmd_handler(int irq, void *arg) +{ + struct xsc_core_device *dev = (struct xsc_core_device *)arg; + int err; + + disable_irq_nosync(dev->cmd.irqn); + err = xsc_cmd_err_handler(dev); + if (!err) + xsc_cmd_resp_handler(dev); + enable_irq(dev->cmd.irqn); + + return IRQ_HANDLED; +} + +int xsc_request_irq_for_cmdq(struct xsc_core_device *dev, u8 vecidx) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + writel(dev->msix_vec_base + vecidx, REG_ADDR(dev, dev->cmd.reg.msix_vec_addr)); + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_cmd", pci_name(dev->pdev)); + dev->cmd.irqn = pci_irq_vector(dev->pdev, vecidx); + return request_irq(dev->cmd.irqn, xsc_cmd_handler, 0, + dev_res->irq_info[vecidx].name, dev); +} + +void xsc_free_irq_for_cmdq(struct xsc_core_device *dev) +{ + xsc_free_irq(dev, XSC_VEC_CMD); +} + +static irqreturn_t xsc_event_handler(int irq, void *arg) +{ + struct xsc_core_device *dev = (struct xsc_core_device *)arg; + + xsc_core_dbg(dev, "cmd event hint irq: %d\n", irq); + + if (!dev->eth_priv) + return IRQ_NONE; + + if (!dev->event_handler) + return IRQ_NONE; + + dev->event_handler(dev->eth_priv); + + return IRQ_HANDLED; +} + +int xsc_request_irq_for_event(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + snprintf(dev_res->irq_info[XSC_VEC_CMD_EVENT].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_eth_event", pci_name(dev->pdev)); + return request_irq(pci_irq_vector(dev->pdev, XSC_VEC_CMD_EVENT), xsc_event_handler, 0, + dev_res->irq_info[XSC_VEC_CMD_EVENT].name, dev); +} + +void xsc_free_irq_for_event(struct xsc_core_device *dev) +{ + xsc_free_irq(dev, XSC_VEC_CMD_EVENT); +} + +int xsc_cmd_enable_msix(struct xsc_core_device *xdev) +{ + struct xsc_msix_table_info_mbox_in in; + struct xsc_msix_table_info_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_MSIX); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + xsc_core_err(xdev, "xsc_cmd_exec enable msix failed %d\n", err); + return err; + } + + return 0; +} + +int xsc_irq_eq_create(struct xsc_core_device *dev) +{ + int err; + + if (dev->caps.msix_enable == 0) + return 0; + + err = xsc_alloc_irq_vectors(dev); + if (err) { + xsc_core_err(dev, "enable msix failed, err=%d\n", err); + goto err_alloc_irq; + } + + err = xsc_start_eqs(dev); + if (err) { + xsc_core_err(dev, "failed to start EQs, err=%d\n", err); + goto err_start_eqs; + } + + err = alloc_comp_eqs(dev); + if (err) { + xsc_core_err(dev, "failed to alloc comp EQs, err=%d\n", err); + 
goto err_alloc_comp_eqs; + } + + err = xsc_request_irq_for_cmdq(dev, XSC_VEC_CMD); + if (err) { + xsc_core_err(dev, "failed to request irq for cmdq, err=%d\n", err); + goto err_request_cmd_irq; + } + + err = xsc_request_irq_for_event(dev); + if (err) { + xsc_core_err(dev, "failed to request irq for event, err=%d\n", err); + goto err_request_event_irq; + } + + if (dev->caps.msix_enable && xsc_core_is_pf(dev)) { + err = xsc_dma_read_msix_init(dev); + if (err) { + xsc_core_err(dev, "dma read msix init failed %d.\n", err); + goto err_dma_read_msix; + } + } + + err = set_comp_irq_affinity_hints(dev); + if (err) { + xsc_core_err(dev, "failed to alloc affinity hint cpumask, err=%d\n", err); + goto err_set_affinity; + } + + xsc_cmd_use_events(dev); + err = xsc_cmd_enable_msix(dev); + if (err) { + xsc_core_err(dev, "xsc_cmd_enable_msix failed %d.\n", err); + xsc_cmd_use_polling(dev); + goto err_set_affinity; + } + return 0; + +err_set_affinity: + xsc_dma_read_msix_fini(dev); +err_dma_read_msix: + xsc_free_irq_for_event(dev); +err_request_event_irq: + xsc_free_irq_for_cmdq(dev); +err_request_cmd_irq: + free_comp_eqs(dev); +err_alloc_comp_eqs: + xsc_stop_eqs(dev); +err_start_eqs: + xsc_free_irq_vectors(dev); +err_alloc_irq: + return err; +} + +int xsc_irq_eq_destroy(struct xsc_core_device *dev) +{ + if (dev->caps.msix_enable == 0) + return 0; + + xsc_stop_eqs(dev); + clear_comp_irq_affinity_hints(dev); + free_comp_eqs(dev); + + xsc_dma_read_msix_fini(dev); + xsc_free_irq_for_event(dev); + xsc_free_irq_for_cmdq(dev); + xsc_free_irq_vectors(dev); + + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c new file mode 100644 index 0000000000000000000000000000000000000000..37db01d1742f8c6bf91d0e965df3491e971ed2aa --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/driver.h" + +int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 *pdn) +{ + struct xsc_alloc_pd_mbox_in in; + struct xsc_alloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_PD); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + *pdn = be32_to_cpu(out.pdn) & 0xffffff; + return err; +} +EXPORT_SYMBOL(xsc_core_alloc_pd); + +int xsc_core_dealloc_pd(struct xsc_core_device *xdev, u32 pdn) +{ + struct xsc_dealloc_pd_mbox_in in; + struct xsc_dealloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(xsc_core_dealloc_pd); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/port.c b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c new file mode 100644 index 0000000000000000000000000000000000000000..80414f3917d97c1a4c4e7fae32ae4a29b56cf641 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "common/driver.h" +#include "common/port.h" + +int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write) +{ + struct xsc_access_reg_mbox_in *in = NULL; + struct xsc_access_reg_mbox_out *out = NULL; + int err = -ENOMEM; + + in = xsc_vzalloc(sizeof(*in) + size_in); + if (!in) + return -ENOMEM; + + out = xsc_vzalloc(sizeof(*out) + size_out); + if (!out) + goto ex1; + + memcpy(in->data, data_in, size_in); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACCESS_REG); + in->arg = cpu_to_be32(arg); + in->register_id = cpu_to_be16(reg_num); + err = xsc_cmd_exec(xdev, in, sizeof(*in) + size_in, out, + sizeof(*out) + size_out); + if (err) + goto ex2; + + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + + if (!err) + memcpy(data_out, out->data, size_out); + +ex2: + xsc_vfree(out); +ex1: + xsc_vfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_access_reg); + +struct xsc_reg_pcap { + u8 rsvd0; + u8 port_num; + u8 rsvd1[2]; + __be32 caps_127_96; + __be32 caps_95_64; + __be32 caps_63_32; + __be32 caps_31_0; +}; + +int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps) +{ + struct xsc_reg_pcap in; + struct xsc_reg_pcap out; + int err; + + memset(&in, 0, sizeof(in)); + in.caps_127_96 = cpu_to_be32(caps); + in.port_num = port_num; + + err = xsc_core_access_reg(xdev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_PCAP, 0, 1); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_set_port_caps); + +static int xsc_query_module_num(struct xsc_core_device *dev, int *module_num) +{ + *module_num = dev->mac_port; + return 0; +} + +static int xsc_query_module_id(struct xsc_core_device *dev, int module_num, + u8 *module_id) +{ + struct xsc_reg_mcia in; + struct xsc_reg_mcia out; + int err, status; + u8 *ptr; + + in.i2c_device_address = XSC_I2C_ADDR_LOW; + in.module = module_num; + in.device_address = 0; + in.page_number = 0; + in.size = 1; + + err = xsc_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_MCIA, 0, 0); + if (err) + return err; + + status = out.status; + if (status) { + xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = out.dword_0; + + *module_id = ptr[0]; + + return 0; +} + +static int xsc_qsfp_eeprom_page(u16 offset) +{ + if (offset < XSC_EEPROM_PAGE_LENGTH) + /* Addresses between 0-255 - page 00 */ + return 0; + + /* Addresses between 256 - 639 belongs to pages 01, 02 and 03 + * For example, offset = 400 belongs to page 02: + * 1 + ((400 - 256)/128) = 2 + */ + return 1 + ((offset - XSC_EEPROM_PAGE_LENGTH) / + XSC_EEPROM_HIGH_PAGE_LENGTH); +} + +static int xsc_qsfp_eeprom_high_page_offset(int page_num) +{ + if (!page_num) /* Page 0 always start from low page */ + return 0; + + /* High page */ + return page_num * XSC_EEPROM_HIGH_PAGE_LENGTH; +} + +static void xsc_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = XSC_I2C_ADDR_LOW; + *page_num = xsc_qsfp_eeprom_page(*offset); + *offset -= xsc_qsfp_eeprom_high_page_offset(*page_num); +} + +static void xsc_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = XSC_I2C_ADDR_LOW; + *page_num = 0; + + if (*offset < XSC_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = XSC_I2C_ADDR_HIGH; + *offset -= XSC_EEPROM_PAGE_LENGTH; +} + +static int xsc_query_mcia(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, u8 *data) +{ + struct xsc_reg_mcia in; + struct xsc_reg_mcia out; + int 
status, err; + void *ptr; + u16 size; + + size = min_t(int, params->size, XSC_EEPROM_MAX_BYTES); + + in.i2c_device_address = params->i2c_address; + in.module = params->module_number; + in.device_address = params->offset; + in.page_number = params->page; + in.size = size; + + err = xsc_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_MCIA, 0, 0); + if (err) + return err; + + status = out.status; + if (status) { + xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + + ptr = out.dword_0; + memcpy(data, ptr, size); + + return size; +} + +int xsc_query_module_eeprom(struct xsc_core_device *dev, + u16 offset, u16 size, u8 *data) +{ + struct xsc_module_eeprom_query_params query = {0}; + u8 module_id; + int err; + + err = xsc_query_module_num(dev, &query.module_number); + if (err) + return err; + + err = xsc_query_module_id(dev, query.module_number, &module_id); + if (err) + return err; + + switch (module_id) { + case XSC_MODULE_ID_SFP: + xsc_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); + break; + case XSC_MODULE_ID_QSFP: + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP28: + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_DSFP: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + xsc_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); + break; + default: + xsc_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } + + if (offset + size > XSC_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size = XSC_EEPROM_PAGE_LENGTH - offset; + + query.size = size; + query.offset = offset; + + return xsc_query_mcia(dev, &query, data); +} +EXPORT_SYMBOL_GPL(xsc_query_module_eeprom); + +int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, + u8 *data) +{ + u8 module_id; + int err; + + err = xsc_query_module_num(dev, ¶ms->module_number); + if (err) + return err; + + err = xsc_query_module_id(dev, params->module_number, &module_id); + if (err) + return err; + + switch (module_id) { + case XSC_MODULE_ID_SFP: + if (params->page > 0) + return -EINVAL; + break; + case XSC_MODULE_ID_QSFP: + case XSC_MODULE_ID_QSFP28: + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + if (params->page > 3) + return -EINVAL; + break; + case XSC_MODULE_ID_DSFP: + break; + default: + xsc_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } + + if (params->i2c_address != XSC_I2C_ADDR_HIGH && + params->i2c_address != XSC_I2C_ADDR_LOW) { + xsc_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address); + return -EINVAL; + } + + return xsc_query_mcia(dev, params, data); +} +EXPORT_SYMBOL_GPL(xsc_query_module_eeprom_by_page); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c new file mode 100644 index 0000000000000000000000000000000000000000..0e5d365c0b23ba5a1baf37e85aebcb82093e124a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "common/qp.h" +#include "common/driver.h" +#include +#include "common/xsc_core.h" + +#define GROUP_DESTROY_FLAG_SHFIT 15 +#define GROUP_DESTROY_FLAG_MASK (1 << (GROUP_DESTROY_FLAG_SHFIT)) + +#define GROUP_OTHER_HASH_SIZE 16 +#define GROUP_CC_HASH_SIZE (1024 - GROUP_OTHER_HASH_SIZE) + +enum { + GROUP_MODE_PER_QP = 0, + GROUP_MODE_PER_DEST_IP, +}; + +struct { + struct list_head head; + spinlock_t lock; /* protect delayed_release_list */ + struct task_struct *poll_task; + wait_queue_head_t wq; + int wait_flag; +} delayed_release_list; + +enum { + SLEEP, + WAKEUP, + EXIT, +}; + +static bool exit_flag; + +void xsc_set_exit_flag(void) +{ + exit_flag = true; +} +EXPORT_SYMBOL_GPL(xsc_set_exit_flag); + +bool xsc_get_exit_flag(void) +{ + return exit_flag; +} +EXPORT_SYMBOL_GPL(xsc_get_exit_flag); + +bool exist_incomplete_qp_flush(void) +{ + return !list_empty(&delayed_release_list.head); +} +EXPORT_SYMBOL_GPL(exist_incomplete_qp_flush); + +static bool xsc_qp_flush_finished(struct xsc_core_device *xdev, u32 qpn) +{ + struct xsc_query_qp_flush_status_mbox_in in; + struct xsc_query_qp_flush_status_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_QP_FLUSH_STATUS); + in.qpn = cpu_to_be32(qpn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if ((!err && !out.hdr.status) || err == -ETIMEDOUT) + return true; + + xsc_core_dbg(xdev, "qp[%d] flush incomplete.\n", qpn); + return false; +} + +static int xsc_qp_flush_check(void *arg) +{ + struct xsc_qp_rsc *entry; + + while (!kthread_should_stop()) { + if (need_resched()) + schedule(); + + spin_lock(&delayed_release_list.lock); + entry = list_first_entry_or_null(&delayed_release_list.head, + struct xsc_qp_rsc, node); + if (!entry) { + spin_unlock(&delayed_release_list.lock); + wait_event_interruptible(delayed_release_list.wq, + delayed_release_list.wait_flag != SLEEP); + if (delayed_release_list.wait_flag == EXIT) + break; + delayed_release_list.wait_flag = SLEEP; + continue; + } + list_del(&entry->node); + spin_unlock(&delayed_release_list.lock); + + if (!exit_flag && !xsc_qp_flush_finished(entry->xdev, entry->qpn)) { + spin_lock(&delayed_release_list.lock); + list_add_tail(&entry->node, &delayed_release_list.head); + spin_unlock(&delayed_release_list.lock); + } else { + complete(&entry->delayed_release); + } + } + + return 0; +} + +void xsc_init_delayed_release(void) +{ + INIT_LIST_HEAD(&delayed_release_list.head); + spin_lock_init(&delayed_release_list.lock); + init_waitqueue_head(&delayed_release_list.wq); + delayed_release_list.wait_flag = SLEEP; + delayed_release_list.poll_task = kthread_create(xsc_qp_flush_check, NULL, "qp flush check"); + if (delayed_release_list.poll_task) + wake_up_process(delayed_release_list.poll_task); +} + +void xsc_stop_delayed_release(void) +{ + delayed_release_list.wait_flag = EXIT; + wake_up(&delayed_release_list.wq); + if (delayed_release_list.poll_task) + kthread_stop(delayed_release_list.poll_task); +} + +static void xsc_wait_qp_flush_complete(struct xsc_core_device *xdev, u32 qpn) +{ + struct xsc_qp_rsc qp_rsc; + int err = 0; + + if (exit_flag) + return; + + init_completion(&qp_rsc.delayed_release); + qp_rsc.qpn = qpn; + qp_rsc.xdev = xdev; + spin_lock(&delayed_release_list.lock); + list_add_tail(&qp_rsc.node, &delayed_release_list.head); + spin_unlock(&delayed_release_list.lock); + delayed_release_list.wait_flag = WAKEUP; + wake_up(&delayed_release_list.wq); + + while 
((err = wait_for_completion_interruptible(&qp_rsc.delayed_release)) + == -ERESTARTSYS) { + xsc_core_dbg(xdev, "qp %d wait for completion is interrupted, err = %d\n", + qpn, err); + if (need_resched()) + schedule(); + } +} + +int create_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + int err; + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, qp->qpn, qp); + spin_unlock_irq(&table->lock); + if (err) + return err; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + qp->pid = current->pid; + + return 0; +} +EXPORT_SYMBOL_GPL(create_resource_common); + +void destroy_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + unsigned long flags; + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, qp->qpn); + spin_unlock_irqrestore(&table->lock, flags); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); +} +EXPORT_SYMBOL_GPL(destroy_resource_common); + +void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + struct xsc_core_qp *qp; + + spin_lock(&table->lock); + + qp = radix_tree_lookup(&table->tree, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&table->lock); + + if (!qp) { + xsc_core_warn(xdev, "Async event for bogus QP 0x%x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +int xsc_core_create_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp, + struct xsc_create_qp_mbox_in *in, + int inlen) +{ + struct xsc_create_qp_mbox_out out; + struct xsc_destroy_qp_mbox_in din; + struct xsc_destroy_qp_mbox_out dout; + int err; + struct timespec64 ts; + + ktime_get_boottime_ts64(&ts); + + memset(&dout, 0, sizeof(dout)); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + + err = xsc_cmd_exec(xdev, in, inlen, &out, sizeof(out)); + if (err) { + xsc_core_err(xdev, "ret %d", err); + return err; + } + + if (out.hdr.status) { + xsc_core_err(xdev, "current num of QPs %u\n", atomic_read(&xdev->num_qps)); + return xsc_cmd_status_to_err(&out.hdr); + } + qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; + xsc_core_info(xdev, "qpn = %u\n", qp->qpn); + + qp->trace_info = kzalloc(sizeof(*qp->trace_info), GFP_KERNEL); + if (!qp->trace_info) { + err = -ENOMEM; + goto err_cmd; + } + qp->trace_info->pid = current->pid; + qp->trace_info->timestamp = (u64)(u32)ts.tv_sec * MSEC_PER_SEC + + ts.tv_nsec / NSEC_PER_MSEC; + + err = create_resource_common(xdev, qp); + if (err) { + xsc_core_err(xdev, "err %d", err); + goto err_trace; + } + + err = xsc_debug_qp_add(xdev, qp); + if (err) + xsc_core_err(xdev, "failed adding QP %u to debug file system\n", + qp->qpn); + + atomic_inc(&xdev->num_qps); + return 0; +err_trace: + kfree(qp->trace_info); +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + din.qpn = cpu_to_be32(qp->qpn); + xsc_cmd_exec(xdev, &din, sizeof(din), &out, sizeof(dout)); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_create_qp); + +int xsc_core_destroy_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + xsc_debug_qp_remove(xdev, qp); + xsc_remove_qptrace(xdev, qp); + kfree(qp->trace_info); + + 
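+	/* Remove the QP from the qp_table radix tree and wait for the
+	 * last reference to drop before sending the destroy command
+	 * to the device.
+	 */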
destroy_resource_common(xdev, qp); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qp->qpn); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + atomic_dec(&xdev->num_qps); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_core_destroy_qp); + +int xsc_modify_qp(struct xsc_core_device *xdev, + struct xsc_modify_qp_mbox_in *in, + struct xsc_modify_qp_mbox_out *out, + u32 qpn, u16 status) +{ + int ret = 0; + + in->hdr.opcode = cpu_to_be16(status); + in->qpn = cpu_to_be32(qpn); + in->no_need_wait = 1; + + ret = xsc_cmd_exec(xdev, in, sizeof(*in), out, sizeof(*out)); + if ((status == XSC_CMD_OP_2RST_QP || status == XSC_CMD_OP_2ERR_QP) && + out->hdr.status) { + xsc_wait_qp_flush_complete(xdev, qpn); + out->hdr.status = 0; + } + if (ret || out->hdr.status != 0) { + xsc_core_err(xdev, "failed to modify qp %u status=%u, err=%d out.status %u\n", + qpn, status, ret, out->hdr.status); + ret = -ENOEXEC; + } + + return ret; +} +EXPORT_SYMBOL_GPL(xsc_modify_qp); + +int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp) +{ + static const u16 optab[XSC_QP_NUM_STATE][XSC_QP_NUM_STATE] = { + [XSC_QP_STATE_RST] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_INIT] = XSC_CMD_OP_RST2INIT_QP, + }, + [XSC_QP_STATE_INIT] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_INIT] = XSC_CMD_OP_INIT2INIT_QP, + [XSC_QP_STATE_RTR] = XSC_CMD_OP_INIT2RTR_QP, + }, + [XSC_QP_STATE_RTR] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_RTR2RTS_QP, + }, + [XSC_QP_STATE_RTS] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_RTS2RTS_QP, + [XSC_QP_STATE_SQD] = XSC_CMD_OP_RTS2SQD_QP, + }, + [XSC_QP_STATE_SQD] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_SQD2RTS_QP, + [XSC_QP_STATE_SQD] = XSC_CMD_OP_SQD2SQD_QP, + }, + [XSC_QP_STATE_SQER] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_SQERR2RTS_QP, + }, + [XSC_QP_STATE_ERR] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + } + }; + + struct xsc_modify_qp_mbox_out out; + int err = 0; + u16 op; + + if (cur_state >= XSC_QP_NUM_STATE || new_state >= XSC_QP_NUM_STATE || + !optab[cur_state][new_state]) + return -EINVAL; + + memset(&out, 0, sizeof(out)); + op = optab[cur_state][new_state]; + + if (new_state == XSC_QP_STATE_RTR) { + if (qp->qp_type_internal == XSC_QUEUE_TYPE_RDMA_RC && + ((in->ctx.ip_type == 0 && in->ctx.dip[0] == in->ctx.sip[0]) || + (in->ctx.ip_type != 0 && + memcmp(in->ctx.dip, in->ctx.sip, sizeof(in->ctx.sip)) == 0))) + in->ctx.qp_out_port = xdev->caps.nif_port_num + xdev->pcie_no; + else if (in->ctx.lag_sel_en == 0) + in->ctx.qp_out_port = xdev->pf_id; + else + in->ctx.qp_out_port = in->ctx.lag_sel; + + in->ctx.pcie_no = xdev->pcie_no; + in->ctx.func_id = cpu_to_be16(xdev->glb_func_id); + } + + err = xsc_modify_qp(xdev, in, &out, qp->qpn, op); + if (err) + return err; + + if (new_state == 
XSC_QP_STATE_RTR) { + qp->trace_info->main_ver = YS_QPTRACE_VER_MAJOR; + qp->trace_info->sub_ver = YS_QPTRACE_VER_MINOR; + qp->trace_info->qp_type = qp->qp_type; + qp->trace_info->s_port = in->ctx.src_udp_port; + qp->trace_info->d_port = cpu_to_be16(4791); + qp->trace_info->lqpn = qp->qpn; + qp->trace_info->rqpn = be32_to_cpu(in->ctx.remote_qpn); + qp->trace_info->affinity_idx = (in->ctx.lag_sel_en == 0 ? 0 : in->ctx.lag_sel); + qp->trace_info->af_type = (in->ctx.ip_type == 0 ? AF_INET : AF_INET6); + + if (in->ctx.ip_type == 0) { + qp->trace_info->s_addr.s_addr4 = in->ctx.sip[0]; + qp->trace_info->d_addr.d_addr4 = in->ctx.dip[0]; + } else { + memcpy(qp->trace_info->s_addr.s_addr6, in->ctx.sip, + sizeof(qp->trace_info->s_addr.s_addr6)); + memcpy(qp->trace_info->d_addr.d_addr6, in->ctx.dip, + sizeof(qp->trace_info->d_addr.d_addr6)); + } + + err = xsc_create_qptrace(xdev, qp); + if (err) + return err; + } + + return xsc_cmd_status_to_err(&out.hdr); +} +EXPORT_SYMBOL_GPL(xsc_core_qp_modify); + +int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, + struct xsc_query_qp_mbox_out *out, int outlen) +{ + struct xsc_query_qp_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_qp_query); + +void xsc_init_qp_table(struct xsc_core_device *xdev) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + + xsc_qp_debugfs_init(xdev); + xsc_qptrace_debugfs_init(xdev); +} + +void xsc_cleanup_qp_table(struct xsc_core_device *xdev) +{ + xsc_qp_debugfs_cleanup(xdev); + xsc_qptrace_debugfs_cleanup(xdev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c new file mode 100644 index 0000000000000000000000000000000000000000..59122a490eb851dbf9136572563c241cde0e392d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 - 2022, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/driver.h" + +#define QPTS_ELEMENT_MAX_NUM 0x4000 //16384 = 16k + +static struct proc_dir_entry *g_entry; +static DECLARE_WAIT_QUEUE_HEAD(g_ring_buff_wait); +static struct xsc_qpt_update_msg *g_ring_buff; +static struct mutex g_ring_buff_lock; + +static DECLARE_WAIT_QUEUE_HEAD(g_remove_wait); +static u32 g_pid; + +static unsigned long R; +static unsigned long R_cur; +static unsigned long W; + +static void send_signal(int sig_no) +{ + int ret; + struct task_struct *task = NULL; + + if (g_pid < 2) { + pr_err("%s error, pid(%u) is invalid.\n", __func__, g_pid); + return; + } + + rcu_read_lock(); + task = pid_task(find_vpid(g_pid), PIDTYPE_PID); + rcu_read_unlock(); + + if (!task) { + pr_err("%s error, get pid_task failed, pid(%d).\n", __func__, g_pid); + return; + } + + ret = send_sig(sig_no, task, 0); + if (ret < 0) + pr_err("%s error, send signal(%d) failed.\n", __func__, sig_no); +} + +static int read_buff(struct xsc_qpt_update_msg *msg) +{ + mutex_lock(&g_ring_buff_lock); + if (R_cur == W) { + mutex_unlock(&g_ring_buff_lock); + return 0; + } + + *msg = g_ring_buff[R_cur]; + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + mutex_unlock(&g_ring_buff_lock); + + return 1; +} + +static void write_buff(struct xsc_qpt_update_msg *msg) +{ + mutex_lock(&g_ring_buff_lock); + g_ring_buff[W] = *msg; + W = (W + 1) % QPTS_ELEMENT_MAX_NUM; + if (R == W) + R = (R + 1) % QPTS_ELEMENT_MAX_NUM; + + if (R_cur == W) + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + + mutex_unlock(&g_ring_buff_lock); + + wake_up_interruptible(&g_ring_buff_wait); +} + +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg) +{ + if (!msg) + return -1; + + write_buff(msg); + + return 0; +} +EXPORT_SYMBOL(qpts_write_one_msg); + +static int qpts_open(struct inode *inode, struct file *file) +{ + mutex_lock(&g_ring_buff_lock); + if (g_pid > 0) { + mutex_unlock(&g_ring_buff_lock); + goto end; + } + g_pid = current->pid; + R_cur = R; + mutex_unlock(&g_ring_buff_lock); + + return 0; +end: + pr_err("%s failed, pid:%d.\n", __func__, g_pid); + return -1; +} + +static int qpts_release(struct inode *inode, struct file *file) +{ + mutex_lock(&g_ring_buff_lock); + g_pid = 0; + mutex_unlock(&g_ring_buff_lock); + + wake_up_interruptible(&g_remove_wait); + + return 0; +} + +static ssize_t qpts_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + int error = -EINVAL, i = 0; + struct xsc_qpt_update_msg qpt_msg = {0}; + + if ((file->f_flags & O_NONBLOCK) && R_cur == W) + goto out; + + if (!buf || !count) { + pr_err("%s error, null buffer or count!\n", __func__); + goto out; + } + + error = wait_event_interruptible(g_ring_buff_wait, (R_cur != W)); + if (error) + goto out; + + while (!error && i < count && read_buff(&qpt_msg)) { + error = copy_to_user(buf, &qpt_msg, sizeof(qpt_msg)); + buf += sizeof(qpt_msg); + i += sizeof(qpt_msg); + } + + if (!error) + error = i; + +out: + return error; +} + +static __poll_t qpts_poll(struct file *file, poll_table *wait) +{ + poll_wait(file, &g_ring_buff_wait, wait); + + if (R_cur != W) + return EPOLLIN | EPOLLRDNORM; + + return 0; +} + +const struct proc_ops qpts_ops = { + .proc_open = qpts_open, + .proc_read = qpts_read, + .proc_poll = qpts_poll, + .proc_release = qpts_release, +}; + +int qpts_init(void) +{ + g_ring_buff = kcalloc(QPTS_ELEMENT_MAX_NUM, sizeof(struct xsc_qpt_update_msg), GFP_KERNEL); + if (!g_ring_buff) + return -ENOMEM; + + mutex_init(&g_ring_buff_lock); + + 
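+	/* Expose the trace ring buffer to user space as the read-only
+	 * /proc/qpts_kmsg entry; qpts_read() blocks until a new message
+	 * is written unless the file was opened with O_NONBLOCK.
+	 */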
g_entry = proc_create_data("qpts_kmsg", 0400, NULL, &qpts_ops, NULL); + if (!g_entry) { + pr_err("Could not create /proc/qpts_kmsg file!\n"); + goto error_qpts_init; + } + + return 0; + +error_qpts_init: + kfree(g_ring_buff); + g_ring_buff = NULL; + return -1; +} + +void qpts_fini(void) +{ + mutex_lock(&g_ring_buff_lock); + if (!g_pid) + g_pid = 1; + mutex_unlock(&g_ring_buff_lock); + + if (g_pid > 1) { + send_signal(SIGKILL); + wait_event_interruptible(g_remove_wait, (g_pid == 0)); + } + + remove_proc_entry("qpts_kmsg", NULL); + + kfree(g_ring_buff); + g_ring_buff = NULL; + g_entry = NULL; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c new file mode 100644 index 0000000000000000000000000000000000000000..7471367ce83fe66a4021dadd62d0ac7c0a66b88e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/res_obj.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" +#include "common/qp.h" +#include "common/driver.h" + +static int xsc_alloc_obj(struct xsc_res_obj *obj, struct xsc_bdf_file *file, + void (*release_func)(void *), unsigned long key, + char *data, unsigned int datalen) +{ + obj->release_method = release_func; + obj->file = file; + obj->datalen = datalen; + if (datalen) { + obj->data = kmalloc(datalen, GFP_KERNEL); + if (!obj->data) + return -ENOMEM; + memcpy(obj->data, data, datalen); + } + + radix_tree_preload(GFP_KERNEL); + spin_lock(&file->obj_lock); + radix_tree_insert(&file->obj_tree, key, (void *)obj); + spin_unlock(&file->obj_lock); + radix_tree_preload_end(); + + return 0; +} + +static inline void xsc_free_obj(struct xsc_bdf_file *file, unsigned long key, + struct xsc_res_obj **obj) +{ + *obj = radix_tree_delete(&file->obj_tree, key); + if (!*obj) + return; + if ((*obj)->datalen) + kfree((*obj)->data); +} + +static void xsc_send_cmd_dealloc_pd(struct xsc_core_device *xdev, unsigned int pdn) +{ + struct xsc_dealloc_pd_mbox_in in; + struct xsc_dealloc_pd_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to dealloc pd %d\n", pdn); +} + +static void xsc_free_pd_obj(void *obj) +{ + struct xsc_pd_obj *pd_obj = container_of(obj, struct xsc_pd_obj, obj); + struct xsc_bdf_file *file = pd_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_dealloc_pd(file->xdev, pd_obj->pdn); + key = xsc_idx_to_key(RES_OBJ_PD, pd_obj->pdn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(pd_obj->obj.file->xdev, "free pd obj: %d\n", pd_obj->pdn); + kfree(pd_obj); +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, + unsigned int pdn, char *data, unsigned int datalen) +{ + struct xsc_pd_obj *pd_obj; + unsigned long key; + int ret; + + pd_obj = kzalloc(sizeof(*pd_obj), GFP_KERNEL); + if (!pd_obj) + return -ENOMEM; + + pd_obj->pdn = pdn; + key = xsc_idx_to_key(RES_OBJ_PD, pdn); + ret = xsc_alloc_obj(&pd_obj->obj, file, xsc_free_pd_obj, key, data, datalen); + if (ret) { + kfree(pd_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc pd %d obj\n", pdn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pd_obj); + +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn) +{ + struct 
xsc_pd_obj *pd_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PD, pdn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pd_obj = container_of(obj, struct xsc_pd_obj, obj); + kfree(pd_obj); + xsc_core_dbg(file->xdev, "destroy pd %d obj\n", pdn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pd_obj); + +static void xsc_send_cmd_destroy_mkey(struct xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_destroy_mkey_mbox_in in; + struct xsc_destroy_mkey_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mkey); + if (xdev->reg_mr_via_cmdq) + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + else + ret = xsc_destroy_mkey(xdev, &in, &out); + + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy mkey %d\n", mkey); +} + +static void xsc_send_cmd_dereg_mr(struct xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = cpu_to_be32(mkey); + if (xdev->reg_mr_via_cmdq) + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + else + ret = xsc_dereg_mr(xdev, &in, &out); + + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to dereg mr %d\n", mkey); +} + +static void xsc_free_mr_obj(void *obj) +{ + struct xsc_mr_obj *mr_obj = container_of(obj, struct xsc_mr_obj, obj); + struct xsc_bdf_file *file = mr_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mr_obj->mkey); + struct xsc_res_obj *_obj; + + xsc_send_cmd_destroy_mkey(file->xdev, mr_obj->mkey); + xsc_send_cmd_dereg_mr(file->xdev, mr_obj->mkey); + + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free mr obj: %d\n", mr_obj->mkey); + kfree(mr_obj); +} + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, + unsigned int mkey, char *data, unsigned int datalen) +{ + struct xsc_mr_obj *mr_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + int ret; + + mr_obj = kzalloc(sizeof(*mr_obj), GFP_KERNEL); + if (!mr_obj) + return -ENOMEM; + + mr_obj->mkey = mkey; + ret = xsc_alloc_obj(&mr_obj->obj, file, xsc_free_mr_obj, key, data, datalen); + if (ret) { + kfree(mr_obj); + return ret; + } + + xsc_core_dbg(file->xdev, "alloc mr %d obj\n", mkey); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_mr_obj); + +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey) +{ + struct xsc_mr_obj *mr_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + mr_obj = container_of(obj, struct xsc_mr_obj, obj); + kfree(mr_obj); + xsc_core_dbg(file->xdev, "destroy mr %d obj\n", mkey); +} +EXPORT_SYMBOL_GPL(xsc_destroy_mr_obj); + +static void xsc_send_cmd_destroy_cq(struct xsc_core_device *xdev, unsigned int cqn) +{ + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cqn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy cq %d\n", cqn); +} + +static void xsc_free_cq_obj(void *obj) +{ + struct xsc_cq_obj *cq_obj = container_of(obj, struct xsc_cq_obj, obj); + struct xsc_bdf_file *file = cq_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cq_obj->cqn); + struct xsc_res_obj *_obj; + + 
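+	/* Destroy the hardware CQ first, then drop the host-side object
+	 * from the per-file radix tree before freeing it.
+	 */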
xsc_send_cmd_destroy_cq(file->xdev, cq_obj->cqn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free cq obj: %d\n", cq_obj->cqn); + kfree(cq_obj); +} + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen) +{ + struct xsc_cq_obj *cq_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + int ret; + + cq_obj = kzalloc(sizeof(*cq_obj), GFP_KERNEL); + if (!cq_obj) + return -ENOMEM; + + cq_obj->cqn = cqn; + ret = xsc_alloc_obj(&cq_obj->obj, file, xsc_free_cq_obj, key, data, datalen); + if (ret) { + kfree(cq_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc cq %d obj\n", cqn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_cq_obj); + +void xsc_destroy_cq_obj(struct xsc_bdf_file *file, unsigned int cqn) +{ + struct xsc_cq_obj *cq_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + cq_obj = container_of(obj, struct xsc_cq_obj, obj); + kfree(cq_obj); + xsc_core_dbg(file->xdev, "destroy cq %d obj\n", cqn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_cq_obj); + +void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_modify_qp_mbox_in in; + struct xsc_modify_qp_mbox_out out; + int ret; + + ret = xsc_modify_qp(xdev, &in, &out, qpn, XSC_CMD_OP_2RST_QP); + if (ret) + xsc_core_err(xdev, "failed to reset qp %u\n", qpn); +} + +static void xsc_send_cmd_destroy_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qpn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy qp %d\n", qpn); +} + +static void xsc_free_qp_obj(void *obj) +{ + struct xsc_qp_obj *qp_obj = container_of(obj, struct xsc_qp_obj, obj); + struct xsc_bdf_file *file = qp_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_2rst_qp(file->xdev, qp_obj->qpn); + xsc_send_cmd_destroy_qp(file->xdev, qp_obj->qpn); + + key = xsc_idx_to_key(RES_OBJ_QP, qp_obj->qpn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free qp obj: %d\n", qp_obj->qpn); + kfree(qp_obj); +} + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen) +{ + struct xsc_qp_obj *qp_obj; + unsigned long key; + int ret; + + qp_obj = kzalloc(sizeof(*qp_obj), GFP_KERNEL); + if (!qp_obj) + return -ENOMEM; + + qp_obj->qpn = qpn; + key = xsc_idx_to_key(RES_OBJ_QP, qpn); + ret = xsc_alloc_obj(&qp_obj->obj, file, xsc_free_qp_obj, key, data, datalen); + if (ret) { + kfree(qp_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc qp %d obj\n", qpn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_qp_obj); + +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn) +{ + struct xsc_qp_obj *qp_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_QP, qpn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + qp_obj = container_of(obj, struct xsc_qp_obj, obj); + kfree(qp_obj); + xsc_core_dbg(file->xdev, "destroy qp %d obj\n", qpn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_qp_obj); + +static void xsc_send_cmd_del_pct(struct xsc_core_device *xdev, + unsigned int priority) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct 
xsc_ioctl_data_tl *tl; + struct xsc_flow_pct_v4_del *pct_v4; + unsigned int inlen; + unsigned int outlen; + int ret; + + inlen = sizeof(struct xsc_ioctl_mbox_in) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return; + + outlen = sizeof(struct xsc_ioctl_mbox_out) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + out = kzalloc(outlen, GFP_KERNEL); + if (!out) { + kfree(in); + return; + } + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_IOCTL_FLOW); + in->len = sizeof(struct xsc_ioctl_data_tl) + sizeof(struct xsc_flow_pct_v4_del); + in->len = cpu_to_be16(in->len); + tl = (struct xsc_ioctl_data_tl *)in->data; + tl->opmod = XSC_IOCTL_OP_DEL; + tl->table = XSC_FLOW_TBL_PCT_V4; + tl->length = sizeof(struct xsc_flow_pct_v4_del); + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + pct_v4->priority = priority; + out->len = in->len; + ret = xsc_cmd_exec(xdev, in, inlen, out, outlen); + if (ret || out->hdr.status != 0) + xsc_core_err(xdev, "failed to del pct %d\n", priority); + + kfree(in); + kfree(out); +} + +static void xsc_free_pct_obj(void *obj) +{ + struct xsc_pct_obj *pct_obj = container_of(obj, struct xsc_pct_obj, obj); + struct xsc_bdf_file *file = pct_obj->obj.file; + struct xsc_res_obj *_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, pct_obj->pct_idx); + + xsc_send_cmd_del_pct(file->xdev, pct_obj->pct_idx); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free pct obj, priority:%d\n", pct_obj->pct_idx); + kfree(pct_obj); +} + +/* both pct4 and pct6 are allocated in the same tcam table, so we can delete pct6 + * by pct4 method + */ +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen) +{ + struct xsc_pct_obj *pct_obj; + int ret; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + pct_obj = kzalloc(sizeof(*pct_obj), GFP_KERNEL); + if (!pct_obj) + return -ENOMEM; + + pct_obj->pct_idx = priority; + ret = xsc_alloc_obj(&pct_obj->obj, file, xsc_free_pct_obj, key, data, datalen); + if (ret) + kfree(pct_obj); + xsc_core_dbg(file->xdev, "alloc pct %d obj\n", priority); + return ret; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pct_obj); + +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority) +{ + struct xsc_pct_obj *pct_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pct_obj = container_of(obj, struct xsc_pct_obj, obj); + kfree(pct_obj); + xsc_core_dbg(file->xdev, "destroy pct %d obj\n", priority); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pct_obj); + +void xsc_close_bdf_file(struct xsc_bdf_file *file) +{ + struct radix_tree_iter iter; + void **slot; + struct xsc_res_obj *obj; + + xsc_core_warn(file->xdev, "release bdf file:%lx\n", file->key); + spin_lock(&file->obj_lock); + radix_tree_for_each_slot(slot, &file->obj_tree, &iter, 0) { + obj = (struct xsc_res_obj *)(*slot); + obj->release_method(obj); + } + spin_unlock(&file->obj_lock); +} +EXPORT_SYMBOL_GPL(xsc_close_bdf_file); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..057be7df0f0fbacb28f01fe640488667fd311122 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon 
Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include "common/xsc_core.h" +#include "common/xsc_lag.h" +#include "common/vport.h" +#include "eswitch.h" +#include "xsc_pci_ctrl.h" + +static int xsc_device_enable_sriov(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + u16 vf; + u16 max_msix = 0; + int err; + + max_msix = xsc_get_irq_matrix_global_available(dev); + xsc_core_info(dev, "global_available=%u\n", max_msix); + err = xsc_cmd_enable_hca(dev, num_vfs, max_msix); + if (err) + return err; + + if (!XSC_ESWITCH_MANAGER(dev)) + goto enable_vfs; + + err = xsc_eswitch_enable(dev->priv.eswitch, XSC_ESWITCH_LEGACY, + num_vfs); + if (err) { + xsc_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); + return err; + } + +enable_vfs: + err = xsc_create_vfs_sysfs(dev, num_vfs); + if (err) { + xsc_core_warn(dev, "failed to create SRIOV sysfs (%d)\n", err); + if (XSC_ESWITCH_MANAGER(dev)) + xsc_eswitch_disable(dev->priv.eswitch, true); + + return err; + } + + for (vf = 0; vf < num_vfs; vf++) + sriov->vfs_ctx[vf].enabled = 1; + + return 0; +} + +static void xsc_device_disable_sriov(struct xsc_core_device *dev, + int num_vfs, bool clear_vf) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int vf, err; + + err = xsc_cmd_disable_hca(dev, (u16)num_vfs); + if (err) { + xsc_core_warn(dev, "failed to disable hca, num_vfs=%d, err=%d\n", + num_vfs, err); + return; + } + + for (vf = num_vfs - 1; vf >= 0; vf--) { + if (!sriov->vfs_ctx[vf].enabled) + continue; + + sriov->vfs_ctx[vf].enabled = 0; + } + + if (XSC_ESWITCH_MANAGER(dev)) + xsc_eswitch_disable(dev->priv.eswitch, clear_vf); + + xsc_destroy_vfs_sysfs(dev, num_vfs); +} + +static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + int err; + + if (num_vfs > dev->caps.max_vfs) { + xsc_core_warn(dev, + "invalid sriov param, num_vfs(%d) > total_vfs(%d)\n", + num_vfs, dev->caps.max_vfs); + return -EINVAL; + } + + if (num_vfs && pci_num_vf(dev->pdev)) { + if (num_vfs == pci_num_vf(dev->pdev)) + return 0; + + xsc_core_warn(dev, "VFs already enabled. Disable before enabling %d VFs\n", + num_vfs); + return -EBUSY; + } + + xsc_lag_disable(dev); + + xsc_core_info(dev, "enable %d VFs\n", num_vfs); + + err = xsc_device_enable_sriov(dev, num_vfs); + if (err) { + xsc_core_warn(dev, "xsc_device_enable_sriov failed, err=%d\n", err); + goto device_enable_sriov_err; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + xsc_core_warn(dev, "pci_enable_sriov failed, err=%d\n", err); + goto pci_enable_sriov_err; + } + + xsc_lag_enable(dev); + + return err; + +pci_enable_sriov_err: + xsc_device_disable_sriov(dev, num_vfs, true); + +device_enable_sriov_err: + xsc_lag_enable(dev); + + return err; +} + +static void xsc_sriov_disable(struct pci_dev *pdev) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + int num_vfs = pci_num_vf(dev->pdev); + + xsc_lag_disable(dev); + + xsc_core_info(dev, "disable %d VFs\n", num_vfs); + pci_disable_sriov(pdev); + + xsc_device_disable_sriov(dev, num_vfs, true); + + xsc_lag_enable(dev); +} + +int xsc_core_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int err = 0; + + if (num_vfs) + err = xsc_sriov_enable(pdev, num_vfs); + else + xsc_sriov_disable(pdev); + + if (!err) + sriov->num_vfs = num_vfs; + return err ? 
err : num_vfs; +} + +int xsc_sriov_attach(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + struct xsc_core_device *pf_xdev; + struct xsc_core_sriov *sriov; + + if (!xsc_core_is_pf(dev)) { + if (!pdev->physfn) /*for vf passthrough vm*/ + return 0; + + pf_xdev = pci_get_drvdata(pdev->physfn); + sriov = &pf_xdev->priv.sriov; + + sriov->vfs[dev->vf_id].vf = dev->vf_id; + sriov->vfs[dev->vf_id].dev = dev; + return 0; + } + + if (!dev->priv.sriov.num_vfs) + return 0; + + /* If sriov VFs exist in PCI level, enable them in device level */ + return xsc_device_enable_sriov(dev, pci_num_vf(dev->pdev)); +} + +void xsc_sriov_detach(struct xsc_core_device *dev) +{ + if (!xsc_core_is_pf(dev) || !dev->priv.sriov.num_vfs) + return; + + xsc_device_disable_sriov(dev, pci_num_vf(dev->pdev), false); +} + +static u16 xsc_get_max_vfs(struct xsc_core_device *dev) +{ + /* In RH6.8 and lower pci_sriov_get_totalvfs might return -EINVAL + * return in that case 1 + */ + return (pci_sriov_get_totalvfs(dev->pdev) < 0) ? 0 : + pci_sriov_get_totalvfs(dev->pdev); +} + +static int xsc_sriov_pci_cfg_info(struct xsc_core_device *dev, + struct xsc_pci_sriov *iov) +{ + int pos; + struct pci_dev *pdev = dev->pdev; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + xsc_core_err(dev, "%s: failed to find SRIOV capability in device\n", + __func__); + return -ENODEV; + } + + iov->pos = pos; + pci_read_config_dword(pdev, pos + PCI_SRIOV_CAP, &iov->cap); + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &iov->vf_device); + pci_read_config_dword(pdev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + pci_read_config_byte(pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + return 0; +} + +int xsc_sriov_init(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct pci_dev *pdev = dev->pdev; + struct xsc_pci_sriov *iov = &sriov->pci_sriov; + int total_vfs; + u32 vf_bus, vf_devfn; + int err; + + if (!xsc_core_is_pf(dev)) + return 0; + + err = xsc_sriov_pci_cfg_info(dev, iov); + if (err) { + xsc_core_warn(dev, "%s: pci not support sriov, err=%d\n", + __func__, err); + return 0; + } + + total_vfs = pci_sriov_get_totalvfs(pdev); + if (unlikely(iov->total_vfs == 0)) { + xsc_core_warn(dev, "%s: pci not support sriov, total_vfs=%d, cur_vfs=%d\n", + __func__, iov->total_vfs, sriov->num_vfs); + return 0; + } + sriov->max_vfs = xsc_get_max_vfs(dev); + sriov->num_vfs = pci_num_vf(pdev); + + vf_bus = pdev->bus->number + ((pdev->devfn + iov->offset) >> 8); + vf_devfn = (pdev->devfn + iov->offset) & 0xff; + sriov->vf_bdf_base = (u16)((vf_bus << 8) | vf_devfn); + + sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL); + if (!sriov->vfs_ctx) + return -ENOMEM; + + xsc_core_info(dev, "total_vfs=%d, cur_vfs=%d, vf_bdf_base=0x%02x\n", + total_vfs, sriov->num_vfs, sriov->vf_bdf_base); + xsc_core_info(dev, "vf_offset=%d, stride=%d, vf_device_id=0x%x\n", + iov->offset, iov->stride, iov->vf_device); + err = xsc_sriov_sysfs_init(dev); + if (err) { + xsc_core_warn(dev, "failed to init SRIOV sysfs, err=%d\n", err); + kfree(sriov->vfs_ctx); + return err; 
+ } + + return 0; +} + +void xsc_sriov_cleanup(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + + if (!xsc_core_is_pf(dev)) + return; + + xsc_sriov_sysfs_cleanup(dev); + kfree(sriov->vfs_ctx); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..e5b07b0b5ecc484665bb3275a2e7808d79af284a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "common/xsc_core.h" +#include "common/vport.h" +#include "eswitch.h" + +struct vf_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_sriov_vf *vf, struct vf_attributes *attr, + char *buf); + ssize_t (*store)(struct xsc_sriov_vf *vf, struct vf_attributes *attr, + const char *buf, size_t count); +}; + +static ssize_t vf_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct xsc_sriov_vf *g = container_of(kobj, struct xsc_sriov_vf, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t vf_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct xsc_sriov_vf *g = container_of(kobj, struct xsc_sriov_vf, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +struct vf_group_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_vgroup *g, struct vf_group_attributes *attr, + char *buf); + ssize_t (*store)(struct xsc_vgroup *g, struct vf_group_attributes *attr, + const char *buf, size_t count); +}; + +static ssize_t vf_group_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct vf_group_attributes *ga = + container_of(attr, struct vf_group_attributes, attr); + struct xsc_vgroup *g = container_of(kobj, struct xsc_vgroup, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t vf_group_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct vf_group_attributes *ga = + container_of(attr, struct vf_group_attributes, attr); + struct xsc_vgroup *g = container_of(kobj, struct xsc_vgroup, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +static ssize_t port_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + union ib_gid gid; + int err; + u8 *p; + + err = xsc_query_hca_vport_gid(dev, 1, 1, g->vf, 0, &gid); + if (err) { + xsc_core_warn(dev, "failed to query gid at index 0 for vf %d\n", g->vf); + return err; + } + + p = &gid.raw[8]; + err = sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + return err; +} + +static ssize_t port_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vf_context *vfs_ctx = dev->priv.sriov.vfs_ctx; + struct xsc_hca_vport_context *in; + u64 guid = 0; + int err; + int tmp[8]; + int i; + + err = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + &tmp[0], &tmp[1], 
&tmp[2], &tmp[3], &tmp[4], &tmp[5], &tmp[6], &tmp[7]); + if (err != 8) + return -EINVAL; + + for (i = 0; i < 8; i++) + guid += ((u64)tmp[i] << ((7 - i) * 8)); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select = XSC_HCA_VPORT_SEL_PORT_GUID; + in->port_guid = guid; + err = xsc_modify_hca_vport_context(dev, 1, 1, g->vf + 1, in); + kfree(in); + if (err) + return err; + + vfs_ctx[g->vf].port_guid = guid; + + return count; +} + +static int show_nic_node_guid(struct xsc_core_device *dev, u16 vf, + __be64 *node_guid) +{ + int err; + + err = xsc_query_nic_vport_node_guid(dev, vf + 1, node_guid); + if (!err) + *node_guid = cpu_to_be64(*node_guid); + + return err; +} + +static ssize_t node_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + __be64 guid; + + int err; + u8 *p; + + err = show_nic_node_guid(dev, g->vf, &guid); + if (err) { + xsc_core_warn(dev, "failed to query node guid for vf %d (%d)\n", + g->vf, err); + return err; + } + + p = (u8 *)&guid; + err = sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + + return err; +} + +static int modify_nic_node_guid(struct xsc_core_device *dev, u16 vf, + u64 node_guid) +{ + return xsc_modify_other_nic_vport_node_guid(dev, vf + 1, node_guid); +} + +static ssize_t node_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + u64 guid = 0; + int err; + int tmp[8]; + int i; + + err = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + &tmp[0], &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5], &tmp[6], &tmp[7]); + if (err != 8) + return -EINVAL; + + for (i = 0; i < 8; i++) + guid += ((u64)tmp[i] << ((7 - i) * 8)); + + err = modify_nic_node_guid(dev, g->vf, guid); + if (err) { + xsc_core_warn(dev, "failed to modify node guid for vf %d (%d)\n", + g->vf, err); + return err; + } + + return count; +} + +static const char *policy_str(enum port_state_policy policy) +{ + switch (policy) { + case XSC_POLICY_DOWN: return "Down\n"; + case XSC_POLICY_UP: return "Up\n"; + case XSC_POLICY_FOLLOW: return "Follow\n"; + default: return "Invalid policy\n"; + } +} + +static ssize_t policy_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_hca_vport_context *rep; + const char *p = NULL; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = xsc_query_hca_vport_context(dev, 1, 1, g->vf, rep); + if (err) { + xsc_core_warn(dev, "failed to query port policy for vf %d (%d)\n", + g->vf, err); + goto free; + } + p = policy_str(rep->vport_state_policy); + if (p) + sprintf(buf, "%s", p); + +free: + kfree(rep); + return p ? 
strlen(p) : err; +} + +static int strpolicy(const char *buf, enum port_state_policy *policy) +{ + if (sysfs_streq(buf, "Down")) { + *policy = XSC_POLICY_DOWN; + return 0; + } + + if (sysfs_streq(buf, "Up")) { + *policy = XSC_POLICY_UP; + return 0; + } + + if (sysfs_streq(buf, "Follow")) { + *policy = XSC_POLICY_FOLLOW; + return 0; + } + return -EINVAL; +} + +static ssize_t policy_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vf_context *vfs_ctx = dev->priv.sriov.vfs_ctx; + struct xsc_hca_vport_context *in; + enum port_state_policy policy; + int err; + + err = strpolicy(buf, &policy); + if (err) + return err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->vport_state_policy = policy; + in->field_select = XSC_HCA_VPORT_SEL_STATE_POLICY; + err = xsc_modify_hca_vport_context(dev, 1, 1, g->vf + 1, in); + kfree(in); + if (err) + return err; + + vfs_ctx[g->vf].policy = policy; + + return count; +} + +/* ETH SRIOV SYSFS */ +static ssize_t mac_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF Mac Address\n"); +} + +static ssize_t mac_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + u8 mac[ETH_ALEN]; + int err; + + err = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + if (err == 6) + goto set_mac; + + if (sysfs_streq(buf, "Random")) + eth_random_addr(mac); + else + return -EINVAL; + +set_mac: + err = xsc_eswitch_set_vport_mac(dev->priv.eswitch, g->vf + 1, mac); + return err ? err : count; +} + +static ssize_t vlan_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, ": set VF Vlan, Qos, Vlan Proto(default 802.1Q)\n"); +} + +static ssize_t vlan_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + char vproto_ext[5] = {'\0'}; + __be16 vlan_proto; + u16 vlan_id; + u8 qos; + int err; + + err = sscanf(buf, "%hu:%hhu:802.%4s", &vlan_id, &qos, vproto_ext); + if (err == 3) { + if ((strcmp(vproto_ext, "1AD") == 0) || + (strcmp(vproto_ext, "1ad") == 0)) + vlan_proto = htons(ETH_P_8021AD); + else if ((strcmp(vproto_ext, "1Q") == 0) || + (strcmp(vproto_ext, "1q") == 0)) + vlan_proto = htons(ETH_P_8021Q); + else + return -EINVAL; + } else { + err = sscanf(buf, "%hu:%hhu", &vlan_id, &qos); + if (err != 2) + return -EINVAL; + vlan_proto = htons(ETH_P_8021Q); + } + + err = xsc_eswitch_set_vport_vlan(dev->priv.eswitch, g->vf + 1, + vlan_id, qos, vlan_proto); + return err ? 
err : count; +} + +static const char *vlan_proto_str(u16 vlan, u8 qos, __be16 vlan_proto) +{ + if (!vlan && !qos) + return "N/A"; + + switch (vlan_proto) { + case htons(ETH_P_8021AD): return "802.1ad"; + case htons(ETH_P_8021Q): return "802.1Q"; + default: return "Invalid vlan protocol"; + } +} + +static ssize_t spoofcheck_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to enable|disable VF SpoofCheck\n" + ); +} + +static ssize_t spoofcheck_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, + size_t count) +{ + struct xsc_core_device *dev = g->dev; + bool settings; + int err; + + if (sysfs_streq(buf, "ON")) + settings = true; + else if (sysfs_streq(buf, "OFF")) + settings = false; + else + return -EINVAL; + + err = xsc_eswitch_set_vport_spoofchk(dev->priv.eswitch, g->vf + 1, settings); + return err ? err : count; +} + +static ssize_t trust_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to trust|untrust VF\n" + ); +} + +static ssize_t trust_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, + size_t count) +{ + struct xsc_core_device *dev = g->dev; + bool settings; + int err; + + if (sysfs_streq(buf, "ON")) + settings = true; + else if (sysfs_streq(buf, "OFF")) + settings = false; + else + return -EINVAL; + + err = xsc_eswitch_set_vport_trust(dev->priv.eswitch, g->vf + 1, settings); + return err ? err : count; +} + +static ssize_t link_state_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, "usage: write to set VF State\n"); +} + +static ssize_t link_state_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, + size_t count) +{ + struct xsc_core_device *dev = g->dev; + enum port_state_policy policy; + int err; + + err = strpolicy(buf, &policy); + if (err) + return err; + + err = xsc_eswitch_set_vport_state(dev->priv.eswitch, g->vf + 1, policy); + return err ? err : count; +} + +static ssize_t max_tx_rate_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF max rate\n"); +} + +static ssize_t max_tx_rate_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 max_tx_rate; + u32 min_tx_rate; + int err; + + mutex_lock(&esw->state_lock); + min_tx_rate = esw->vports[g->vf + 1].info.min_rate; + mutex_unlock(&esw->state_lock); + + err = kstrtouint(buf, 10, &max_tx_rate); + if (err != 1) + return -EINVAL; + + err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1, + max_tx_rate, min_tx_rate); + return err ? err : count; +} + +static ssize_t min_tx_rate_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF min rate\n"); +} + +static ssize_t min_tx_rate_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 min_tx_rate; + u32 max_tx_rate; + int err; + + mutex_lock(&esw->state_lock); + max_tx_rate = esw->vports[g->vf + 1].info.max_rate; + mutex_unlock(&esw->state_lock); + + err = kstrtouint(buf, 10, &min_tx_rate); + if (err != 1) + return -EINVAL; + + err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1, + max_tx_rate, min_tx_rate); + return err ? 
err : count; +} + +static ssize_t min_pf_tx_rate_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, "usage: write to set PF min rate\n"); +} + +static ssize_t min_pf_tx_rate_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 min_tx_rate; + u32 max_tx_rate; + int err; + + mutex_lock(&esw->state_lock); + max_tx_rate = esw->vports[g->vf].info.max_rate; + mutex_unlock(&esw->state_lock); + + err = kstrtouint(buf, 10, &min_tx_rate); + if (err != 1) + return -EINVAL; + + err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf, + max_tx_rate, min_tx_rate); + return err ? err : count; +} + +static ssize_t group_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF vport group\n"); +} + +static ssize_t group_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 group_id; + int err; + + err = kstrtouint(buf, 10, &group_id); + if (err != 1) + return -EINVAL; + + if (group_id > 255) + return -EINVAL; + + err = xsc_eswitch_vport_update_group(esw, g->vf + 1, group_id); + + return err ? err : count; +} + +static ssize_t max_tx_rate_group_show(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF group max rate\n"); +} + +static ssize_t max_tx_rate_group_store(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 max_rate; + int err; + + err = kstrtouint(buf, 10, &max_rate); + if (err != 1) + return -EINVAL; + + err = xsc_eswitch_set_vgroup_max_rate(esw, g->group_id, max_rate); + + return err ? err : count; +} + +static ssize_t min_tx_rate_group_show(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF group min rate\n"); +} + +static ssize_t min_tx_rate_group_store(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 min_rate; + int err; + + err = kstrtouint(buf, 10, &min_rate); + if (err != 1) + return -EINVAL; + + err = xsc_eswitch_set_vgroup_min_rate(esw, g->group_id, min_rate); + + return err ? err : count; +} + +#define _sprintf(p, buf, format, arg...) \ + ((PAGE_SIZE - (int)((p) - (buf))) <= 0 ? 
0 : \ + scnprintf((p), PAGE_SIZE - (int)((p) - (buf)), format, ## arg)) + +static ssize_t trunk_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + struct xsc_vport *vport = &esw->vports[g->vf + 1]; + u16 vlan_id = 0; + char *ret = buf; + + mutex_lock(&esw->state_lock); + if (!!bitmap_weight(vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID)) { + ret += _sprintf(ret, buf, "Allowed 802.1Q VLANs:"); + for_each_set_bit(vlan_id, vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID) + ret += _sprintf(ret, buf, " %d", vlan_id); + ret += _sprintf(ret, buf, "\n"); + } + mutex_unlock(&esw->state_lock); + + return (ssize_t)(ret - buf); +} + +static ssize_t trunk_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, + size_t count) +{ + struct xsc_core_device *dev = g->dev; + u16 start_vid, end_vid; + char op[5]; + int err; + + err = sscanf(buf, "%4s %hu %hu", op, &start_vid, &end_vid); + if (err != 3) + return -EINVAL; + + if (!strcmp(op, "add")) + err = xsc_eswitch_add_vport_trunk_range(dev->priv.eswitch, + g->vf + 1, + start_vid, end_vid); + else if (!strcmp(op, "rem")) + err = xsc_eswitch_del_vport_trunk_range(dev->priv.eswitch, + g->vf + 1, + start_vid, end_vid); + else + return -EINVAL; + + return err ? err : count; +} + +static ssize_t config_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + struct xsc_vport_info *ivi; + int vport = g->vf + 1; + char *p = buf; + + if (!esw || !xsc_core_is_vport_manager(dev)) + return -EPERM; + if (!(vport >= 0 && vport < esw->total_vports)) + return -EINVAL; + + mutex_lock(&esw->state_lock); + ivi = &esw->vports[vport].info; + p += _sprintf(p, buf, "VF : %d\n", g->vf); + p += _sprintf(p, buf, "MAC : %pM\n", ivi->mac); + p += _sprintf(p, buf, "VLAN : %d\n", ivi->vlan); + p += _sprintf(p, buf, "QoS : %d\n", ivi->qos); + p += _sprintf(p, buf, "VLAN Proto : %s\n", + vlan_proto_str(ivi->vlan, ivi->qos, ivi->vlan_proto)); + p += _sprintf(p, buf, "SpoofCheck : %s\n", ivi->spoofchk ? "ON" : "OFF"); + p += _sprintf(p, buf, "Trust : %s\n", ivi->trusted ? "ON" : "OFF"); + p += _sprintf(p, buf, "LinkState : %s", policy_str(ivi->link_state)); + p += _sprintf(p, buf, "MinTxRate : %d\n", ivi->min_rate); + p += _sprintf(p, buf, "MaxTxRate : %d\n", ivi->max_rate); + p += _sprintf(p, buf, "VGT+ : %s\n", + !!bitmap_weight(ivi->vlan_trunk_8021q_bitmap, VLAN_N_VID) ? 
+ "ON" : "OFF"); + p += _sprintf(p, buf, "RateGroup : %d\n", ivi->group); + mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t config_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t config_group_show(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + char *p = buf; + + if (!esw || !xsc_core_is_vport_manager(dev)) + return -EPERM; + + mutex_lock(&esw->state_lock); + p += _sprintf(p, buf, "Num VFs : %d\n", g->num_vports); + p += _sprintf(p, buf, "MaxRate : %d\n", g->max_rate); + p += _sprintf(p, buf, "MinRate : %d\n", g->min_rate); + p += _sprintf(p, buf, "BWShare(Indirect cfg) : %d\n", g->bw_share); + mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t config_group_store(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t stats_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vport *vport = xsc_eswitch_get_vport(dev->priv.eswitch, g->vf + 1); + struct ifla_vf_stats ifi; + struct xsc_vport_drop_stats stats = {}; + int err; + char *p = buf; + + err = xsc_eswitch_get_vport_stats(dev->priv.eswitch, g->vf + 1, &ifi); + if (err) + return -EINVAL; + + err = xsc_eswitch_query_vport_drop_stats(dev, vport, &stats); + if (err) + return -EINVAL; + + p += _sprintf(p, buf, "tx_packets : %llu\n", ifi.tx_packets); + p += _sprintf(p, buf, "tx_bytes : %llu\n", ifi.tx_bytes); + p += _sprintf(p, buf, "tx_dropped : %llu\n", stats.tx_dropped); + p += _sprintf(p, buf, "rx_packets : %llu\n", ifi.rx_packets); + p += _sprintf(p, buf, "rx_bytes : %llu\n", ifi.rx_bytes); + p += _sprintf(p, buf, "rx_broadcast : %llu\n", ifi.broadcast); + p += _sprintf(p, buf, "rx_multicast : %llu\n", ifi.multicast); + p += _sprintf(p, buf, "rx_dropped : %llu\n", stats.rx_dropped); + + return (ssize_t)(p - buf); +} + +static ssize_t stats_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t num_vfs_store(struct device *device, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = container_of(device, struct pci_dev, dev); + int req_vfs; + int err; + + if (kstrtoint(buf, 0, &req_vfs) || req_vfs < 0 || + req_vfs > pci_sriov_get_totalvfs(pdev)) + return -EINVAL; + + err = xsc_core_sriov_configure(pdev, req_vfs); + if (err < 0) + return err; + + return count; +} + +static ssize_t num_vfs_show(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = container_of(device, struct pci_dev, dev); + struct xsc_core_device *dev = pci_get_drvdata(pdev); + struct xsc_core_sriov *sriov = &dev->priv.sriov; + + return sprintf(buf, "%d\n", sriov->num_vfs); +} + +static DEVICE_ATTR_RW(num_vfs); + +static const struct sysfs_ops vf_sysfs_ops = { + .show = vf_attr_show, + .store = vf_attr_store, +}; + +static const struct sysfs_ops vf_group_sysfs_ops = { + .show = vf_group_attr_show, + .store = vf_group_attr_store, +}; + +#define VF_RATE_GROUP_ATTR(_name) struct vf_group_attributes vf_group_attr_##_name = \ + __ATTR(_name, 0644, _name##_group_show, _name##_group_store) +#define VF_ATTR(_name) struct vf_attributes vf_attr_##_name = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + 
+VF_ATTR(node); +VF_ATTR(port); +VF_ATTR(policy); + +VF_ATTR(mac); +VF_ATTR(vlan); +VF_ATTR(link_state); +VF_ATTR(spoofcheck); +VF_ATTR(trust); +VF_ATTR(max_tx_rate); +VF_ATTR(min_tx_rate); +VF_ATTR(config); +VF_ATTR(trunk); +VF_ATTR(stats); +VF_ATTR(group); +VF_RATE_GROUP_ATTR(max_tx_rate); +VF_RATE_GROUP_ATTR(min_tx_rate); +VF_RATE_GROUP_ATTR(config); + +static struct attribute *vf_eth_attrs[] = { + &vf_attr_node.attr, + &vf_attr_mac.attr, + &vf_attr_vlan.attr, + &vf_attr_link_state.attr, + &vf_attr_spoofcheck.attr, + &vf_attr_trust.attr, + &vf_attr_max_tx_rate.attr, + &vf_attr_min_tx_rate.attr, + &vf_attr_config.attr, + &vf_attr_trunk.attr, + &vf_attr_stats.attr, + &vf_attr_group.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_eth); + +static struct attribute *vf_group_attrs[] = { + &vf_group_attr_max_tx_rate.attr, + &vf_group_attr_min_tx_rate.attr, + &vf_group_attr_config.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_group); + +static const struct kobj_type vf_type_eth = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = vf_eth_groups, +}; + +static const struct kobj_type vf_group = { + .sysfs_ops = &vf_group_sysfs_ops, + .default_groups = vf_group_groups, +}; + +static struct vf_attributes pf_attr_min_pf_tx_rate = + __ATTR(min_tx_rate, 0644, min_pf_tx_rate_show, min_pf_tx_rate_store); + +static struct attribute *pf_eth_attrs[] = { + &pf_attr_min_pf_tx_rate.attr, + NULL, +}; +ATTRIBUTE_GROUPS(pf_eth); + +static const struct kobj_type pf_type_eth = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = pf_eth_groups, +}; + +static struct attribute *vf_ib_attrs[] = { + &vf_attr_node.attr, + &vf_attr_port.attr, + &vf_attr_policy.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_ib); + +static const struct kobj_type vf_type_ib = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = vf_ib_groups, +}; + +static struct device_attribute *xsc_class_attributes[] = { + &dev_attr_num_vfs, +}; + +int xsc_sriov_sysfs_init(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct device *device = &dev->pdev->dev; + int err; + int i; + + sriov->config = kobject_create_and_add("sriov", &device->kobj); + if (!sriov->config) + return -ENOMEM; + + if (dev->caps.log_esw_max_sched_depth) { + sriov->groups_config = kobject_create_and_add("groups", + sriov->config); + if (!sriov->groups_config) { + err = -ENOMEM; + goto err_groups; + } + } + + for (i = 0; i < ARRAY_SIZE(xsc_class_attributes); i++) { + err = device_create_file(device, xsc_class_attributes[i]); + if (err) + goto err_attr; + } + + return 0; + +err_attr: + if (sriov->groups_config) { + kobject_put(sriov->groups_config); + sriov->groups_config = NULL; + } + +err_groups: + kobject_put(sriov->config); + sriov->config = NULL; + return err; +} + +void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct device *device = &dev->pdev->dev; + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_class_attributes); i++) + device_remove_file(device, xsc_class_attributes[i]); + + if (dev->caps.log_esw_max_sched_depth) + kobject_put(sriov->groups_config); + kobject_put(sriov->config); + sriov->config = NULL; +} + +int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, + u32 group_id, struct kobject *group_kobj) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int err; + + err = kobject_init_and_add(group_kobj, &vf_group, sriov->groups_config, + "%d", group_id); + if (err) + return err; + + kobject_uevent(group_kobj, KOBJ_ADD); + + return 0; +} + +void xsc_destroy_vf_group_sysfs(struct 
xsc_core_device *dev, + struct kobject *group_kobj) +{ + kobject_put(group_kobj); +} + +int xsc_create_vfs_sysfs(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct xsc_sriov_vf *tmp; + static const struct kobj_type *sysfs; + int err; + int vf; + + sysfs = &vf_type_ib; + sysfs = &vf_type_eth; + + sriov->vfs = kcalloc(num_vfs + 1, sizeof(*sriov->vfs), GFP_KERNEL); + if (!sriov->vfs) + return -ENOMEM; + + for (vf = 0; vf < num_vfs; vf++) { + tmp = &sriov->vfs[vf]; + tmp->dev = dev; + tmp->vf = vf; + err = kobject_init_and_add(&tmp->kobj, sysfs, sriov->config, + "%d", vf); + if (err) + goto err_vf; + + kobject_uevent(&tmp->kobj, KOBJ_ADD); + } + + tmp = &sriov->vfs[vf]; + tmp->dev = dev; + tmp->vf = 0; + err = kobject_init_and_add(&tmp->kobj, &pf_type_eth, + sriov->config, "%s", "pf"); + if (err) { + --vf; + goto err_vf; + } + + kobject_uevent(&tmp->kobj, KOBJ_ADD); + + return 0; + +err_vf: + for (; vf >= 0; vf--) { + tmp = &sriov->vfs[vf]; + kobject_put(&tmp->kobj); + } + + kfree(sriov->vfs); + sriov->vfs = NULL; + return err; +} + +void xsc_destroy_vfs_sysfs(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct xsc_sriov_vf *tmp; + int vf; + + if (num_vfs) { + tmp = &sriov->vfs[num_vfs]; + kobject_put(&tmp->kobj); + } + for (vf = 0; vf < num_vfs; vf++) { + tmp = &sriov->vfs[vf]; + kobject_put(&tmp->kobj); + } + + kfree(sriov->vfs); + sriov->vfs = NULL; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h new file mode 100644 index 0000000000000000000000000000000000000000..7f6561c1e005def0e13c5ed73f825e71e39a52a0 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef CMDQ_DEFINE_H +#define CMDQ_DEFINE_H + +#define CMDQ_PA_REG_ADDR 0xFC00000 +#define CMDQ_PA_REG_WIDTH 64 + +#define CMDQ_LOG_SIZE_REG_ADDR 0xFC00008 +#define CMDQ_LOG_SIZE_WIDTH 4 + +#define CMDQ_DB_REG_ADDR 0xFC0000C +#define CMDQ_DB_REG_WIDTH 32 + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c new file mode 100644 index 0000000000000000000000000000000000000000..acbe7e83a9e20f251a034a6ae03097fd7cf8ac56 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c @@ -0,0 +1,954 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "eswitch.h" +#include "common/xsc_fs.h" +#include "net/xsc_eth.h" +#include "common/xsc_lag.h" + +static int _xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, void *out, int outlen) +{ + struct xsc_query_vport_state_in in; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_VPORT_STATE); + in.vport_number = cpu_to_be16(vport); + if (vport) + in.other_vport = 1; + + return xsc_cmd_exec(dev, &in, sizeof(in), out, outlen); +} + +u8 xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, u16 vport) +{ + struct xsc_query_vport_state_out out; + + memset(&out, 0, sizeof(out)); + _xsc_query_vport_state(dev, opmod, vport, &out, sizeof(out)); + + return out.state; +} +EXPORT_SYMBOL(xsc_query_vport_state); + +int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, u8 other_vport, u8 state) +{ + struct xsc_modify_vport_state_in in; + struct xsc_modify_vport_state_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_VPORT_STATE); + in.vport_number = cpu_to_be16(vport); + in.other_vport = other_vport; + in.admin_state = state; + + return xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +} + +int __xsc_query_nic_vport_context(struct xsc_core_device *dev, + u16 vport, void *out, int outlen, + int force_other) +{ + struct xsc_query_nic_vport_context_in in; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + in.vport_number = cpu_to_be16(vport); + if (vport || force_other) + in.other_vport = 1; + + return xsc_cmd_exec(dev, &in, sizeof(in), out, outlen); +} + +static int xsc_query_nic_vport_context(struct xsc_core_device *dev, u16 vport, + void *out, int outlen) +{ + return __xsc_query_nic_vport_context(dev, vport, out, outlen, 0); +} + +int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, + int inlen) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *tmp; + int err; + + memset(&out, 0, sizeof(out)); + tmp = (struct xsc_modify_nic_vport_context_in *)in; + tmp->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "fail to modify nic vport err=%d status=%d\n", + err, out.hdr.status); + } + return err; +} + +int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 *min_inline) +{ + struct xsc_query_nic_vport_context_out out; + int err; + + memset(&out, 0, sizeof(out)); + err = xsc_query_nic_vport_context(dev, vport, &out, sizeof(out)); + if (!err) + *min_inline = out.nic_vport_ctx.min_wqe_inline_mode; + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_min_inline); + +void xsc_query_min_inline(struct xsc_core_device *dev, + u8 *min_inline_mode) +{ + switch (dev->caps.wqe_inline_mode) { + case XSC_CAP_INLINE_MODE_VPORT_CONTEXT: + if (!xsc_query_nic_vport_min_inline(dev, 0, min_inline_mode)) + break; + fallthrough; + case XSC_CAP_INLINE_MODE_L2: + *min_inline_mode = XSC_INLINE_MODE_L2; + break; + case XSC_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = XSC_INLINE_MODE_NONE; + break; + } +} +EXPORT_SYMBOL_GPL(xsc_query_min_inline); + +int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 min_inline) +{ + struct xsc_modify_nic_vport_context_in in; + + memset(&in, 0, sizeof(in)); 
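	/*
	 * field_select appears to act as a validity mask: only the
	 * min_inline member of nic_vport_ctx is applied by the command,
	 * and other_vport = 1 targets the VF named by vport_number
	 * rather than the calling function itself.
	 */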
+ in.field_select.min_inline = 1; + in.vport_number = vport; + in.other_vport = 1; + in.nic_vport_ctx.min_wqe_inline_mode = min_inline; + + return xsc_modify_nic_vport_context(dev, &in, sizeof(in)); +} + +static int __xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, + int force_other) +{ + struct xsc_query_nic_vport_context_out out; + u8 *out_addr; + int err; + + memset(&out, 0, sizeof(out)); + out_addr = out.nic_vport_ctx.permanent_address; + + err = __xsc_query_nic_vport_context(dev, vport, &out, sizeof(out), + force_other); + if (!err) + ether_addr_copy(addr, out_addr); + + return err; +} + +int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr) +{ + return __xsc_query_nic_vport_mac_address(dev, vport, addr, 1); +} +EXPORT_SYMBOL_GPL(xsc_query_other_nic_vport_mac_address); + +int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr) +{ + return __xsc_query_nic_vport_mac_address(dev, vport, addr, 0); +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_address); + +static int __xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, int force_other, bool perm_mac) +{ + struct xsc_modify_nic_vport_context_in *in; + struct xsc_modify_nic_vport_context_out out; + struct xsc_adapter *adapter = netdev_priv(dev->netdev); + struct xsc_vport *evport = NULL; + int err, in_sz, i; + u8 *mac_addr; + u16 caps = 0; + u16 caps_mask = 0; + u16 lag_id = xsc_get_lag_id(dev); + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; + + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->lag_id = cpu_to_be16(lag_id); + + if (perm_mac) { + in->field_select.permanent_address = 1; + mac_addr = in->nic_vport_ctx.permanent_address; + } else { + in->field_select.current_address = 1; + mac_addr = in->nic_vport_ctx.current_address; + } + + if (force_other) { + in->other_vport = 1; + in->vport_number = cpu_to_be16(vport); + evport = xsc_eswitch_get_vport(adapter->xdev->priv.eswitch, i + 1); + } + + if (xsc_get_pp_bypass_res(dev, false)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS); + in->caps = cpu_to_be16(caps); + in->caps_mask = cpu_to_be16(caps_mask); + + ether_addr_copy(mac_addr, addr); + + in->field_select.addresses_list = 1; + if (evport) + in->nic_vport_ctx.vlan = cpu_to_be16(evport->vlan_id); + + in->nic_vport_ctx.vlan_allowed = 1; + + err = xsc_modify_nic_vport_context(dev, in, in_sz); + if (be16_to_cpu(out.outer_vlan_id)) + goto ret; + + for (i = 0; i < VLAN_N_VID; i++) { + if (test_bit(i, adapter->vlan_params.active_cvlans)) { + in->nic_vport_ctx.vlan = cpu_to_be16(i); + in->nic_vport_ctx.vlan_allowed = 1; + err |= xsc_modify_nic_vport_context(dev, in, in_sz); + } + if (test_bit(i, adapter->vlan_params.active_svlans)) { + in->nic_vport_ctx.vlan = cpu_to_be16(i); + in->nic_vport_ctx.vlan_allowed = 1; + err |= xsc_modify_nic_vport_context(dev, in, in_sz); + } + } + +ret: + kfree(in); + return err; +} + +static int __xsc_modify_vport_max_rate(struct xsc_core_device *dev, + u16 vport, u32 rate) +{ + struct xsc_vport_rate_limit_mobox_in in; + struct xsc_vport_rate_limit_mobox_out out; + int err = 0; + + memset(&in, 0, sizeof(struct xsc_vport_rate_limit_mobox_in)); + memset(&out, 0, sizeof(struct xsc_vport_rate_limit_mobox_out)); + + in.vport_number = cpu_to_be16(vport); + if (vport) + in.other_vport = 1; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_VPORT_RATE_LIMIT); + in.rate = cpu_to_be32(rate); + + err = xsc_cmd_exec(dev, 
&in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "modify_vport_max_rate failed!err=%d, status=%u\n", + err, out.hdr.status); + return -EINVAL; + } + + return 0; +} + +int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac) +{ + return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 1, perm_mac); +} +EXPORT_SYMBOL(xsc_modify_other_nic_vport_mac_address); + +int xsc_modify_vport_max_rate(struct xsc_core_device *dev, + u16 vport, u32 rate) +{ + return __xsc_modify_vport_max_rate(dev, vport, rate); +} +EXPORT_SYMBOL(xsc_modify_vport_max_rate); + +int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac) +{ + return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 0, perm_mac); +} +EXPORT_SYMBOL(xsc_modify_nic_vport_mac_address); + +int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu) +{ + struct xsc_query_nic_vport_context_out out; + int err; + + memset(&out, 0, sizeof(out)); + err = xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + if (!err) + *mtu = out.nic_vport_ctx.mtu; + + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mtu); + +int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu) +{ + struct xsc_modify_nic_vport_context_in in; + int err; + + memset(&in, 0, sizeof(in)); + in.field_select.mtu = 1; + in.nic_vport_ctx.mtu = mtu; + + err = xsc_modify_nic_vport_context(dev, &in, sizeof(in)); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mtu); + +int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size) +{ + struct xsc_query_nic_vport_context_in in; + struct xsc_query_nic_vport_context_out *out; + int max_list_size; + int req_list_size; + int out_sz; + int err; + int i; + + req_list_size = *list_size; + + max_list_size = list_type == XSC_NVPRT_LIST_TYPE_UC ? + 1 << dev->caps.log_max_current_uc_list : + 1 << dev->caps.log_max_current_mc_list; + + if (req_list_size > max_list_size) { + xsc_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n", + req_list_size, max_list_size); + req_list_size = max_list_size; + } + + out_sz = sizeof(struct xsc_query_nic_vport_context_out) + + req_list_size * 8; + + memset(&in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + in.hdr.opcode = XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT; + in.allowed_list_type = list_type; + in.vport_number = vport; + in.other_vport = 1; + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, out_sz); + if (err) + goto out; + + req_list_size = out->nic_vport_ctx.allowed_list_size; + *list_size = req_list_size; + for (i = 0; i < req_list_size; i++) { + u8 *mac_addr = (u8 *)out->nic_vport_ctx.current_uc_mac_address[i]; + + ether_addr_copy(addr_list[i], mac_addr); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_list); + +int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *in; + int max_list_size; + int in_sz; + int err; + int i; + + max_list_size = list_type == XSC_NVPRT_LIST_TYPE_UC ? 
+ 1 << dev->caps.log_max_current_uc_list : + 1 << dev->caps.log_max_current_mc_list; + + if (list_size > max_list_size) + return -ENOSPC; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + + list_size * 8; + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->hdr.opcode = XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT; + in->field_select.addresses_list = 1; + in->nic_vport_ctx.allowed_list_type = list_type; + in->nic_vport_ctx.allowed_list_size = list_size; + + for (i = 0; i < list_size; i++) { + u8 *curr_mac = + (u8 *)(in->nic_vport_ctx.current_uc_mac_address[i]); + ether_addr_copy(curr_mac, addr_list[i]); + } + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mac_list); + +int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, + unsigned long *vlans) +{ + struct xsc_query_nic_vport_context_in in; + struct xsc_query_nic_vport_context_out *out; + int req_list_size; + int out_sz; + int err; + int i; + + req_list_size = 1 << dev->caps.log_max_vlan_list; + out_sz = sizeof(*out) + req_list_size * 8; + + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT; + in.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; + in.vport_number = vport; + + if (vport) + in.other_vport = 1; + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, out_sz); + if (err) + goto out; + + req_list_size = out->nic_vport_ctx.allowed_list_size; + + for (i = 0; i < req_list_size; i++) { + u16 *vlan_addr = (u16 *)&out->nic_vport_ctx.current_uc_mac_address[i]; + + bitmap_set(vlans, (*vlan_addr & 0xfff), 1); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_vlans); + +int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, + u16 vid, bool add) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *in; + int in_sz; + int err; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; + + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + in->field_select.addresses_list = 1; + + in->nic_vport_ctx.vlan_allowed = add; + in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; + in->nic_vport_ctx.vlan = cpu_to_be16(vid); + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + kfree(in); + + if (err || out.hdr.status) { + xsc_core_err(dev, "Failed to modify vlan err=%d out.status=%u", + err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_vlans); + +int xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, + u64 *system_image_guid) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + + *system_image_guid = out.nic_vport_ctx.system_image_guid; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_system_image_guid); + +int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, + u64 *node_guid) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, vport, &out, sizeof(out)); + + *node_guid = out.nic_vport_ctx.node_guid; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_node_guid); + +static int __xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 
node_guid, + int force_other) +{ + struct xsc_modify_nic_vport_context_in in; + int err; + + /* vport = 0 only if ECPF modifying Host PF */ + if (!vport && !force_other) + return -EINVAL; + if (!dev->caps.vport_group_manager) + return -EACCES; + + memset(&in, 0, sizeof(in)); + in.field_select.node_guid = 1; + in.vport_number = vport; + if (vport || force_other) + in.other_vport = 1; + + in.nic_vport_ctx.node_guid = node_guid; + + err = xsc_modify_nic_vport_context(dev, &in, sizeof(in)); + + return err; +} + +int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid) +{ + return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 0); +} + +int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid) +{ + return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 1); +} + +int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, + u16 *qkey_viol_cntr) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + + *qkey_viol_cntr = out.nic_vport_ctx.qkey_violation_counter; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_qkey_viol_cntr); + +int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid) +{ + int in_sz = sizeof(struct xsc_query_hca_vport_gid_in); + int out_sz = sizeof(struct xsc_query_hca_vport_gid_out); + struct xsc_query_hca_vport_gid_in *in; + struct xsc_query_hca_vport_gid_out *out; + int is_group_manager; + union ib_gid *tmp; + int tbsz; + int nout; + int err; + + is_group_manager = dev->caps.vport_group_manager; + tbsz = dev->caps.port[port_num].gid_table_len; + xsc_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n", + vf_num, gid_index, tbsz); + + if (gid_index > tbsz && gid_index != 0xffff) + return -EINVAL; + + if (gid_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*gid); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_GID; + if (other_vport) { + if (is_group_manager) { + in->vport_number = vf_num; + in->other_vport = 1; + } else { + err = -EPERM; + goto out; + } + } + + in->gid_index = gid_index; + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + tmp = (union ib_gid *)((void *)out + + sizeof(struct xsc_query_hca_vport_gid_out)); + gid->global.subnet_prefix = tmp->global.subnet_prefix; + gid->global.interface_id = tmp->global.interface_id; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_hca_vport_gid); + +int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey) +{ + int in_sz = sizeof(struct xsc_query_hca_vport_pkey_in); + int out_sz = sizeof(struct xsc_query_hca_vport_pkey_out); + struct xsc_query_hca_vport_pkey_in *in; + struct xsc_query_hca_vport_pkey_out *out; + int is_group_manager; + void *pkarr; + int nout; + int tbsz; + int err; + int i; + + is_group_manager = dev->caps.vport_group_manager; + + tbsz = dev->caps.port[port_num].pkey_table_len; + if (pkey_index > tbsz && pkey_index != 0xffff) + return -EINVAL; + + if (pkey_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*pkey); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || 
!out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_PKEY; + if (other_vport) { + if (is_group_manager) { + in->vport_number = vf_num; + in->other_vport = 1; + } else { + err = -EPERM; + goto out; + } + } + in->pkey_index = pkey_index; + + if (dev->caps.num_ports == 2) + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + pkarr = out->pkey; + for (i = 0; i < nout; i++, pkey++, pkarr += sizeof(*pkey)) + *pkey = *(u16 *)pkarr; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_hca_vport_pkey); + +int xsc_query_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct xsc_hca_vport_context *rep) +{ + struct xsc_query_hca_vport_context_out *out = NULL; + struct xsc_query_hca_vport_context_in in; + int is_group_manager; + void *ctx; + int err; + + is_group_manager = dev->caps.vport_group_manager; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT; + + if (other_vport) { + if (is_group_manager) { + in.other_vport = 1; + in.vport_number = vf_num; + } else { + err = -EPERM; + goto ex; + } + } + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + if (dev->caps.num_ports == 2) + in.port_num = port_num; + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto ex; + + ctx = &out->hca_vport_ctx; + memcpy(rep, ctx, sizeof(struct xsc_hca_vport_context)); + +ex: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_hca_vport_context); + +int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev, + u64 *node_guid) +{ + struct xsc_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = xsc_query_hca_vport_context(dev, 0, 1, 0, rep); + if (!err) + *node_guid = rep->node_guid; + + kfree(rep); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_hca_vport_node_guid); + +int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, + u16 vport, + int *promisc, + int *allmcast) +{ + struct xsc_query_nic_vport_context_out *out; + int err; + + out = kzalloc(sizeof(out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = xsc_query_nic_vport_context(dev, vport, out, sizeof(*out)); + if (err) + goto out; + + *promisc = out->nic_vport_ctx.promisc; + *allmcast = out->nic_vport_ctx.allmcast; + +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_promisc); + +int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev, + bool allmulti_flag, bool promisc_flag, + int allmulti, int promisc) +{ + struct xsc_modify_nic_vport_context_in *in; + int err; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select.allmcast = allmulti_flag; + in->nic_vport_ctx.allmcast = allmulti; + + in->field_select.promisc = promisc_flag; + in->nic_vport_ctx.promisc = promisc; + + err = xsc_modify_nic_vport_context(dev, in, sizeof(*in)); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_promisc); + +int xsc_modify_nic_vport_spoofchk(struct xsc_core_device *dev, + u16 vport, int spoofchk) +{ + struct xsc_modify_nic_vport_context_in *in; + int err; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->other_vport = 1; + in->vport_number = cpu_to_be16(vport); + in->field_select.spoofchk = 1; + in->nic_vport_ctx.spoofchk = spoofchk; + + err = xsc_modify_nic_vport_context(dev, in, sizeof(*in)); + + kvfree(in); + + return err; +} 
+EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_spoofchk); + +int xsc_modify_nic_vport_trust(struct xsc_core_device *dev, + u16 vport, bool trust) +{ + struct xsc_modify_nic_vport_context_in *in; + int err; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->other_vport = 1; + in->vport_number = cpu_to_be16(vport); + in->field_select.trust = 1; + in->nic_vport_ctx.trust = (trust ? 1 : 0); + + err = xsc_modify_nic_vport_context(dev, in, sizeof(*in)); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_trust); + +int xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz) +{ + struct xsc_query_vport_counter_in *in; + int is_group_manager; + int err; + + is_group_manager = dev->caps.vport_group_manager; + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + return err; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_VPORT_COUNTER; + if (other_vport) { + if (is_group_manager) { + in->other_vport = 1; + in->vport_number = (vf + 1); + } else { + err = -EPERM; + goto free; + } + } + + if (dev->caps.num_ports == 2) + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, sizeof(*in), out, out_sz); +free: + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_vport_counter); + +int xsc_modify_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req) +{ + struct xsc_modify_hca_vport_context_in in; + struct xsc_modify_hca_vport_context_out out; + int is_group_manager; + int err; + + xsc_core_dbg(dev, "vf %d\n", vf); + is_group_manager = dev->caps.vport_group_manager; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT; + if (other_vport) { + if (is_group_manager) { + in.other_vport = 1; + in.vport_number = vf; + } else { + err = -EPERM; + goto err; + } + } + + if (dev->caps.num_ports > 1) + in.port_num = port_num; + memcpy(&in.hca_vport_ctx, req, sizeof(*req)); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +err: + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_hca_vport_context); + +/** + * xsc_eswitch_get_total_vports - Get total vports of the eswitch + * + * @dev: Pointer to core device + * + * xsc_eswitch_get_total_vports returns total number of vports for + * the eswitch. + */ +u16 xsc_eswitch_get_total_vports(const struct xsc_core_device *dev) +{ + return XSC_SPECIAL_VPORTS(dev) + xsc_core_max_vfs(dev); +} +EXPORT_SYMBOL(xsc_eswitch_get_total_vports); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c new file mode 100644 index 0000000000000000000000000000000000000000..5d0c96f204e229134c11e5a2240a3beccb300126 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/driver.h" +#include "common/device.h" +#include "common/xsc_core.h" +#include "wq.h" + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq) +{ + return (u32)wq->fbc.sz_m1 + 1; +} +EXPORT_SYMBOL_GPL(xsc_wq_cyc_get_size); + +static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) +{ + return ((u32)1 << log_sz) << log_stride; +} + +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + u8 log_wq_stride = ele_log_size; + u8 log_wq_sz = q_log_size; + int err; + + err = xsc_db_alloc_node(xdev, &wq_ctrl->db, param->db_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_db_alloc_node() failed, %d\n", err); + return err; + } + + err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), + &wq_ctrl->buf, + param->buf_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); + goto err_db_free; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc); + + wq_ctrl->xdev = xdev; + + return 0; + +err_db_free: + xsc_db_free(xdev, &wq_ctrl->db); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eth_cqwq_create); + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + u8 log_wq_stride = ele_log_size; + u8 log_wq_sz = q_log_size; + struct xsc_frag_buf_ctrl *fbc = &wq->fbc; + int err; + + err = xsc_db_alloc_node(xdev, &wq_ctrl->db, param->db_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_db_alloc_node() failed, %d\n", err); + return err; + } + + err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), + &wq_ctrl->buf, param->buf_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); + goto err_db_free; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); + wq->sz = xsc_wq_cyc_get_size(wq); + + wq_ctrl->xdev = xdev; + + return 0; + +err_db_free: + xsc_db_free(xdev, &wq_ctrl->db); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eth_wq_cyc_create); + +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl) +{ + xsc_frag_buf_free(wq_ctrl->xdev, &wq_ctrl->buf); + xsc_db_free(wq_ctrl->xdev, &wq_ctrl->db); +} +EXPORT_SYMBOL_GPL(xsc_eth_wq_destroy); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h new file mode 100644 index 0000000000000000000000000000000000000000..8811ef1bf0f772472c583dad59349c9ce84c90b1 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_WQ_H__ +#define __XSC_WQ_H__ + +#include "common/cq.h" +#include "common/qp.h" + +struct xsc_wq_param { + int buf_numa_node; + int db_numa_node; +}; + +struct xsc_wq_ctrl { + struct xsc_core_device *xdev; + struct xsc_frag_buf buf; + struct xsc_db db; +}; + +struct xsc_wq_cyc { + struct xsc_frag_buf_ctrl fbc; + u16 sz; + u16 wqe_ctr; + u16 cur_sz; +}; + +struct xsc_cqwq { + struct xsc_frag_buf_ctrl fbc; + __be32 *db; + u32 cc; /* consumer counter */ +}; + +enum xsc_res_type { + XSC_RES_UND = 0, + XSC_RES_RQ, + XSC_RES_SQ, + XSC_RES_MAX, +}; + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq); + +int xsc_buf_alloc_node(struct xsc_core_device *dev, int size, + struct xsc_frag_buf *buf, int node); + +/*api for eth driver*/ +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl); + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl); +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl); + +static inline void xsc_init_fbc_offset(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + u16 strides_offset, + struct xsc_frag_buf_ctrl *fbc) +{ + fbc->frags = frags; + fbc->log_stride = log_stride; + fbc->log_sz = log_sz; + fbc->sz_m1 = (1 << fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; + fbc->strides_offset = strides_offset; +} + +static inline void xsc_init_fbc(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + struct xsc_frag_buf_ctrl *fbc) +{ + xsc_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); +} + +static inline void *xsc_frag_buf_get_wqe(struct xsc_frag_buf_ctrl *fbc, + u32 ix) +{ + unsigned int frag; + + ix += fbc->strides_offset; + frag = ix >> fbc->log_frag_strides; + + return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + +static inline u32 +xsc_frag_buf_get_idx_last_contig_stride(struct xsc_frag_buf_ctrl *fbc, u32 ix) +{ + u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; + + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); +} + +static inline int xsc_wq_cyc_missing(struct xsc_wq_cyc *wq) +{ + return wq->sz - wq->cur_sz; +} + +static inline int xsc_wq_cyc_is_empty(struct xsc_wq_cyc *wq) +{ + return !wq->cur_sz; +} + +static inline void xsc_wq_cyc_push(struct xsc_wq_cyc *wq) +{ + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void xsc_wq_cyc_push_n(struct xsc_wq_cyc *wq, u8 n) +{ + wq->wqe_ctr += n; + wq->cur_sz += n; +} + +static inline void xsc_wq_cyc_pop(struct xsc_wq_cyc *wq) +{ + wq->cur_sz--; +} + +static inline u16 xsc_wq_cyc_ctr2ix(struct xsc_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u16 xsc_wq_cyc_get_head(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr); +} + +static inline u16 xsc_wq_cyc_get_tail(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); +} + +static inline void *xsc_wq_cyc_get_wqe(struct xsc_wq_cyc *wq, u16 ix) +{ + return xsc_frag_buf_get_wqe(&wq->fbc, ix); +} + +static inline u32 xsc_cqwq_ctr2ix(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u32 xsc_cqwq_get_ci(struct xsc_cqwq *wq) +{ + return xsc_cqwq_ctr2ix(wq, wq->cc); +} + +static inline u32 xsc_cqwq_get_ctr_wrap_cnt(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr >> 
wq->fbc.log_sz; +} + +static inline u32 xsc_cqwq_get_wrap_cnt(struct xsc_cqwq *wq) +{ + return xsc_cqwq_get_ctr_wrap_cnt(wq, wq->cc); +} + +static inline void xsc_cqwq_pop(struct xsc_cqwq *wq) +{ + wq->cc++; +} + +#endif /* __XSC_WQ_H__ */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c new file mode 100644 index 0000000000000000000000000000000000000000..4d12ce7f0459c5a4dab282d8ef8c9b119ff5a4ae --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c @@ -0,0 +1,1418 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include +#include "common/xsc_lag.h" +#include "common/xsc_hsi.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" +#include "net/xsc_eth.h" + +#include +#include +#include +#include + +static struct xsc_board_lag *board_lag_array[MAX_BOARD_NUM]; + +struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev) +{ + return board_lag_array[xdev->board_info->board_id]; +} +EXPORT_SYMBOL(xsc_board_lag_get); + +void xsc_board_lag_set(struct xsc_core_device *xdev, + void *board_lag) +{ + struct xsc_board_lag *board_lag_new = board_lag; + + board_lag_new->board_id = xdev->board_info->board_id; + board_lag_array[xdev->board_info->board_id] = board_lag_new; +} + +void xsc_board_lag_reset(u32 board_id) +{ + board_lag_array[board_id] = NULL; +} + +static u8 hash_type_map[] = { + [NETDEV_LAG_HASH_NONE] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_L2] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_L34] = XSC_LAG_HASH_L34, + [NETDEV_LAG_HASH_L23] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_E23] = XSC_LAG_HASH_E23, + [NETDEV_LAG_HASH_E34] = XSC_LAG_HASH_E34, + [NETDEV_LAG_HASH_UNKNOWN] = XSC_LAG_HASH_L23, +}; + +static inline u8 xsc_lag_hashtype_convert(enum netdev_lag_hash hash_type) +{ + return hash_type_map[hash_type]; +} + +static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) +{ + switch (BOND_MODE(bond)) { + case BOND_MODE_ROUNDROBIN: + return NETDEV_LAG_TX_TYPE_ROUNDROBIN; + case BOND_MODE_ACTIVEBACKUP: + return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; + case BOND_MODE_BROADCAST: + return NETDEV_LAG_TX_TYPE_BROADCAST; + case BOND_MODE_XOR: + case BOND_MODE_8023AD: + return NETDEV_LAG_TX_TYPE_HASH; + default: + return NETDEV_LAG_TX_TYPE_UNKNOWN; + } +} + +enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond) +{ + switch (bond->params.xmit_policy) { + case BOND_XMIT_POLICY_LAYER2: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_LAYER34: + return NETDEV_LAG_HASH_L34; + case BOND_XMIT_POLICY_LAYER23: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_ENCAP23: + return NETDEV_LAG_HASH_E23; + case BOND_XMIT_POLICY_ENCAP34: + return NETDEV_LAG_HASH_E34; + default: + return NETDEV_LAG_HASH_UNKNOWN; + } +} + +static inline bool __xsc_lag_is_active(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_MODE_FLAGS); +} + +static inline bool __xsc_lag_is_roce(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_FLAG_ROCE); +} + +static inline bool __xsc_lag_is_kernel(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_FLAG_KERNEL); +} + +static inline struct xsc_lag *__xsc_get_lag(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag; + + if (!xdev) + return NULL; + + board_lag = xsc_board_lag_get(xdev); + if (!board_lag || xdev->bond_id == BOND_ID_INVALID) + return NULL; + + return 
&board_lag->xsc_lag[xdev->bond_id]; +} + +int xsc_cmd_create_lag(struct xsc_lag_event *entry) +{ + struct xsc_create_lag_mbox_in in = {}; + struct xsc_create_lag_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + struct net_device *netdev = xdev->netdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_CREATE); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.lag_sel_mode = entry->lag_sel_mode; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + in.req.slave_status = entry->slave_status; + + memcpy(in.req.netdev_addr, netdev->dev_addr, ETH_ALEN); + + xsc_core_info(xdev, "create LAG: lag_id = %d, lag_type = %d, lag_sel_mode = %d, bond_mode = %d\n", + entry->lag_id, entry->lag_type, entry->lag_sel_mode, entry->bond_mode); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create LAG, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_cmd_add_lag_member(struct xsc_lag_event *entry) +{ + struct xsc_add_lag_member_mbox_in in = {}; + struct xsc_add_lag_member_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + struct net_device *netdev = xdev->netdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_ADD_MEMBER); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.lag_sel_mode = entry->lag_sel_mode; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + in.req.slave_status = entry->slave_status; + in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id; + + memcpy(in.req.netdev_addr, netdev->dev_addr, ETH_ALEN); + + xsc_core_info(xdev, "add LAG member: lag_id = %d, lag_type = %d, bond_mode = %d\n", + entry->lag_id, entry->lag_type, entry->bond_mode); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to add LAG member, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_cmd_remove_lag_member(struct xsc_lag_event *entry) +{ + struct xsc_remove_lag_member_mbox_in in = {}; + struct xsc_remove_lag_member_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_REMOVE_MEMBER); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + if (entry->lag_type & XSC_LAG_FLAG_ROCE && entry->is_roce_lag_xdev) { + in.req.is_roce_lag_xdev = entry->is_roce_lag_xdev; + in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id; + in.req.not_roce_lag_xdev_mask = entry->not_roce_lag_xdev_mask; + } + + xsc_core_info(xdev, "remove LAG member: lag_id = %d, lag_type = %d, bond_mode = %d\n", + entry->lag_id, entry->lag_type, entry->bond_mode); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to add LAG member, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_cmd_update_lag_member_status(struct xsc_lag_event *entry) +{ + struct xsc_update_lag_member_status_mbox_in in = {}; + struct xsc_update_lag_member_status_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS); + + in.req.lag_type = entry->lag_type; + in.req.bond_mode = 
entry->bond_mode; + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.mac_idx = xdev->pf_id; + in.req.slave_status = entry->slave_status; + + xsc_core_info(xdev, "update LAG member status: lag_id = %d, bond_mode = %d, lag_type = %d, slave_status = %d, mac_idx = %d\n", + entry->lag_id, entry->bond_mode, entry->lag_type, + entry->slave_status, xdev->pf_id); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to update LAG member status, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return ret; +} + +int xsc_cmd_update_lag_hash_type(struct xsc_lag_event *entry) +{ + struct xsc_update_lag_hash_type_mbox_in in = {}; + struct xsc_update_lag_hash_type_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_HASH_TYPE); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_sel_mode = entry->lag_sel_mode; + + xsc_core_info(xdev, "update LAG hash type: lag_id = %d, lag_sel_mode = %d\n", + entry->lag_id, in.req.lag_sel_mode); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to update LAG hash type, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return ret; +} + +int xsc_cmd_destroy_lag(struct xsc_lag_event *entry) +{ + struct xsc_destroy_lag_mbox_in in = {}; + struct xsc_destroy_lag_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_DESTROY); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + + xsc_core_info(xdev, "destroy LAG: lag_id = %d\n", entry->lag_id); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy LAG, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static int xsc_lag_set_qos(struct xsc_core_device *xdev, u16 lag_id, u8 member_idx, u8 lag_op) +{ + struct xsc_set_lag_qos_mbox_in in; + struct xsc_set_lag_qos_mbox_out out; + struct xsc_set_lag_qos_request *req; + int ret; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + req = &in.req; + + req->lag_id = cpu_to_be16(lag_id); + req->member_idx = member_idx; + req->lag_op = lag_op; + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_SET_QOS); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + return ret; +} + +void xsc_create_lag(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + + if (roce_lag) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_create_lag(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (xsc_lag_set_qos(xdev, entry->lag_id, 0, QOS_LAG_OP_CREATE)) { + xsc_core_err(xdev, "failed to create QoS LAG %u\n", entry->lag_id); + goto out; + } + + if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER)) + xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } + +out: + if (roce_lag) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +void xsc_add_lag_member(struct xsc_lag_event *entry) +{ + int ret = 
0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + + if (roce_lag) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_add_lag_member(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER)) + xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } + + return; + +out: + if (roce_lag) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +void xsc_remove_lag_member(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + struct xsc_core_device *roce_lag_xdev = entry->roce_lag_xdev; + + if (roce_lag && entry->is_roce_lag_xdev) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_remove_lag_member(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (roce_lag && entry->is_roce_lag_xdev) { + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + xsc_add_dev_by_protocol(roce_lag_xdev, XSC_INTERFACE_PROTOCOL_IB); + } + + if (roce_lag && !entry->is_roce_lag_xdev) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_DEL_MEMBER)) + xsc_core_err(xdev, "failed to del member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + + return; + +out: + if (roce_lag && entry->is_roce_lag_xdev) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +void xsc_update_lag_member_status(struct xsc_lag_event *entry) +{ + int ret = 0; + struct xsc_core_device *xdev = entry->xdev; + + ret = xsc_cmd_update_lag_member_status(entry); + if (ret) + xsc_core_err(xdev, "failed to update LAG member status, err =%d\n", ret); + + if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER)) + xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } else if (entry->slave_status == XSC_LAG_SLAVE_INACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_DEL_MEMBER)) + xsc_core_err(xdev, "failed to del member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } +} + +void xsc_update_lag_hash_type(struct xsc_lag_event *entry) +{ + int ret = 0; + struct xsc_core_device *xdev = entry->xdev; + + ret = xsc_cmd_update_lag_hash_type(entry); + if (ret) + xsc_core_err(xdev, "failed to update LAG member status, err =%d\n", ret); +} + +void xsc_destroy_lag(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + + if (roce_lag) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_destroy_lag(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (xsc_lag_set_qos(xdev, entry->lag_id, 0, QOS_LAG_OP_DESTROY)) + xsc_core_err(xdev, "failed to destroy QoS LAG %u\n", entry->lag_id); + +out: + if (roce_lag) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +static void (*handlers[XSC_LAG_EVENT_MAX])(struct xsc_lag_event *entry) = { + [XSC_LAG_CREATE] = xsc_create_lag, + [XSC_LAG_ADD_MEMBER] = xsc_add_lag_member, + [XSC_LAG_REMOVE_MEMBER] = xsc_remove_lag_member, 
+ [XSC_LAG_UPDATE_MEMBER_STATUS] = xsc_update_lag_member_status, + [XSC_LAG_UPDATE_HASH_TYPE] = xsc_update_lag_hash_type, + [XSC_LAG_DESTROY] = xsc_destroy_lag, +}; + +static int xsc_do_bond_thread(void *arg) +{ + struct xsc_lag_event *entry; + struct xsc_board_lag *board_lag = arg; + struct lag_event_list *lag_event_list; + int status; + + lag_event_list = &board_lag->lag_event_list; + + while (!kthread_should_stop()) { + if (need_resched()) + schedule(); + + spin_lock(&lag_event_list->lock); + entry = list_first_entry_or_null(&lag_event_list->head, + struct xsc_lag_event, node); + if (!entry) { + spin_unlock(&lag_event_list->lock); + wait_event_interruptible(lag_event_list->wq, + lag_event_list->wait_flag != XSC_SLEEP); + if (lag_event_list->wait_flag == XSC_EXIT) + break; + lag_event_list->wait_flag = XSC_SLEEP; + continue; + } + + spin_unlock(&lag_event_list->lock); + + if (entry->event_type >= XSC_LAG_EVENT_MAX) + goto free_entry; + + status = xsc_dev_list_trylock(); + if (!status) + continue; + + (*handlers[entry->event_type])(entry); + xsc_dev_list_unlock(); + +free_entry: + list_del(&entry->node); + kfree(entry); + } + + return 0; +} + +static inline bool xsc_is_roce_lag_allowed(struct xsc_lag *lag) +{ + struct xsc_core_device *xdev; + bool roce_lag_support = true; + + list_for_each_entry(xdev, &lag->slave_list, slave_node) { + roce_lag_support &= !xsc_sriov_is_enabled(xdev); + if (!roce_lag_support) { + xsc_core_info(xdev, "create ROCE LAG while sriov is open\n"); + break; + } + + roce_lag_support &= radix_tree_empty(&xdev->priv_device.bdf_tree); + if (!roce_lag_support) { + xsc_core_info(xdev, "create ROCE LAG while the ib device is open\n"); + break; + } + } + + return roce_lag_support; +} + +static bool xsc_is_sriov_lag_allowed(struct xsc_lag *lag) +{ + struct xsc_core_device *xdev; + bool sriov_lag_support = true; + + list_for_each_entry(xdev, &lag->slave_list, slave_node) { + sriov_lag_support &= (xdev->priv.eswitch->mode == XSC_ESWITCH_OFFLOADS); + if (!sriov_lag_support) + xsc_core_info(xdev, "create SRIOV LAG while the switchdev is not open\n"); + } + + return sriov_lag_support; +} + +static u8 xsc_get_lag_type(struct xsc_lag *lag) +{ + u8 lag_type; + bool roce_lag; + bool sriov_lag; + u8 lag_mode_support; + + lag_mode_support = (lag->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || + lag->tx_type == NETDEV_LAG_TX_TYPE_HASH); + roce_lag = lag_mode_support && xsc_is_roce_lag_allowed(lag); + sriov_lag = lag_mode_support && xsc_is_sriov_lag_allowed(lag); + lag_type = sriov_lag ? XSC_LAG_FLAG_SRIOV : + (roce_lag ? 
XSC_LAG_FLAG_ROCE : XSC_LAG_FLAG_KERNEL); + + return lag_type; +} + +static inline void pack_add_and_wake_wq(struct xsc_board_lag *board_lag, + struct xsc_lag_event *entry) +{ + spin_lock(&board_lag->lag_event_list.lock); + list_add_tail(&entry->node, &board_lag->lag_event_list.head); + spin_unlock(&board_lag->lag_event_list.lock); + board_lag->lag_event_list.wait_flag = XSC_WAKEUP; + wake_up(&board_lag->lag_event_list.wq); +} + +static inline enum lag_slave_status lag_slave_status_get(struct net_device *ndev) +{ + struct slave *slave = NULL; + enum lag_slave_status slave_status = XSC_LAG_SLAVE_STATUS_MAX; + + if (!netif_is_bond_slave(ndev)) + goto out; + + rcu_read_lock(); + slave = bond_slave_get_rtnl(ndev); + rcu_read_unlock(); + if (bond_slave_is_up(slave) && bond_slave_can_tx(slave)) + slave_status = XSC_LAG_SLAVE_ACTIVE; + else + slave_status = XSC_LAG_SLAVE_INACTIVE; + +out: + return slave_status; +} + +void pack_lag_create(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct net_device *ndev = xdev->netdev; + struct xsc_lag_event *entry; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + lag->lag_type = xsc_get_lag_type(lag); + + entry->event_type = XSC_LAG_CREATE; + entry->xdev = xdev; + entry->lag_sel_mode = lag->hash_type; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + entry->slave_status = lag_slave_status_get(ndev); + + xsc_core_info(xdev, "lag_sel_mode = %d, slave_status = %d, lag_type = %d\n", + entry->lag_sel_mode, entry->slave_status, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_create_lag(entry); +} + +void pack_lag_add_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct net_device *ndev = xdev->netdev; + struct xsc_core_device *roce_lag_xdev = NULL; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->lag_type = xsc_get_lag_type(lag); + if (entry->lag_type != lag->lag_type) { + xsc_core_err(xdev, "do not permit add slave to different type lag, xdev_lag_type = %d, lag_type = %d\n", + entry->lag_type, lag->lag_type); + + kfree(entry); + return; + } + + entry->event_type = XSC_LAG_ADD_MEMBER; + entry->xdev = xdev; + entry->lag_sel_mode = lag->hash_type; + entry->slave_status = lag_slave_status_get(ndev); + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + entry->roce_lag_xdev = roce_lag_xdev; + entry->not_roce_lag_xdev_mask = lag->not_roce_lag_xdev_mask; + + xsc_core_info(xdev, "lag_sel_mode = %d, slave_status = %d, lag_type = %d\n", + entry->lag_sel_mode, entry->slave_status, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_add_lag_member(entry); +} + +void pack_lag_remove_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct xsc_core_device *roce_lag_xdev = NULL; + struct xsc_core_device *xdev_tmp = NULL; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + u8 cnt = 0; + u8 not_roce_lag_xdev_mask = 0; + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), 
GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_REMOVE_MEMBER; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + + if (entry->lag_type & XSC_LAG_FLAG_ROCE) { + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + if (roce_lag_xdev == xdev) { + entry->is_roce_lag_xdev = 1; + + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + cnt++; + if (cnt == 1) + continue; + + if (cnt == 2) { + roce_lag_xdev = xdev_tmp; + continue; + } + + not_roce_lag_xdev_mask |= BIT(xdev_tmp->pf_id); + } + entry->roce_lag_xdev = roce_lag_xdev; + entry->not_roce_lag_xdev_mask = not_roce_lag_xdev_mask; + } + } + + xsc_core_info(xdev, "lag_type = %d, is_roce_lag_xdev = %d, not_roce_lag_xdev_mask = %d\n", + entry->lag_type, entry->is_roce_lag_xdev, entry->not_roce_lag_xdev_mask); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_remove_lag_member(entry); +} + +void pack_lag_update_member_status(struct xsc_lag *lag, + struct net_device *ndev, enum lag_slave_status slave_status) +{ + struct xsc_lag_event *entry; + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress || lag->lag_type & XSC_LAG_FLAG_KERNEL) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_UPDATE_MEMBER_STATUS; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + entry->slave_status = slave_status; + + xsc_core_info(xdev, "lag_id = %d, slave_status = %d\n", + entry->lag_id, entry->slave_status); + + pack_add_and_wake_wq(board_lag, entry); +} + +void pack_lag_update_hash_type(struct xsc_lag *lag, + u8 bond_id, enum netdev_lag_hash hash_type) +{ + struct xsc_lag_event *entry; + struct xsc_core_device *xdev = NULL; + struct xsc_board_lag *board_lag; + + if (lag->mode_changes_in_progress || lag->lag_type & XSC_LAG_FLAG_KERNEL) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + + board_lag = xsc_board_lag_get(xdev); + + entry->event_type = XSC_LAG_UPDATE_HASH_TYPE; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->lag_sel_mode = lag->hash_type; + + xsc_core_info(xdev, "lag_id = %d, lag_sel_mode = %d\n", + entry->lag_id, entry->lag_sel_mode); + + pack_add_and_wake_wq(board_lag, entry); +} + +void pack_lag_destroy(struct xsc_lag *lag, struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_DESTROY; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + + lag->lag_type = 0; + + xsc_core_info(xdev, "lag_id = %d, board_id = %d, lag_type = %d\n", + lag->lag_id, lag->board_id, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_destroy_lag(entry); +} + +static u8 xsc_get_valid_bond_id(struct xsc_board_lag *board_lag) +{ + u8 bond_valid_mask = board_lag->bond_valid_mask; + u8 i; + + for (i = 0; i < XSC_BOARD_LAG_MAX; i++) { + if (!(bond_valid_mask & BIT(i))) { 
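+ /* first unused bit: mark this bond id busy in the mask and return it to the caller */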
+ board_lag->bond_valid_mask = (bond_valid_mask | BIT(i)); + return i; + } + } + return BOND_ID_INVALID; +} + +static void xsc_lag_setup(struct xsc_board_lag *board_lag, + struct net_device *upper, struct xsc_core_device *xdev, bool no_wq) +{ + struct bonding *bond = netdev_priv(upper); + struct xsc_lag *lag = NULL; + u8 bond_id; + + bond_id = xsc_get_valid_bond_id(board_lag); + + if (bond_id == BOND_ID_INVALID) + return; + + xdev->bond_id = bond_id; + lag = &board_lag->xsc_lag[xdev->bond_id]; + + INIT_LIST_HEAD(&lag->slave_list); + list_add(&xdev->slave_node, &lag->slave_list); + lag->xsc_member_cnt = 1; + lag->bond_dev = upper; + lag->bond_mode = BOND_MODE(bond); + lag->tx_type = bond_lag_tx_type(bond); + lag->hash_type = xsc_lag_hashtype_convert(bond_lag_hash_type(bond)); + lag->board_id = xdev->board_info->board_id; + lag->lag_id = xdev->caps.lag_logic_port_ofst + xdev->bond_id; + + xsc_core_info(xdev, "lag_id = %d, board_id = %d, bond_mode = %d\n", + lag->lag_id, lag->board_id, lag->bond_mode); + + pack_lag_create(lag, xdev, false); +} + +static bool xsc_is_ndev_xsc_pf(struct net_device *slave_ndev) +{ + struct device *dev = &slave_ndev->dev; + struct pci_dev *pdev = to_pci_dev(dev->parent); + + return (pdev->device == XSC_MS_PF_DEV_ID || + pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + +static u8 xsc_get_bond_board_xsc_cnt(struct net_device *upper, + u32 board_id) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + struct net_device *ndev_tmp; + u8 slave_cnt = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) { + if (!ndev_tmp) + continue; + if (xsc_is_ndev_xsc_pf(ndev_tmp)) { + adapter = netdev_priv(ndev_tmp); + xdev = adapter->xdev; + if (xdev->board_info->board_id == board_id) + slave_cnt++; + } + } + rcu_read_unlock(); + + return slave_cnt; +} + +static void xsc_lag_member_add(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + list_add_tail(&xdev->slave_node, &lag->slave_list); + lag->xsc_member_cnt++; + lag->not_roce_lag_xdev_mask |= BIT(xdev->pf_id); + + xsc_core_dbg(xdev, "xsc_member_cnt = %d\n", + lag->xsc_member_cnt); + + pack_lag_add_member(lag, xdev, no_wq); +} + +static void xsc_lag_member_remove(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + u8 bond_valid_mask; + + lag->xsc_member_cnt--; + + xsc_core_info(xdev, "xsc_member_cnt = %d\n", + lag->xsc_member_cnt); + + if (lag->xsc_member_cnt > 0) { + pack_lag_remove_member(lag, xdev, no_wq); + } else { + pack_lag_destroy(lag, xdev, no_wq); + + lag->lag_id = LAG_ID_INVALID; + lag->board_id = BOARD_ID_INVALID; + + bond_valid_mask = board_lag->bond_valid_mask; + board_lag->bond_valid_mask = bond_valid_mask & ~BIT(xdev->bond_id); + } + + list_del(&xdev->slave_node); + xdev->bond_id = BOND_ID_INVALID; +} + +static void xsc_lag_update_member(struct xsc_lag *lag, + struct net_device *ndev, struct net_device *upper, u8 bond_id) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + u8 xsc_slave_cnt = xsc_get_bond_board_xsc_cnt(upper, lag->board_id); + + xsc_core_dbg(xdev, "xsc_slave_cnt = %d, old_xsc_slave_cnt = %d\n", + xsc_slave_cnt, lag->xsc_member_cnt); + + if (xsc_slave_cnt > lag->xsc_member_cnt) + xsc_lag_member_add(lag, xdev, false); + + if (xsc_slave_cnt < lag->xsc_member_cnt) + xsc_lag_member_remove(lag, xdev, false); +} + +static u8 xsc_get_upper_bond_id(struct net_device *bond_ndev, + struct net_device *ndev, struct xsc_board_lag 
*board_lag, + bool hash_change) +{ + u8 i; + struct xsc_lag *lag; + u8 bond_valid_mask = board_lag->bond_valid_mask; + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + u8 bond_id = BOND_ID_INVALID; + + for (i = 0; i < XSC_BOARD_LAG_MAX; i++) { + if (bond_valid_mask & BIT(i)) { + lag = &board_lag->xsc_lag[i]; + if (!hash_change) { + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + if (lag->bond_dev == bond_ndev && + lag->board_id == xdev->board_info->board_id) { + bond_id = i; + break; + } + } else { + if (lag->bond_dev == bond_ndev) { + bond_id = i; + break; + } + } + } + } + + return bond_id; +} + +static struct xsc_board_lag *xsc_board_lag_filter(struct xsc_board_lag *board_lag, + struct net_device *ndev) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + + if (xsc_is_ndev_xsc_pf(ndev)) { + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + if (xdev->board_info->board_id == board_lag->board_id) + return board_lag; + } + + return NULL; +} + +static void xsc_handle_changeupper_event(struct xsc_board_lag *board_lag, + struct net_device *ndev, + struct netdev_notifier_changeupper_info *info) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + struct net_device *upper = info->upper_dev; + u8 bond_id; + struct xsc_lag *lag; + + if (!netif_is_lag_master(upper) || !ndev) + return; + + mutex_lock(&board_lag->lock); + if (!xsc_board_lag_filter(board_lag, ndev)) { + mutex_unlock(&board_lag->lock); + return; + } + + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + + bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); + xdev->bond_id = bond_id; + + xsc_core_dbg(xdev, "bond_id = %d\n", bond_id); + + if (bond_id != BOND_ID_INVALID) { + lag = &board_lag->xsc_lag[bond_id]; + xsc_lag_update_member(lag, ndev, upper, bond_id); + if (lag->xsc_member_cnt == 0) + memset(lag, 0, sizeof(*lag)); + } else { + xsc_lag_setup(board_lag, upper, xdev, false); + } + mutex_unlock(&board_lag->lock); +} + +static void xsc_handle_changelowerstate_event(struct xsc_board_lag *board_lag, + struct net_device *ndev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + struct net_device *bond_dev; + struct slave *slave; + struct xsc_lag *lag; + u8 bond_id; + enum lag_slave_status slave_status = XSC_LAG_SLAVE_INACTIVE; + + if (!netif_is_lag_port(ndev) || !info->lower_state_info) + return; + + rcu_read_lock(); + slave = bond_slave_get_rtnl(ndev); + rcu_read_unlock(); + if (!slave || !slave->bond || !slave->bond->dev) + return; + + bond_dev = slave->bond->dev; + + lag_lower_info = info->lower_state_info; + if (lag_lower_info->link_up && lag_lower_info->tx_enabled) + slave_status = XSC_LAG_SLAVE_ACTIVE; + + mutex_lock(&board_lag->lock); + if (!xsc_board_lag_filter(board_lag, ndev)) { + mutex_unlock(&board_lag->lock); + return; + } + + bond_id = xsc_get_upper_bond_id(bond_dev, ndev, board_lag, false); + if (bond_id == BOND_ID_INVALID) { + mutex_unlock(&board_lag->lock); + return; + } + + lag = &board_lag->xsc_lag[bond_id]; + pack_lag_update_member_status(lag, ndev, slave_status); + mutex_unlock(&board_lag->lock); +} + +static void xsc_handle_changehash_event(struct xsc_board_lag *board_lag, + struct net_device *ndev) +{ + struct bonding *bond; + enum netdev_lag_hash hash_type; + struct xsc_lag *lag; + u8 bond_id; + + if (!netif_is_lag_master(ndev)) + return; + + bond = netdev_priv(ndev); + if (!bond_mode_uses_xmit_hash(bond)) + return; + + mutex_lock(&board_lag->lock); + bond_id = 
xsc_get_upper_bond_id(ndev, NULL, board_lag, true); + if (bond_id == BOND_ID_INVALID) { + mutex_unlock(&board_lag->lock); + return; + } + + lag = &board_lag->xsc_lag[bond_id]; + hash_type = xsc_lag_hashtype_convert(bond_lag_hash_type(bond)); + + if (hash_type != lag->hash_type) { + lag->hash_type = hash_type; + pack_lag_update_hash_type(lag, bond_id, hash_type); + } + mutex_unlock(&board_lag->lock); +} + +static int xsc_lag_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct xsc_board_lag *board_lag; + + if (event != NETDEV_CHANGE && event != NETDEV_CHANGEUPPER && + event != NETDEV_CHANGELOWERSTATE) + return NOTIFY_DONE; + + board_lag = container_of(this, struct xsc_board_lag, nb); + if (!board_lag) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + xsc_handle_changeupper_event(board_lag, ndev, ptr); + break; + case NETDEV_CHANGELOWERSTATE: + xsc_handle_changelowerstate_event(board_lag, ndev, ptr); + break; + case NETDEV_CHANGE: + xsc_handle_changehash_event(board_lag, ndev); + break; + } + + return NOTIFY_DONE; +} + +static struct xsc_board_lag *xsc_board_lag_dev_alloc(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag; + struct lag_event_list *lag_event_list; + int err; + + board_lag = kzalloc(sizeof(*board_lag), GFP_KERNEL); + if (!board_lag) + return NULL; + + lag_event_list = &board_lag->lag_event_list; + + INIT_LIST_HEAD(&lag_event_list->head); + spin_lock_init(&lag_event_list->lock); + init_waitqueue_head(&lag_event_list->wq); + lag_event_list->wait_flag = XSC_SLEEP; + lag_event_list->bond_poll_task = + kthread_create(xsc_do_bond_thread, (void *)board_lag, "xsc board lag"); + if (lag_event_list->bond_poll_task) + wake_up_process(lag_event_list->bond_poll_task); + + board_lag->nb.notifier_call = xsc_lag_netdev_event; + err = register_netdevice_notifier(&board_lag->nb); + if (err) + goto err_create_notifier; + + kref_init(&board_lag->ref); + mutex_init(&board_lag->lock); + board_lag->bond_valid_mask = 0; + + return board_lag; + +err_create_notifier: + xsc_core_err(xdev, "failed to register LAG netdev notifier\n"); + board_lag->nb.notifier_call = NULL; + kthread_stop(lag_event_list->bond_poll_task); + kfree(board_lag); + + return NULL; +} + +static int __xsc_lag_add_xdev(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (!board_lag) { + board_lag = xsc_board_lag_dev_alloc(xdev); + if (!board_lag) + return -EPIPE; + xsc_board_lag_set(xdev, board_lag); + } else { + kref_get(&board_lag->ref); + } + + xdev->bond_id = BOND_ID_INVALID; + + return 0; +} + +void xsc_lag_add_xdev(struct xsc_core_device *xdev) +{ + int err; + + xsc_dev_list_lock(); + err = __xsc_lag_add_xdev(xdev); + xsc_dev_list_unlock(); + + if (err) + xsc_core_dbg(xdev, "add xdev err=%d\n", err); +} +EXPORT_SYMBOL(xsc_lag_add_xdev); + +static void xsc_lag_dev_free(struct kref *ref) +{ + struct xsc_board_lag *board_lag = container_of(ref, struct xsc_board_lag, ref); + struct lag_event_list *lag_event_list = &board_lag->lag_event_list; + + if (board_lag->nb.notifier_call) + unregister_netdevice_notifier(&board_lag->nb); + + lag_event_list->wait_flag = XSC_EXIT; + wake_up(&lag_event_list->wq); + if (lag_event_list->bond_poll_task) + kthread_stop(lag_event_list->bond_poll_task); + + board_lag->nb.notifier_call = NULL; + mutex_destroy(&board_lag->lock); + + xsc_board_lag_reset(board_lag->board_id); + kfree(board_lag); +} + +void 
xsc_lag_remove_xdev(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + xsc_dev_list_lock(); + if (board_lag) + kref_put(&board_lag->ref, xsc_lag_dev_free); + xsc_dev_list_unlock(); +} +EXPORT_SYMBOL(xsc_lag_remove_xdev); + +void xsc_lag_disable(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + struct xsc_core_device *xdev_tmp = NULL; + u8 cnt = 0; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag || !__xsc_lag_is_active(lag)) { + mutex_unlock(&board_lag->lock); + return; + } + + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + cnt++; + if (cnt == lag->xsc_member_cnt) + pack_lag_destroy(lag, xdev_tmp, false); + else + pack_lag_remove_member(lag, xdev_tmp, false); + } + + lag->mode_changes_in_progress++; + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_disable); + +void xsc_lag_enable(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + struct xsc_core_device *xdev_tmp = NULL; + u8 cnt = 0; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag || __xsc_lag_is_active(lag)) { + mutex_unlock(&board_lag->lock); + return; + } + + lag->mode_changes_in_progress--; + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + if (cnt == 0) + pack_lag_create(lag, xdev_tmp, false); + else + pack_lag_add_member(lag, xdev_tmp, false); + + cnt++; + } + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_enable); + +void xsc_lag_add_netdev(struct net_device *ndev) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct bonding *bond = NULL; + struct net_device *upper = NULL; + struct slave *slave; + u8 bond_id = BOND_ID_INVALID; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + struct xsc_lag *lag; + + if (!board_lag || ndev->reg_state != NETREG_REGISTERED || + !netif_is_bond_slave(ndev)) + return; + + rcu_read_lock(); + slave = bond_slave_get_rcu(ndev); + rcu_read_unlock(); + bond = bond_get_bond_by_slave(slave); + upper = bond->dev; + + mutex_lock(&board_lag->lock); + bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); + xdev->bond_id = bond_id; + lag = __xsc_get_lag(xdev); + + if (bond_id != BOND_ID_INVALID) + xsc_lag_member_add(lag, xdev, true); + else + xsc_lag_setup(board_lag, upper, xdev, true); + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_add_netdev); + +void xsc_lag_remove_netdev(struct net_device *ndev) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + struct xsc_lag *lag; + + if (!board_lag) + return; + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag) + goto out; + + if (__xsc_lag_is_active(lag)) { + xsc_lag_member_remove(lag, xdev, true); + if (lag->xsc_member_cnt == 0) + memset(lag, 0, sizeof(*lag)); + } + +out: + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_remove_netdev); + +bool xsc_lag_is_roce(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + + lag = __xsc_get_lag(xdev); + if (!lag) + return false; + + return __xsc_lag_is_roce(lag); +} +EXPORT_SYMBOL(xsc_lag_is_roce); + +struct xsc_lag *xsc_get_lag(struct xsc_core_device *xdev) +{ + return __xsc_get_lag(xdev); +} +EXPORT_SYMBOL(xsc_get_lag); + +u16 xsc_get_lag_id(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + u16 lag_id 
= LAG_ID_INVALID; + + xsc_board_lag_lock(xdev); + lag = __xsc_get_lag(xdev); + if (lag && __xsc_lag_is_active(lag) && !__xsc_lag_is_kernel(lag)) + lag_id = lag->lag_id; + xsc_board_lag_unlock(xdev); + + return lag_id; +} +EXPORT_SYMBOL(xsc_get_lag_id); + +struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev) +{ + struct xsc_core_device *roce_lag_xdev; + struct xsc_lag *lag; + + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + lag = __xsc_get_lag(xdev); + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + } else { + roce_lag_xdev = xdev; + } + xsc_board_lag_unlock(xdev); + + return roce_lag_xdev; +} +EXPORT_SYMBOL(xsc_get_roce_lag_xdev); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..2e63e13bc97d01079af08f277b56011782b03e54 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" +#include +#include +#include +#include +#include "xsc_pci_ctrl.h" +#include "common/res_obj.h" + +#define FEATURE_ONCHIP_FT_MASK BIT(4) +#define FEATURE_DMA_RW_TBL_MASK BIT(8) +#define FEATURE_PCT_EXP_MASK BIT(19) + +#define XSC_PCI_CTRL_NAME "pci_ctrl" + +static int xsc_pci_ctrl_modify_qp(struct xsc_core_device *xdev, void *in, void *out) +{ + int ret = 0, i = 0; + struct xsc_ioctl_qp_range *resp; + struct xsc_ioctl_data_tl *tl; + int insize; + struct xsc_modify_qp_mbox_in *mailin; + struct xsc_modify_qp_mbox_out mailout; + u32 qpn; + + tl = (struct xsc_ioctl_data_tl *)out; + resp = (struct xsc_ioctl_qp_range *)(tl + 1); + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", + resp->qpn, resp->num, resp->opcode); + if (resp->num == 0) { + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: resp->num ==0\n"); + return 0; + } + qpn = resp->qpn; + insize = sizeof(struct xsc_modify_qp_mbox_in); + mailin = kvzalloc(insize, GFP_KERNEL); + if (!mailin) { + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: enomem\n"); + return -ENOMEM; + } + for (i = 0; i < resp->num; i++) { + mailin->hdr.opcode = cpu_to_be16(resp->opcode); + mailin->qpn = cpu_to_be32(qpn + i); + ret = xsc_cmd_exec(xdev, mailin, insize, &mailout, sizeof(mailout)); + xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); + } + kvfree(mailin); + + return ret; +} + +static struct pci_dev *xsc_pci_get_pcidev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + return pci_get_domain_bus_and_slot(domain, bus, devfn); +} + +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + struct pci_dev *pdev = NULL; + struct xsc_core_device *xdev = NULL; + + pdev = xsc_pci_get_pcidev_by_bus_and_slot(domain, bus, devfn); + if (!pdev) + return NULL; + + xdev = pci_get_drvdata(pdev); + + return xdev; +} + +static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_ioctl_get_phy_info_res *resp; + u16 lag_id = xsc_get_lag_id(xdev); + struct xsc_core_device *rl_xdev; + + 
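+ /* opmod selects the reply source: XSC_IOCTL_OP_GET_LOCAL reports this device's own info, while XSC_IOCTL_OP_GET_INFO_BY_BDF first resolves another function from its PCI domain/bus/devfn */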
switch (tl->opmod) { + case XSC_IOCTL_OP_GET_LOCAL: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + resp->pcie_no = xdev->pcie_no; + resp->func_id = xdev->glb_func_id; + resp->pcie_host = xdev->caps.pcie_host; + resp->mac_phy_port = xdev->mac_port; + resp->funcid_to_logic_port_off = xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = xdev->caps.send_ds_num; + resp->recv_seg_num = xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = xdev->caps.raw_tpe_qp_num; + resp->chip_version = xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = xdev->caps.pcie1_pf_funcid_top; + resp->hca_core_clock = xdev->caps.hca_core_clock; + resp->mac_bit = xdev->caps.mac_bit; + if (xsc_core_is_pf(xdev)) { + mutex_lock(&esw->mode_lock); + resp->esw_mode = esw->mode; + mutex_unlock(&esw->mode_lock); + } else { + resp->esw_mode = 0; + } + resp->board_id = xdev->board_info->board_id; + break; + + case XSC_IOCTL_OP_GET_INFO_BY_BDF: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + xsc_core_dbg(xdev, "ioctrl get_pcidev. domain=%u, bus=%u, devfn=%u\n", + resp->domain, resp->bus, resp->devfn); + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(resp->domain, resp->bus, resp->devfn); + if (!rl_xdev) + return -1; + + resp->pcie_no = rl_xdev->pcie_no; + resp->func_id = rl_xdev->glb_func_id; + resp->pcie_host = rl_xdev->caps.pcie_host; + resp->mac_phy_port = rl_xdev->mac_port; + resp->funcid_to_logic_port_off = rl_xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = rl_xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = rl_xdev->caps.send_ds_num; + resp->recv_seg_num = rl_xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = rl_xdev->caps.raw_tpe_qp_num; + resp->chip_version = rl_xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (rl_xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (rl_xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (rl_xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 
1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = rl_xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = rl_xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = rl_xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = rl_xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = rl_xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = rl_xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = rl_xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = rl_xdev->caps.pcie1_pf_funcid_top; + resp->board_id = xdev->board_info->board_id; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int xsc_pci_ctrl_get_contextinfo(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_alloc_ucontext_req *req; + struct xsc_alloc_ucontext_resp *resp; + struct xsc_core_device *rl_xdev = NULL; + + if (tl->opmod != XSC_IOCTL_OP_GET_CONTEXT) + return -EINVAL; + + req = (struct xsc_alloc_ucontext_req *)(tl + 1); + xsc_core_dbg(xdev, "xsc_tdi_alloc_context req:\n"); + xsc_core_dbg(xdev, "req->domain=%u\n", req->domain); + xsc_core_dbg(xdev, "req->bus=%u\n", req->bus); + xsc_core_dbg(xdev, "req->devfn=%u\n", req->devfn); + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(req->domain, req->bus, req->devfn); + if (!rl_xdev) + return -1; + + resp = (struct xsc_alloc_ucontext_resp *)(tl + 1); + + resp->max_cq = 1 << rl_xdev->caps.log_max_cq; + resp->max_qp = 1 << rl_xdev->caps.log_max_qp; + resp->max_rwq_indirection_table_size = rl_xdev->caps.max_rwq_indirection_table_size; + resp->qpm_tx_db = rl_xdev->regs.tx_db; + resp->qpm_rx_db = rl_xdev->regs.rx_db; + resp->cqm_next_cid_reg = rl_xdev->regs.complete_reg; + resp->cqm_armdb = rl_xdev->regs.complete_db; + resp->send_ds_num = rl_xdev->caps.send_ds_num; + resp->recv_ds_num = rl_xdev->caps.recv_ds_num; + resp->send_ds_shift = rl_xdev->caps.send_wqe_shift; + resp->recv_ds_shift = rl_xdev->caps.recv_wqe_shift; + resp->glb_func_id = rl_xdev->glb_func_id; + + resp->max_wqes = rl_xdev->caps.max_wqes; + + xsc_core_dbg(xdev, "xsc_tdi_alloc_context resp:\n"); + xsc_core_dbg(xdev, "resp->max_cq=%u\n", resp->max_cq); + xsc_core_dbg(xdev, "resp->max_qp=%u\n", resp->max_qp); + xsc_core_dbg(xdev, "resp->qpm_tx_db=%llx\n", resp->qpm_tx_db); + xsc_core_dbg(xdev, "resp->qpm_rx_db=%llx\n", resp->qpm_rx_db); + xsc_core_dbg(xdev, "resp->cqm_next_cid_reg=%llx\n", resp->cqm_next_cid_reg); + xsc_core_dbg(xdev, "resp->cqm_armdb=%llx\n", resp->cqm_armdb); + xsc_core_dbg(xdev, "resp->send_ds_num=%u\n", resp->send_ds_num); + xsc_core_dbg(xdev, "resp->send_ds_shift=%u\n", resp->send_ds_shift); + xsc_core_dbg(xdev, "resp->:recv_ds_num=%u\n", resp->recv_ds_num); + xsc_core_dbg(xdev, "resp->recv_ds_shift=%u\n", resp->recv_ds_shift); + xsc_core_dbg(xdev, "resp->glb_func_id=%u\n", resp->glb_func_id); + + return ret; +} + +int noop_pre(struct kprobe *p, struct pt_regs *regs) { return 0; } + +static struct kprobe kp = { + .symbol_name = "kallsyms_lookup_name", +}; + +unsigned long (*kallsyms_lookup_name_func)(const char *name) = NULL; + +//调用kprobe找到kallsyms_lookup_name的地址位置 +int find_kallsyms_lookup_name(void) +{ + int ret = -1; + + kp.addr = 0; + kp.pre_handler = noop_pre; + ret = register_kprobe(&kp); + if (ret < 0) + return ret; + + kallsyms_lookup_name_func = (void *)kp.addr; + 
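+ /* kallsyms_lookup_name() is not exported on recent kernels, so its address is taken from the resolved probe point; the kprobe can be dropped once the helper pointer has been captured */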
unregister_kprobe(&kp); + return ret; +} + +u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev) +{ + struct db_irq_matrix *m; + static unsigned long addr; + static int flag; + char *name = "vector_matrix"; + int ret; + + if (flag == 0) { + ret = find_kallsyms_lookup_name(); + if (ret < 0) { + xsc_core_err(dev, "find kallsyms_lookup_name failed\n"); + return 0xffff; + } + + addr = kallsyms_lookup_name_func(name); + xsc_core_dbg(dev, "vector_matrix addr=0x%lx\n", addr); + if (addr == 0) { + xsc_core_err(dev, "not support, arch maybe not X86?\n"); + /* 返回0xffff,做到在不知道cpu vector剩余多少可用的情况 + * 下不影响fw用该值判断能否分配中断 + */ + return 0xffff; + } + flag = 1; + } + + m = (struct db_irq_matrix *)(*(long *)addr); + if (!m) { + xsc_core_err(dev, "vector_matrix is NULL\n"); + return 0xffff; + } + xsc_core_info(dev, "vector_matrix global_available=%u\n", m->global_available); + return m->global_available; +} + +int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + int opcode, ret = 0; + struct xsc_ioctl_attr *hdr; + + hdr = (struct xsc_ioctl_attr *)in; + opcode = hdr->opcode; + switch (opcode) { + case XSC_IOCTL_GET_PHY_INFO: + ret = xsc_pci_ctrl_get_phy(xdev, in, out); + break; + case XSC_IOCTL_SET_QP_STATUS: + xsc_core_dbg(xdev, "case XSC_IOCTL_SET_QP_STATUS:\n"); + ret = xsc_pci_ctrl_modify_qp(xdev, in, out); + break; + case XSC_IOCTL_GET_CONTEXT: + xsc_core_dbg(xdev, "case XSC_IOCTL_GET_CONTEXT:\n"); + ret = xsc_pci_ctrl_get_contextinfo(xdev, in, out); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static long xsc_pci_ctrl_setinfo(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + struct xsc_set_debug_info_mbox_in in; + struct xsc_set_debug_info_mbox_out out; + struct xsc_ioctl_set_debug_info info; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(xdev, "copy user_hdr from user failed, err = %d\n", err); + return -EFAULT; + } + + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(xdev, "incorrect check field, check field=%#x\n", hdr.check_filed); + return -EFAULT; + } + + if (hdr.attr.length != sizeof(info)) { + xsc_core_err(xdev, "unexpected length, length=%d\n", hdr.attr.length); + return -EFAULT; + } + + err = copy_from_user(&info, user_hdr->attr.data, hdr.attr.length); + if (err) { + xsc_core_err(xdev, "copy attr.data from user failed, err = %d\n", err); + return -EFAULT; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_DEBUG_INFO); + switch (hdr.attr.opcode) { + case XSC_IOCTL_SET_LOG_LEVEL: + in.set_field = 0; + in.log_level = info.log_level; + break; + case XSC_IOCTL_SET_CMD_VERBOSE: + in.set_field = 1; + in.cmd_verbose = info.cmd_verbose; + break; + default: + xsc_core_err(xdev, "invalid opcode %d\n", hdr.attr.opcode); + return -EINVAL; + } + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to set debug info to fw, err = %d, status = %d\n", + err, out.hdr.status); + return -EFAULT; + } + + return 0; +} + +static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_hdr *in; + int in_size; + int err; + u16 global_available; + u16 totalvfs; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) 
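+ /* reject ioctl headers that do not carry the expected magic check field */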
+ return -EINVAL; + switch (hdr.attr.opcode) { + case XSC_IOCTL_GET_PHY_INFO: + case XSC_IOCTL_SET_QP_STATUS: + case XSC_IOCTL_GET_CONTEXT: + case XSC_IOCTL_GET_VECTOR_MATRIX: + break; + default: + return TRY_NEXT_CB; + } + in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + in->attr.opcode = hdr.attr.opcode; + in->attr.length = hdr.attr.length; + + if (hdr.attr.opcode == XSC_IOCTL_GET_VECTOR_MATRIX) { + global_available = xsc_get_irq_matrix_global_available(xdev); + totalvfs = (pci_sriov_get_totalvfs(xdev->pdev) < 0) ? 0 : + pci_sriov_get_totalvfs(xdev->pdev); + in->attr.error = err; + memcpy(in->attr.data, (void *)&global_available, sizeof(u16)); + memcpy(in->attr.data + sizeof(u16), (void *)&totalvfs, sizeof(u16)); + goto next; + } + + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + err = xsc_pci_ctrl_exec_ioctl(xdev, &in->attr, + (in_size - offsetof(struct xsc_ioctl_hdr, attr)), + in->attr.data, hdr.attr.length); + in->attr.error = err; +next: + if (copy_to_user((void *)user_hdr, in, in_size)) + err = -EFAULT; + kvfree(in); + return err; +} + +static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, + char *data, unsigned int datalen) +{ + int err = 0; + struct xsc_flow_pct_v4_add *pct_v4; + struct xsc_flow_pct_v6_add *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); + break; + default: + break; + } + + return err; +} + +static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) +{ + struct xsc_flow_pct_v4_del *pct_v4; + struct xsc_flow_pct_v6_del *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v4->priority); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v6->priority); + break; + default: + break; + } +} + +static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, + char *data, unsigned int datalen) +{ + struct xsc_ioctl_data_tl *tl; + int err = 0; + + tl = (struct xsc_ioctl_data_tl *)data; + + switch (tl->opmod) { + case XSC_IOCTL_OP_ADD: + err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); + break; + case XSC_IOCTL_OP_DEL: + xsc_ioctl_flow_destroy_obj(file, tl); + break; + default: + break; + } + + return err; +} + +static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + int in_size; + int out_size; + int err; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->len = __cpu_to_be16(hdr->attr.length); + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + err = 
xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + kvfree(in); + return -ENOMEM; + } + memcpy(out->data, in->data, hdr->attr.length); + out->len = in->len; + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out->data, hdr->attr.length)) + err = -EFAULT; + + kvfree(in); + kvfree(out); + return err; +} + +static int xsc_ioctl_emu_cmd(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct xsc_ioctl_emu_hdr *emu_hdr; + u8 *buffer; + int in_size; + int out_size; + int err; + + buffer = kvzalloc(hdr->attr.length, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = copy_from_user(buffer, user_hdr->attr.data, hdr->attr.length); + if (err) + goto err_copy_user_data; + + emu_hdr = (struct xsc_ioctl_emu_hdr *)buffer; + in_size = emu_hdr->in_length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_alloc_in_mem; + } + memcpy(in, emu_hdr->data, emu_hdr->in_length); + + out_size = emu_hdr->out_length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto err_alloc_out_mem; + } + + err = xsc_cmd_exec(xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data + sizeof(struct xsc_ioctl_emu_hdr), + out, out_size)) + err = -EFAULT; + + kvfree(out); + kvfree(in); + kvfree(buffer); + return err; + +err_alloc_out_mem: + kvfree(in); +err_alloc_in_mem: +err_copy_user_data: + kvfree(buffer); + return err; +} + +static int xsc_ioctl_modify_raw_qp(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_modify_raw_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_out *out; + int err; + + if (hdr->attr.length != sizeof(struct xsc_modify_raw_qp_request)) + return -EINVAL; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->req, user_hdr->attr.data, + sizeof(struct xsc_modify_raw_qp_request)); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->pcie_no = xdev->pcie_no; + + err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static void xsc_handle_multiqp_create(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out) +{ + u16 qp_num = 0; + int i = 0; + struct xsc_create_qp_request *req = NULL; + void *ptr = NULL; + int len = 0; + u32 qpn_base = be32_to_cpu(((struct xsc_create_multiqp_mbox_out *)out)->qpn_base); + + qp_num = be16_to_cpu(((struct xsc_create_multiqp_mbox_in *)in)->qp_num); + ptr = ((struct xsc_create_multiqp_mbox_in *)in)->data; + for (i = 0; i < 
qp_num; i++) { + req = (struct xsc_create_qp_request *)ptr; + len = sizeof(struct xsc_create_qp_request) + + be16_to_cpu(req->pa_num) * sizeof(u64); + xsc_alloc_qp_obj(file, qpn_base + i, (char *)req, len); + ptr += len; + } +} + +static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out, int opcode) +{ + unsigned int idx; + + switch (opcode) { + case XSC_CMD_OP_ALLOC_PD: + idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); + xsc_alloc_pd_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DEALLOC_PD: + idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); + xsc_destroy_pd_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MKEY: + idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); + xsc_alloc_mr_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_MKEY: + idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey); + xsc_destroy_mr_obj(file, idx); + break; + case XSC_CMD_OP_DESTROY_CQ: + idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn); + xsc_destroy_cq_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_CQ: + idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn); + xsc_alloc_cq_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_CREATE_QP: + idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn); + xsc_alloc_qp_obj(file, idx, + (char *)&(((struct xsc_create_qp_mbox_in *)in)->req), + inlen); + break; + case XSC_CMD_OP_DESTROY_QP: + idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_destroy_qp_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MULTI_QP: + xsc_handle_multiqp_create(file, in, inlen, out); + break; + default: + break; + } +} + +static long xsc_pci_ctrl_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_core_device *xdev = file->xdev; + struct xsc_ioctl_hdr hdr; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EINVAL; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_FLOW: + return xsc_ioctl_flow_cmdq(file, user_hdr, &hdr); + case XSC_CMD_OP_MODIFY_RAW_QP: + return xsc_ioctl_modify_raw_qp(xdev, user_hdr, &hdr); + case XSC_CMD_OP_USER_EMU_CMD: + return xsc_ioctl_emu_cmd(xdev, user_hdr, &hdr); + default: + err = TRY_NEXT_CB; + break; + } + + return err; +} + +static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + int op; + struct xsc_core_device *dev = file->xdev; + struct xsc_create_mkey_mbox_out *resp; + struct xsc_unregister_mr_mbox_in *req; + u8 key; + u16 out_len; + int qpn = 0; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(dev, "fail to copy from user user_hdr\n"); + return -EFAULT; + } + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(dev, "invalid check filed %u\n", hdr.check_filed); + return -EINVAL; + } + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out_len = min_t(u16, hdr.attr.length, (u16)MAX_MBOX_OUT_LEN); + out = kvzalloc(out_len, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + xsc_core_err(dev, "fail to copy_from_user user hdr attr\n"); + goto err_exit; + } + + op = be16_to_cpu(((struct 
xsc_inbox_hdr *)in)->opcode); + switch (op) { + case XSC_CMD_OP_CREATE_MKEY: + spin_lock(&dev->dev_res->mkey_lock); + key = 0x80 + dev->dev_res->mkey_key++; + spin_unlock(&dev->dev_res->mkey_lock); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); + else + err = xsc_create_mkey(dev, in, out); + + resp = (struct xsc_create_mkey_mbox_out *)out; + resp->mkey = xsc_idx_to_mkey(be32_to_cpu(resp->mkey) & 0xffffff) | key; + resp->mkey = cpu_to_be32(resp->mkey); + break; + case XSC_CMD_OP_DESTROY_MKEY: + if (!dev->reg_mr_via_cmdq) + err = xsc_destroy_mkey(dev, in, out); + break; + case XSC_CMD_OP_REG_MR: + if (!dev->reg_mr_via_cmdq) + err = xsc_reg_mr(dev, in, out); + break; + case XSC_CMD_OP_DEREG_MR: + req = (struct xsc_unregister_mr_mbox_in *)in; + req->mkey = be32_to_cpu(req->mkey); + req->mkey = cpu_to_be32(xsc_mkey_to_idx(req->mkey)); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); + else + err = xsc_dereg_mr(dev, in, out); + break; + case XSC_CMD_OP_DESTROY_QP: + qpn = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_send_cmd_2rst_qp(dev, qpn); + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len); + break; + default: + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len); + break; + } + xsc_pci_ctrl_cmdq_handle_res_obj(file, in, hdr.attr.length, out, hdr.attr.opcode); + + if (copy_to_user((void *)user_hdr->attr.data, out, out_len)) { + xsc_core_err(dev, "fail to copy_to_user user hdr attr\n"); + err = -EFAULT; + } +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int xsc_pci_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) +{ + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = xsc_pci_ctrl_cmdq(file, user_hdr); + break; + case XSC_IOCTL_DRV_GET: + err = xsc_pci_ctrl_getinfo(file->xdev, user_hdr); + break; + case XSC_IOCTL_DRV_SET: + err = xsc_pci_ctrl_setinfo(file->xdev, user_hdr); + break; + case XSC_IOCTL_CMDQ_RAW: + err = xsc_pci_ctrl_cmdq_raw(file, user_hdr); + break; + default: + err = TRY_NEXT_CB; + break; + } + + return err; +} + +void xsc_pci_ctrl_fini(void) +{ + xsc_port_ctrl_cb_dereg(XSC_PCI_CTRL_NAME); +} + +int xsc_pci_ctrl_init(void) +{ + int ret; + + ret = xsc_port_ctrl_cb_reg(XSC_PCI_CTRL_NAME, xsc_pci_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_PCI_CTRL_NAME); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..c57caed380b7f014af53607f66f1f71004ace9c2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_PCI_CTRL_H +#define XSC_PCI_CTRL_H + +#include +#include +#include + +//for x86 +#ifndef NR_VECTORS +#define NR_VECTORS 256 +#endif +#define IRQ_MATRIX_BITS NR_VECTORS +#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) + +struct db_cpumap { + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int managed_allocated; + u8 initialized; + u8 online; + unsigned long alloc_map[IRQ_MATRIX_SIZE]; + unsigned long managed_map[IRQ_MATRIX_SIZE]; +}; + +struct db_irq_matrix { + unsigned int matrix_bits; + unsigned int alloc_start; + unsigned int alloc_end; + unsigned int alloc_size; + unsigned int global_available; + unsigned int global_reserved; + unsigned int systembits_inalloc; + unsigned int total_allocated; + unsigned int online_maps; + struct db_cpumap __percpu *maps; + unsigned long scratch_map[IRQ_MATRIX_SIZE]; + unsigned long system_map[IRQ_MATRIX_SIZE]; +}; + +u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev); + +int xsc_pci_ctrl_init(void); +void xsc_pci_ctrl_fini(void); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..7e5c34ebe2ddc5cf49e6900cde8a0720f0366d54 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/xsc_port_ctrl.h" +#include "common/res_obj.h" + +#define XSC_PORT_CTRL_MAX 1024 +#define XSC_PORT_CTRL_NAME_PRE "yunsilicon" +#define XSC_PORT_CTRL_NAME "port_ctrl" +#define XSC_PORT_CTRL_CB_NAME_LEN 15 +DECLARE_BITMAP(g_bitmap_dev_id, XSC_PORT_CTRL_MAX); + +struct xsc_port_ctrl_reg { + struct list_head node; + char name[XSC_PORT_CTRL_CB_NAME_LEN + 1]; + port_ctrl_cb cb; + void *data; +}; + +static dev_t g_port_ctrl_root_dev; +static struct class *g_port_ctrl_class; +static int g_port_ctrl_dev_cnt; +static struct list_head g_port_ctrl_cbs = LIST_HEAD_INIT(g_port_ctrl_cbs); +struct mutex g_port_ctrl_cbs_lock; /* protect port ctrl node list */ + +static int _port_ctrl_open(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl *ctrl = container_of(inode->i_cdev, struct xsc_port_ctrl, cdev); + struct xsc_port_ctrl_file *file; + + file = kzalloc(sizeof(*file), GFP_KERNEL); + if (!file) + return -ENOMEM; + + INIT_RADIX_TREE(&file->bdf_tree, GFP_ATOMIC); + spin_lock_init(&file->bdf_lock); + file->ctrl = ctrl; + + file->root_bdf = kzalloc(sizeof(*file->root_bdf), GFP_KERNEL); + if (!file->root_bdf) { + kfree(file); + return -ENOMEM; + } + INIT_RADIX_TREE(&file->root_bdf->obj_tree, GFP_ATOMIC); + spin_lock_init(&file->root_bdf->obj_lock); + file->root_bdf->xdev = container_of(ctrl, struct xsc_core_device, port_ctrl); + + spin_lock(&ctrl->file_lock); + list_add_tail(&file->file_node, &ctrl->file_list); + spin_unlock(&ctrl->file_lock); + filp->private_data = file; + + xsc_core_info(file->root_bdf->xdev, "process %d open port ctrl file\n", current->pid); + + return 0; +} + +static void xsc_release_port_ctrl_file(struct xsc_port_ctrl_file *file) +{ + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; + + xsc_close_bdf_file(file->root_bdf); + kfree(file->root_bdf); + spin_lock(&file->bdf_lock); + 
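+ /* the root_bdf context was released above; now close and free every per-BDF context created by get_bdf_file() that is still in this fd's radix tree */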
radix_tree_for_each_slot(slot, &file->bdf_tree, &iter, 0) { + bdf_file = (struct xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&file->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&file->bdf_lock); +} + +static int _port_ctrl_release(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl_file *file = filp->private_data; + + xsc_release_port_ctrl_file(file); + spin_lock(&file->ctrl->file_lock); + list_del(&file->file_node); + spin_unlock(&file->ctrl->file_lock); + kfree(file); + + return 0; +} + +static bool is_db_ofst(struct xsc_core_device *xdev, unsigned long offset) +{ + if (offset == (xdev->regs.tx_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.rx_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.complete_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.complete_reg & PAGE_MASK)) + return true; + return false; +} + +static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + resource_size_t reg_base; + unsigned long start = (unsigned long)vma->vm_start; + unsigned long size = (unsigned long)(vma->vm_end - vma->vm_start); + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + u64 addr; + u32 db_type; + u32 domain = 0; + u32 bus; + u32 devfn; + struct xsc_port_ctrl_file *file; + struct xsc_core_device *xdev; + struct xsc_core_device *rl_xdev; + u32 bdf; + + file = filp->private_data; + xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); + + xsc_core_dbg(xdev, "_port_ctrl_map:offset=%lx\n", offset); + + bdf = offset >> 32; + db_type = bdf & 0x0000000f; + devfn = (bdf >> 4) & 0x000000ff; + bus = (bdf >> 12) & 0x000000ff; + + xsc_core_dbg(xdev, "bus=%u,devfn=%u,db_type=%u\n", bus, devfn, db_type); + + if (bdf != 0) { + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(domain, bus, devfn); + if (!rl_xdev) + return -1; + + if (db_type == XSC_MMAP_MSG_SQDB) { + addr = rl_xdev->regs.tx_db; + } else if (db_type == XSC_MMAP_MSG_RQDB) { + addr = rl_xdev->regs.rx_db; + } else if (db_type == XSC_MMAP_MSG_CQDB) { + addr = rl_xdev->regs.complete_db; + } else if (db_type == XSC_MMAP_MSG_ARM_CQDB) { + addr = rl_xdev->regs.complete_reg; + } else { + pr_err("[%s:%d] mmap err\n", __func__, __LINE__); + return -1; + } + } else { + rl_xdev = xdev; + if (is_db_ofst(xdev, offset) || !offset) + addr = offset; + else + return -EINVAL; + } + + xsc_core_dbg(xdev, "tx_db=%llx,rx_db=%llx,cq_db=%llx,cq_reg=%llx\n", + rl_xdev->regs.tx_db, rl_xdev->regs.rx_db, + rl_xdev->regs.complete_db, rl_xdev->regs.complete_reg); + + reg_base = (pci_resource_start(rl_xdev->pdev, rl_xdev->bar_num) + (addr & PAGE_MASK)); + + if (addr) { + if (xdev->chip_ver_h == 0x100) + reg_base = xsc_core_is_pf(rl_xdev) ? 
reg_base - 0xA0000000 : reg_base; + else + reg_base = reg_base - 0xA0000000; + } + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (remap_pfn_range(vma, start, (reg_base >> PAGE_SHIFT), size, vma->vm_page_prot)) { + pr_err("[%s:%d] remap_pfn_range err\n", __func__, __LINE__); + return -1; + } + + return 0; +} + +static inline struct xsc_bdf_file *get_bdf_file(struct xsc_port_ctrl_file *file, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_core_device *xdev; + struct xsc_bdf_file *bdf_file; + struct xsc_core_device *rl_xdev; + unsigned long key; + + xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); + xsc_core_dbg(xdev, "domain=%x, bus=%x, devfn=%x\n", hdr->domain, hdr->bus, hdr->devfn); + if ((hdr->domain == 0 && hdr->bus == 0 && hdr->devfn == 0) || + (hdr->domain == pci_domain_nr(xdev->pdev->bus) && + hdr->bus == xdev->pdev->bus->number && + hdr->devfn == xdev->pdev->devfn)) + return file->root_bdf; + + key = bdf_to_key(hdr->domain, hdr->bus, hdr->devfn); + spin_lock(&file->bdf_lock); + bdf_file = radix_tree_lookup(&file->bdf_tree, key); + spin_unlock(&file->bdf_lock); + if (bdf_file) { + xsc_core_dbg(bdf_file->xdev, "find the bdf file: %lx\n", bdf_file->key); + return bdf_file; + } + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(hdr->domain, hdr->bus, hdr->devfn); + if (!rl_xdev) { + xsc_core_err(xdev, "fail to get xdev: domain=%x, bus=%x, devfn=%x\n", + hdr->domain, hdr->bus, hdr->devfn); + return NULL; + } + + bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL); + if (!bdf_file) + return NULL; + + bdf_file->key = key; + INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC); + spin_lock_init(&bdf_file->obj_lock); + bdf_file->xdev = rl_xdev; + radix_tree_preload(GFP_KERNEL); + spin_lock(&file->bdf_lock); + radix_tree_insert(&file->bdf_tree, key, bdf_file); + spin_unlock(&file->bdf_lock); + radix_tree_preload_end(); + xsc_core_dbg(rl_xdev, "bdf file does not exist, create it and add to port ctrl file\n"); + + return bdf_file; +} + +static long _port_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct xsc_port_ctrl_reg *p; + struct xsc_port_ctrl_file *file; + struct xsc_ioctl_hdr __user *user_hdr; + struct xsc_bdf_file *bdf_file; + struct xsc_ioctl_hdr hdr; + int err; + + file = filp->private_data; + user_hdr = (struct xsc_ioctl_hdr __user *)arg; + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + pr_err("%s: fail to copy from user hdr\n", __func__); + return -EFAULT; + } + + bdf_file = get_bdf_file(file, &hdr); + if (!bdf_file) { + pr_err("%s: fail to find bdf file\n", __func__); + return -EFAULT; + } + + list_for_each_entry(p, &g_port_ctrl_cbs, node) { + if (p->cb) { + err = p->cb(bdf_file, cmd, user_hdr, p->data); + if (err != TRY_NEXT_CB) + break; + } + } + + return err; +} + +static const struct file_operations g_port_ctrl_fops = { + .owner = THIS_MODULE, + .open = _port_ctrl_open, + .mmap = _port_ctrl_mmap, + .unlocked_ioctl = _port_ctrl_ioctl, + .compat_ioctl = _port_ctrl_ioctl, + .release = _port_ctrl_release, +}; + +static struct xsc_port_ctrl_reg *_port_ctrl_cbs_get(const char *name) +{ + struct xsc_port_ctrl_reg *p, *found; + + found = NULL; + list_for_each_entry(p, &g_port_ctrl_cbs, node) { + if (strcmp(p->name, name) == 0) { + found = p; + break; + } + } + + return found; +} + +static void _port_ctrl_data_fini(void) +{ + class_destroy(g_port_ctrl_class); + unregister_chrdev_region(g_port_ctrl_root_dev, XSC_PORT_CTRL_MAX); +} + +static int _port_ctrl_data_init(void) +{ + int ret; + int major_devid; + + ret = 
alloc_chrdev_region(&g_port_ctrl_root_dev, 0, XSC_PORT_CTRL_MAX, + XSC_PORT_CTRL_NAME_PRE); + if (ret < 0) { + pr_err("%s can't get major id\n", XSC_PORT_CTRL_NAME_PRE); + return -1; + } + + major_devid = MAJOR(g_port_ctrl_root_dev); + pr_info("requested major_devid %d\n", major_devid); + + g_port_ctrl_class = class_create(XSC_PORT_CTRL_NAME_PRE); + if (IS_ERR(g_port_ctrl_class)) { + pr_err("failed to create class with name %s\n", + XSC_PORT_CTRL_NAME_PRE); + unregister_chrdev_region(g_port_ctrl_root_dev, XSC_PORT_CTRL_MAX); + return -1; + } + + g_port_ctrl_dev_cnt = 0; + + return 0; +} + +static void _port_ctrl_dev_del(struct xsc_core_device *dev) +{ + struct xsc_port_ctrl *ctrl; + struct xsc_port_ctrl_file *file, *n; + int dev_id = 0; + + ctrl = &dev->port_ctrl; + if (!ctrl) + return; + + dev_id = MINOR(ctrl->devid); + spin_lock(&ctrl->file_lock); + list_for_each_entry_safe(file, n, &ctrl->file_list, file_node) { + xsc_release_port_ctrl_file(file); + list_del(&file->file_node); + kfree(file); + } + spin_unlock(&ctrl->file_lock); + + device_destroy(g_port_ctrl_class, ctrl->devid); + cdev_del(&ctrl->cdev); + + clear_bit(dev_id, g_bitmap_dev_id); + g_port_ctrl_dev_cnt--; +} + +static int _port_ctrl_dev_add(struct xsc_core_device *dev) +{ + struct xsc_port_ctrl *ctrl; + int ret; + int dev_id = 0; + + if (g_port_ctrl_dev_cnt >= XSC_PORT_CTRL_MAX) { + xsc_core_err(dev, "too many port control devices\n"); + return -ENOMEM; + } + + ctrl = &dev->port_ctrl; + dev_id = find_first_zero_bit(g_bitmap_dev_id, XSC_PORT_CTRL_MAX); + ctrl->devid = g_port_ctrl_root_dev + dev_id; + ctrl->cdev.owner = THIS_MODULE; + INIT_LIST_HEAD(&ctrl->file_list); + spin_lock_init(&ctrl->file_lock); + cdev_init(&ctrl->cdev, &g_port_ctrl_fops); + ret = cdev_add(&ctrl->cdev, ctrl->devid, 1); + if (ret != 0) { + xsc_core_err(dev, "failed to add cdev\n"); + return -ENOMEM; + } + + ctrl->device = device_create(g_port_ctrl_class, NULL, ctrl->devid, NULL, + "%s!%s_%04x:%02x:%02x.%x", XSC_PORT_CTRL_NAME_PRE, + XSC_PORT_CTRL_NAME, pci_domain_nr(dev->pdev->bus), + dev->pdev->bus->number, + PCI_SLOT(dev->pdev->devfn), + PCI_FUNC(dev->pdev->devfn)); + if (IS_ERR(ctrl->device)) { + xsc_core_err(dev, "failed to create port control device\n"); + cdev_del(&ctrl->cdev); + return -ENOMEM; + } + + g_port_ctrl_dev_cnt++; + set_bit(dev_id, g_bitmap_dev_id); + + return 0; +} + +static void _port_ctrl_cb_fini(void) +{ + struct xsc_port_ctrl_reg *p, *n; + + list_for_each_entry_safe(p, n, &g_port_ctrl_cbs, node) { + mutex_lock(&g_port_ctrl_cbs_lock); + list_del(&p->node); + mutex_unlock(&g_port_ctrl_cbs_lock); + kfree(p); + } +} + +static int _port_ctrl_cb_init(void) +{ + mutex_init(&g_port_ctrl_cbs_lock); + + return 0; +} + +static void _port_ctrl_dev_flush(void) +{ +} + +void xsc_port_ctrl_fini(void) +{ + _port_ctrl_dev_flush(); + _port_ctrl_data_fini(); + _port_ctrl_cb_fini(); +} + +int xsc_port_ctrl_init(void) +{ + int ret; + + ret = _port_ctrl_data_init(); + if (ret != 0) { + pr_err("failed to initialize port ctrl data\n"); + return -1; + } + + ret = _port_ctrl_cb_init(); + if (ret != 0) { + pr_err("failed to initialize port ctrl cb\n"); + _port_ctrl_data_fini(); + return -1; + } + + return 0; +} + +void xsc_port_ctrl_remove(struct xsc_core_device *dev) +{ + _port_ctrl_dev_del(dev); +} + +int xsc_port_ctrl_probe(struct xsc_core_device *dev) +{ + int ret = 0; + + ret = _port_ctrl_dev_add(dev); + if (ret != 0) + xsc_core_err(dev, "failed to add new port control device\n"); + + return ret; +} + +int 
xsc_port_ctrl_cb_reg(const char *name, port_ctrl_cb cb, void *data) +{ + struct xsc_port_ctrl_reg *reg_node; + + if (strlen(name) > XSC_PORT_CTRL_CB_NAME_LEN) { + pr_err("the name is too long to register to port control\n"); + return -1; + } + + reg_node = _port_ctrl_cbs_get(name); + if (reg_node) { + pr_err("failed to register a duplicated node\n"); + return -1; + } + + reg_node = kmalloc(sizeof(*reg_node), GFP_KERNEL); + if (!reg_node) + return -1; + + strscpy(reg_node->name, name, sizeof(reg_node->name)); + reg_node->cb = cb; + reg_node->data = data; + INIT_LIST_HEAD(®_node->node); + + mutex_lock(&g_port_ctrl_cbs_lock); + list_add_tail(®_node->node, &g_port_ctrl_cbs); + mutex_unlock(&g_port_ctrl_cbs_lock); + + return 0; +} +EXPORT_SYMBOL(xsc_port_ctrl_cb_reg); + +void xsc_port_ctrl_cb_dereg(const char *name) +{ + struct xsc_port_ctrl_reg *p, *n; + + list_for_each_entry_safe(p, n, &g_port_ctrl_cbs, node) { + if (strcmp(p->name, name) == 0) { + mutex_lock(&g_port_ctrl_cbs_lock); + list_del(&p->node); + mutex_unlock(&g_port_ctrl_cbs_lock); + kfree(p); + break; + } + } +} +EXPORT_SYMBOL(xsc_port_ctrl_cb_dereg);
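
For reference, the sketch below shows how a userspace tool might exercise the port control node that _port_ctrl_dev_add() publishes. It is a minimal sketch under stated assumptions: the uapi header name xsc_ioctl.h, the ioctl request macro XSC_IOCTL_DRV_GET coming from that header, and the PCI address in the device path are all illustrative; the structure fields simply mirror the kernel code above, and devtmpfs turns the '!' in the device name into a '/', so the node appears under /dev/yunsilicon/.

/* Illustrative userspace sketch: header name, request macro value and PCI
 * address are assumptions; field names follow the kernel code above.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "xsc_ioctl.h"	/* hypothetical uapi header exporting struct xsc_ioctl_hdr */

int main(void)
{
	/* room for the header plus the two u16 values returned for GET_VECTOR_MATRIX */
	size_t len = sizeof(struct xsc_ioctl_hdr) + 2 * sizeof(uint16_t);
	struct xsc_ioctl_hdr *hdr = calloc(1, len);
	uint16_t global_available, totalvfs;
	int fd, ret;

	if (!hdr)
		return 1;

	/* devtmpfs renders the "yunsilicon!port_ctrl_..." device name under /dev/yunsilicon/ */
	fd = open("/dev/yunsilicon/port_ctrl_0000:3b:00.0", O_RDWR);
	if (fd < 0) {
		perror("open port_ctrl");
		free(hdr);
		return 1;
	}

	hdr->check_filed = XSC_IOCTL_CHECK_FILED;	/* magic validated by xsc_pci_ctrl_getinfo() */
	/* domain/bus/devfn left at zero selects this node's root_bdf context */
	hdr->attr.opcode = XSC_IOCTL_GET_VECTOR_MATRIX;
	hdr->attr.length = 2 * sizeof(uint16_t);

	ret = ioctl(fd, XSC_IOCTL_DRV_GET, hdr);
	if (ret) {
		perror("XSC_IOCTL_DRV_GET");
	} else {
		memcpy(&global_available, hdr->attr.data, sizeof(global_available));
		memcpy(&totalvfs, hdr->attr.data + sizeof(global_available), sizeof(totalvfs));
		printf("global_available=%u totalvfs=%u\n", global_available, totalvfs);
	}

	close(fd);
	free(hdr);
	return ret ? 1 : 0;
}

Filling hdr->domain, hdr->bus and hdr->devfn instead selects another function through get_bdf_file(), which creates a per-BDF context on first use.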