From 850e6cdf3ff664dd69ed963c8c83cc5bd04ff6c7 Mon Sep 17 00:00:00 2001
From: nebula_matrix_driver
Date: Mon, 27 Oct 2025 19:43:43 +0800
Subject: [PATCH] anolis: net: nebula-matrix: add full-scenario function support

ANBZ: #26669

Add full-scenario function support to the nebula-matrix driver:
- support XDP
- support TC offload
- support bond

Signed-off-by: nebula_matrix_driver
---
 drivers/net/ethernet/nebula-matrix/Kconfig | 3 +-
 .../net/ethernet/nebula-matrix/nbl/Makefile | 36 +-
 .../nbl/nbl_channel/nbl_channel.c | 122 +-
 .../nbl/nbl_channel/nbl_channel.h | 10 +-
 .../nebula-matrix/nbl/nbl_channel/nbl_cmdq.c | 17 +-
 .../nebula-matrix/nbl/nbl_channel/nbl_cmdq.h | 6 +
 .../nebula-matrix/nbl/nbl_common/nbl_common.c | 7 +-
 .../nebula-matrix/nbl/nbl_common/nbl_common.h | 2 +-
 .../nebula-matrix/nbl/nbl_common/nbl_event.h | 2 +-
 .../net/ethernet/nebula-matrix/nbl/nbl_core.h | 17 +-
 .../nebula-matrix/nbl/nbl_core/nbl_debugfs.c | 72 +
 .../nebula-matrix/nbl/nbl_core/nbl_debugfs.h | 8 +-
 .../nebula-matrix/nbl/nbl_core/nbl_dev.c | 1641 ++++---
 .../nebula-matrix/nbl/nbl_core/nbl_dev.h | 95 +-
 .../nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c | 290 +-
 .../nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h | 2 +-
 .../nebula-matrix/nbl/nbl_core/nbl_dev_user.c | 346 +-
 .../nebula-matrix/nbl/nbl_core/nbl_dev_user.h | 4 +-
 .../nebula-matrix/nbl/nbl_core/nbl_dispatch.c | 2991 ++++++++++---
 .../nebula-matrix/nbl/nbl_core/nbl_dispatch.h | 11 +-
 .../nebula-matrix/nbl/nbl_core/nbl_ethtool.c | 797 +++-
 .../nebula-matrix/nbl/nbl_core/nbl_ethtool.h | 4 +-
 .../nebula-matrix/nbl/nbl_core/nbl_hwmon.c | 18 +-
 .../nebula-matrix/nbl/nbl_core/nbl_hwmon.h | 5 +
 .../nebula-matrix/nbl/nbl_core/nbl_ipsec.c | 15 +-
 .../nebula-matrix/nbl/nbl_core/nbl_ipsec.h | 2 +-
 .../nebula-matrix/nbl/nbl_core/nbl_ktls.c | 16 +-
 .../nebula-matrix/nbl/nbl_core/nbl_ktls.h | 2 +-
 .../nebula-matrix/nbl/nbl_core/nbl_lag.c | 24 +-
 .../nebula-matrix/nbl/nbl_core/nbl_lag.h | 2 +-
 .../nbl/nbl_core/nbl_p4_version.h | 22 +
 .../nebula-matrix/nbl/nbl_core/nbl_service.c | 3575 +++++++++++----
 .../nebula-matrix/nbl/nbl_core/nbl_service.h | 100 +-
 .../nebula-matrix/nbl/nbl_core/nbl_sysfs.c | 423 +-
 .../nebula-matrix/nbl/nbl_core/nbl_sysfs.h | 28 +-
 .../nebula-matrix/nbl/nbl_core/nbl_tc.c | 522 ++-
 .../nebula-matrix/nbl/nbl_core/nbl_tc.h | 15 +-
 .../nebula-matrix/nbl/nbl_core/nbl_tc_tun.c | 26 +-
 .../nebula-matrix/nbl/nbl_core/nbl_tc_tun.h | 8 +-
 .../nbl/nbl_export/nbl_export_rdma.h | 5 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_accel.c | 6 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_accel.h | 2 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_adminq.c | 409 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_adminq.h | 43 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_fc.c | 3 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_fc.h | 2 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_fd.c | 2 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_fd.h | 2 +-
 .../nebula-matrix/nbl/nbl_hw/nbl_hw.h | 35 +-
 .../nbl_hw/nbl_hw_leonis/base/nbl_datapath.h | 23 +-
 .../nbl_hw_leonis/base/nbl_datapath_dpa.h | 5 +
 .../nbl_hw_leonis/base/nbl_datapath_dped.h | 7 +-
 .../nbl_hw_leonis/base/nbl_datapath_dstore.h | 5 +
 .../nbl_hw_leonis/base/nbl_datapath_ucar.h | 5 +
 .../nbl_hw_leonis/base/nbl_datapath_upa.h | 5 +
 .../nbl_hw_leonis/base/nbl_datapath_uped.h | 5 +
 .../nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h | 22 +-
 .../nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h | 5 +
 .../base/nbl_intf_pcompleter_host.h | 108 +
 .../nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h | 5 +
 .../nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h | 8 +-
 .../nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h | 5 +
.../nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h | 6 + .../nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h | 5 + .../nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h | 5 + .../nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h | 5 + .../nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h | 5 + .../nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h | 5 + .../nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h | 5 + .../nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c | 5 +- .../nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h | 2 +- .../nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c | 1614 +++++-- .../nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h | 139 +- .../nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c | 1011 ++++- .../nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h | 107 +- .../nbl_hw_leonis/nbl_phy_leonis_regs.c | 3863 +++++++++++++++++ .../nbl_hw_leonis/nbl_phy_leonis_regs.h | 12 + .../nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c | 539 ++- .../nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h | 2 +- .../nbl_hw_leonis/nbl_resource_leonis.c | 521 ++- .../nbl_hw_leonis/nbl_resource_leonis.h | 4 +- .../nbl_hw_leonis/nbl_tc_flow_filter_leonis.c | 392 +- .../nbl_hw_leonis/nbl_tc_flow_filter_leonis.h | 6 +- .../nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c | 142 +- .../nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h | 21 +- .../nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c | 8 +- .../nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h | 7 +- .../nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c | 14 +- .../nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h | 6 + .../nebula-matrix/nbl/nbl_hw/nbl_interrupt.c | 31 +- .../nebula-matrix/nbl/nbl_hw/nbl_interrupt.h | 11 +- .../nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h | 6 + .../nebula-matrix/nbl/nbl_hw/nbl_phy.h | 7 +- .../nebula-matrix/nbl/nbl_hw/nbl_queue.h | 2 +- .../nebula-matrix/nbl/nbl_hw/nbl_resource.c | 15 + .../nebula-matrix/nbl/nbl_hw/nbl_resource.h | 186 +- .../nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c | 338 ++ .../nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h | 83 + .../nebula-matrix/nbl/nbl_hw/nbl_txrx.c | 661 ++- .../nebula-matrix/nbl/nbl_hw/nbl_txrx.h | 57 +- .../nebula-matrix/nbl/nbl_hw/nbl_vsi.c | 34 +- .../nebula-matrix/nbl/nbl_hw/nbl_vsi.h | 2 +- .../nbl/nbl_include/nbl_def_channel.h | 222 +- .../nbl/nbl_include/nbl_def_common.h | 47 +- .../nbl/nbl_include/nbl_def_dev.h | 4 +- .../nbl/nbl_include/nbl_def_dispatch.h | 64 +- .../nbl/nbl_include/nbl_def_phy.h | 61 +- .../nbl/nbl_include/nbl_def_resource.h | 66 +- .../nbl/nbl_include/nbl_def_service.h | 115 +- .../nbl/nbl_include/nbl_include.h | 464 +- .../nbl/nbl_include/nbl_product_base.h | 2 +- .../net/ethernet/nebula-matrix/nbl/nbl_main.c | 68 +- 112 files changed, 18779 insertions(+), 4221 deletions(-) create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_p4_version.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h diff --git a/drivers/net/ethernet/nebula-matrix/Kconfig b/drivers/net/ethernet/nebula-matrix/Kconfig index e92a66125629..0264f950b4c4 100644 --- a/drivers/net/ethernet/nebula-matrix/Kconfig +++ b/drivers/net/ethernet/nebula-matrix/Kconfig @@ -11,7 +11,7 @@ config NET_VENDOR_NEBULA_MATRIX Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about Nebual-matrix cards. 
If you say Y, you will be + the questions about Nebula-matrix cards. If you say Y, you will be asked for your specific card in the following questions. if NET_VENDOR_NEBULA_MATRIX @@ -21,6 +21,7 @@ config NBL_CORE depends on PCI && VFIO depends on ARM64 || X86_64 default m + select PLDMFW help This driver supports Nebula-matrix Ethernet Controller m18110 Family of devices. For more information about this product, go to the product diff --git a/drivers/net/ethernet/nebula-matrix/nbl/Makefile b/drivers/net/ethernet/nebula-matrix/nbl/Makefile index 4a8b896e3564..430492b32758 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/Makefile +++ b/drivers/net/ethernet/nebula-matrix/nbl/Makefile @@ -2,27 +2,10 @@ # Copyright (c) 2021 Nebula Matrix Limited. # Author: Bennie Yan -ifeq ($(KERNELRELEASE),) - -KERNELDIR ?= /lib/modules/$(shell uname -r)/build -PWD := $(shell pwd) - -modules: - $(MAKE) -C $(KERNELDIR) M=$(PWD) modules - -modules_install: - $(MAKE) -C $(KERNELDIR) M=$(PWD) modules_install - -clean: - $(MAKE) -C $(KERNELDIR) M=$(PWD) clean - -else - -obj-m := nbl_core.o +obj-$(CONFIG_NBL_CORE) := nbl_core.o nbl_core-objs += nbl_common/nbl_common.o \ nbl_common/nbl_event.o \ - nbl_common/nbl_net_sysfs.o \ nbl_channel/nbl_channel.o \ nbl_channel/nbl_cmdq.o \ nbl_hw/nbl_hw_leonis/nbl_phy_leonis.o \ @@ -34,7 +17,9 @@ nbl_core-objs += nbl_common/nbl_common.o \ nbl_hw/nbl_hw_leonis/nbl_queue_leonis.o \ nbl_hw/nbl_hw_leonis/nbl_resource_leonis.o \ nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.o \ nbl_hw/nbl_fc.o \ + nbl_hw/nbl_tc_pedit.o \ nbl_hw/nbl_resource.o \ nbl_hw/nbl_interrupt.o \ nbl_hw/nbl_txrx.o \ @@ -50,13 +35,13 @@ nbl_core-objs += nbl_common/nbl_common.o \ nbl_core/nbl_service.o \ nbl_core/nbl_dev_rdma.o \ nbl_core/nbl_sysfs.o \ - nbl_core/nbl_dev_user.o \ nbl_core/nbl_dev.o \ nbl_core/nbl_ktls.o \ nbl_core/nbl_ipsec.o \ nbl_core/nbl_tc_tun.o \ nbl_core/nbl_tc.o \ nbl_core/nbl_hwmon.o \ + nbl_core/nbl_dev_user.o \ nbl_main.o # Do not modify include path, unless you are adding a new file which needs some headers in its @@ -69,13 +54,8 @@ nbl_core-objs += nbl_common/nbl_common.o \ # in your own .h private. # # Try not to break these rules, sincerely. 
-ccflags-y := -Werror -Wall -I $(src) -I $(src)/nbl_include -I $(src)/nbl_export -I $(src)/nbl_hw - -CFLAGS_nbl_hw/nbl_hw_leonis/nbl_phy_leonis.o += -I $(src)/nbl_hw -CFLAGS_nbl_hw/nbl_hw_leonis/nbl_flow_leonis.o += -I $(src)/nbl_hw -CFLAGS_nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.o += -I $(src)/nbl_hw -CFLAGS_nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.o += -I $(src)/nbl_hw -CFLAGS_nbl_hw/nbl_hw_leonis/nbl_queue_leonis.o += -I $(src)/nbl_hw -CFLAGS_nbl_hw/nbl_hw_leonis/nbl_resource_leonis.o += -I $(src)/nbl_hw +ccflags-y += -I$(srctree)/$(src) +ccflags-y += -I$(srctree)/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw +ccflags-y += -I$(srctree)/drivers/net/ethernet/nebula-matrix/nbl/nbl_export -endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c index 0219833c2eec..0bb778e88dd0 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c @@ -9,6 +9,23 @@ static int nbl_chan_send_ack(void *priv, struct nbl_chan_ack_info *chan_ack); +static void nbl_chan_delete_msg_handler(struct nbl_channel_mgt *chan_mgt, u16 msg_type) +{ + u8 chan_type; + struct nbl_chan_info *chan_info; + + nbl_common_free_hash_node(chan_mgt->handle_hash_tbl, &msg_type); + + if (msg_type < NBL_CHAN_MSG_ADMINQ_GET_EMP_VERSION) + chan_type = NBL_CHAN_TYPE_MAILBOX; + else + chan_type = NBL_CHAN_TYPE_ADMINQ; + + chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + if (chan_info && chan_info->clean_task) + nbl_common_flush_task(chan_info->clean_task); +} + static int nbl_chan_add_msg_handler(struct nbl_channel_mgt *chan_mgt, u16 msg_type, nbl_chan_resp func, void *priv) { @@ -28,6 +45,7 @@ static int nbl_chan_init_msg_handler(struct nbl_channel_mgt *chan_mgt, u8 user_n struct nbl_hash_tbl_key tbl_key; struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); int ret = 0; + struct device *dev = NBL_COMMON_TO_DEV(common); struct nbl_chan_notify_userdev *notify; @@ -57,6 +75,7 @@ static int nbl_chan_init_msg_handler(struct nbl_channel_mgt *chan_mgt, u8 user_n chan_mgt->notify = NULL; devm_kfree(dev, notify); } + return ret; } @@ -65,6 +84,7 @@ static void nbl_chan_remove_msg_handler(struct nbl_channel_mgt *chan_mgt) nbl_common_remove_hash_table(chan_mgt->handle_hash_tbl, NULL); chan_mgt->handle_hash_tbl = NULL; + if (chan_mgt->notify) { devm_kfree(NBL_COMMON_TO_DEV(chan_mgt->common), chan_mgt->notify); chan_mgt->notify = NULL; @@ -284,7 +304,7 @@ static int nbl_chan_cfg_mailbox_qinfo_map_table(struct nbl_channel_mgt *chan_mgt for (func_id = 0; func_id < NBL_MAX_PF; func_id++) { if (!(pf_mask & (1 << func_id))) phy_ops->cfg_mailbox_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), func_id, - common->bus, common->devid, + common->hw_bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id); } @@ -297,7 +317,7 @@ static int nbl_chan_cfg_adminq_qinfo_map_table(struct nbl_channel_mgt *chan_mgt) struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); phy_ops->cfg_adminq_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), - common->bus, common->devid, + common->hw_bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common)); return 0; @@ -383,6 +403,7 @@ static int nbl_chan_alloc_all_rx_bufs(struct nbl_channel_mgt *chan_mgt, rxq->next_to_clean = 0; rxq->next_to_use = chan_info->num_rxq_entries - 1; rxq->tail_ptr = chan_info->num_rxq_entries - 1; + /* mb for notify 
 */
 	mb();
 
@@ -693,7 +714,8 @@ static int nbl_chan_update_txqueue(struct nbl_channel_mgt *chan_mgt,
 	return 0;
 }
 
-static int nbl_chan_kick_tx_ring(struct nbl_channel_mgt *chan_mgt, struct nbl_chan_info *chan_info)
+static int nbl_chan_kick_tx_ring(struct nbl_channel_mgt *chan_mgt,
+				 struct nbl_chan_info *chan_info)
 {
 	struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt);
 	struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt);
@@ -727,7 +749,8 @@ static int nbl_chan_kick_tx_ring(struct nbl_channel_mgt *chan_mgt, struct nbl_ch
 	return 0;
 }
 
-static void nbl_chan_recv_ack_msg(void *priv, u16 srcid, u16 msgid, void *data, u32 data_len)
+static void nbl_chan_recv_ack_msg(void *priv, u16 srcid, u16 msgid,
+				  void *data, u32 data_len)
 {
 	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
 	struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt);
@@ -825,7 +848,6 @@ static int nbl_chan_msg_forward_userdev(struct nbl_channel_mgt *chan_mgt,
 	/* make sure to update head after content */
 	smp_wmb();
 	*head = tmp;
-
 	eventfd_signal(chan_mgt->notify->eventfd, 1);
 
 	return 0;
@@ -846,10 +868,8 @@ static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32
 	srcid = tx_desc->srcid;
 	msgid = tx_desc->msgid;
 
-	if (msg_type >= NBL_CHAN_MSG_MAX) {
-		dev_warn(dev, "Invalid chan message type %u\n", msg_type);
-		return;
-	}
+	if (msg_type >= NBL_CHAN_MSG_MAX)
+		goto send_warning;
 
 	if (tx_desc->data_len) {
 		payload = (void *)tx_desc->data;
@@ -865,7 +885,7 @@ static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32
 		msg_handler->func(msg_handler->priv, srcid, msgid, payload, payload_len);
 	}
 
-	if (chan_mgt->notify) {
+	if (chan_mgt->notify && msg_type < NBL_CHAN_MSG_MAILBOX_MAX) {
 		mutex_lock(&chan_mgt->notify->lock);
 		if (chan_mgt->notify->eventfd && test_bit(msg_type, chan_mgt->notify->msgtype) &&
 		    chan_mgt->notify->shm_msg_ring) {
@@ -875,6 +895,7 @@ static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32
 		mutex_unlock(&chan_mgt->notify->lock);
 	}
 
+send_warning:
 	if (warn) {
 		NBL_CHAN_ACK(chan_ack, srcid, msg_type, msgid, -EPERM, NULL, 0);
 		nbl_chan_send_ack(chan_mgt, &chan_ack);
@@ -987,6 +1008,7 @@ static int nbl_chan_send_msg(void *priv, struct nbl_chan_send_info *chan_send)
 	union nbl_chan_msg_id msgid = {{0}};
 	struct nbl_chan_tx_param tx_param = {0};
 	int i = NBL_CHAN_TX_WAIT_ACK_TIMES, resend_times = 0, ret = 0;
+	bool need_resend = true; /* need to resend when the ack times out */
 
 	if (chan_send->arg_len > NBL_CHAN_BUF_LEN - sizeof(struct nbl_chan_tx_desc))
 		return -EINVAL;
@@ -1041,13 +1063,18 @@ static int nbl_chan_send_msg(void *priv, struct nbl_chan_send_info *chan_send)
 	if (!chan_send->ack)
 		return 0;
 
+	if (chan_send->dstid != common->mgt_pf && chan_send->msg_type != NBL_CHAN_MSG_KEEP_ALIVE)
+		need_resend = false;
+
 	if (test_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state)) {
 		ret = wait_event_timeout(wait_head->wait_queue, wait_head->acked,
 					 NBL_CHAN_ACK_WAIT_TIME);
 		if (!ret) {
+			wait_head->status = NBL_MBX_STATUS_TIMEOUT;
+			if (!need_resend)
+				return 0;
 			nbl_err(common, NBL_DEBUG_MBX,
 				"Channel waiting ack failed, message type: %d, msg id: %u\n",
 				chan_send->msg_type, msgid.id);
-			wait_head->status = NBL_MBX_STATUS_TIMEOUT;
 			goto check_rx_dma_err;
 		}
 
@@ -1121,6 +1148,13 @@ static int nbl_chan_send_ack(void *priv, struct nbl_chan_ack_info *chan_ack)
 	return 0;
 }
 
+static void nbl_chan_unregister_msg(void *priv, u16 msg_type)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+
+	nbl_chan_delete_msg_handler(chan_mgt,
msg_type); +} + static int nbl_chan_register_msg(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv) { struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; @@ -1156,20 +1190,19 @@ static int nbl_chan_dump_txq(void *priv, struct seq_file *m, u8 type) struct nbl_chan_tx_desc *desc; int i; - seq_printf(m, "q_base_addr:%llx, txq size:%u, next_to_use:%u, tail_ptr:%u, " - "next_to_clean:%u\n", txq->dma, + seq_printf(m, "txq size:%u, next_to_use:%u, tail_ptr:%u, next_to_clean:%u\n", chan_info->num_txq_entries, txq->next_to_use, txq->tail_ptr, txq->next_to_clean); seq_printf(m, "reset times %d\n", chan_info->txq_reset_times); for (i = 0; i < chan_info->num_txq_entries; i++) { desc = NBL_CHAN_TX_RING_TO_DESC(txq, i); wait = &chan_info->wait[i]; - seq_printf(m, "%u: flags 0x%x, srcid %u, dstid %u, data_len %u," - " buf_len %u, msg_type %u, msgid %u, ", i, + seq_printf(m, "%u: flags 0x%x, srcid %u, dstid %u, data_len %u,\n" + "buf_len %u, msg_type %u, msgid %u, ", i, desc->flags, desc->srcid, desc->dstid, desc->data_len, desc->buf_len, desc->msg_type, desc->msgid); - seq_printf(m, "acked %u, ack_err %u, ack_data_len %u," - " need_waked %u, msg_type %u\n", wait->acked, wait->ack_err, + seq_printf(m, "acked %u, ack_err %u, ack_data_len %u,\n" + "need_waked %u, msg_type %u\n", wait->acked, wait->ack_err, wait->ack_data_len, wait->need_waked, wait->msg_type); } @@ -1188,15 +1221,14 @@ static int nbl_chan_dump_rxq(void *priv, struct seq_file *m, u8 type) struct nbl_chan_buf *rx_buf; int i; - seq_printf(m, "q_base_addr:%llx, rxq size:%u, next_to_use:%u, tail_ptr:%u, " - "next_to_clean:%u\n", rxq->dma, + seq_printf(m, "rxq size:%u, next_to_use:%u, tail_ptr:%u, next_to_clean:%u\n", chan_info->num_rxq_entries, rxq->next_to_use, rxq->tail_ptr, rxq->next_to_clean); seq_printf(m, "reset times %d\n", chan_info->rxq_reset_times); for (i = 0; i < chan_info->num_rxq_entries; i++) { rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, i); rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, i); tx_desc = (struct nbl_chan_tx_desc *)rx_buf->va; - seq_printf(m, "%u: rx_desc flags 0x%x, buf_len 0x%x, buf_id 0x%x, buffer_addr 0x%llx, " + seq_printf(m, "%u: rx_desc flags 0x%x, buf_len 0x%x, buf_id 0x%x, buffer_addr 0x%llx,\n" "tx_dedc srcid %u, dstid %u, data_len %u, buf_len %u, msg_type %u, msgid %u\n", i, rx_desc->flags, rx_desc->buf_len, rx_desc->buf_id, rx_desc->buf_addr, tx_desc->srcid, tx_desc->dstid, tx_desc->data_len, tx_desc->buf_len, @@ -1370,6 +1402,7 @@ static struct nbl_channel_ops chan_ops = { .send_msg = nbl_chan_send_msg, .send_ack = nbl_chan_send_ack, .register_msg = nbl_chan_register_msg, + .unregister_msg = nbl_chan_unregister_msg, .cfg_chan_qinfo_map_table = nbl_chan_cfg_qinfo_map_table, .check_queue_exist = nbl_chan_check_queue_exist, .setup_queue = nbl_chan_setup_queue, @@ -1551,52 +1584,3 @@ void nbl_chan_remove_common(void *p) nbl_chan_remove_ops(dev, chan_ops_tbl); } -int nbl_chan_init_bootis(void *p, struct nbl_init_param *param) -{ - struct nbl_adapter *adapter = (struct nbl_adapter *)p; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); - struct nbl_channel_ops_tbl **chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); - - /* if no chan cap, also alloc chan_ops_tbl. 
other layer can call chan_ops->get_queue_cap */ - *chan_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_channel_ops_tbl), GFP_KERNEL); - if (!*chan_ops_tbl) - return -ENOMEM; - - nbl_chan_setup_ops(dev, chan_ops_tbl, NULL); - - return 0; -} - -void nbl_chan_remove_bootis(void *p) -{ - struct nbl_adapter *adapter = (struct nbl_adapter *)p; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); - - devm_kfree(dev, NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter)); - NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter) = NULL; -} - -int nbl_chan_init_virtio(void *p, struct nbl_init_param *param) -{ - struct nbl_adapter *adapter = (struct nbl_adapter *)p; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); - struct nbl_channel_ops_tbl **chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); - - /* if no chan cap, also alloc chan_ops_tbl. other layer can call chan_ops->get_queue_cap */ - *chan_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_channel_ops_tbl), GFP_KERNEL); - if (!*chan_ops_tbl) - return -ENOMEM; - - nbl_chan_setup_ops(dev, chan_ops_tbl, NULL); - - return 0; -} - -void nbl_chan_remove_virtio(void *p) -{ - struct nbl_adapter *adapter = (struct nbl_adapter *)p; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); - - devm_kfree(dev, NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter)); - NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter) = NULL; -} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h index 90aea419412b..ad9406ac998c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -18,9 +18,9 @@ #define NBL_CHAN_MGT_TO_ADMINQ(chan_mgt) ((chan_mgt)->chan_info[NBL_CHAN_TYPE_ADMINQ]) #define NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type) ((chan_mgt)->chan_info[chan_type]) -#define NBL_CHAN_TX_RING_TO_DESC(tx_ring, i) \ +#define NBL_CHAN_TX_RING_TO_DESC(tx_ring, i) \ (&(((struct nbl_chan_tx_desc *)((tx_ring)->desc))[i])) -#define NBL_CHAN_RX_RING_TO_DESC(rx_ring, i) \ +#define NBL_CHAN_RX_RING_TO_DESC(rx_ring, i) \ (&(((struct nbl_chan_rx_desc *)((rx_ring)->desc))[i])) #define NBL_CHAN_TX_RING_TO_BUF(tx_ring, i) (&(((tx_ring)->buf)[i])) #define NBL_CHAN_RX_RING_TO_BUF(rx_ring, i) (&(((rx_ring)->buf)[i])) @@ -34,7 +34,7 @@ #define NBL_CHAN_TX_WAIT_US 100 #define NBL_CHAN_TX_REKICK_WAIT_TIMES 2000 -#define NBL_CHAN_TX_WAIT_TIMES 10000 +#define NBL_CHAN_TX_WAIT_TIMES 30000 #define NBL_CHAN_TX_WAIT_ACK_US_MIN 100 #define NBL_CHAN_TX_WAIT_ACK_US_MAX 120 @@ -52,7 +52,7 @@ #define NBL_CHAN_RX_DESC_AVAIL BIT(3) #define NBL_CHAN_RX_DESC_USED BIT(4) -#define NBL_CHAN_ACK_WAIT_TIME (2 * HZ) +#define NBL_CHAN_ACK_WAIT_TIME (3 * HZ) /* adminq */ #define NBL_ADMINQ_QUEUE_LEN 256 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c index ed7560cfd8dd..947aed54f8ba 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c @@ -1,5 +1,7 @@ -/* Nebula-matrix DPDK user-network - * Copyright(c) 2021-2030 nbl, Inc. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: */ #include @@ -583,8 +585,8 @@ nbl_cmdq_do_send(void *priv, const struct nbl_cmd_hdr *hdr, nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq firmware timeout!\n"); } else { status = NBL_CMDQ_NOHIT_ERR; - nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq param error, block:%d module:%d " - "table:%d.\n", desc->block, desc->module, desc->table); + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq param error, block:%d module:%d table:%d.\n", + desc->block, desc->module, desc->table); } /* mark desc as done by driver */ @@ -676,13 +678,10 @@ static void nbl_cmdq_get_param(void *priv, void *cmdq_param) int nbl_chan_send_cmdq(void *priv, const void *hdr, void *cmd) { struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; - struct nbl_common_info *common = chan_mgt->common; int ret; - if (!chan_mgt->cmdq_mgt) { - nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq not initialized yet"); + if (!chan_mgt->cmdq_mgt) return NBL_CMDQ_NOT_READY; - } ret = nbl_cmdq_send(priv, hdr, cmd); if (ret == (int)NBL_CMDQ_NEED_RESET) @@ -743,7 +742,7 @@ int nbl_chan_cmdq_mgt_start(struct device *dev, void *priv) ret = nbl_cmdq_init_queue(*cmdq_mgt); cmdq_param.vsi_id = common->vsi_id; - cmdq_param.bdf_num = (common->bus << 8 | common->devid << 3 | + cmdq_param.bdf_num = (u16)(common->hw_bus << 8 | common->devid << 3 | NBL_COMMON_TO_PCI_FUNC_ID(common)); nbl_cmdq_get_param(chan_mgt, &cmdq_param); nbl_cmdq_init(chan_mgt, &cmdq_param); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h index bcd400672d3b..742815d34032 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + /* Nebula-matrix DPDK user-network * Copyright(c) 2021-2030 nBL, Inc. 
*/ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c index d49f0f3f8919..e66f014bf544 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c @@ -170,6 +170,7 @@ u32 nbl_common_pf_id_subtraction_mgtpf_id(struct nbl_common_info *common, u32 pf /** * alloc a index resource poll, the index_size max is 64 * 1024 * the poll support start_index not zero; + * the poll support multi thread */ void *nbl_common_init_index_table(struct nbl_index_tbl_key *key) { @@ -254,7 +255,6 @@ void nbl_common_remove_index_table(void *priv, struct nbl_index_tbl_del_key *key void nbl_common_scan_index_table(void *priv, struct nbl_index_tbl_scan_key *key) { struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; - struct device *dev; struct nbl_index_entry_node *idx_node; struct hlist_node *list_node; int i; @@ -262,7 +262,6 @@ void nbl_common_scan_index_table(void *priv, struct nbl_index_tbl_scan_key *key) if (!index_mgt) return; - dev = index_mgt->tbl_key.dev; for (i = 0; i < index_mgt->bucket_size; i++) { hlist_for_each_entry_safe(idx_node, list_node, index_mgt->key_hash + i, node) { if (key && key->action_func) @@ -375,7 +374,7 @@ int nbl_common_alloc_index(void *priv, void *key, struct nbl_index_key_extra *ex return index; key_node_size = sizeof(struct nbl_index_entry_node) + key_size + data_size; - idx_node = devm_kzalloc(index_mgt->tbl_key.dev, key_node_size, GFP_KERNEL); + idx_node = devm_kzalloc(index_mgt->tbl_key.dev, key_node_size, GFP_ATOMIC); if (!idx_node) return index; @@ -528,11 +527,9 @@ int nbl_common_alloc_hash_node(void *priv, void *key, void *data, void **out_dat struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; struct nbl_hash_entry_node *hash_node; u32 hash_value; - u32 node_size; u16 key_size; u16 data_size; - node_size = sizeof(struct nbl_hash_entry_node); hash_node = devm_kzalloc(tbl_mgt->tbl_key.dev, sizeof(struct nbl_hash_entry_node), GFP_KERNEL); if (!hash_node) diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h index 4c6e08d54f2b..b70eef908c5d 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h index 6a9b4e8375d2..36a48bc80dbb 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h index 0961c3527d7c..94324c7474c3 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -65,6 +65,7 @@ #define NBL_CAP_IS_USER(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_USER_BIT) #define NBL_CAP_IS_GRC(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_GRC_BIT) #define NBL_CAP_IS_BLK(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_BLK_BIT) +#define NBL_CAP_IS_OCP(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_OCP_BIT) #define NBL_CAP_IS_DPU_HOST(val) ({ typeof(val) _val = (val); \ !NBL_CAP_TEST_BIT(_val, NBL_CAP_IS_NIC_BIT) && \ NBL_CAP_TEST_BIT(_val, NBL_CAP_DPU_IS_HOST_BIT); }) @@ -89,6 +90,7 @@ enum { NBL_CAP_IS_BLK_BIT, NBL_CAP_HAS_USER_BIT, NBL_CAP_HAS_GRC_BIT, + NBL_CAP_IS_OCP_BIT, NBL_CAP_HAS_FACTORY_CTRL_BIT, }; @@ -102,6 +104,7 @@ enum nbl_adapter_state { NBL_TESTING, NBL_USER, NBL_FATAL_ERR, + NBL_XDP, NBL_STATE_NBITS }; @@ -143,7 +146,7 @@ struct nbl_adapter { struct nbl_rep_data { struct net_device *netdev; - struct nbl_netdev_rep_attr rep_attr; + struct nbl_netdev_name_attr dev_name_attr; struct u64_stats_sync rep_syncp; u64 rx_packets; u64 rx_bytes; @@ -162,10 +165,8 @@ struct nbl_netdev_priv { u16 rx_queue_num; u16 queue_size; /* default traffic destination in kernel/dpdk/coexist scene */ - u16 normal_vsi; - u16 other_vsi; - u16 async_other_vsi; - u16 async_pending_vsi; + u16 data_vsi; + u16 user_vsi; s64 last_st_time; }; @@ -195,7 +196,7 @@ struct nbl_software_tool_id_entry { u8 refcount; }; -#define NBL_ST_MAX_DEVICE_NUM 64 +#define NBL_ST_MAX_DEVICE_NUM 96 struct nbl_software_tool_table { DECLARE_BITMAP(devid, NBL_ST_MAX_DEVICE_NUM); int major; @@ -205,7 +206,7 @@ struct nbl_software_tool_table { extern spinlock_t nbl_tc_flow_inst_lock; -#define NBL_TC_FLOW_INST_COUNT (8) +#define NBL_TC_FLOW_INST_COUNT (NBL_DRIVER_DEV_MAX) struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *param); void nbl_core_remove(struct nbl_adapter *adapter); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c index 154ef13822cd..7401ff81c7de 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c @@ -179,12 +179,35 @@ static int nbl_debugfs_ring_dump(struct inode *inode, struct file *file) SINGLE_FOPS_RO(ring_fops, nbl_debugfs_ring_dump); +static int nbl_stats_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_service_ops *serv_ops = NBL_DEBUGFS_MGT_TO_SERV_OPS(debugfs_mgt); + u64 rx_dropped = 0; + + serv_ops->get_rx_dropped(NBL_DEBUGFS_MGT_TO_SERV_PRIV(debugfs_mgt), &rx_dropped); + + seq_puts(m, "Dump stats:\n"); + seq_printf(m, "rx_dropped: %llu\n", rx_dropped); + + return 0; +} + +static int nbl_debugfs_stats_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_stats_dump, inode->i_private); +} + +SINGLE_FOPS_RO(stats_fops, nbl_debugfs_stats_dump); + static void nbl_serv_debugfs_setup_netops(struct nbl_debugfs_mgt *debugfs_mgt) { debugfs_create_file("txrx_ring_index", 0644, debugfs_mgt->nbl_debugfs_root, debugfs_mgt, &ring_index_fops); debugfs_create_file("txrx_ring", 0444, debugfs_mgt->nbl_debugfs_root, debugfs_mgt, &ring_fops); + debugfs_create_file("stats", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &stats_fops); } static int nbl_ring_stats_dump(struct seq_file *m, void *v) @@ -291,6 +314,50 @@ static void nbl_serv_debugfs_setup_pmdops(struct nbl_debugfs_mgt *debugfs_mgt) debugfs_mgt, &pmd_debug_fops); } +static int nbl_dvn_desc_req_dump(struct seq_file *m, void *v) +{ + u32 
desc_req;
+	struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private;
+	struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt);
+
+	desc_req = disp_ops->get_dvn_desc_req(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt));
+	seq_printf(m, "dvn_desc_req split:%d, packed:%d\n", desc_req >> 16, desc_req & 0xFFFF);
+
+	return 0;
+}
+
+static int nbl_dvn_desc_req_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nbl_dvn_desc_req_dump, inode->i_private);
+}
+
+static ssize_t nbl_dvn_desc_req_write(struct file *file, const char __user *buf,
+				      size_t count, loff_t *offp)
+{
+	struct nbl_debugfs_mgt *debugfs_mgt = file_inode(file)->i_private;
+	struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt);
+	char buffer[12] = {0};
+	size_t size = min(count, sizeof(buffer) - 1); /* keep the '\0' kstrtouint needs */
+	u32 desc_req = 0;
+
+	if (copy_from_user(buffer, buf, size))
+		return -EFAULT;
+
+	if (kstrtouint(buffer, 10, &desc_req))
+		return -EINVAL;
+
+	disp_ops->set_dvn_desc_req(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), desc_req);
+	return size;
+}
+
+COMPLETE_FOPS_RW(dvn_desc_req_fops, nbl_dvn_desc_req_open, nbl_dvn_desc_req_write);
+
+static void nbl_serv_debugfs_setup_dvn_desc_reqops(struct nbl_debugfs_mgt *debugfs_mgt)
+{
+	debugfs_create_file("dvn_desc_req", 0644, debugfs_mgt->nbl_debugfs_root,
+			    debugfs_mgt, &dvn_desc_req_fops);
+}
+
 static void nbl_serv_debugfs_setup_commonops(struct nbl_debugfs_mgt *debugfs_mgt)
 {
 	struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt);
@@ -322,6 +389,7 @@ void nbl_debugfs_func_init(void *p, struct nbl_init_param *param)
 	if (!*debugfs_mgt)
 		return;
 
+	NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter);
 	NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter);
 	NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter);
 	NBL_DEBUGFS_MGT_TO_COMMON(*debugfs_mgt) = common;
@@ -343,6 +411,10 @@ void nbl_debugfs_func_init(void *p, struct nbl_init_param *param)
 					  NBL_PMD_DEBUG))
 		nbl_serv_debugfs_setup_pmdops(*debugfs_mgt);
 
+	if (disp_ops->get_product_fix_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV((*debugfs_mgt)),
+					  NBL_DVN_DESC_REQ_SYSFS_CAP))
+		nbl_serv_debugfs_setup_dvn_desc_reqops(*debugfs_mgt);
+
 	if (param->caps.has_net) {
 		nbl_serv_debugfs_setup_netops(*debugfs_mgt);
 		if (!param->caps.is_vf)
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h
index 4fbab252ff2e..65d5ac9e61e3 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0*/
 /*
  * Copyright (c) 2022 nebula-matrix Limited.
* Author: Bennie Yan
@@ -10,6 +10,11 @@
 #include "nbl_core.h"
 
 #define NBL_DEBUGFS_MGT_TO_COMMON(debugfs_mgt) ((debugfs_mgt)->common)
+#define NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->serv_ops_tbl)
+#define NBL_DEBUGFS_MGT_TO_SERV_OPS(debugfs_mgt) \
+	(NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(debugfs_mgt)->ops)
+#define NBL_DEBUGFS_MGT_TO_SERV_PRIV(debugfs_mgt) \
+	(NBL_DEBUGFS_MGT_TO_SERV_OPS_TBL(debugfs_mgt)->priv)
 #define NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->disp_ops_tbl)
 #define NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt) \
 	(NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt)->ops)
@@ -23,6 +28,7 @@
 
 struct nbl_debugfs_mgt {
 	struct dentry *nbl_debugfs_root;
+	struct nbl_service_ops_tbl *serv_ops_tbl;
 	struct nbl_dispatch_ops_tbl *disp_ops_tbl;
 	struct nbl_channel_ops_tbl *chan_ops_tbl;
 	struct nbl_common_info *common;
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c
index 954ae7e8f07b..43376a82fb33 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c
@@ -5,6 +5,8 @@
  */
 
 #include
+#include
+#include
 #include "nbl_dev.h"
 #include "nbl_lag.h"
@@ -12,6 +14,9 @@
 static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "netif debug level (0=none,...,16=all), adapter debug_mask (<-1)");
+int adaptive_rxbuf_len_disable = 1;
+module_param(adaptive_rxbuf_len_disable, int, 0);
+MODULE_PARM_DESC(adaptive_rxbuf_len_disable, "Disable adapting the rx buffer length to the MTU");
 static int net_msix_mask_en = 1;
 module_param(net_msix_mask_en, int, 0);
 MODULE_PARM_DESC(net_msix_mask_en, "net msix interrupt mask enable");
@@ -98,7 +103,8 @@ static void nbl_dev_free_board_id(struct nbl_dev_board_id_table *index_table, u3
 	memset(&index_table->entry[i], 0, sizeof(index_table->entry[i]));
 }
 
-static void nbl_dev_set_netdev_priv(struct net_device *netdev, struct nbl_dev_vsi *vsi)
+static void nbl_dev_set_netdev_priv(struct net_device *netdev, struct nbl_dev_vsi *vsi,
+				    struct nbl_dev_vsi *user_vsi)
 {
 	struct nbl_netdev_priv *net_priv = netdev_priv(netdev);
 
@@ -107,10 +113,11 @@ static void nbl_dev_set_netdev_priv(struct net_device *netdev, struct nbl_dev_vs
 	net_priv->queue_size = vsi->queue_size;
 	net_priv->rep = NULL;
 	net_priv->netdev = netdev;
-	net_priv->normal_vsi = vsi->vsi_id;
-	net_priv->other_vsi = vsi->vsi_id;
-	net_priv->async_other_vsi = vsi->vsi_id;
-	net_priv->async_pending_vsi = vsi->vsi_id;
+	net_priv->data_vsi = vsi->vsi_id;
+	if (user_vsi)
+		net_priv->user_vsi = user_vsi->vsi_id;
+	else
+		net_priv->user_vsi = vsi->vsi_id;
 }
 
 /* ---------- Interrupt config ---------- */
@@ -147,7 +154,7 @@ static __maybe_unused void nbl_dev_notify_ipsec_hard_expire(void *priv, u16 src_
 	if (x) {
 		x->km.state = XFRM_STATE_EXPIRED;
 		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
-		xfrm_state_put(x);
+		xfrm_state_put_sync(x);
 	}
 #endif
 }
@@ -205,7 +212,7 @@ static irqreturn_t nbl_dev_clean_abnormal_event(int __always_unused irq, void *d
 	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
 
-	nbl_dev_grc_process_abnormal_event(rdma_dev);
+	nbl_dev_rdma_process_abnormal_event(rdma_dev);
 
 	if (serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
 					   NBL_SECURITY_ACCEL_CAP))
@@ -236,30 +243,6 @@ static void nbl_dev_register_net_irq(struct nbl_dev_mgt *dev_mgt, u16 queue_num)
 	msix_info->serv_info[NBL_MSIX_NET_TYPE].hw_self_mask_en = net_msix_mask_en;
 }
 
-static void nbl_dev_register_virtio_irq(struct nbl_dev_mgt *dev_mgt) -{ - struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); - struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); - - msix_info->serv_info[NBL_MSIX_VIRTIO_TYPE].num = 1; -} - -static void nbl_dev_register_factory_ctrl_irq(struct nbl_dev_mgt *dev_mgt) -{ - struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); - struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - struct nbl_common_irq_num common_irq_num = {0}; - struct nbl_ctrl_irq_num ctrl_irq_num = {0}; - - /* Register mailbox irq, draco need this. */ - serv_ops->get_common_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &common_irq_num); - msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num = common_irq_num.mbx_irq_num; - - serv_ops->get_ctrl_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &ctrl_irq_num); - msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num = ctrl_irq_num.adminq_irq_num; -} - static void nbl_dev_register_ctrl_irq(struct nbl_dev_mgt *dev_mgt) { struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); @@ -322,6 +305,7 @@ static void nbl_dev_free_net_irq(struct nbl_dev_mgt *dev_mgt) static int nbl_dev_request_mailbox_irq(struct nbl_dev_mgt *dev_mgt) { struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); u16 local_vector_id; @@ -334,8 +318,8 @@ static int nbl_dev_request_mailbox_irq(struct nbl_dev_mgt *dev_mgt) local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; irq_num = msix_info->msix_entries[local_vector_id].vector; - snprintf(dev_common->mailbox_name, sizeof(dev_common->mailbox_name) - 1, "%s-%s", - dev_name(dev), "mailbox"); + snprintf(dev_common->mailbox_name, sizeof(dev_common->mailbox_name), + "nbl_mailbox@pci:%s", pci_name(NBL_COMMON_TO_PDEV(common))); err = devm_request_irq(dev, irq_num, nbl_dev_clean_mailbox, 0, dev_common->mailbox_name, dev_mgt); if (err) { @@ -408,10 +392,12 @@ static int nbl_dev_disable_mailbox_irq(struct nbl_dev_mgt *dev_mgt) static int nbl_dev_request_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_task_info *task_info) { struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); u16 local_vector_id; u32 irq_num; + char *irq_name; int err; if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) @@ -419,9 +405,12 @@ static int nbl_dev_request_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_ta local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; irq_num = msix_info->msix_entries[local_vector_id].vector; + irq_name = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].irq_name; + snprintf(irq_name, NBL_STRING_NAME_LEN, "nbl_adminq@pci:%s", + pci_name(NBL_COMMON_TO_PDEV(common))); err = devm_request_irq(dev, irq_num, nbl_dev_clean_adminq, - 0, "adminq_irq", task_info); + 0, irq_name, task_info); if (err) { dev_err(dev, "Request adminq irq handler failed err: %d\n", err); return err; @@ -488,20 +477,25 @@ static int nbl_dev_disable_adminq_irq(struct nbl_dev_mgt *dev_mgt) static int nbl_dev_request_abnormal_irq(struct 
nbl_dev_mgt *dev_mgt) { struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); - u16 local_vector_id; + char *irq_name; u32 irq_num; int err; + u16 local_vector_id; if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num) return 0; local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id; irq_num = msix_info->msix_entries[local_vector_id].vector; + irq_name = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].irq_name; + snprintf(irq_name, NBL_STRING_NAME_LEN, "nbl_abnormal@pci:%s", + pci_name(NBL_COMMON_TO_PDEV(common))); err = devm_request_irq(dev, irq_num, nbl_dev_clean_abnormal_event, - 0, "abnormal_irq", dev_mgt); + 0, irq_name, dev_mgt); if (err) { dev_err(dev, "Request abnormal_irq irq handler failed err: %d\n", err); return err; @@ -690,6 +684,100 @@ static void nbl_dev_clear_interrupt_scheme(struct nbl_dev_mgt *dev_mgt) nbl_dev_free_msix_intr(dev_mgt); } +static void nbl_fw_tracer_clean_saved_traces_array(struct nbl_health_reporters *reps) +{ + mutex_destroy(&reps->temp_st_arr.lock); + mutex_destroy(&reps->reboot_st_arr.lock); +} + +static void nbl_dev_destroy_health(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + + if (!IS_ERR_OR_NULL(ctrl_dev->health_reporters.fw_temp_reporter)) + devl_health_reporter_destroy(ctrl_dev->health_reporters.fw_temp_reporter); + + if (!IS_ERR_OR_NULL(ctrl_dev->health_reporters.fw_reboot_reporter)) + devl_health_reporter_destroy(ctrl_dev->health_reporters.fw_reboot_reporter); + + nbl_fw_tracer_clean_saved_traces_array(&ctrl_dev->health_reporters); +} + +static void nbl_fw_temp_save_trace(struct nbl_health_reporters *reps, u8 temp, + u64 uptime) +{ + struct nbl_fw_temp_trace_data *trace_data; + + mutex_lock(&reps->temp_st_arr.lock); + trace_data = &reps->temp_st_arr.trace_data[reps->temp_st_arr.saved_traces_index]; + trace_data->timestamp = uptime; + trace_data->temp_num = temp; + + reps->temp_st_arr.saved_traces_index = + (reps->temp_st_arr.saved_traces_index + 1) & (NBL_SAVED_TRACES_NUM - 1); + mutex_unlock(&reps->temp_st_arr.lock); +} + +static void nbl_fw_reboot_save_trace(struct nbl_health_reporters *reps) +{ + struct nbl_fw_reboot_trace_data *trace_data; + struct timespec64 ts; + struct tm tm; + + ktime_get_real_ts64(&ts); + time64_to_tm(ts.tv_sec, 0, &tm); + mutex_lock(&reps->reboot_st_arr.lock); + trace_data = &reps->reboot_st_arr.trace_data[reps->reboot_st_arr.saved_traces_index]; + snprintf(trace_data->local_time, NBL_TIME_LEN, "%04ld-%02d-%02d %02d:%02d:%02d UTC", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, + tm.tm_sec); + snprintf(reps->reporter_ctx.reboot_report_time, NBL_TIME_LEN, + "%04ld-%02d-%02d %02d:%02d:%02d", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, + tm.tm_sec); + + reps->reboot_st_arr.saved_traces_index = + (reps->reboot_st_arr.saved_traces_index + 1) & (NBL_SAVED_TRACES_NUM - 1); + mutex_unlock(&reps->reboot_st_arr.lock); +} + +static void nbl_dev_health_report_temp_task(struct work_struct *work) +{ + struct nbl_fw_reporter_ctx fw_reporter_cxt; + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + report_temp_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = 
NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_health_reporters *reps = &ctrl_dev->health_reporters; + int err; + + fw_reporter_cxt.temp_num = reps->reporter_ctx.temp_num; + if (!reps->fw_temp_reporter) + return; + + err = devlink_health_report(reps->fw_temp_reporter, "nbl_fw_temp", &fw_reporter_cxt); + if (err) + dev_err(dev, "failed to report nbl_fw_temp health\n"); +} + +static void nbl_dev_health_report_reboot_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + report_reboot_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_health_reporters *reps = &ctrl_dev->health_reporters; + int err; + + if (!reps->fw_reboot_reporter) + return; + err = devlink_health_report(reps->fw_reboot_reporter, "nbl_fw_reboot", &reps->reporter_ctx); + if (err) + dev_err(dev, "failed to report nbl_fw_reboot health\n"); +} + /* ---------- Channel config ---------- */ static int nbl_dev_setup_chan_qinfo(struct nbl_dev_mgt *dev_mgt, u8 chan_type) { @@ -888,25 +976,30 @@ static void nbl_dev_fw_reset_task(struct work_struct *work) struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); if (serv_ops->check_fw_reset(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) { dev_notice(NBL_COMMON_TO_DEV(common), "FW recovered"); - chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL, - NBL_CHAN_TYPE_ADMINQ, false); nbl_dev_disable_adminq_irq(dev_mgt); nbl_dev_free_adminq_irq(dev_mgt, task_info); + msleep(NBL_DEV_FW_RESET_WAIT_TIME); // wait adminq timeout nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); nbl_dev_setup_chan_qinfo(dev_mgt, NBL_CHAN_TYPE_ADMINQ); nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); nbl_dev_request_adminq_irq(dev_mgt, task_info); nbl_dev_enable_adminq_irq(dev_mgt); + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL, + NBL_CHAN_TYPE_ADMINQ, false); + if (NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) { nbl_dev_get_port_attributes(dev_mgt); nbl_dev_enable_port(dev_mgt, true); } task_info->fw_resetting = false; + nbl_fw_reboot_save_trace(&ctrl_dev->health_reporters); + nbl_common_queue_work(&task_info->report_reboot_task, true, false); return; } @@ -1018,7 +1111,7 @@ static void nbl_dev_chan_notify_flr_resp(void *priv, u16 src_id, u16 msg_id, serv_ops->process_flr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vfid); vsi_id = serv_ops->covert_vfid_to_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vfid); - nbl_dev_grc_process_flr_event(rdma_dev, vsi_id); + nbl_dev_rdma_process_flr_event(rdma_dev, vsi_id); } static void nbl_dev_ctrl_register_flr_chan_msg(struct nbl_dev_mgt *dev_mgt) @@ -1035,26 +1128,125 @@ static void nbl_dev_ctrl_register_flr_chan_msg(struct nbl_dev_mgt *dev_mgt) nbl_dev_chan_notify_flr_resp, dev_mgt); } -static void nbl_dev_factory_task_start(struct nbl_dev_mgt *dev_mgt) +static struct nbl_dev_temp_alarm_info temp_alarm_info[NBL_TEMP_STATUS_MAX] = { + {LOGLEVEL_WARNING, "High temperature on sensors0 resumed.\n"}, + {LOGLEVEL_WARNING, "High temperature on sensors0 observed, security(WARNING).\n"}, + {LOGLEVEL_CRIT, "High temperature on sensors0 observed, security(CRITICAL).\n"}, + {LOGLEVEL_EMERG, "High temperature on sensors0 observed, 
security(EMERGENCY).\n"},
+};
+
+static void nbl_dev_handle_temp_ext(struct nbl_dev_mgt *dev_mgt, u8 *data, u16 data_len)
 {
-	struct nbl_dev_factory *factory_dev = NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt);
-	struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(factory_dev);
+	u16 temp = (u16)*data;
+	u64 uptime = 0;
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
+	enum nbl_dev_temp_status old_temp_status = ctrl_dev->temp_status;
+	enum nbl_dev_temp_status new_temp_status = NBL_TEMP_STATUS_NORMAL;
 
-	if (!task_info->timer_setup)
+	/* no resume once temp has exceeded NBL_TEMP_EMERG_THRESHOLD, even if the temp
+	 * returns to normal, because the hw has shut down.
+	 */
+	if (old_temp_status == NBL_TEMP_STATUS_EMERG)
 		return;
 
-	mod_timer(&task_info->serv_timer, round_jiffies(jiffies + task_info->serv_timer_period));
+	/* if temp is in (85-105) and status is not normal, do not resume, to avoid alarm oscillation */
+	if (temp > NBL_TEMP_NOMAL_THRESHOLD &&
+	    temp < NBL_TEMP_WARNING_THRESHOLD &&
+	    old_temp_status > NBL_TEMP_STATUS_NORMAL)
+		return;
+
+	if (temp >= NBL_TEMP_WARNING_THRESHOLD &&
+	    temp < NBL_TEMP_CRIT_THRESHOLD)
+		new_temp_status = NBL_TEMP_STATUS_WARNING;
+	else if (temp >= NBL_TEMP_CRIT_THRESHOLD &&
+		 temp < NBL_TEMP_EMERG_THRESHOLD)
+		new_temp_status = NBL_TEMP_STATUS_CRIT;
+	else if (temp >= NBL_TEMP_EMERG_THRESHOLD)
+		new_temp_status = NBL_TEMP_STATUS_EMERG;
+
+	if (new_temp_status == old_temp_status)
+		return;
+
+	ctrl_dev->temp_status = new_temp_status;
+
+	/* on a temp fall, only alarm when the alarm needs to resume */
+	if (new_temp_status < old_temp_status && new_temp_status != NBL_TEMP_STATUS_NORMAL)
+		return;
+
+	if (data_len > sizeof(u16))
+		uptime = *(u64 *)(data + sizeof(u16));
+	if (new_temp_status != NBL_TEMP_STATUS_NORMAL) {
+		ctrl_dev->health_reporters.reporter_ctx.temp_num = temp;
+		nbl_fw_temp_save_trace(&ctrl_dev->health_reporters, temp, uptime);
+		nbl_common_queue_work(&ctrl_dev->task_info.report_temp_task, false, false);
+	}
+	nbl_log(common, temp_alarm_info[new_temp_status].logvel,
+		"[%llu] %s", uptime, temp_alarm_info[new_temp_status].alarm_info);
+
+	if (new_temp_status == NBL_TEMP_STATUS_EMERG) {
+		ctrl_dev->task_info.reset_event = NBL_HW_FATAL_ERR_EVENT;
+		nbl_common_queue_work(&ctrl_dev->task_info.reset_task, false, false);
+	}
 }
 
-static void nbl_dev_factory_task_stop(struct nbl_dev_mgt *dev_mgt)
+static const char *nbl_log_level_name(int level)
 {
-	struct nbl_dev_factory *factory_dev = NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt);
-	struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(factory_dev);
+	switch (level) {
+	case NBL_EMP_ALERT_LOG_FATAL:
+		return "FATAL";
+	case NBL_EMP_ALERT_LOG_ERROR:
+		return "ERROR";
+	case NBL_EMP_ALERT_LOG_WARNING:
+		return "WARNING";
+	case NBL_EMP_ALERT_LOG_INFO:
+		return "INFO";
+	default:
+		return "UNKNOWN";
+	}
+}
 
-	if (!task_info->timer_setup)
+static void nbl_dev_handle_emp_log_ext(struct nbl_dev_mgt *dev_mgt, u8 *data, u16 data_len)
+{
+	struct nbl_emp_alert_log_event *log_event = (struct nbl_emp_alert_log_event *)data;
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	nbl_log(common, LOGLEVEL_INFO, "[FW][%llu] <%s> %.*s", log_event->uptime,
+		nbl_log_level_name(log_event->level), data_len - sizeof(u64) - sizeof(u8),
+		log_event->data);
+}
+
+static void nbl_dev_chan_notify_evt_alert_resp(void *priv, u16 src_id, u16 msg_id,
+					       void *data, u32 data_len)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv;
+	struct
nbl_chan_param_emp_alert_event *alert_param = + (struct nbl_chan_param_emp_alert_event *)data; + + switch (alert_param->type) { + case NBL_EMP_EVENT_TEMP_ALERT: + nbl_dev_handle_temp_ext(dev_mgt, alert_param->data, alert_param->len); + return; + case NBL_EMP_EVENT_LOG_ALERT: + nbl_dev_handle_emp_log_ext(dev_mgt, alert_param->data, alert_param->len); return; + default: + return; + } +} - del_timer_sync(&task_info->serv_timer); +static void nbl_dev_ctrl_register_emp_ext_alert_chan_msg(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + /* draco use mailbox communication with emp */ + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_ADMINQ_EXT_ALERT, + nbl_dev_chan_notify_evt_alert_resp, dev_mgt); } static int nbl_dev_setup_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) @@ -1108,7 +1300,15 @@ static int nbl_dev_setup_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) nbl_dev_recovery_abnormal_task); task_info->timer_setup = true; } + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_HEALTH_REPORT_TEMP_CAP)) + nbl_common_alloc_task(&task_info->report_temp_task, + &nbl_dev_health_report_temp_task); + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_HEALTH_REPORT_REBOOT_CAP)) + nbl_common_alloc_task(&task_info->report_reboot_task, + &nbl_dev_health_report_reboot_task); if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_TASK_RESET_CTRL_CAP)) nbl_common_alloc_task(&task_info->reset_task, &nbl_dev_ctrl_reset_task); @@ -1167,69 +1367,13 @@ static void nbl_dev_remove_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_TASK_RESET_CTRL_CAP)) nbl_common_release_task(&task_info->reset_task); -} - -static int nbl_dev_setup_factory_dev_task(struct nbl_dev_mgt *dev_mgt) -{ - struct nbl_dev_factory *factory_dev = NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt); - struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); - struct nbl_task_info *task_info = NBL_DEV_FACTORY_TO_TASK_INFO(factory_dev); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - - task_info->dev_mgt = dev_mgt; - - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_CLEAN_MAILBOX_CAP)) - nbl_common_alloc_task(&common_dev->clean_mbx_task, nbl_dev_clean_mailbox_task); - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_FW_HB_CAP)) { - nbl_common_alloc_task(&task_info->fw_hb_task, nbl_dev_fw_heartbeat_task); - task_info->timer_setup = true; - } + NBL_TASK_HEALTH_REPORT_TEMP_CAP)) + nbl_common_release_task(&task_info->report_temp_task); if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_FW_RESET_CAP)) { - nbl_common_alloc_delayed_task(&task_info->fw_reset_task, nbl_dev_fw_reset_task); - task_info->timer_setup = true; - } - - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_CLEAN_ADMINDQ_CAP)) { - nbl_common_alloc_task(&task_info->clean_adminq_task, nbl_dev_clean_adminq_task); - task_info->timer_setup = true; - } - - if (task_info->timer_setup) { - timer_setup(&task_info->serv_timer, nbl_dev_ctrl_task_timer, 0); - task_info->serv_timer_period = HZ; - } - - return 0; -} - -static void nbl_dev_remove_factory_dev_task(struct nbl_dev_mgt *dev_mgt) -{ - struct nbl_dev_factory *factory_dev = 
NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt); - struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); - struct nbl_task_info *task_info = NBL_DEV_FACTORY_TO_TASK_INFO(factory_dev); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_FW_RESET_CAP)) - nbl_common_release_delayed_task(&task_info->fw_reset_task); - - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_FW_HB_CAP)) - nbl_common_release_task(&task_info->fw_hb_task); - - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_CLEAN_ADMINDQ_CAP)) - nbl_common_release_task(&task_info->clean_adminq_task); - - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_TASK_CLEAN_MAILBOX_CAP)) - nbl_common_release_task(&common_dev->clean_mbx_task); + NBL_TASK_HEALTH_REPORT_REBOOT_CAP)) + nbl_common_release_task(&task_info->report_reboot_task); } static int nbl_dev_update_template_config(struct nbl_dev_mgt *dev_mgt) @@ -1246,6 +1390,7 @@ static int nbl_dev_setup_common_dev(struct nbl_adapter *adapter, struct nbl_init struct nbl_dev_common *common_dev; struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_board_port_info board_info = { 0 }; int board_id; common_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter), @@ -1279,6 +1424,9 @@ static int nbl_dev_setup_common_dev(struct nbl_adapter *adapter, struct nbl_init &NBL_COMMON_TO_ETH_MODE(common), &NBL_COMMON_TO_ETH_ID(common), &NBL_COMMON_TO_LOGIC_ETH_ID(common)); + serv_ops->get_board_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &board_info); + + NBL_COMMON_TO_ETH_MAX_SPEED(common) = nbl_port_speed_to_speed(board_info.eth_speed); nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_MAILBOX, &common_dev->clean_mbx_task); NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = common_dev; @@ -1325,119 +1473,189 @@ static void nbl_dev_remove_common_dev(struct nbl_adapter *adapter) NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = NULL; } -static int nbl_dev_setup_factory_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +static void nbl_devlink_fmsg_fill_temp_trace(struct devlink_fmsg *fmsg, + struct nbl_fw_temp_trace_data *trace_data) { - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_dev_factory *factory_dev; - struct nbl_dev_common *common_dev; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); - int i, ret = 0; - u32 board_key; - - board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | - dev_mgt->common->pdev->bus->number; - if (param->caps.is_nic) - NBL_COMMON_TO_BOARD_ID(common) = - nbl_dev_alloc_board_id(&board_id_table, board_key); - - common_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter), - sizeof(struct nbl_dev_common), GFP_KERNEL); - if (!common_dev) - goto alloc_common_dev_fail; - common_dev->dev_mgt = dev_mgt; - NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = common_dev; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp); + devlink_fmsg_u8_pair_put(fmsg, "high temperature", trace_data->temp_num); + devlink_fmsg_obj_nest_end(fmsg); +} - factory_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_factory), GFP_KERNEL); - if (!factory_dev) - goto alloc_factory_dev_fail; - 
NBL_DEV_FACTORY_TO_TASK_INFO(factory_dev)->adapter = adapter; - NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt) = factory_dev; +static int nbl_fw_temp_trace_get_entry(struct nbl_dev_ctrl *ctrl_dev, struct devlink_fmsg *fmsg) +{ + struct nbl_health_reporters *reps = &ctrl_dev->health_reporters; + struct nbl_fw_temp_trace_data *trace_data = reps->temp_st_arr.trace_data; + u8 index, start_index, end_index; + u8 saved_traces_index; - nbl_dev_register_factory_ctrl_irq(dev_mgt); + if (!trace_data[0].timestamp) + return -ENOMSG; - ret = serv_ops->init_chip_factory(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - if (ret) { - dev_err(dev, "factory dev chip_init failed\n"); - goto chip_init_fail; + mutex_lock(&reps->temp_st_arr.lock); + saved_traces_index = reps->temp_st_arr.saved_traces_index; + if (trace_data[saved_traces_index].timestamp) + start_index = saved_traces_index; + else + start_index = 0; + devlink_fmsg_arr_pair_nest_start(fmsg, "dump nbl fw traces"); + end_index = (saved_traces_index - 1) & (NBL_SAVED_TRACES_NUM - 1); + index = start_index; + for (; index != end_index; ) { + nbl_devlink_fmsg_fill_temp_trace(fmsg, &trace_data[index]); + index = (index + 1) & (NBL_SAVED_TRACES_NUM - 1); } + nbl_devlink_fmsg_fill_temp_trace(fmsg, &trace_data[index]); + devlink_fmsg_arr_pair_nest_end(fmsg); + mutex_unlock(&reps->temp_st_arr.lock); - /* Register both mailbox and adminq, leonis need adminq and draco need mailbox */ - for (i = 0; i < NBL_CHAN_TYPE_MAX; i++) { - ret = nbl_dev_setup_chan_qinfo(dev_mgt, i); - if (ret) { - dev_err(dev, "factory dev setup chan qinfo failed\n"); - goto setup_chan_qinfo_fail; - } + return 0; +} - ret = nbl_dev_setup_chan_queue(dev_mgt, i); - if (ret) { - dev_err(dev, "factory dev setup chan queue failed\n"); - goto setup_chan_queue_fail; - } - } +static int nbl_fw_temp_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct nbl_dev_mgt *dev_mgt = devlink_health_reporter_priv(reporter); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); - ret = nbl_dev_setup_factory_dev_task(dev_mgt); - if (ret) { - dev_err(dev, "factory dev task failed\n"); - goto setup_ctrl_dev_task_fail; + return nbl_fw_temp_trace_get_entry(ctrl_dev, fmsg); +} + +static int nbl_fw_temp_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + if (priv_ctx) { + struct nbl_fw_reporter_ctx *fw_reporter_ctx = + (struct nbl_fw_reporter_ctx *)priv_ctx; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u32_pair_put(fmsg, "high temperature", fw_reporter_ctx->temp_num); + devlink_fmsg_obj_nest_end(fmsg); } + return 0; +} - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP)) - serv_ops->setup_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table()); +static void nbl_fw_tracer_init_saved_traces_array(struct nbl_health_reporters *reps) +{ + reps->temp_st_arr.saved_traces_index = 0; + reps->reboot_st_arr.saved_traces_index = 0; + mutex_init(&reps->temp_st_arr.lock); + mutex_init(&reps->reboot_st_arr.lock); +} - return 0; +static struct devlink_health_reporter_ops nbl_fw_temp_reporter_ops = { + .name = "nbl_fw_temp", + .diagnose = nbl_fw_temp_reporter_diagnose, + .dump = nbl_fw_temp_reporter_dump, +}; -setup_ctrl_dev_task_fail: -setup_chan_queue_fail: - while (--i + 1) - nbl_dev_remove_chan_queue(dev_mgt, i); -setup_chan_qinfo_fail: - serv_ops->destroy_chip_factory(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); -chip_init_fail: - 
devm_kfree(dev, factory_dev); - NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = NULL; -alloc_factory_dev_fail: - devm_kfree(dev, common_dev); - NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = NULL; -alloc_common_dev_fail: - nbl_dev_free_board_id(&board_id_table, board_key); - return ret; +static void nbl_devlink_fmsg_fill_reboot_trace(struct devlink_fmsg *fmsg, + struct nbl_fw_reboot_trace_data *trace_data) +{ + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_string_pair_put(fmsg, "reboot time", trace_data->local_time); + devlink_fmsg_obj_nest_end(fmsg); } -static bool nbl_dev_remove_factory_ctrl_dev(struct nbl_adapter *adapter) +static int nbl_fw_reboot_trace_get_entry(struct nbl_dev_ctrl *ctrl_dev, struct devlink_fmsg *fmsg) { - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - struct nbl_dev_factory **factory_dev = &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt); - struct nbl_dev_common **common_dev = &NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - u32 board_key; - int i; + struct nbl_health_reporters *reps = &ctrl_dev->health_reporters; + struct nbl_fw_reboot_trace_data *trace_data = reps->reboot_st_arr.trace_data; + u8 index, start_index, end_index; + u8 saved_traces_index; - if (!*factory_dev) - return false; + if (!trace_data[0].local_time[0]) + return -ENOMSG; - board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | - dev_mgt->common->pdev->bus->number; - if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP)) - serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table()); + mutex_lock(&reps->reboot_st_arr.lock); + saved_traces_index = reps->reboot_st_arr.saved_traces_index; + if (trace_data[saved_traces_index].local_time[0]) + start_index = saved_traces_index; + else + start_index = 0; + devlink_fmsg_arr_pair_nest_start(fmsg, "dump nbl fw traces"); + end_index = (saved_traces_index - 1) & (NBL_SAVED_TRACES_NUM - 1); + index = start_index; + for (; index != end_index; ) { + nbl_devlink_fmsg_fill_reboot_trace(fmsg, &trace_data[index]); + index = (index + 1) & (NBL_SAVED_TRACES_NUM - 1); + } + nbl_devlink_fmsg_fill_reboot_trace(fmsg, &trace_data[index]); + devlink_fmsg_arr_pair_nest_end(fmsg); + mutex_unlock(&reps->reboot_st_arr.lock); - nbl_dev_remove_factory_dev_task(dev_mgt); + return 0; +} - for (i = 0; i < NBL_CHAN_TYPE_MAX; i++) - nbl_dev_remove_chan_queue(dev_mgt, i); +static int nbl_fw_reboot_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct nbl_dev_mgt *dev_mgt = devlink_health_reporter_priv(reporter); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); - serv_ops->destroy_chip_factory(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + return nbl_fw_reboot_trace_get_entry(ctrl_dev, fmsg); +} - devm_kfree(NBL_ADAPTER_TO_DEV(adapter), *factory_dev); - *factory_dev = NULL; - devm_kfree(NBL_ADAPTER_TO_DEV(adapter), *common_dev); - *common_dev = NULL; +static int nbl_fw_reboot_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + if (priv_ctx) { + struct nbl_fw_reporter_ctx *fw_reporter_ctx = + (struct nbl_fw_reporter_ctx *)priv_ctx; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_string_pair_put(fmsg, "reboot time", + fw_reporter_ctx->reboot_report_time); + devlink_fmsg_obj_nest_end(fmsg); + } + return 0; +} - nbl_dev_free_board_id(&board_id_table, board_key); +static struct 
devlink_health_reporter_ops nbl_fw_reboot_reporter_ops = { + .name = "nbl_fw_reboot", + .diagnose = nbl_fw_reboot_reporter_diagnose, + .dump = nbl_fw_reboot_reporter_dump, +}; - return true; +static void nbl_setup_devlink_reporter(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_health_reporters *reps = &ctrl_dev->health_reporters; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct devlink *devlink = dev_common->devlink; + struct devlink_health_reporter_ops *fw_reboot_ops; + struct devlink_health_reporter_ops *fw_temp_ops; + const u64 graceful_period = 0; + + fw_temp_ops = &nbl_fw_temp_reporter_ops; + fw_reboot_ops = &nbl_fw_reboot_reporter_ops; + + nbl_fw_tracer_init_saved_traces_array(&ctrl_dev->health_reporters); + reps->fw_temp_reporter = + devl_health_reporter_create(devlink, fw_temp_ops, graceful_period, dev_mgt); + if (IS_ERR(reps->fw_temp_reporter)) { + dev_err(dev, "failed to create fw temp reporter err = %ld\n", + PTR_ERR(reps->fw_temp_reporter)); + return; + } + reps->fw_reboot_reporter = + devl_health_reporter_create(devlink, fw_reboot_ops, graceful_period, dev_mgt); + if (IS_ERR(reps->fw_reboot_reporter)) { + dev_err(dev, "failed to create fw reboot reporter err = %ld\n", + PTR_ERR(reps->fw_reboot_reporter)); + if (reps->fw_temp_reporter) + devl_health_reporter_destroy(reps->fw_temp_reporter); + return; + } +} + +static int nbl_dev_health_init(struct nbl_dev_mgt *dev) +{ + nbl_setup_devlink_reporter(dev); + return 0; } static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) @@ -1487,6 +1705,9 @@ static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_p } } + nbl_dev_ctrl_register_flr_chan_msg(dev_mgt); + nbl_dev_ctrl_register_emp_ext_alert_chan_msg(dev_mgt); + ret = nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); if (ret) { dev_err(dev, "ctrl dev setup chan queue failed\n"); @@ -1511,6 +1732,7 @@ static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_p serv_ops->cfg_eth_bond_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true); serv_ops->cfg_fd_update_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true); + serv_ops->cfg_mirror_outputport_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true); return 0; @@ -1544,6 +1766,7 @@ static void nbl_dev_remove_ctrl_dev(struct nbl_adapter *adapter) dev_mgt->common->pdev->bus->number; serv_ops->cfg_fd_update_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false); serv_ops->cfg_eth_bond_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false); + serv_ops->cfg_mirror_outputport_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false); if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP)) serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table()); @@ -1606,6 +1829,25 @@ static netdev_tx_t nbl_dev_start_xmit(struct sk_buff *skb, struct net_device *ne return pt_ops->start_xmit(skb, netdev); } +static int +nbl_dev_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **frame, u32 flags) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_resource_pt_ops *pt_ops = NBL_DEV_MGT_TO_RES_PT_OPS(dev_mgt); + + return pt_ops->xdp_xmit(netdev, n, frame, flags); +} + +static int nbl_dev_set_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + 
struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_xdp(netdev, xdp); +} + static netdev_tx_t nbl_dev_rep_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); @@ -1833,6 +2075,16 @@ nbl_dev_netdev_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type, return serv_ops->rep_setup_tc(netdev, type, type_data); } +static int +nbl_dev_netdev_rep_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->change_rep_mtu(netdev, new_mtu); +} + static int nbl_dev_netdev_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) { @@ -1843,6 +2095,16 @@ nbl_dev_netdev_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_v return serv_ops->get_vf_config(netdev, vf_id, ivi); } +static int +nbl_dev_netdev_get_vf_stats(struct net_device *netdev, int vf_id, struct ifla_vf_stats *vf_stats) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_vf_stats(netdev, vf_id, vf_stats); +} + static u16 nbl_dev_netdev_select_queue(struct net_device *netdev, struct sk_buff *skb, struct net_device *sb_dev) @@ -1913,7 +2175,7 @@ static const struct net_device_ops netdev_ops_leonis_rep = { .ndo_vlan_rx_kill_vid = nbl_dev_netdev_rep_rx_kill_vid, .ndo_features_check = nbl_dev_netdev_features_check, .ndo_setup_tc = nbl_dev_netdev_rep_setup_tc, - .ndo_change_mtu = nbl_dev_netdev_change_mtu, + .ndo_change_mtu = nbl_dev_netdev_rep_change_mtu, .ndo_get_phys_port_name = nbl_dev_rep_get_phys_port_name, .ndo_get_port_parent_id = nbl_dev_rep_get_port_parent_id, }; @@ -1922,6 +2184,8 @@ static const struct net_device_ops netdev_ops_leonis_pf = { .ndo_open = nbl_dev_netdev_open, .ndo_stop = nbl_dev_netdev_stop, .ndo_start_xmit = nbl_dev_start_xmit, + .ndo_xdp_xmit = nbl_dev_xdp_xmit, + .ndo_bpf = nbl_dev_set_xdp, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats64 = nbl_dev_netdev_get_stats64, .ndo_set_rx_mode = nbl_dev_netdev_set_rx_mode, @@ -1939,6 +2203,7 @@ static const struct net_device_ops netdev_ops_leonis_pf = { .ndo_set_vf_mac = nbl_dev_netdev_set_vf_mac, .ndo_set_vf_rate = nbl_dev_netdev_set_vf_rate, .ndo_get_vf_config = nbl_dev_netdev_get_vf_config, + .ndo_get_vf_stats = nbl_dev_netdev_get_vf_stats, .ndo_select_queue = nbl_dev_netdev_select_queue, .ndo_set_vf_vlan = nbl_dev_netdev_set_vf_vlan, .ndo_setup_tc = nbl_dev_netdev_setup_tc, @@ -1965,8 +2230,8 @@ static const struct net_device_ops netdev_ops_leonis_vf = { .ndo_get_phys_port_name = nbl_dev_ndo_get_phys_port_name, }; -static void nbl_dev_setup_netops_leonis(void *priv, struct net_device *netdev, - struct nbl_init_param *param) +static int nbl_dev_setup_netops_leonis(void *priv, struct net_device *netdev, + struct nbl_init_param *param) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); @@ -1987,6 +2252,7 @@ static void nbl_dev_setup_netops_leonis(void *priv, struct net_device *netdev, serv_ops->set_netdev_ops(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &netdev_ops_leonis_rep, false); } + return 0; } 
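The callbacks in this file are deliberately thin trampolines: each one recovers the nbl_adapter from the net_device, resolves the relevant ops table through the dev_mgt accessor macros, and forwards the call, keeping the data path in pt_ops and the management path in serv_ops. A minimal sketch of the pattern, assuming a hypothetical serv_ops hook do_foo (the NBL_* accessor macros are the driver's own):

static int nbl_dev_do_foo(struct net_device *netdev, int arg)
{
	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);

	/* do_foo is illustrative only; real callbacks forward the same way */
	return serv_ops->do_foo(netdev, arg);
}

Every ndo_, ethtool, and dcbnl entry point added by this patch follows this shape, so a new service-layer feature only touches the ops tables and the service implementation, never the netdev glue.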
static void nbl_dev_remove_netops(struct net_device *netdev) @@ -2184,199 +2450,333 @@ static int nbl_dev_set_coalesce(struct net_device *netdev, struct ethtool_coales struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->set_coalesce(netdev, ec, kernel_ec, extack); + return serv_ops->set_coalesce(netdev, ec, kernel_ec, extack); +} + +static int nbl_dev_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxnfc(netdev, cmd, rule_locs); +} + +static int nbl_dev_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_rxnfc(netdev, cmd); +} + +static u32 nbl_dev_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh_indir_size(netdev); +} + +static u32 nbl_dev_get_rxfh_key_size(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh_key_size(netdev); +} + +static int nbl_dev_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh(netdev, indir, key, hfunc); +} + +static int +nbl_dev_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_rxfh(netdev, indir, key, hfunc); +} + +static u32 nbl_dev_get_msglevel(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_msglevel(netdev); +} + +static void nbl_dev_set_msglevel(struct net_device *netdev, u32 msglevel) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->set_msglevel(netdev, msglevel); +} + +static int nbl_dev_get_regs_len(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_regs_len(netdev); +} + +static void nbl_dev_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct 
nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_ethtool_dump_regs(netdev, regs, p); +} + +static int nbl_dev_get_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_per_queue_coalesce(netdev, q_num, ec); +} + +static int nbl_dev_set_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_per_queue_coalesce(netdev, q_num, ec); +} + +static void nbl_dev_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->self_test(netdev, eth_test, data); +} + +static u32 nbl_dev_get_priv_flags(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_priv_flags(netdev); } -static int nbl_dev_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +static int nbl_dev_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_rxnfc(netdev, cmd, rule_locs); + return serv_ops->set_priv_flags(netdev, priv_flags); } -static int nbl_dev_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +static int nbl_dev_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->set_rxnfc(netdev, cmd); + return serv_ops->set_pause_param(netdev, param); } -static u32 nbl_dev_get_rxfh_indir_size(struct net_device *netdev) +static void nbl_dev_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_rxfh_indir_size(netdev); + serv_ops->get_pause_param(netdev, param); } -static u32 nbl_dev_get_rxfh_key_size(struct net_device *netdev) +static int nbl_dev_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_rxfh_key_size(netdev); + return serv_ops->set_fec_param(netdev, fec); } -static int 
nbl_dev_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +static int nbl_dev_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_rxfh(netdev, indir, key, hfunc); + return serv_ops->get_fec_param(netdev, fec); } -static u32 nbl_dev_get_msglevel(struct net_device *netdev) +static int nbl_dev_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *ts_info) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_msglevel(netdev); + return serv_ops->get_ts_info(netdev, ts_info); } -static void nbl_dev_set_msglevel(struct net_device *netdev, u32 msglevel) +static int nbl_dev_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - serv_ops->set_msglevel(netdev, msglevel); + return serv_ops->set_phys_id(netdev, state); } -static int nbl_dev_get_regs_len(struct net_device *netdev) +static int nbl_dev_nway_reset(struct net_device *netdev) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_regs_len(netdev); + return serv_ops->nway_reset(netdev); } -static void nbl_dev_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) +static int nbl_dev_flash_device(struct net_device *netdev, struct ethtool_flash *flash) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - serv_ops->get_ethtool_dump_regs(netdev, regs, p); + return serv_ops->flash_device(netdev, flash); } -static int nbl_dev_get_per_queue_coalesce(struct net_device *netdev, - u32 q_num, struct ethtool_coalesce *ec) +static int nbl_dev_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_per_queue_coalesce(netdev, q_num, ec); + return serv_ops->get_dump_flag(netdev, dump); } -static int nbl_dev_set_per_queue_coalesce(struct net_device *netdev, - u32 q_num, struct ethtool_coalesce *ec) +static int nbl_dev_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->set_per_queue_coalesce(netdev, q_num, ec); + return serv_ops->get_dump_data(netdev, dump, buffer); } -static void nbl_dev_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +static int nbl_dev_set_dump(struct net_device *netdev, struct ethtool_dump *dump) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = 
NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - serv_ops->self_test(netdev, eth_test, data); + return serv_ops->set_dump(netdev, dump); } -static u32 nbl_dev_get_priv_flags(struct net_device *netdev) +static int nbl_dev_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_priv_flags(netdev); + return serv_ops->set_wol(netdev, wol); } -static int nbl_dev_set_priv_flags(struct net_device *netdev, u32 priv_flags) +static void nbl_dev_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->set_priv_flags(netdev, priv_flags); + serv_ops->get_wol(netdev, wol); } -static int nbl_dev_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +static void nbl_dev_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *eth_ctrl_stats) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->set_pause_param(netdev, param); + serv_ops->get_eth_ctrl_stats(netdev, eth_ctrl_stats); } -static void nbl_dev_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +static void +nbl_dev_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *pause_stats) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - serv_ops->get_pause_param(netdev, param); + serv_ops->get_pause_stats(netdev, pause_stats); } -static int nbl_dev_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) +static void +nbl_dev_get_eth_mac_stats(struct net_device *netdev, struct ethtool_eth_mac_stats *eth_mac_stats) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->set_fec_param(netdev, fec); + serv_ops->get_eth_mac_stats(netdev, eth_mac_stats); } -static int nbl_dev_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) +static void nbl_dev_get_fec_stats(struct net_device *netdev, struct ethtool_fec_stats *fec_stats) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_fec_param(netdev, fec); + serv_ops->get_fec_stats(netdev, fec_stats); } -static int nbl_dev_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *ts_info) +static int nbl_dev_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *link_ext_state_info) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->get_ts_info(netdev, ts_info); + return 
serv_ops->get_link_ext_state(netdev, link_ext_state_info); } -static int nbl_dev_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +static void nbl_dev_get_link_ext_stats(struct net_device *netdev, + struct ethtool_link_ext_stats *stats) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->set_phys_id(netdev, state); + serv_ops->get_link_ext_stats(netdev, stats); } -static int nbl_dev_nway_reset(struct net_device *netdev) +static void nbl_dev_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **range) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - return serv_ops->nway_reset(netdev); + serv_ops->get_rmon_stats(netdev, rmon_stats, range); } static const struct ethtool_ops ethtool_ops_leonis_rep = { @@ -2417,6 +2817,7 @@ static const struct ethtool_ops ethtool_ops_leonis_pf = { .get_rxfh_indir_size = nbl_dev_get_rxfh_indir_size, .get_rxfh_key_size = nbl_dev_get_rxfh_key_size, .get_rxfh = nbl_dev_get_rxfh, + .set_rxfh = nbl_dev_set_rxfh, .get_msglevel = nbl_dev_get_msglevel, .set_msglevel = nbl_dev_set_msglevel, .get_regs_len = nbl_dev_get_regs_len, @@ -2433,6 +2834,19 @@ static const struct ethtool_ops ethtool_ops_leonis_pf = { .get_ts_info = nbl_dev_get_ts_info, .set_phys_id = nbl_dev_set_phys_id, .nway_reset = nbl_dev_nway_reset, + .flash_device = nbl_dev_flash_device, + .get_dump_flag = nbl_dev_get_dump_flag, + .get_dump_data = nbl_dev_get_dump_data, + .set_dump = nbl_dev_set_dump, + .set_wol = nbl_dev_set_wol, + .get_wol = nbl_dev_get_wol, + .get_eth_ctrl_stats = nbl_dev_get_eth_ctrl_stats, + .get_pause_stats = nbl_dev_get_pause_stats, + .get_eth_mac_stats = nbl_dev_get_eth_mac_stats, + .get_fec_stats = nbl_dev_get_fec_stats, + .get_link_ext_state = nbl_dev_get_link_ext_state, + .get_link_ext_stats = nbl_dev_get_link_ext_stats, + .get_rmon_stats = nbl_dev_get_rmon_stats, }; static const struct ethtool_ops ethtool_ops_leonis_vf = { @@ -2457,6 +2871,7 @@ static const struct ethtool_ops ethtool_ops_leonis_vf = { .get_rxfh_indir_size = nbl_dev_get_rxfh_indir_size, .get_rxfh_key_size = nbl_dev_get_rxfh_key_size, .get_rxfh = nbl_dev_get_rxfh, + .set_rxfh = nbl_dev_set_rxfh, .get_msglevel = nbl_dev_get_msglevel, .set_msglevel = nbl_dev_set_msglevel, .get_regs_len = nbl_dev_get_regs_len, @@ -2466,8 +2881,8 @@ static const struct ethtool_ops ethtool_ops_leonis_vf = { .get_ts_info = nbl_dev_get_ts_info, }; -static void nbl_dev_setup_ethtool_ops_leonis(void *priv, struct net_device *netdev, - struct nbl_init_param *param) +static int nbl_dev_setup_ethtool_ops_leonis(void *priv, struct net_device *netdev, + struct nbl_init_param *param) { bool is_vf = param->caps.is_vf; bool is_rep = param->is_rep; @@ -2478,6 +2893,7 @@ static void nbl_dev_setup_ethtool_ops_leonis(void *priv, struct net_device *netd netdev->ethtool_ops = &ethtool_ops_leonis_vf; else netdev->ethtool_ops = &ethtool_ops_leonis_pf; + return 0; } static void nbl_dev_remove_ethtool_ops(struct net_device *netdev) @@ -2486,13 +2902,6 @@ static void nbl_dev_remove_ethtool_ops(struct net_device *netdev) } #ifdef CONFIG_TLS_DEVICE -#define NBL_DEV_KTLS_OPS_TBL \ -do { \ - NBL_DEV_KTLS_OPS(tls_dev_add, serv_ops->add_tls_dev); \ - 
NBL_DEV_KTLS_OPS(tls_dev_del, serv_ops->del_tls_dev); \ - NBL_DEV_KTLS_OPS(tls_dev_resync, serv_ops->resync_tls_dev); \ -} while (0) - static int nbl_dev_tls_dev_add(struct net_device *netdev, struct sock *sk, enum tls_offload_ctx_dir direction, struct tls_crypto_info *crypto_info, @@ -2532,21 +2941,18 @@ static const struct tlsdev_ops ktls_ops = { .tls_dev_resync = nbl_dev_tls_dev_resync, }; -static void nbl_dev_setup_ktls_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, - struct nbl_init_param *param) +static int nbl_dev_setup_ktls_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev) { struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - if (param->is_rep) - return; - if (!serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_SECURITY_ACCEL_CAP)) - return; + return 0; netdev->hw_features |= NETIF_F_HW_TLS_RX; netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->tlsdev_ops = &ktls_ops; + return 0; } static void nbl_dev_remove_ktls_ops(struct net_device *netdev) @@ -2558,9 +2964,9 @@ static void nbl_dev_remove_ktls_ops(struct net_device *netdev) } #else -static void nbl_dev_setup_ktls_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, - struct nbl_init_param *param) +static int nbl_dev_setup_ktls_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev) { + return 0; } static void nbl_dev_remove_ktls_ops(struct net_device *netdev) {} @@ -2626,18 +3032,14 @@ static const struct xfrmdev_ops xfrm_ops = { .xdo_dev_state_advance_esn = nbl_dev_xdo_dev_state_advance_esn, }; -static void nbl_dev_setup_xfrm_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, - struct nbl_init_param *param) +static int nbl_dev_setup_xfrm_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev) { struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); enum nbl_flex_cap_type cap_type = NBL_SECURITY_ACCEL_CAP; - if (param->is_rep) - return; - if (!serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cap_type)) - return; + return 0; chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, nbl_dev_notify_ipsec_hard_expire, dev_mgt); @@ -2654,6 +3056,7 @@ static void nbl_dev_setup_xfrm_ops(struct nbl_dev_mgt *dev_mgt, struct net_devic netdev->hw_enc_features |= NETIF_F_GSO_ESP; netdev->xfrmdev_ops = &xfrm_ops; + return 0; } static void nbl_dev_remove_xfrm_ops(struct net_device *netdev) @@ -2682,6 +3085,174 @@ static void nbl_dev_remove_xfrm_ops(struct net_device *netdev) } #endif +static int nbl_dev_ieee_setets(struct net_device *netdev, struct ieee_ets *ets) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->ieee_setets(netdev, ets); +} + +static int nbl_dev_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->ieee_getets(netdev, ets); +} + +static int nbl_dev_ieee_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + 
return serv_ops->ieee_setpfc(netdev, pfc); +} + +static int nbl_dev_ieee_getpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->ieee_getpfc(netdev, pfc); +} + +static int nbl_dev_ieee_setapp(struct net_device *netdev, struct dcb_app *app) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->ieee_setapp(netdev, app); +} + +static int nbl_dev_ieee_delapp(struct net_device *netdev, struct dcb_app *app) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->ieee_delapp(netdev, app); +} + +static u8 nbl_dev_getdcbx(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->ieee_getdcbx(netdev); +} + +static u8 nbl_dev_setdcbx(struct net_device *netdev, u8 mode) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->ieee_setdcbx(netdev, mode); +} + +static int nbl_dev_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->dcbnl_getnumtcs(netdev, tcid, num); +} + +static void nbl_dev_setpfccfg(struct net_device *netdev, int prio, u8 set) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->dcbnl_setpfccfg(netdev, prio, set); +} + +static void nbl_dev_getpfccfg(struct net_device *netdev, int prio, u8 *setting) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->dcbnl_getpfccfg(netdev, prio, setting); +} + +static u8 nbl_dev_getstate(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->dcbnl_getstate(netdev); +} + +static u8 nbl_dev_setstate(struct net_device *netdev, u8 state) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->dcbnl_setstate(netdev, state); +} + +static u8 nbl_dev_getpfcstate(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = 
NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->dcbnl_getpfcstate(netdev); +} + +static u8 nbl_dev_getcap(struct net_device *netdev, int capid, u8 *cap) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->dcbnl_getcap(netdev, capid, cap); +} + +static const struct dcbnl_rtnl_ops dcbnl_ops_leonis_pf = { + .ieee_setets = nbl_dev_ieee_setets, + .ieee_getets = nbl_dev_ieee_getets, + .ieee_setpfc = nbl_dev_ieee_setpfc, + .ieee_getpfc = nbl_dev_ieee_getpfc, + .ieee_setapp = nbl_dev_ieee_setapp, + .ieee_delapp = nbl_dev_ieee_delapp, + .getdcbx = nbl_dev_getdcbx, + .setdcbx = nbl_dev_setdcbx, + .getnumtcs = nbl_dev_getnumtcs, + .setpfccfg = nbl_dev_setpfccfg, + .getpfccfg = nbl_dev_getpfccfg, + .getstate = nbl_dev_getstate, + .setstate = nbl_dev_setstate, + .getpfcstate = nbl_dev_getpfcstate, + .getcap = nbl_dev_getcap, +}; + +static int nbl_dev_setup_dcbnl_ops_leonis(void *priv, struct net_device *netdev, + struct nbl_init_param *param) +{ + bool is_vf = param->caps.is_vf; + + if (!is_vf) + netdev->dcbnl_ops = &dcbnl_ops_leonis_pf; + return 0; +} + +static void nbl_dev_remove_dcbnl_ops(struct net_device *netdev) +{ + netdev->dcbnl_ops = NULL; +} + static void nbl_dev_set_eth_mac_addr(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev) { struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); @@ -2699,19 +3270,28 @@ static int nbl_dev_cfg_netdev(struct net_device *netdev, struct nbl_dev_mgt *dev { struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_net_ops *net_dev_ops = NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt); + u64 vlan_features = 0; + int ret = 0; if (param->pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; if (!param->is_rep) netdev->watchdog_timeo = 5 * HZ; + vlan_features = register_result->vlan_features ? 
register_result->vlan_features + : register_result->features; netdev->hw_features |= nbl_features_to_netdev_features(register_result->hw_features); netdev->features |= nbl_features_to_netdev_features(register_result->features); - netdev->vlan_features |= netdev->features; + netdev->vlan_features |= nbl_features_to_netdev_features(vlan_features); + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; + + netdev->priv_flags |= IFF_UNICAST_FLT; SET_DEV_MIN_MTU(netdev, ETH_MIN_MTU); SET_DEV_MAX_MTU(netdev, register_result->max_mtu); netdev->mtu = min_t(u16, register_result->max_mtu, NBL_DEFAULT_MTU); + serv_ops->change_mtu(netdev, netdev->mtu); if (is_valid_ether_addr(register_result->mac)) eth_hw_addr_set(netdev, register_result->mac); @@ -2724,20 +3304,48 @@ static int nbl_dev_cfg_netdev(struct net_device *netdev, struct nbl_dev_mgt *dev netdev->needed_headroom = serv_ops->get_tx_headroom(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - net_dev_ops->setup_netdev_ops(dev_mgt, netdev, param); - net_dev_ops->setup_ethtool_ops(dev_mgt, netdev, param); - nbl_dev_setup_ktls_ops(dev_mgt, netdev, param); - nbl_dev_setup_xfrm_ops(dev_mgt, netdev, param); + ret = net_dev_ops->setup_netdev_ops(dev_mgt, netdev, param); + if (ret) + goto set_ops_fail; + + ret = net_dev_ops->setup_ethtool_ops(dev_mgt, netdev, param); + if (ret) + goto set_ethtool_fail; + + ret = net_dev_ops->setup_dcbnl_ops(dev_mgt, netdev, param); + if (ret) + goto set_dcbnl_fail; + + if (!param->is_rep) { + ret = nbl_dev_setup_ktls_ops(dev_mgt, netdev); + if (ret) + goto set_ktls_fail; + ret = nbl_dev_setup_xfrm_ops(dev_mgt, netdev); + if (ret) + goto set_xfrm_fail; + } nbl_dev_set_eth_mac_addr(dev_mgt, netdev); return 0; + +set_xfrm_fail: + nbl_dev_remove_ktls_ops(netdev); +set_ktls_fail: + nbl_dev_remove_dcbnl_ops(netdev); +set_dcbnl_fail: + nbl_dev_remove_ethtool_ops(netdev); +set_ethtool_fail: + nbl_dev_remove_netops(netdev); +set_ops_fail: + return ret; } static void nbl_dev_reset_netdev(struct net_device *netdev) { nbl_dev_remove_ktls_ops(netdev); nbl_dev_remove_xfrm_ops(netdev); + nbl_dev_remove_dcbnl_ops(netdev); nbl_dev_remove_ethtool_ops(netdev); nbl_dev_remove_netops(netdev); } @@ -2892,10 +3500,16 @@ static int nbl_dev_vsi_common_start(struct nbl_dev_mgt *dev_mgt, struct net_devi ret = serv_ops->setup_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); if (ret) { - dev_err(dev, "Setup q2vsi failed\n"); + dev_err(dev, "Setup rss failed\n"); goto set_rss_fail; } + ret = serv_ops->setup_rss_indir(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "Setup rss indir failed\n"); + goto setup_rss_indir_fail; + } + if (vsi->use_independ_irq) { ret = serv_ops->enable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); if (ret) { @@ -2915,6 +3529,7 @@ static int nbl_dev_vsi_common_start(struct nbl_dev_mgt *dev_mgt, struct net_devi init_tx_rate_fail: serv_ops->disable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); enable_napi_fail: +setup_rss_indir_fail: serv_ops->remove_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); set_rss_fail: serv_ops->remove_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); @@ -2979,7 +3594,8 @@ static int nbl_dev_vsi_data_start(void *dev_priv, struct net_device *netdev, u16 vid; vid = vsi->register_result.vlan_tci & VLAN_VID_MASK; - ret = serv_ops->start_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, vsi->vsi_id, vid); + ret = serv_ops->start_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, vsi->vsi_id, vid, + 
vsi->register_result.trusted); if (ret) { dev_err(dev, "Set netdev flow table failed\n"); goto set_flow_fail; @@ -3387,41 +4003,6 @@ struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt, u8 vsi_index return NULL; } -static int nbl_dev_vsi_handle_switch_event(u16 type, void *event_data, void *callback_data) -{ - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; - struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - struct nbl_service_traffic_switch info = {0}; - struct nbl_event_dev_mode_switch_data *data = - (struct nbl_event_dev_mode_switch_data *)event_data; - struct nbl_dev_vsi *data_vsi = NULL, *user_vsi = NULL; - int op = data->op; - - data_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; - user_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; - - info.normal_vsi = data_vsi->vsi_id; - info.sync_other_vsi = data_vsi->vsi_id; - info.async_other_vsi = data_vsi->vsi_id; - info.has_lacp = data_vsi->feature.has_lacp; - info.has_lldp = data_vsi->feature.has_lldp; - - /* user enable promisc must be user vsi */ - if (op == NBL_DEV_KERNEL_TO_USER || op == NBL_DEV_SET_USER_PROMISC_MODE) { - info.normal_vsi = user_vsi->vsi_id; - if (data->promosic) { - info.sync_other_vsi = user_vsi->vsi_id; - info.async_other_vsi = user_vsi->vsi_id; - info.promisc = data->promosic; - } - } - - data->ret = serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &info); - - return 0; -} - static int nbl_dev_vsi_handle_netdev_event(u16 type, void *event_data, void *callback_data) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; @@ -3451,6 +4032,7 @@ static struct nbl_dev_net_ops netdev_ops[NBL_PRODUCT_MAX] = { { .setup_netdev_ops = nbl_dev_setup_netops_leonis, .setup_ethtool_ops = nbl_dev_setup_ethtool_ops_leonis, + .setup_dcbnl_ops = nbl_dev_setup_dcbnl_ops_leonis, }, }; @@ -3557,59 +4139,25 @@ static void nbl_dev_remove_net_dev(struct nbl_adapter *adapter) struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_net **net_dev = &NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_dev_vsi *vsi; - int i = 0; - - if (!*net_dev) - return; - - for (i = 0; i < NBL_VSI_MAX; i++) { - vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; - - if (!vsi) - continue; - - vsi->ops->remove(dev_mgt, vsi); - } - nbl_dev_vsi_destroy(dev_mgt); - - nbl_dev_unregister_net(dev_mgt); - - devm_kfree(dev, *net_dev); - *net_dev = NULL; -} - -static int nbl_dev_setup_virtio_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) -{ - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - struct nbl_dev_virtio *virtio_dev; - struct device *dev = NBL_ADAPTER_TO_DEV(adapter); - - if (!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_VIRTIO_CAP)) - return 0; + int i = 0; - virtio_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_virtio), GFP_KERNEL); - if (!virtio_dev) - return -ENOMEM; - NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt) = virtio_dev; + if (!*net_dev) + return; - nbl_dev_register_virtio_irq(dev_mgt); - virtio_dev->device_msix = 0; + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; - return 0; -} + if (!vsi) + continue; -static void nbl_dev_remove_virtio_dev(struct nbl_adapter *adapter) -{ - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - struct nbl_dev_virtio *virtio_dev = 
NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt); + vsi->ops->remove(dev_mgt, vsi); + } + nbl_dev_vsi_destroy(dev_mgt); - if (!virtio_dev) - return; + nbl_dev_unregister_net(dev_mgt); - devm_kfree(NBL_ADAPTER_TO_DEV(adapter), virtio_dev); - NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt) = NULL; + devm_kfree(dev, *net_dev); + *net_dev = NULL; } static int nbl_dev_setup_dev_mgt(struct nbl_common_info *common, struct nbl_dev_mgt **dev_mgt) @@ -3665,10 +4213,6 @@ int nbl_dev_init(void *p, struct nbl_init_param *param) NBL_DEV_MGT_TO_SERV_OPS_TBL(*dev_mgt) = serv_ops_tbl; NBL_DEV_MGT_TO_CHAN_OPS_TBL(*dev_mgt) = chan_ops_tbl; - /* If we have factory_dev, no need to go further */ - if (param->caps.has_factory_ctrl) - return nbl_dev_setup_factory_ctrl_dev(adapter, param); - ret = nbl_dev_setup_common_dev(adapter, param); if (ret) goto setup_common_dev_fail; @@ -3683,14 +4227,9 @@ int nbl_dev_init(void *p, struct nbl_init_param *param) if (ret) goto setup_net_dev_fail; - ret = nbl_dev_setup_virtio_dev(adapter, param); - if (ret) - goto setup_virtio_dev_fail; - ret = nbl_dev_setup_rdma_dev(adapter, param); if (ret) goto setup_rdma_dev_fail; - ret = nbl_dev_setup_ops(dev, dev_ops_tbl, adapter); if (ret) goto setup_ops_fail; @@ -3700,8 +4239,6 @@ int nbl_dev_init(void *p, struct nbl_init_param *param) setup_ops_fail: nbl_dev_remove_rdma_dev(adapter); setup_rdma_dev_fail: - nbl_dev_remove_virtio_dev(adapter); -setup_virtio_dev_fail: nbl_dev_remove_net_dev(adapter); setup_net_dev_fail: nbl_dev_remove_ctrl_dev(adapter); @@ -3723,12 +4260,7 @@ void nbl_dev_remove(void *p) nbl_dev_remove_ops(dev, dev_ops_tbl); - /* If we succeed in factory_dev remove, no need to go further */ - if (nbl_dev_remove_factory_ctrl_dev(adapter)) - return; - nbl_dev_remove_rdma_dev(adapter); - nbl_dev_remove_virtio_dev(adapter); nbl_dev_remove_net_dev(adapter); nbl_dev_remove_ctrl_dev(adapter); nbl_dev_remove_common_dev(adapter); @@ -3833,90 +4365,6 @@ static void nbl_dev_handle_fatal_err(struct nbl_dev_mgt *dev_mgt) nbl_info(common, NBL_DEBUG_MAIN, "dev in fatal_err status."); } -static struct nbl_dev_temp_alarm_info temp_alarm_info[NBL_TEMP_STATUS_MAX] = { - {LOGLEVEL_WARNING, "High temperature on sensors0 resumed.\n"}, - {LOGLEVEL_WARNING, "High temperature on sensors0 observed, security(WARNING).\n"}, - {LOGLEVEL_CRIT, "High temperature on sensors0 observed, security(CRITICAL).\n"}, - {LOGLEVEL_EMERG, "High temperature on sensors0 observed, security(EMERGENCY).\n"}, -}; - -static void nbl_dev_handle_temp_ext(struct nbl_dev_mgt *dev_mgt, u8 *data) -{ - u16 temp = (u16)*data; - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); - enum nbl_dev_temp_status old_temp_status = ctrl_dev->temp_status; - enum nbl_dev_temp_status new_temp_status = NBL_TEMP_STATUS_NORMAL; - - /* no resume if temp exceed NBL_TEMP_EMERG_THRESHOLD, even if the temp resume nomal. - * Because the hw has shutdown. 
- */ - if (old_temp_status == NBL_TEMP_STATUS_EMERG) - return; - - /* if temp in (85-105) and not in normal_status, no resume to avoid alarm oscillate */ - if (temp > NBL_TEMP_NOMAL_THRESHOLD && - temp < NBL_TEMP_WARNING_THRESHOLD && - old_temp_status > NBL_TEMP_STATUS_NORMAL) - return; - - if (temp >= NBL_TEMP_WARNING_THRESHOLD && - temp < NBL_TEMP_CRIT_THRESHOLD) - new_temp_status = NBL_TEMP_STATUS_WARNING; - else if (temp >= NBL_TEMP_CRIT_THRESHOLD && - temp < NBL_TEMP_EMERG_THRESHOLD) - new_temp_status = NBL_TEMP_STATUS_CRIT; - else if (temp >= NBL_TEMP_EMERG_THRESHOLD) - new_temp_status = NBL_TEMP_STATUS_EMERG; - - if (new_temp_status == old_temp_status) - return; - - ctrl_dev->temp_status = new_temp_status; - - /* temp fall only alarm when the alarm need to resume */ - if (new_temp_status < old_temp_status && new_temp_status != NBL_TEMP_STATUS_NORMAL) - return; - - nbl_log(common, temp_alarm_info[new_temp_status].logvel, - temp_alarm_info[new_temp_status].alarm_info); - - if (new_temp_status == NBL_TEMP_STATUS_EMERG) { - ctrl_dev->task_info.reset_event = NBL_HW_FATAL_ERR_EVENT; - nbl_common_queue_work(&ctrl_dev->task_info.reset_task, false, false); - } -} - -static void nbl_dev_chan_notify_evt_alert_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) -{ - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; - struct nbl_chan_param_emp_alert_event *alert_param = - (struct nbl_chan_param_emp_alert_event *)data; - - switch (alert_param->type) { - case NBL_EMP_EVENT_TEMP_ALERT: - nbl_dev_handle_temp_ext(dev_mgt, alert_param->data); - return; - default: - return; - } -} - -static void nbl_dev_ctrl_register_emp_ext_alert_chan_msg(struct nbl_dev_mgt *dev_mgt) -{ - struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); - - /* draco use mailbox communication with emp */ - if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), - NBL_CHAN_TYPE_MAILBOX)) - return; - - chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), - NBL_CHAN_MSG_ADMINQ_EXT_ALERT, - nbl_dev_chan_notify_evt_alert_resp, dev_mgt); -} - /* ---------- Dev start process ---------- */ static int nbl_dev_start_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) { @@ -3939,8 +4387,7 @@ static int nbl_dev_start_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_p if (err) goto enable_adminq_irq_err; - nbl_dev_ctrl_register_flr_chan_msg(dev_mgt); - nbl_dev_ctrl_register_emp_ext_alert_chan_msg(dev_mgt); + nbl_dev_health_init(dev_mgt); nbl_dev_get_port_attributes(dev_mgt); nbl_dev_init_port(dev_mgt); @@ -3969,6 +4416,7 @@ static void nbl_dev_stop_ctrl_dev(struct nbl_adapter *adapter) nbl_dev_ctrl_task_stop(dev_mgt); nbl_dev_enable_port(dev_mgt, false); nbl_dev_disable_adminq_irq(dev_mgt); + nbl_dev_destroy_health(dev_mgt); nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); nbl_dev_disable_abnormal_irq(dev_mgt); nbl_dev_free_abnormal_irq(dev_mgt); @@ -4092,10 +4540,12 @@ static int nbl_dev_eswitch_load_rep(struct nbl_adapter *adapter, int num_vfs) struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_init_param param; struct nbl_dev_rep *rep_dev; int i, ret = 0; u16 vf_base_vsi_id; + char net_dev_name[IFNAMSIZ]; rep_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_rep), GFP_KERNEL); if (!rep_dev) @@ -4123,10 +4573,11 @@ 
static int nbl_dev_eswitch_load_rep(struct nbl_adapter *adapter, int num_vfs) nbl_dev_get_rep_queue_num(adapter, &rep_dev->rep[i].base_queue_id, &rep_dev->rep[i].rep_queue_num); - /* add rep_id sysfs here */ - nbl_net_addr_rep_attr(&rep_dev->rep[i].rep_attr, i); + /* add dev_name sysfs here */ + snprintf(net_dev_name, IFNAMSIZ, "%s_%d", net_dev->netdev->name, i); + nbl_net_add_name_attr(&rep_dev->rep[i].dev_name_attr, net_dev_name); ret = sysfs_create_file(&rep_dev->rep[i].netdev->dev.kobj, - &rep_dev->rep[i].rep_attr.attr); + &rep_dev->rep[i].dev_name_attr.attr); if (ret) { dev_err(dev, "nbl rep add rep_id net-fs failed"); return ret; @@ -4160,16 +4611,16 @@ static int nbl_dev_eswitch_unload_rep(struct nbl_dev_mgt *dev_mgt) } serv_ops->unset_rep_netdev_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - serv_ops->free_rep_data(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); for (i = 0; i < rep_dev->num_vfs; i++) { netdev = rep_data[i].netdev; if (!netdev) continue; - sysfs_remove_file(&netdev->dev.kobj, &rep_data[i].rep_attr.attr); + sysfs_remove_file(&netdev->dev.kobj, &rep_data[i].dev_name_attr.attr); unregister_netdev(netdev); nbl_dev_reset_netdev(netdev); free_netdev(netdev); } + serv_ops->free_rep_data(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); devm_kfree(dev, rep_data); devm_kfree(dev, rep_dev); NBL_DEV_MGT_TO_REP_DEV(dev_mgt) = NULL; @@ -4260,6 +4711,29 @@ int nbl_dev_setup_vf_config(void *p, int num_vfs) return serv_ops->setup_vf_config(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), num_vfs, false); } +void nbl_dev_register_dev_name(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + + /* get pf_name then register it to AF */ + serv_ops->register_dev_name(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + common->vsi_id, net_dev->netdev->name); +} + +void nbl_dev_get_dev_name(void *p, char *dev_name) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + + serv_ops->get_dev_name(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->vsi_id, dev_name); +} + void nbl_dev_remove_vf_config(void *p) { struct nbl_adapter *adapter = (struct nbl_adapter *)p; @@ -4361,28 +4835,43 @@ static void nbl_dev_remove_rep_res(struct nbl_dev_mgt *dev_mgt) } } +static int nbl_dev_setup_devlink_port(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_init_param *param) +{ + return 0; +} + +static void nbl_dev_remove_devlink_port(struct nbl_dev_mgt *dev_mgt) +{ +} + static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) { struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); +#ifdef CONFIG_PCI_ATS + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common); +#endif struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct net_device *netdev = net_dev->netdev; struct nbl_netdev_priv *net_priv; struct device *dev = 
NBL_DEV_MGT_TO_DEV(dev_mgt); struct nbl_dev_vsi *vsi; + struct nbl_dev_vsi *user_vsi; struct nbl_dev_vsi *xdp_vsi; - struct nbl_event_callback callback = {0}; struct nbl_ring_param ring_param = {0}; u16 net_vector_id, queue_num, xdp_queue_num = 0; int ret; + char dev_name[IFNAMSIZ] = {0}; vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_DATA); if (!vsi) return -EFAULT; + user_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_USER); queue_num = vsi->queue_num; netdev = alloc_etherdev_mqs(sizeof(struct nbl_netdev_priv), queue_num, queue_num); if (!netdev) { @@ -4394,7 +4883,7 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa SET_NETDEV_DEV(netdev, dev); net_priv = netdev_priv(netdev); net_priv->adapter = adapter; - nbl_dev_set_netdev_priv(netdev, vsi); + nbl_dev_set_netdev_priv(netdev, vsi, user_vsi); net_dev->netdev = netdev; common->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); @@ -4445,6 +4934,12 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa goto set_queue_fail; } + ret = serv_ops->init_hw_stats(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) { + dev_err(dev, "init hw stats failed\n"); + goto init_hw_stats_fail; + } + ret = nbl_init_lag(dev_mgt, param); if (ret) { dev_err(dev, "init bond failed\n"); @@ -4467,6 +4962,13 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa } netif_carrier_off(netdev); + + ret = nbl_dev_setup_devlink_port(dev_mgt, netdev, param); + if (ret) { + dev_err(dev, "Setup devlink_port failed\n"); + goto setup_devlink_port_fail; + } + ret = register_netdev(netdev); if (ret) { dev_err(dev, "Register netdev failed\n"); @@ -4474,6 +4976,9 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa } if (!param->caps.is_vf) { + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_MIRROR_SYSFS_CAP)) + nbl_netdev_add_mirror_sysfs(netdev, net_dev); if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_QOS_SYSFS_CAP)) nbl_netdev_add_sysfs(netdev, net_dev); @@ -4483,27 +4988,48 @@ static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_pa if (ret) goto setup_vf_res_fail; } - - callback.callback = nbl_dev_vsi_handle_switch_event; - callback.callback_data = dev_mgt; - nbl_event_register(NBL_EVENT_DEV_MODE_SWITCH, &callback, - NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } else { + /* vf device need get pf name as its base name */ + nbl_net_add_name_attr(&net_dev->dev_attr.dev_name_attr, dev_name); +#ifdef CONFIG_PCI_ATS + if (pdev->physfn) { + nbl_dev_get_dev_name(adapter, dev_name); + memcpy(net_dev->dev_attr.dev_name_attr.net_dev_name, dev_name, IFNAMSIZ); + ret = sysfs_create_file(&netdev->dev.kobj, + &net_dev->dev_attr.dev_name_attr.attr); + if (ret) { + dev_err(dev, "nbl vf device add dev_name:%s net-fs failed", + dev_name); + goto add_vf_sys_attr_fail; + } + dev_dbg(dev, "nbl vf device get dev_name:%s", dev_name); + } else { + dev_dbg(dev, "nbl vf device no need change name"); + } +#endif } set_bit(NBL_DOWN, adapter->state); return 0; - setup_vf_res_fail: nbl_netdev_remove_sysfs(net_dev); + nbl_netdev_remove_mirror_sysfs(net_dev); +#ifdef CONFIG_PCI_ATS +add_vf_sys_attr_fail: +#endif unregister_netdev(netdev); register_netdev_fail: + nbl_dev_remove_devlink_port(dev_mgt); +setup_devlink_port_fail: nbl_dev_free_net_irq(dev_mgt); request_irq_fail: vsi->ops->stop(dev_mgt, vsi); start_vsi_fail: nbl_deinit_lag(dev_mgt); enable_bond_fail: + 
serv_ops->remove_hw_stats(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +init_hw_stats_fail: serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); set_queue_fail: vsi->ops->netdev_destroy(dev_mgt, vsi); @@ -4523,10 +5049,10 @@ static void nbl_dev_stop_net_dev(struct nbl_adapter *adapter) struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_event_callback callback = {0}; struct nbl_event_callback netdev_callback = {0}; struct nbl_dev_vsi *vsi; struct net_device *netdev; + char dev_name[IFNAMSIZ] = {0}; if (!net_dev) return; @@ -4538,18 +5064,23 @@ static void nbl_dev_stop_net_dev(struct nbl_adapter *adapter) return; if (!common->is_vf) { - callback.callback = nbl_dev_vsi_handle_switch_event; - callback.callback_data = dev_mgt; - nbl_event_unregister(NBL_EVENT_DEV_MODE_SWITCH, &callback, - NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common)); - serv_ops->remove_vf_resource(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); nbl_netdev_remove_sysfs(net_dev); + nbl_netdev_remove_mirror_sysfs(net_dev); + } else { + /* remove vf dev_name attr */ + if (memcmp(net_dev->dev_attr.dev_name_attr.net_dev_name, dev_name, IFNAMSIZ)) + nbl_net_remove_dev_attr(net_dev); } nbl_dev_remove_rep_res(dev_mgt); - + serv_ops->change_mtu(netdev, 0); unregister_netdev(netdev); + rtnl_lock(); + netif_device_detach(netdev); + rtnl_unlock(); + + nbl_dev_remove_devlink_port(dev_mgt); netdev_callback.callback = nbl_dev_vsi_handle_netdev_event; netdev_callback.callback_data = dev_mgt; @@ -4563,6 +5094,8 @@ static void nbl_dev_stop_net_dev(struct nbl_adapter *adapter) nbl_deinit_lag(dev_mgt); + serv_ops->remove_hw_stats(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->remove_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); serv_ops->free_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); @@ -4723,20 +5256,28 @@ static void nbl_dev_devlink_free(void *devlink_ptr) static int nbl_dev_setup_devlink(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) { struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); struct devlink *devlink; struct devlink_ops *devlink_ops; struct nbl_devlink_priv *priv; int ret = 0; - if (param->caps.is_vf || param->product_type == NBL_VIRTIO_TYPE) + if (param->caps.is_vf) return 0; devlink_ops = devm_kzalloc(dev, sizeof(*devlink_ops), GFP_KERNEL); if (!devlink_ops) return -ENOMEM; - devlink_ops->eswitch_mode_set = nbl_dev_set_devlink_eswitch_mode; - devlink_ops->eswitch_mode_get = nbl_dev_get_devlink_eswitch_mode; + + if (!param->caps.is_vf) { + devlink_ops->eswitch_mode_set = nbl_dev_set_devlink_eswitch_mode; + devlink_ops->eswitch_mode_get = nbl_dev_get_devlink_eswitch_mode; + devlink_ops->info_get = serv_ops->get_devlink_info; + + if (param->caps.has_ctrl) + devlink_ops->flash_update = serv_ops->update_devlink_flash; + } devlink = devlink_alloc(devlink_ops, sizeof(*priv), dev); @@ -4856,116 +5397,11 @@ static void nbl_dev_suspend_common_dev(struct nbl_adapter *adapter) nbl_dev_free_mailbox_irq(dev_mgt); } -static int nbl_dev_start_virtio_dev(struct nbl_adapter *adapter) -{ - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - struct nbl_dev_common *dev_common = 
NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); - struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); - struct nbl_dev_virtio *virtio_dev = NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt); - struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - u16 rdma_vector_id; - - if (!virtio_dev) - return 0; - - serv_ops->configure_virtio_dev_msix(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - virtio_dev->device_msix); - - rdma_vector_id = msix_info->serv_info[NBL_MSIX_RDMA_TYPE].base_vector_id; - serv_ops->configure_rdma_msix_off(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), rdma_vector_id); - - serv_ops->configure_virtio_dev_ready(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); - - return 0; -} - -static void nbl_dev_stop_virtio_dev(struct nbl_adapter *adapter) -{ - // not need to do anything -} - -static int nbl_dev_start_factory_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) -{ - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - int ret = 0; - - ret = nbl_dev_configure_msix_map(dev_mgt); - if (ret) - goto config_msix_map_err; - - ret = nbl_dev_init_interrupt_scheme(dev_mgt); - if (ret) - goto init_interrupt_scheme_err; - - ret = nbl_dev_request_mailbox_irq(dev_mgt); - if (ret) - goto mailbox_request_irq_err; - - ret = nbl_dev_enable_mailbox_irq(dev_mgt); - if (ret) - goto enable_mailbox_irq_err; - - ret = nbl_dev_request_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt)->task_info); - if (ret) - goto request_adminq_irq_err; - - ret = nbl_dev_enable_adminq_irq(dev_mgt); - if (ret) - goto enable_adminq_irq_err; - - ret = nbl_dev_setup_devlink(dev_mgt, param); - if (ret) - goto setup_devlink_err; - - nbl_dev_factory_task_start(dev_mgt); - - return 0; - -setup_devlink_err: - nbl_dev_disable_adminq_irq(dev_mgt); -enable_adminq_irq_err: - nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt)->task_info); -request_adminq_irq_err: - nbl_dev_disable_mailbox_irq(dev_mgt); -enable_mailbox_irq_err: - nbl_dev_free_mailbox_irq(dev_mgt); -mailbox_request_irq_err: - nbl_dev_clear_interrupt_scheme(dev_mgt); -init_interrupt_scheme_err: - nbl_dev_destroy_msix_map(dev_mgt); -config_msix_map_err: - return ret; -} - -static bool nbl_dev_stop_factory_ctrl_dev(struct nbl_adapter *adapter) -{ - struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); - - if (!NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt)) - return false; - - nbl_dev_remove_devlink(dev_mgt); - - nbl_dev_factory_task_stop(dev_mgt); - nbl_dev_disable_adminq_irq(dev_mgt); - nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt)->task_info); - nbl_dev_free_mailbox_irq(dev_mgt); - nbl_dev_disable_mailbox_irq(dev_mgt); - nbl_dev_clear_interrupt_scheme(dev_mgt); - nbl_dev_destroy_msix_map(dev_mgt); - - return true; -} - int nbl_dev_start(void *p, struct nbl_init_param *param) { struct nbl_adapter *adapter = (struct nbl_adapter *)p; int ret = 0; - /* If we have factory_dev, no need to go further */ - if (param->caps.has_factory_ctrl) - return nbl_dev_start_factory_ctrl_dev(adapter, param); - ret = nbl_dev_start_common_dev(adapter, param); if (ret) goto start_common_dev_fail; @@ -4980,22 +5416,15 @@ int nbl_dev_start(void *p, struct nbl_init_param *param) if (ret) goto start_net_dev_fail; - ret = nbl_dev_start_virtio_dev(adapter); - if (ret) - goto start_virtio_dev_fail; - ret = nbl_dev_start_rdma_dev(adapter); if (ret) goto start_rdma_dev_fail; - if (param->caps.has_user) nbl_dev_start_user_dev(adapter); return 0; start_rdma_dev_fail: - nbl_dev_stop_virtio_dev(adapter); 
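With the factory and virtio short-circuit paths gone, nbl_dev_start() reduces to a plain ladder: common, ctrl, net, rdma, then the optional user dev, each failure unwinding everything started before it in reverse order. The shape of that ladder as a sketch; the start_xxx/stop_xxx helpers are assumed names, not driver API, and prototypes are omitted:

struct example_adapter;

int example_dev_start(struct example_adapter *ad)
{
	int ret;

	ret = start_common(ad);
	if (ret)
		return ret;
	ret = start_ctrl(ad);
	if (ret)
		goto err_common;
	ret = start_net(ad);
	if (ret)
		goto err_ctrl;
	ret = start_rdma(ad);
	if (ret)
		goto err_net;
	return 0;

err_net:
	stop_net(ad);
err_ctrl:
	stop_ctrl(ad);
err_common:
	stop_common(ad);
	return ret;
}

Each label names the deepest stage that must be undone, so adding a stage costs exactly one goto and one label; the diff above keeps this property while dropping the virtio unwind step.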
-start_virtio_dev_fail: nbl_dev_stop_net_dev(adapter); start_net_dev_fail: nbl_dev_stop_ctrl_dev(adapter); @@ -5009,12 +5438,7 @@ void nbl_dev_stop(void *p) { struct nbl_adapter *adapter = (struct nbl_adapter *)p; - /* If we succeed in factory_dev stop, no need to go further */ - if (nbl_dev_stop_factory_ctrl_dev(adapter)) - return; - nbl_dev_stop_user_dev(adapter); - nbl_dev_stop_virtio_dev(adapter); nbl_dev_stop_rdma_dev(adapter); nbl_dev_stop_ctrl_dev(adapter); nbl_dev_stop_net_dev(adapter); @@ -5027,10 +5451,6 @@ int nbl_dev_resume(void *p) struct nbl_init_param *param = &adapter->init_param; int ret = 0; - /* If we have factory_dev, no need to go further */ - if (param->caps.has_factory_ctrl) - return nbl_dev_start_factory_ctrl_dev(adapter, param); - ret = nbl_dev_resume_common_dev(adapter, param); if (ret) goto start_common_dev_fail; @@ -5064,15 +5484,16 @@ int nbl_dev_resume(void *p) int nbl_dev_suspend(void *p) { struct nbl_adapter *adapter = (struct nbl_adapter *)p; - - /* If we succeed in factory_dev stop, no need to go further */ - if (nbl_dev_stop_factory_ctrl_dev(adapter)) - return 0; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); nbl_dev_suspend_rdma_dev(adapter); nbl_dev_stop_ctrl_dev(adapter); nbl_dev_suspend_net_dev(adapter); nbl_dev_suspend_common_dev(adapter); + pci_save_state(adapter->pdev); + pci_wake_from_d3(adapter->pdev, common->wol_ena); + pci_set_power_state(adapter->pdev, PCI_D3hot); + return 0; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h index 061405ea86c9..917cc74e4671 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -15,10 +15,8 @@ #define NBL_DEV_MGT_TO_COMMON(dev_mgt) ((dev_mgt)->common) #define NBL_DEV_MGT_TO_DEV(dev_mgt) NBL_COMMON_TO_DEV(NBL_DEV_MGT_TO_COMMON(dev_mgt)) #define NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) ((dev_mgt)->common_dev) -#define NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt) ((dev_mgt)->factory_dev) #define NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) ((dev_mgt)->ctrl_dev) #define NBL_DEV_MGT_TO_NET_DEV(dev_mgt) ((dev_mgt)->net_dev) -#define NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt) ((dev_mgt)->virtio_dev) #define NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) ((dev_mgt)->rdma_dev) #define NBL_DEV_MGT_TO_USER_DEV(dev_mgt) ((dev_mgt)->user_dev) #define NBL_DEV_MGT_TO_REP_DEV(dev_mgt) ((dev_mgt)->rep_dev) @@ -49,6 +47,11 @@ #define NBL_DEV_BATCH_RESET_FUNC_NUM (32) #define NBL_DEV_BATCH_RESET_USEC (1000000) +#define NBL_TIME_LEN (32) +#define NBL_SAVED_TRACES_NUM (16) + +#define NBL_DEV_FW_RESET_WAIT_TIME (3500) + enum nbl_reset_status { NBL_RESET_INIT, NBL_RESET_SEND, @@ -67,6 +70,8 @@ struct nbl_task_info { struct work_struct adapt_desc_gother_task; struct work_struct clean_abnormal_irq_task; struct work_struct recovery_abnormal_task; + struct work_struct report_temp_task; + struct work_struct report_reboot_task; struct work_struct reset_task; enum nbl_reset_event reset_event; enum nbl_reset_status reset_status[NBL_MAX_FUNC]; @@ -95,6 +100,7 @@ enum nbl_msix_serv_type { }; struct nbl_msix_serv_info { + char irq_name[NBL_STRING_NAME_LEN]; u16 num; u16 base_vector_id; /* true: hw report msix, hw need to mask actively */ @@ -131,13 +137,55 @@ enum nbl_dev_temp_status { NBL_TEMP_STATUS_MAX }; +enum nbl_emp_log_level { + NBL_EMP_ALERT_LOG_FATAL = 0, + NBL_EMP_ALERT_LOG_ERROR = 1, + NBL_EMP_ALERT_LOG_WARNING = 2, + NBL_EMP_ALERT_LOG_INFO = 3, +}; + +struct nbl_fw_reporter_ctx { + u64 timestamp; + u32 temp_num; + char reboot_report_time[NBL_TIME_LEN]; +}; + +struct nbl_fw_temp_trace_data { + u64 timestamp; + u32 temp_num; +}; + +struct nbl_fw_reboot_trace_data { + char local_time[NBL_TIME_LEN]; +}; + +struct nbl_health_reporters { + struct { + struct nbl_fw_temp_trace_data trace_data[NBL_SAVED_TRACES_NUM]; + u8 saved_traces_index; + struct mutex lock; /* protect reading data of temp_trace_data*/ + } temp_st_arr; + + struct { + struct nbl_fw_reboot_trace_data trace_data[NBL_SAVED_TRACES_NUM]; + u8 saved_traces_index; + struct mutex lock; /* protect reading data of reboot_trace_data*/ + } reboot_st_arr; + + struct nbl_fw_reporter_ctx reporter_ctx; + struct devlink_health_reporter *fw_temp_reporter; + struct devlink_health_reporter *fw_reboot_reporter; +}; + struct nbl_dev_ctrl { struct nbl_task_info task_info; enum nbl_dev_temp_status temp_status; + struct nbl_health_reporters health_reporters; }; enum nbl_dev_emp_alert_event { NBL_EMP_EVENT_TEMP_ALERT = 1, + NBL_EMP_EVENT_LOG_ALERT = 2, NBL_EMP_EVENT_MAX }; @@ -161,14 +209,21 @@ struct nbl_dev_vsi_controller { }; struct nbl_dev_net_ops { - void (*setup_netdev_ops)(void *priv, struct net_device *netdev, + int (*setup_netdev_ops)(void *priv, struct net_device *netdev, + struct nbl_init_param *param); + int (*setup_ethtool_ops)(void *priv, struct net_device *netdev, struct nbl_init_param *param); - void (*setup_ethtool_ops)(void *priv, struct net_device *netdev, - struct nbl_init_param *param); + int (*setup_dcbnl_ops)(void *priv, struct net_device *netdev, + struct nbl_init_param *param); +}; + +struct nbl_dev_attr_info { + struct nbl_netdev_name_attr dev_name_attr; }; struct nbl_dev_net { struct net_device *netdev; + struct nbl_dev_attr_info dev_attr; struct 
nbl_lag_member *lag_mem; struct nbl_dev_net_ops *ops; u8 lag_inited; @@ -179,6 +234,7 @@ struct nbl_dev_net { u16 user_queue_num; u16 total_vfs; struct nbl_net_qos qos_config; + struct nbl_net_mirror mirror_config; }; struct nbl_dev_virtio { @@ -192,7 +248,7 @@ struct nbl_dev_rdma_event_data { * * callback_data will always be dev_mgt, which will not be released, so don't bother. */ - struct nbl_event_rdma_bond_update event_data; + struct nbl_event_param event_data; void *callback_data; u16 type; }; @@ -204,9 +260,9 @@ struct nbl_dev_rdma { struct work_struct abnormal_event_task; - struct work_struct lag_event_task; - struct list_head lag_event_param_list; - struct mutex lag_event_lock; /* Protect lag_event_param_list */ + struct work_struct event_task; + struct list_head event_param_list; + struct mutex event_lock; /* Protect event_param_list */ int adev_index; u32 mem_type; @@ -218,7 +274,10 @@ struct nbl_dev_rdma { bool bond_shaping_configed; bool is_halting; - bool pf_event_ready; + bool event_ready; + bool mirror_enable; + bool has_abnormal_event_task; + atomic_t adev_busy; }; struct nbl_dev_emp_console { @@ -238,6 +297,7 @@ struct nbl_dev_user_iommu_group { struct rb_root dma_tree; struct iommu_group *iommu_group; struct device *dev; + struct device *mdev; struct vfio_device *vdev; }; @@ -256,6 +316,8 @@ struct nbl_dev_user { bool iommu_status; bool remap_status; bool user_promisc_mode; + bool user_mcast_mode; + u16 user_vsi; }; struct nbl_vfio_device { @@ -265,7 +327,6 @@ struct nbl_vfio_device { #define NBL_USERDEV_TO_VFIO_DEV(user) ((user)->vdev) #define NBL_VFIO_DEV_TO_USERDEV(vdev) (*(struct nbl_dev_user **)((vdev) + 1)) - struct nbl_dev_rep { struct nbl_rep_data *rep; int num_vfs; @@ -276,10 +337,8 @@ struct nbl_dev_mgt { struct nbl_service_ops_tbl *serv_ops_tbl; struct nbl_channel_ops_tbl *chan_ops_tbl; struct nbl_dev_common *common_dev; - struct nbl_dev_factory *factory_dev; struct nbl_dev_ctrl *ctrl_dev; struct nbl_dev_net *net_dev; - struct nbl_dev_virtio *virtio_dev; struct nbl_dev_rdma *rdma_dev; struct nbl_dev_emp_console *emp_console; struct nbl_dev_rep *rep_dev; @@ -348,8 +407,8 @@ int nbl_dev_start_rdma_dev(struct nbl_adapter *adapter); void nbl_dev_stop_rdma_dev(struct nbl_adapter *adapter); int nbl_dev_resume_rdma_dev(struct nbl_adapter *adapter); int nbl_dev_suspend_rdma_dev(struct nbl_adapter *adapter); -void nbl_dev_grc_process_abnormal_event(struct nbl_dev_rdma *rdma_dev); -void nbl_dev_grc_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id); +void nbl_dev_rdma_process_abnormal_event(struct nbl_dev_rdma *rdma_dev); +void nbl_dev_rdma_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id); size_t nbl_dev_rdma_qos_cfg_store(struct nbl_dev_mgt *dev_mgt, int offset, const char *buf, size_t count); size_t nbl_dev_rdma_qos_cfg_show(struct nbl_dev_mgt *dev_mgt, int offset, char *buf); @@ -361,5 +420,9 @@ void nbl_dev_remove_hwmon(struct nbl_adapter *adapter); struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt, u8 vsi_index); int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev); +int nbl_netdev_add_mirror_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev); void nbl_netdev_remove_sysfs(struct nbl_dev_net *net_dev); +void nbl_netdev_remove_mirror_sysfs(struct nbl_dev_net *net_dev); +void nbl_net_add_name_attr(struct nbl_netdev_name_attr *dev_name_attr, char *rep_name); +void nbl_net_remove_dev_attr(struct nbl_dev_net *net_dev); #endif diff --git 
a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c index 4e9e7b2e2929..5dc798efed26 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c @@ -11,6 +11,17 @@ static int nbl_dev_create_rdma_aux_dev(struct nbl_dev_mgt *dev_mgt, u8 type, static void nbl_dev_destroy_rdma_aux_dev(struct nbl_dev_rdma *rdma_dev, struct auxiliary_device **adev); +static void nbl_dev_rdma_pending_and_flush_event_task(struct nbl_dev_rdma *rdma_dev) +{ + atomic_inc(&rdma_dev->adev_busy); + nbl_common_flush_task(&rdma_dev->event_task); +} + +static void nbl_dev_rdma_resume_event_task(struct nbl_dev_rdma *rdma_dev) +{ + atomic_dec(&rdma_dev->adev_busy); +} + static int nbl_dev_rdma_bond_active_num(struct nbl_core_dev_info *cdev_info) { int i, count = 0; @@ -146,7 +157,7 @@ static int nbl_dev_grc_process_send(struct pci_dev *pdev, u8 *req_args, u8 req_l } } -static void nbl_dev_grc_handle_abnormal_event(struct work_struct *work) +static void nbl_dev_rdma_handle_abnormal_event_task(struct work_struct *work) { struct nbl_dev_rdma *rdma_dev = container_of(work, struct nbl_dev_rdma, abnormal_event_task); @@ -168,13 +179,13 @@ static void nbl_dev_grc_handle_abnormal_event(struct work_struct *work) dev_link->abnormal_event_process(&dev_link->adev); } -void nbl_dev_grc_process_abnormal_event(struct nbl_dev_rdma *rdma_dev) +void nbl_dev_rdma_process_abnormal_event(struct nbl_dev_rdma *rdma_dev) { - if (rdma_dev && !rdma_dev->is_halting && rdma_dev->pf_event_ready) + if (rdma_dev && !rdma_dev->is_halting) nbl_common_queue_work_rdma(&rdma_dev->abnormal_event_task, false); } -void nbl_dev_grc_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id) +void nbl_dev_rdma_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id) { struct nbl_aux_dev *dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev); @@ -268,6 +279,28 @@ static void nbl_dev_rdma_update_bond_member(struct nbl_dev_mgt *dev_mgt, nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, false); } +static int nbl_dev_rdma_update_adev_mtu(struct nbl_dev_mgt *dev_mgt, + struct nbl_event_param *event_param) +{ + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + int new_mtu = event_param->mtu; + struct nbl_aux_dev *dev_link = NULL; + + if (rdma_dev && rdma_dev->grc_adev) + dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev); + else if (rdma_dev && rdma_dev->adev) + dev_link = container_of(rdma_dev->adev, struct nbl_aux_dev, adev); + else if (rdma_dev && rdma_dev->bond_adev) + dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev); + else + return 0; + + if (dev_link && dev_link->cdev_info && dev_link->cdev_info->change_mtu_notify) + dev_link->cdev_info->change_mtu_notify(&dev_link->adev, new_mtu); + + return 0; +} + static int nbl_dev_rdma_handle_bond_event(u16 type, void *event_data, void *callback_data) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; @@ -304,7 +337,7 @@ static int nbl_dev_rdma_handle_bond_event(u16 type, void *event_data, void *call * * This make sure that we always use the lastest param, functionally correct. 
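The pending/flush pair introduced at the top of this file (nbl_dev_rdma_pending_and_flush_event_task() / nbl_dev_rdma_resume_event_task()), together with event_param_list and event_lock, forms a small "pausable FIFO worker": producers append a node under the mutex and kick the work item; the worker dequeues one node per pass and requeues itself; a pauser bumps an atomic counter and flushes the task, which makes the worker back off until the counter drops. A minimal sketch using the stock workqueue API, which the nbl_common_* task helpers are assumed to wrap:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct ev_node {
	struct list_head node;
	int payload;
};

static LIST_HEAD(ev_list);
static DEFINE_MUTEX(ev_lock);
static atomic_t ev_busy = ATOMIC_INIT(0);
static struct work_struct ev_work;	/* INIT_WORK(&ev_work, ev_work_fn) before first use */

static void ev_work_fn(struct work_struct *work)
{
	struct ev_node *ev = NULL;

	if (atomic_read(&ev_busy)) {
		msleep(20);		/* back off while a pauser is active */
		schedule_work(work);	/* try again later */
		return;
	}

	mutex_lock(&ev_lock);
	if (!list_empty(&ev_list)) {
		ev = list_first_entry(&ev_list, struct ev_node, node);
		list_del(&ev->node);
	}
	mutex_unlock(&ev_lock);

	if (!ev)
		return;

	/* ... process ev->payload outside the lock ... */
	kfree(ev);

	schedule_work(work);	/* there may be more queued nodes */
}

static int ev_post(int payload)
{
	struct ev_node *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;
	ev->payload = payload;

	mutex_lock(&ev_lock);
	list_add_tail(&ev->node, &ev_list);	/* FIFO keeps notify order */
	mutex_unlock(&ev_lock);

	schedule_work(&ev_work);
	return 0;
}

static void ev_pause_and_flush(void)
{
	atomic_inc(&ev_busy);	/* stop new passes */
	flush_work(&ev_work);	/* wait out the current one */
}

static void ev_resume(void)
{
	atomic_dec(&ev_busy);
}

Because only the list is locked, event processing itself never runs under ev_lock, which is exactly the deadlock-avoidance argument made in the comment below.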
* - * But this will require the task function(nbl_dev_rdma_process_bond_event) to lock all its + * But this will require the task function(nbl_dev_rdma_process_event_task) to lock all its * body, for that we must make sure that once we get a param, we will use it until we * finished all the process, or else we will have trouble for using differnet param while * processing. @@ -321,17 +354,79 @@ static int nbl_dev_rdma_handle_bond_event(u16 type, void *event_data, void *call * Then the lock only needs to lock the list itself(rather than the whole aux_dev process), * thus no trouble for deadlock. */ - mutex_lock(&rdma_dev->lag_event_lock); + mutex_lock(&rdma_dev->event_lock); + /* Always add_tail and dequeue the first, to maintain the order of notify */ + list_add_tail(&data->node, &rdma_dev->event_param_list); + mutex_unlock(&rdma_dev->event_lock); + + if (rdma_dev->event_ready) + nbl_common_queue_work_rdma(&rdma_dev->event_task, true); + + return 0; +} + +static int +nbl_dev_rdma_handle_mirror_outputport_event(u16 type, void *event_data, void *callback_data) +{ + bool mirror_enable = *(bool *)event_data; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_dev_rdma_event_data *data = NULL; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->type = type; + data->callback_data = callback_data; + if (mirror_enable) + data->event_data.subevent = NBL_SUBEVENT_RELEASE_ADEV; + else + data->event_data.subevent = NBL_SUBEVENT_CREATE_ADEV; + + mutex_lock(&rdma_dev->event_lock); /* Always add_tail and dequeue the first, to maintain the order of notify */ - list_add_tail(&data->node, &rdma_dev->lag_event_param_list); - mutex_unlock(&rdma_dev->lag_event_lock); + list_add_tail(&data->node, &rdma_dev->event_param_list); + mutex_unlock(&rdma_dev->event_lock); - if (rdma_dev && rdma_dev->pf_event_ready) - nbl_common_queue_work_rdma(&rdma_dev->lag_event_task, true); + if (rdma_dev->event_ready) + nbl_common_queue_work_rdma(&rdma_dev->event_task, true); return 0; } +static int +nbl_dev_rdma_handle_mirror_selectport_event(u16 type, void *event_data, void *callback_data) +{ + bool mirror_enable = *(bool *)event_data; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_aux_dev *dev_link; + struct auxiliary_device *adev; + + nbl_dev_rdma_pending_and_flush_event_task(rdma_dev); + + adev = rdma_dev->adev ? 
rdma_dev->adev : rdma_dev->bond_adev; + if (!adev) + goto resume_event_task; + + if (rdma_dev->mirror_enable == mirror_enable) + goto resume_event_task; + + rdma_dev->mirror_enable = mirror_enable; + dev_link = container_of(adev, struct nbl_aux_dev, adev); + if (!dev_link->cdev_info) + goto resume_event_task; + + dev_link->cdev_info->mirror_enable = mirror_enable; + if (dev_link->mirror_enable_notify) + dev_link->mirror_enable_notify(adev, mirror_enable); + +resume_event_task: + nbl_dev_rdma_resume_event_task(rdma_dev); + return 0; +} + static int nbl_dev_rdma_handle_offload_status(u16 type, void *event_data, void *callback_data) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; @@ -340,16 +435,19 @@ static int nbl_dev_rdma_handle_offload_status(u16 type, void *event_data, void * (struct nbl_event_offload_status_data *)event_data; struct nbl_aux_dev *dev_link; + nbl_dev_rdma_pending_and_flush_event_task(rdma_dev); if (!rdma_dev->bond_adev) - return 0; + goto resume_event_task; if (data->pf_vsi_id != NBL_COMMON_TO_VSI_ID(NBL_DEV_MGT_TO_COMMON(dev_mgt))) - return 0; + goto resume_event_task; dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev); - if (dev_link->cdev_info->offload_status_notify) + if (dev_link->cdev_info && dev_link->cdev_info->offload_status_notify) dev_link->cdev_info->offload_status_notify(rdma_dev->bond_adev, data->status); +resume_event_task: + nbl_dev_rdma_resume_event_task(rdma_dev); return 0; } @@ -359,7 +457,7 @@ static int nbl_dev_rdma_process_adev_event(void *event_data, void *callback_data struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); - struct nbl_event_rdma_bond_update *event = (struct nbl_event_rdma_bond_update *)event_data; + struct nbl_event_param *event = (struct nbl_event_param *)event_data; struct nbl_lag_member_list_param *list_param = &event->param; struct nbl_rdma_register_param register_param = {0}; struct nbl_core_dev_lag_info lag_info = {0}; @@ -407,48 +505,57 @@ static int nbl_dev_rdma_process_adev_event(void *event_data, void *callback_data return 0; } -static int nbl_dev_rdma_process_bond_event(struct work_struct *work) +static int nbl_dev_rdma_process_event_task(struct work_struct *work) { - struct nbl_dev_rdma *rdma_dev = container_of(work, struct nbl_dev_rdma, lag_event_task); + struct nbl_dev_rdma *rdma_dev = container_of(work, struct nbl_dev_rdma, event_task); struct nbl_dev_mgt *dev_mgt; struct nbl_common_info *common; struct nbl_lag_member_list_param *list_param; struct nbl_dev_rdma_event_data *data = NULL; - struct nbl_event_rdma_bond_update *lag_event = NULL; + struct nbl_event_param *event_param = NULL; + + if (!!atomic_read(&rdma_dev->adev_busy)) { + msleep(20); + goto queue_rework; + } - mutex_lock(&rdma_dev->lag_event_lock); + mutex_lock(&rdma_dev->event_lock); - if (!nbl_list_empty(&rdma_dev->lag_event_param_list)) { - data = list_first_entry(&rdma_dev->lag_event_param_list, + if (!nbl_list_empty(&rdma_dev->event_param_list)) { + data = list_first_entry(&rdma_dev->event_param_list, struct nbl_dev_rdma_event_data, node); list_del(&data->node); } - mutex_unlock(&rdma_dev->lag_event_lock); + mutex_unlock(&rdma_dev->event_lock); if (!data) return 0; dev_mgt = (struct nbl_dev_mgt *)data->callback_data; common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - lag_event = &data->event_data; - list_param = &lag_event->param; + event_param = &data->event_data; + 
list_param = &event_param->param; - nbl_info(common, NBL_DEBUG_MAIN, "process rdma lag subevent %u.", lag_event->subevent); + nbl_info(common, NBL_DEBUG_MAIN, "process rdma subevent %u.", event_param->subevent); - switch (lag_event->subevent) { + switch (event_param->subevent) { case NBL_SUBEVENT_UPDATE_BOND_MEMBER: nbl_dev_rdma_update_bond_member(dev_mgt, list_param); break; + case NBL_SUBEVENT_UPDATE_MTU: + nbl_dev_rdma_update_adev_mtu(dev_mgt, event_param); + break; default: - nbl_dev_rdma_process_adev_event(lag_event, dev_mgt); + nbl_dev_rdma_process_adev_event(event_param, dev_mgt); break; } kfree(data); + +queue_rework: /* Always queue it again, because we don't know if there is another param need to process */ - if (rdma_dev && rdma_dev->pf_event_ready) - nbl_common_queue_work_rdma(&rdma_dev->lag_event_task, true); + nbl_common_queue_work_rdma(&rdma_dev->event_task, true); return 0; } @@ -460,9 +567,11 @@ static int nbl_dev_rdma_handle_reset_event(u16 type, void *event_data, void *cal struct nbl_aux_dev *dev_link; struct auxiliary_device *adev; + nbl_dev_rdma_pending_and_flush_event_task(rdma_dev); + adev = rdma_dev->adev ? rdma_dev->adev : rdma_dev->bond_adev; if (!adev) - return -1; + goto resume_event_task; dev_link = container_of(adev, struct nbl_aux_dev, adev); if (dev_link->reset_event_notify) @@ -475,6 +584,40 @@ static int nbl_dev_rdma_handle_reset_event(u16 type, void *event_data, void *cal dev_link->reset_event_notify(adev, event); } +resume_event_task: + nbl_dev_rdma_resume_event_task(rdma_dev); + return 0; +} + +static int nbl_dev_rdma_handle_change_mtu_event(u16 type, void *event_data, void *callback_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + int new_mtu = *(int *)event_data; + struct nbl_dev_rdma_event_data *data = NULL; + + /* Defer the mtu update to the adev event task: the OS already holds the rtnl_lock before + * calling the driver's set_mtu ops, and the adev driver's probe may also take the rtnl_lock, + * so flushing the adev task here could deadlock.
+ */ + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->type = type; + data->callback_data = callback_data; + data->event_data.mtu = new_mtu; + data->event_data.subevent = NBL_SUBEVENT_UPDATE_MTU; + + mutex_lock(&rdma_dev->event_lock); + /* Always add_tail and dequeue the first, to maintain the order of notify */ + list_add_tail(&data->node, &rdma_dev->event_param_list); + mutex_unlock(&rdma_dev->event_lock); + + if (rdma_dev->event_ready) + nbl_common_queue_work_rdma(&rdma_dev->event_task, true); + return 0; } @@ -667,6 +810,7 @@ static int nbl_dev_create_rdma_aux_dev(struct nbl_dev_mgt *dev_mgt, u8 type, goto malloc_cdev_info_err; } + dev_link->cdev_info->mirror_enable = rdma_dev->mirror_enable; ret = auxiliary_device_init(adev); if (ret) { dev_err(dev, "auxiliary_device_init fail ret= %d", ret); @@ -708,7 +852,7 @@ static void nbl_dev_destroy_rdma_aux_dev(struct nbl_dev_rdma *rdma_dev, if (!adev || !*adev) return; - if (rdma_dev->pf_event_ready) + if (rdma_dev->has_abnormal_event_task) nbl_common_flush_task(&rdma_dev->abnormal_event_task); auxiliary_device_delete(*adev); @@ -755,16 +899,23 @@ int nbl_dev_setup_rdma_dev(struct nbl_adapter *adapter, struct nbl_init_param *p rdma_dev->adev_index = register_param.id; msix_info->serv_info[NBL_MSIX_RDMA_TYPE].num += register_param.intr_num; + nbl_common_alloc_task(&rdma_dev->event_task, (void *)nbl_dev_rdma_process_event_task); + INIT_LIST_HEAD(&rdma_dev->event_param_list); + mutex_init(&rdma_dev->event_lock); if (!NBL_COMMON_TO_VF_CAP(common)) { - nbl_common_alloc_task(&rdma_dev->lag_event_task, - (void *)nbl_dev_rdma_process_bond_event); - INIT_LIST_HEAD(&rdma_dev->lag_event_param_list); - mutex_init(&rdma_dev->lag_event_lock); - event_callback.callback_data = dev_mgt; event_callback.callback = nbl_dev_rdma_handle_bond_event; nbl_event_register(NBL_EVENT_RDMA_BOND_UPDATE, &event_callback, NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_mirror_selectport_event; + nbl_event_register(NBL_EVENT_MIRROR_SELECTPORT, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } else { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_mirror_outputport_event; + nbl_event_register(NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); } NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) = rdma_dev; @@ -790,17 +941,26 @@ void nbl_dev_remove_rdma_dev(struct nbl_adapter *adapter) nbl_event_unregister(NBL_EVENT_RDMA_BOND_UPDATE, &event_callback, NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); - mutex_lock(&rdma_dev->lag_event_lock); - list_for_each_entry_safe(data, data_safe, - &rdma_dev->lag_event_param_list, node) { - list_del(&data->node); - kfree(data); - } - mutex_unlock(&rdma_dev->lag_event_lock); + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_mirror_selectport_event; + nbl_event_unregister(NBL_EVENT_MIRROR_SELECTPORT, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } else { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_mirror_outputport_event; + nbl_event_unregister(NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } - nbl_common_release_task(&rdma_dev->lag_event_task); + 
mutex_lock(&rdma_dev->event_lock); + list_for_each_entry_safe(data, data_safe, &rdma_dev->event_param_list, node) { + list_del(&data->node); + kfree(data); } + mutex_unlock(&rdma_dev->event_lock); + nbl_common_release_task(&rdma_dev->event_task); + if (rdma_dev->has_rdma) serv_ops->unregister_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_VSI_ID(common)); @@ -824,9 +984,11 @@ int nbl_dev_start_rdma_dev(struct nbl_adapter *adapter) if (!rdma_dev || (!rdma_dev->has_rdma && !rdma_dev->has_grc)) return 0; - if (!NBL_COMMON_TO_VF_CAP(common)) + if (!!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) { nbl_common_alloc_task(&rdma_dev->abnormal_event_task, - nbl_dev_grc_handle_abnormal_event); + nbl_dev_rdma_handle_abnormal_event_task); + rdma_dev->has_abnormal_event_task = true; + } if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_MAILBOX)) @@ -850,8 +1012,6 @@ int nbl_dev_start_rdma_dev(struct nbl_adapter *adapter) event_callback.callback = nbl_dev_rdma_handle_offload_status; nbl_event_register(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_callback, NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); - - rdma_dev->pf_event_ready = true; } event_callback.callback_data = rdma_dev; @@ -859,8 +1019,13 @@ int nbl_dev_start_rdma_dev(struct nbl_adapter *adapter) nbl_event_register(NBL_EVENT_RESET_EVENT, &event_callback, NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); - if (rdma_dev && rdma_dev->pf_event_ready) - nbl_common_queue_work_rdma(&rdma_dev->lag_event_task, true); + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_change_mtu_event; + nbl_event_register(NBL_EVENT_CHANGE_MTU, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + rdma_dev->event_ready = true; + nbl_common_queue_work_rdma(&rdma_dev->event_task, true); return 0; @@ -884,21 +1049,26 @@ void nbl_dev_stop_rdma_dev(struct nbl_adapter *adapter) event_callback.callback = nbl_dev_rdma_handle_offload_status; nbl_event_unregister(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_callback, NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); - rdma_dev->pf_event_ready = false; - nbl_common_flush_task(&rdma_dev->abnormal_event_task); - nbl_common_flush_task(&rdma_dev->lag_event_task); } + rdma_dev->event_ready = false; + nbl_common_flush_task(&rdma_dev->event_task); + event_callback.callback_data = rdma_dev; event_callback.callback = nbl_dev_rdma_handle_reset_event; nbl_event_unregister(NBL_EVENT_RESET_EVENT, &event_callback, NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_change_mtu_event; + nbl_event_unregister(NBL_EVENT_CHANGE_MTU, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->bond_adev); nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->adev); nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->grc_adev); - if (!NBL_COMMON_TO_VF_CAP(common)) + if (rdma_dev->has_abnormal_event_task) nbl_common_release_task(&rdma_dev->abnormal_event_task); } @@ -906,17 +1076,15 @@ int nbl_dev_resume_rdma_dev(struct nbl_adapter *adapter) { struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); if (!rdma_dev || (!rdma_dev->has_rdma && !rdma_dev->has_grc)) return 0; - if (!NBL_COMMON_TO_VF_CAP(common)) + if 
(rdma_dev->has_abnormal_event_task) nbl_common_alloc_task(&rdma_dev->abnormal_event_task, - nbl_dev_grc_handle_abnormal_event); + nbl_dev_rdma_handle_abnormal_event_task); - if (!NBL_COMMON_TO_VF_CAP(common)) - nbl_common_alloc_task(&rdma_dev->lag_event_task, nbl_dev_rdma_process_bond_event); + nbl_common_alloc_task(&rdma_dev->event_task, nbl_dev_rdma_process_event_task); return 0; } @@ -925,15 +1093,13 @@ int nbl_dev_suspend_rdma_dev(struct nbl_adapter *adapter) { struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); if (!rdma_dev) return 0; - if (!NBL_COMMON_TO_VF_CAP(common)) - nbl_common_release_task(&rdma_dev->lag_event_task); + nbl_common_release_task(&rdma_dev->event_task); - if (!NBL_COMMON_TO_VF_CAP(common)) + if (rdma_dev->has_abnormal_event_task) nbl_common_release_task(&rdma_dev->abnormal_event_task); return 0; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h index d560fa0a2ebf..a76dbefde40f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c index 96c8e7c36b94..6c9ce5cbe90a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c @@ -7,6 +7,9 @@ #include "nbl_service.h" extern int device_driver_attach(struct device_driver *drv, struct device *dev); +#define VENDOR_PHYTIUM 0x70 +#define VENDOR_MASK 0xFF +#define VENDOR_OFFSET 24 static struct nbl_userdev { struct cdev cdev; @@ -33,7 +36,6 @@ struct nbl_userdev_dma { unsigned long vaddr; size_t size; unsigned long pfn; - unsigned int ref_cnt; }; bool nbl_dma_iommu_status(struct pci_dev *pdev) @@ -76,62 +78,48 @@ static void nbl_user_change_kernel_network(struct nbl_dev_user *user) { struct nbl_adapter *adapter = user->adapter; struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_event_dev_mode_switch_data data = {0}; struct net_device *netdev = net_dev->netdev; + int ret; if (user->network_type == NBL_KERNEL_NETWORK) return; - rtnl_lock(); - clear_bit(NBL_USER, adapter->state); - - data.op = NBL_DEV_USER_TO_KERNEL; - data.promosic = user->user_promisc_mode; - nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), - NBL_COMMON_TO_BOARD_ID(common)); - if (data.ret) { - netdev_err(netdev, "network changes to kernel space failed %d\n", data.ret); - goto unlock; + ret = serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_DEV_USER_TO_KERNEL); + if (ret) { + netdev_err(netdev, "network changes to kernel space failed %d\n", ret); + return; } user->network_type = NBL_KERNEL_NETWORK; netdev_info(netdev, "network changes to kernel space\n"); - -unlock: - rtnl_unlock(); } static int nbl_user_change_user_network(struct nbl_dev_user *user) { 
struct nbl_adapter *adapter = user->adapter; struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct net_device *netdev = net_dev->netdev; - struct nbl_event_dev_mode_switch_data data = {0}; int ret = 0; - rtnl_lock(); + if (user->network_type == NBL_USER_NETWORK) + return 0; - data.op = NBL_DEV_KERNEL_TO_USER; - data.promosic = user->user_promisc_mode; + ret = serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_DEV_KERNEL_TO_USER); - nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), - NBL_COMMON_TO_BOARD_ID(common)); - if (data.ret) { - netdev_err(netdev, "network changes to user space failed %u\n", data.ret); - goto unlock; + if (ret) { + netdev_err(netdev, "network changes to user space failed %u\n", ret); + return ret; } - set_bit(NBL_USER, adapter->state); user->network_type = NBL_USER_NETWORK; netdev_info(netdev, "network changes to user\n"); -unlock: - rtnl_unlock(); - return ret; } @@ -158,6 +146,16 @@ static int nbl_cdev_open(struct inode *inode, struct file *filep) if (opened) return -EBUSY; + rtnl_lock(); + if (test_bit(NBL_XDP, p->state)) { + atomic_set(&user->open_cnt, 0); + rtnl_unlock(); + return -EIO; + } + + set_bit(NBL_USER, p->state); + rtnl_unlock(); + filep->private_data = p; return 0; @@ -175,8 +173,10 @@ static int nbl_cdev_release(struct inode *inode, struct file *filp) nbl_user_change_kernel_network(user); serv_ops->config_fd_flow_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_CHAN_FDIR_RULE_ISOLATE, NBL_FD_STATE_FLUSH); + serv_ops->clear_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), user->user_vsi); atomic_set(&user->open_cnt, 0); user->user_promisc_mode = 0; + clear_bit(NBL_USER, adapter->state); return 0; } @@ -264,6 +264,7 @@ static int nbl_userdev_register_net(struct nbl_adapter *adapter, void *resp, struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); struct nbl_register_net_result *result = (struct nbl_register_net_result *)resp; struct nbl_dev_vsi *vsi; + int ret = 0; vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; @@ -272,10 +273,14 @@ static int nbl_userdev_register_net(struct nbl_adapter *adapter, void *resp, result->rx_queue_num = vsi->queue_num; result->rdma_enable = 0; result->queue_offset = vsi->queue_offset; + result->trusted = 1; + + if (vsi->queue_num == 0) + ret = -ENOSPC; chan_send->ack_len = sizeof(struct nbl_register_net_result); - return 0; + return ret; } static int nbl_userdev_alloc_txrx_queues(struct nbl_adapter *adapter, void *resp, @@ -361,10 +366,6 @@ static long nbl_userdev_channel_ioctl(struct nbl_adapter *adapter, unsigned long case NBL_CHAN_MSG_GET_VSI_ID: ret = nbl_userdev_get_vsi_id(adapter, resp, &chan_send); break; - case NBL_CHAN_MSG_ADD_MACVLAN: - WARN_ON(1); - break; - case NBL_CHAN_MSG_DEL_MACVLAN: case NBL_CHAN_MSG_UNREGISTER_NET: case NBL_CHAN_MSG_ADD_MULTI_RULE: case NBL_CHAN_MSG_DEL_MULTI_RULE: @@ -396,7 +397,10 @@ static long nbl_userdev_channel_ioctl(struct nbl_adapter *adapter, unsigned long static long nbl_userdev_switch_network(struct nbl_adapter *adapter, unsigned long arg) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_dev_net *net_dev = 
NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi; int timeout = 50; int type; @@ -420,10 +424,16 @@ static long nbl_userdev_switch_network(struct nbl_adapter *adapter, unsigned lon } /* todolist: concurreny about adapter->state */ - if (type == NBL_USER_NETWORK) + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + if (type == NBL_USER_NETWORK) { nbl_user_change_user_network(user); - else + serv_ops->set_promisc_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user->user_promisc_mode); + serv_ops->cfg_multi_mcast(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user->user_mcast_mode); + } else { nbl_user_change_kernel_network(user); + } return 0; } @@ -543,38 +553,44 @@ static long nbl_userdev_get_dma_limit(struct nbl_adapter *adapter, unsigned long return copy_to_user((void __user *)arg, &user->dma_limit, sizeof(user->dma_limit)); } -static long nbl_userdev_set_promisc_mode(struct nbl_adapter *adapter, unsigned long arg) +static long nbl_userdev_set_multi_mode(struct nbl_adapter *adapter, unsigned int cmd, + unsigned long arg) { struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); - struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_event_dev_mode_switch_data data = {0}; - int user_promisc_mode; + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi; + u16 user_multi_mode; int ret = 0; - if (get_user(user_promisc_mode, (unsigned long __user *)arg)) { + if (get_user(user_multi_mode, (unsigned long __user *)arg)) { dev_err(NBL_ADAPTER_TO_DEV(adapter), "set promic mode get mode failed\n"); return -EFAULT; } - if (user_promisc_mode == user->user_promisc_mode) + if (cmd == NBL_DEV_USER_SET_PROMISC_MODE && user_multi_mode == user->user_promisc_mode) return 0; + if (cmd == NBL_DEV_USER_SET_MCAST_MODE && user_multi_mode == user->user_mcast_mode) + return 0; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; if (user->network_type == NBL_USER_NETWORK) { - data.op = NBL_DEV_SET_USER_PROMISC_MODE; - data.promosic = user_promisc_mode; - nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), - NBL_COMMON_TO_BOARD_ID(common)); - ret = data.ret; - if (ret) { - dev_err(NBL_ADAPTER_TO_DEV(adapter), - "user set promic mode %u failed %d\n", user_promisc_mode, ret); - return ret; - } + if (cmd == NBL_DEV_USER_SET_PROMISC_MODE) + ret = serv_ops->set_promisc_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user_multi_mode); + else + ret = serv_ops->cfg_multi_mcast(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, user_multi_mode); } - user->user_promisc_mode = user_promisc_mode; + if (cmd == NBL_DEV_USER_SET_PROMISC_MODE) + user->user_promisc_mode = user_multi_mode; + else + user->user_mcast_mode = user_multi_mode; + return ret; } @@ -612,7 +628,8 @@ static long nbl_userdev_common_ioctl(struct nbl_adapter *adapter, unsigned int c ret = nbl_userdev_get_dma_limit(adapter, arg); break; case NBL_DEV_USER_SET_PROMISC_MODE: - ret = nbl_userdev_set_promisc_mode(adapter, arg); + case NBL_DEV_USER_SET_MCAST_MODE: + ret = nbl_userdev_set_multi_mode(adapter, cmd, arg); break; default: break; @@ -698,6 +715,95 @@ static struct nbl_userdev_dma *nbl_userdev_find_dma(struct nbl_dev_user_iommu_gr return NULL; } +static struct rb_node *nbl_userdev_find_dma_first_node(struct nbl_dev_user_iommu_group *group, + dma_addr_t start, size_t size) +{ + struct 
rb_node *res = NULL; + struct rb_node *node = group->dma_tree.rb_node; + struct nbl_userdev_dma *dma_res = NULL; + + while (node) { + struct nbl_userdev_dma *dma = rb_entry(node, struct nbl_userdev_dma, node); + + if (start < dma->vaddr + dma->size) { + res = node; + dma_res = dma; + if (start >= dma->vaddr) + break; + node = node->rb_left; + } else { + node = node->rb_right; + } + } + if (res && size && dma_res->vaddr >= start + size) + res = NULL; + return res; +} + +/** + * Check dma conflicts when multiple devices share one iommu group, i.e. when ACS is not + * supported. + * Return -1 when the mappings of the devices conflict. + * Return 1 when an identical mapping already exists (no conflict). + * Return 0 when no mapping exists yet. + */ +static int nbl_userdev_check_dma_conflict(struct nbl_dev_user *user, + unsigned long vaddr, dma_addr_t iova, size_t size) +{ + struct nbl_dev_user_iommu_group *group = user->group; + struct nbl_userdev_dma *dma; + struct rb_node *n; + struct page *h_page; + size_t unmapped = 0; + unsigned long vfn, pfn, vaddr_new; + dma_addr_t iova_new; + int ret; + + dma = nbl_userdev_find_dma(group, vaddr, 1); + if (dma && dma->vaddr != vaddr) + return -1; + + dma = nbl_userdev_find_dma(group, vaddr + size - 1, 0); + if (dma && dma->vaddr + dma->size != vaddr + size) + return -1; + + if (!nbl_userdev_find_dma(group, vaddr, size)) + return 0; + n = nbl_userdev_find_dma_first_node(group, vaddr, size); + vaddr_new = vaddr; + iova_new = iova; + while (n) { + dma = rb_entry(n, struct nbl_userdev_dma, node); + if (dma->iova >= iova + size) + break; + + if (dma->vaddr >= vaddr + size) + break; + + if (dma->vaddr != vaddr_new || dma->iova != iova_new) + break; + + vfn = vaddr_new >> PAGE_SHIFT; + ret = vfio_pin_pages(NBL_USERDEV_TO_VFIO_DEV(user), + vaddr_new, 1, IOMMU_READ | IOMMU_WRITE, &h_page); + if (ret <= 0) + break; + pfn = page_to_pfn(h_page); + vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr_new, 1); + if (pfn != dma->pfn) + break; + + n = rb_next(n); + unmapped += dma->size; + vaddr_new += dma->size; + iova_new += dma->size; + } + + if (unmapped != size) + return -1; + + return 1; +} + static void nbl_userdev_link_dma(struct nbl_dev_user_iommu_group *group, struct nbl_userdev_dma *new) { @@ -718,10 +824,24 @@ rb_insert_color(&new->node, &group->dma_tree); } +#ifdef CONFIG_ARM64 +static int check_phytium_cpu(void) +{ + u32 midr = read_cpuid_id(); + u32 vendor = (midr >> VENDOR_OFFSET) & VENDOR_MASK; + + if (vendor == VENDOR_PHYTIUM) + return 1; + + return 0; +} +#endif + static void nbl_userdev_remove_dma(struct nbl_dev_user_iommu_group *group, struct nbl_userdev_dma *dma) { struct nbl_vfio_batch batch; + size_t unmapped; long npage, batch_pages; unsigned long vaddr; int ret, caps; @@ -730,7 +850,16 @@ dev_dbg(group->dev, "dma remove: vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", dma->vaddr, dma->iova, dma->size); - iommu_unmap(iommu_get_domain_for_dev(group->dev), dma->iova, dma->size); + unmapped = iommu_unmap(iommu_get_domain_for_dev(group->dev), dma->iova, dma->size); + WARN_ON(unmapped != dma->size); + /* + * On Kylin + Phytium (FT) servers, stale dma content has been observed in smmu + * translate mode. Forcing a full iommu tlb flush avoids the problem.
+ */ +#ifdef CONFIG_ARM64 + if (check_phytium_cpu()) + iommu_flush_iotlb_all(iommu_get_domain_for_dev(group->dev)); +#endif ret = nbl_vfio_batch_init(&batch); if (ret) { @@ -774,9 +903,8 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a struct device *dev = &pdev->dev; struct nbl_vfio_batch batch; struct nbl_userdev_dma *dma; - struct page *h_page; unsigned long minsz, pfn_base = 0, pfn; - unsigned long vaddr, vfn; + unsigned long vaddr; dma_addr_t iova; u32 mask = NBL_DEV_USER_DMA_MAP_FLAG_READ | NBL_DEV_USER_DMA_MAP_FLAG_WRITE; size_t size; @@ -798,34 +926,17 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a if (!npage) return ret; + mutex_lock(&user->group->dma_tree_lock); - /* rb-tree find */ - dma = nbl_userdev_find_dma(user->group, vaddr, map.size); - if (dma && dma->iova == iova && dma->size == map.size) { - vfn = vaddr >> PAGE_SHIFT; - ret = vfio_pin_pages(NBL_USERDEV_TO_VFIO_DEV(user), - vaddr, 1, IOMMU_READ | IOMMU_WRITE, &h_page); - if (ret <= 0) { - dev_err(dev, "vfio_pin_pages failed %d\n", ret); - goto mutext_unlock; - } + ret = nbl_userdev_check_dma_conflict(user, vaddr, iova, map.size); + if (ret < 0) { + dev_err(dev, "multiple dma not equal\n"); + ret = -EINVAL; + goto mutext_unlock; + } - pfn = page_to_pfn(h_page); + if (ret) { ret = 0; - vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, 1); - if (pfn != dma->pfn) { - dev_err(dev, "multiple dma pfn not equal, new pfn %lu, dma pfn %lu\n", - pfn, dma->pfn); - ret = -EINVAL; - goto mutext_unlock; - } - - dev_info(dev, "existing dma info, ref_cnt++\n"); - dma->ref_cnt++; - goto mutext_unlock; - } else if (dma) { - dev_info(dev, "multiple dma not equal\n"); - ret = -EINVAL; goto mutext_unlock; } @@ -850,6 +961,7 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a batch.pages_in[0] = vaddr >> PAGE_SHIFT; for (i = 1; i < batch_pages; i++) batch.pages_in[i] = batch.pages_in[i - 1] + 1; + ret = vfio_pin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, batch_pages, IOMMU_READ | IOMMU_WRITE, batch.h_page); @@ -861,6 +973,7 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a for (i = 0; i < batch_pages; i++) batch.pages_out[i] = page_to_pfn(batch.h_page[i]); + batch.offset = 0; batch.size = ret; if (!pfn_base) { @@ -885,11 +998,12 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a ret = iommu_map(iommu_get_domain_for_dev(dev), iova, phys, size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); + if (ret) { dev_err(dev, "iommu_map failed\n"); goto unwind; } - dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx, " + dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx,\n" "size 0x%llx\n", (u64)iova, (u64)phys, (u64)size); pfn_base = pfn; pinned = 0; @@ -900,13 +1014,15 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a if (pinned) { size = pinned << PAGE_SHIFT; phys = pfn_base << PAGE_SHIFT; + ret = iommu_map(iommu_get_domain_for_dev(dev), iova, phys, size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); + if (ret) { dev_err(dev, "iommu_map failed\n"); goto unwind; } - dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx, " + dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx,\n" "size 0x%llx\n", (u64)iova, (u64)phys, (u64)size); } nbl_vfio_batch_fini(&batch); @@ -914,7 +1030,6 @@ static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long a dma->iova = map.iova; dma->size = map.size; 
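The map path above pins user pages through vfio_pin_pages() in batches and coalesces physically contiguous pages, so iommu_map() is issued once per contiguous run rather than once per page. Distilled to its core as a sketch; map_contig_runs is a hypothetical helper, pinning is simplified to one page at a time, and error unwinding is omitted:

/* Pin npage pages starting at vaddr, extending the current physically
 * contiguous run while pfns stay consecutive; one iommu_map() per run.
 */
static int map_contig_runs(struct iommu_domain *dom, struct vfio_device *vdev,
			   unsigned long vaddr, dma_addr_t iova, long npage)
{
	unsigned long pfn_base = 0;
	long pinned = 0, i;
	int ret;

	for (i = 0; i < npage; i++) {
		struct page *pg;
		unsigned long pfn;

		ret = vfio_pin_pages(vdev, vaddr + (i << PAGE_SHIFT), 1,
				     IOMMU_READ | IOMMU_WRITE, &pg);
		if (ret <= 0)
			return -EFAULT;
		pfn = page_to_pfn(pg);

		if (pinned && pfn != pfn_base + pinned) {
			/* Run broken: flush what has accumulated so far. */
			ret = iommu_map(dom, iova,
					(phys_addr_t)pfn_base << PAGE_SHIFT,
					(size_t)pinned << PAGE_SHIFT,
					IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
					GFP_KERNEL);
			if (ret)
				return ret;
			iova += (dma_addr_t)pinned << PAGE_SHIFT;
			pinned = 0;
		}
		if (!pinned)
			pfn_base = pfn;
		pinned++;
	}

	if (!pinned)
		return 0;
	return iommu_map(dom, iova, (phys_addr_t)pfn_base << PAGE_SHIFT,
			 (size_t)pinned << PAGE_SHIFT,
			 IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
}

The real code additionally tracks pinned pages in an nbl_vfio_batch so they can be unpinned on failure and at unmap time.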
dma->vaddr = map.vaddr; - dma->ref_cnt = 1; nbl_userdev_link_dma(user->group, dma); dev_info(dev, "dma map info: vaddr=0x%llx, iova=0x%llx, size=0x%llx\n", @@ -963,6 +1078,8 @@ static long nbl_userdev_dma_unmap_ioctl(struct nbl_dev_user *user, unsigned long struct nbl_dev_user_dma_unmap unmap; struct nbl_userdev_dma *dma; unsigned long minsz; + size_t unmapped = 0; + struct rb_node *n; minsz = offsetofend(struct nbl_dev_user_dma_unmap, size); @@ -977,19 +1094,27 @@ static long nbl_userdev_dma_unmap_ioctl(struct nbl_dev_user *user, unsigned long mutex_lock(&user->group->dma_tree_lock); user->group->vdev = NBL_USERDEV_TO_VFIO_DEV(user); - dma = nbl_userdev_find_dma(user->group, unmap.vaddr, unmap.size); - /* unmmap pages: rb-tree lock */ - if (dma) { - if (dma->vaddr != unmap.vaddr || dma->iova != unmap.iova || dma->size != unmap.size) - dev_err(dev, "dma unmap not equal, unmap vaddr 0x%llx, iova 0x%llx, " - "size 0x%llx, dma rbtree vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", - unmap.vaddr, unmap.iova, unmap.size, - dma->vaddr, dma->iova, dma->size); - dma->ref_cnt--; - if (!dma->ref_cnt) - nbl_userdev_remove_dma(user->group, dma); + dma = nbl_userdev_find_dma(user->group, unmap.vaddr, 1); + if (dma && dma->vaddr != unmap.vaddr) + goto unlock; + + dma = nbl_userdev_find_dma(user->group, unmap.vaddr + unmap.size - 1, 0); + if (dma && dma->vaddr + dma->size != unmap.vaddr + unmap.size) + goto unlock; + + n = nbl_userdev_find_dma_first_node(user->group, unmap.vaddr, unmap.size); + while (n) { + dma = rb_entry(n, struct nbl_userdev_dma, node); + if (dma->vaddr >= unmap.vaddr + unmap.size) + break; + + n = rb_next(n); + unmapped += dma->size; + nbl_userdev_remove_dma(user->group, dma); } +unlock: mutex_unlock(&user->group->dma_tree_lock); + unmap.size = unmapped; return 0; } @@ -1052,17 +1177,18 @@ static void nbl_userdev_release_group(struct kref *kref) group = container_of(kref, struct nbl_dev_user_iommu_group, kref); list_del(&group->group_next); mutex_unlock(&nbl_userdev.glock); + mutex_lock(&group->dma_tree_lock); while ((node = rb_first(&group->dma_tree))) nbl_userdev_remove_dma(group, rb_entry(node, struct nbl_userdev_dma, node)); iommu_group_put(group->iommu_group); + mutex_unlock(&group->dma_tree_lock); kfree(group); } static void nbl_userdev_group_put(struct nbl_dev_user *user, struct nbl_dev_user_iommu_group *group) { group->vdev = NBL_USERDEV_TO_VFIO_DEV(user); - kref_put_mutex(&group->kref, nbl_userdev_release_group, &nbl_userdev.glock); } @@ -1139,6 +1265,16 @@ static int nbl_vfio_open(struct vfio_device *vdev) if (opened) return -EBUSY; + rtnl_lock(); + if (test_bit(NBL_XDP, adapter->state)) { + atomic_set(&user->open_cnt, 0); + rtnl_unlock(); + return -EIO; + } + + set_bit(NBL_USER, adapter->state); + rtnl_unlock(); + /* add iommu group list */ iommu_group = iommu_group_get(&pdev->dev); if (!iommu_group) { @@ -1167,6 +1303,8 @@ static int nbl_vfio_open(struct vfio_device *vdev) clear_open_cnt: atomic_set(&user->open_cnt, 0); + clear_bit(NBL_USER, adapter->state); + return ret; } @@ -1176,6 +1314,7 @@ static void nbl_vfio_close(struct vfio_device *vdev) struct nbl_adapter *adapter; struct pci_dev *pdev; struct nbl_dev_mgt *dev_mgt; + struct nbl_dev_net *net_dev; struct nbl_channel_ops *chan_ops; struct nbl_service_ops *serv_ops; @@ -1183,6 +1322,7 @@ static void nbl_vfio_close(struct vfio_device *vdev) adapter = user->adapter; pdev = adapter->pdev; dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); chan_ops =
NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); @@ -1194,7 +1334,9 @@ static void nbl_vfio_close(struct vfio_device *vdev) nbl_user_change_kernel_network(user); serv_ops->config_fd_flow_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_CHAN_FDIR_RULE_ISOLATE, NBL_FD_STATE_FLUSH); + serv_ops->clear_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), user->user_vsi); atomic_set(&user->open_cnt, 0); + clear_bit(NBL_USER, adapter->state); user->user_promisc_mode = 0; dev_info(&pdev->dev, "nbl vfio close\n"); @@ -1209,7 +1351,7 @@ static int nbl_vfio_init(struct vfio_device *vdev) return 0; } -static struct vfio_device_ops nbl_vfio_dev_ops = { +static const struct vfio_device_ops nbl_vfio_dev_ops = { .name = "vfio-nbl", .open_device = nbl_vfio_open, .close_device = nbl_vfio_close, @@ -1342,6 +1484,7 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) mdev = &user->mdev; mdev->bus = &nbl_bus_type; drv = &nbl_userdev_driver; + device_initialize(mdev); mdev->parent = dev; mdev->release = nbl_mdev_device_release; @@ -1360,6 +1503,7 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) dev_info(dev, "MDEV: created\n"); ret = device_driver_attach(drv, mdev); + if (ret) { dev_err(dev, "driver attach failed %d\n", ret); device_del(mdev); @@ -1396,6 +1540,8 @@ void nbl_dev_start_user_dev(struct nbl_adapter *adapter) atomic_set(&user->open_cnt, 0); user->network_type = NBL_KERNEL_NETWORK; user->user_promisc_mode = 0; + user->user_mcast_mode = 0; + user->user_vsi = user_vsi->vsi_id; NBL_DEV_MGT_TO_USER_DEV(dev_mgt) = user; @@ -1460,7 +1606,6 @@ void nbl_dev_user_module_init(void) bus_unregister(&nbl_bus_type); return; } - nbl_userdev.cls = class_create("nbl_userdev"); if (IS_ERR(nbl_userdev.cls)) { pr_err("nbl_userdev class alloc failed\n"); @@ -1507,7 +1652,6 @@ void nbl_dev_user_module_destroy(void) nbl_userdev.cls = NULL; driver_unregister(&nbl_userdev_driver); bus_unregister(&nbl_bus_type); - nbl_userdev.success = 0; } } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h index 8aa5a764b09b..a22c795bbe4f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2022 nebula-matrix Limited.
* Author: Bennie Yan @@ -68,6 +68,8 @@ struct nbl_dev_user_dma_unmap { #define NBL_DEV_USER_SET_PROMISC_MODE _IO(NBL_DEV_USER_TYPE, 10) +#define NBL_DEV_USER_SET_MCAST_MODE _IO(NBL_DEV_USER_TYPE, 11) + void nbl_dev_start_user_dev(struct nbl_adapter *adapter); void nbl_dev_stop_user_dev(struct nbl_adapter *adapter); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c index b30da28d6056..47d408809288 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c @@ -133,6 +133,7 @@ static void nbl_disp_chan_add_multi_rule_resp(void *priv, u16 src_id, u16 msg_id struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; + u8 broadcast_mac[ETH_ALEN]; int err = NBL_CHAN_RESP_OK; int ret = 0; u16 vsi_id; @@ -141,8 +142,10 @@ static void nbl_disp_chan_add_multi_rule_resp(void *priv, u16 src_id, u16 msg_id chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); vsi_id = *(u16 *)data; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + memset(broadcast_mac, 0xFF, ETH_ALEN); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi_id); if (ret) err = NBL_CHAN_RESP_ERR; @@ -175,6 +178,7 @@ static void nbl_disp_chan_del_multi_rule_resp(void *priv, u16 src_id, u16 msg_id struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; + u8 broadcast_mac[ETH_ALEN]; int err = NBL_CHAN_RESP_OK; u16 vsi_id; @@ -182,11 +186,75 @@ static void nbl_disp_chan_del_multi_rule_resp(void *priv, u16 src_id, u16 msg_id chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); vsi_id = *(u16 *)data; + memset(broadcast_mac, 0xFF, ETH_ALEN); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MULTI_RULE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +static int nbl_disp_cfg_multi_mcast(void *priv, u16 vsi, u16 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MULTI_RULE, msg_id, err, NULL, 0); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + if (enable) + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + else + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return ret; +} + +static int nbl_disp_chan_cfg_multi_mcast_req(void *priv, u16 vsi_id, u16 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + struct nbl_chan_param_cfg_multi_mcast mcast; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + mcast.vsi = vsi_id; + mcast.enable = enable; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, + &mcast, sizeof(mcast), NULL, 0, 1); + return 
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_multi_mcast_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_multi_mcast *mcast; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + mcast = (struct nbl_chan_param_cfg_multi_mcast *)data; + + if (mcast->enable) + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mcast->vsi); + else + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_mcast, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mcast->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } @@ -285,25 +353,30 @@ static void nbl_disp_chan_register_net_resp(void *priv, u16 src_id, u16 msg_id, struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_register_net_info *param; + struct nbl_chan_param_register_net_info param; struct nbl_register_net_result result = {0}; struct nbl_register_net_param register_param = {0}; struct nbl_chan_ack_info chan_ack; + int copy_len; int err = NBL_CHAN_RESP_OK; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_register_net_info *)data; + memset(&param, 0, sizeof(struct nbl_chan_param_register_net_info)); + copy_len = data_len < sizeof(struct nbl_chan_param_register_net_info) ?
+ data_len : sizeof(struct nbl_chan_param_register_net_info); + memcpy(&param, data, copy_len); - register_param.pf_bar_start = param->pf_bar_start; - register_param.pf_bdf = param->pf_bdf; - register_param.vf_bar_start = param->vf_bar_start; - register_param.vf_bar_size = param->vf_bar_size; - register_param.total_vfs = param->total_vfs; - register_param.offset = param->offset; - register_param.stride = param->stride; + register_param.pf_bar_start = param.pf_bar_start; + register_param.pf_bdf = param.pf_bdf; + register_param.vf_bar_start = param.vf_bar_start; + register_param.vf_bar_size = param.vf_bar_size; + register_param.total_vfs = param.total_vfs; + register_param.offset = param.offset; + register_param.stride = param.stride; + register_param.is_vdpa = param.is_vdpa; NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, &register_param, &result); @@ -467,7 +540,6 @@ static int nbl_disp_chan_register_vsi2q_req(void *priv, u16 vsi_index, u16 vsi_i param.queue_num = queue_num; NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REGISTER_VSI2Q, &param, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } @@ -657,6 +729,48 @@ static void nbl_disp_chan_setup_queue_resp(void *priv, u16 src_id, u16 msg_id, chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static int nbl_disp_chan_remove_queue_req(void *priv, struct nbl_txrx_queue_param *queue_param, + bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(&param.queue_param, queue_param, sizeof(param.queue_param)); + param.is_tx = is_tx; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_QUEUE, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_setup_queue *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + &param->queue_param, param->is_tx); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_QUEUE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + static void nbl_disp_chan_remove_all_queues_req(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -731,7 +845,20 @@ static void nbl_disp_chan_cfg_dsch_resp(void *priv, u16 src_id, u16 msg_id, chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps) +static int nbl_disp_setup_cqs(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops =
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, real_qps, rss_indir_set); + return ret; +} + +static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; @@ -744,6 +871,7 @@ static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps) param.vsi_id = vsi_id; param.real_qps = real_qps; + param.rss_indir_set = rss_indir_set; NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_CQS, &param, sizeof(param), NULL, 0, 1); @@ -756,18 +884,23 @@ static void nbl_disp_chan_setup_cqs_resp(void *priv, u16 src_id, u16 msg_id, struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; - struct nbl_chan_param_setup_cqs *param; + struct nbl_chan_param_setup_cqs param; struct nbl_chan_ack_info chan_ack; + int copy_len; int err = NBL_CHAN_RESP_OK; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - param = (struct nbl_chan_param_setup_cqs *)data; + memset(&param, 0, sizeof(struct nbl_chan_param_setup_cqs)); + param.rss_indir_set = true; + copy_len = data_len < sizeof(struct nbl_chan_param_setup_cqs) ? + data_len : sizeof(struct nbl_chan_param_setup_cqs); + memcpy(&param, data, copy_len); err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->vsi_id, param->real_qps); + param.vsi_id, param.real_qps, param.rss_indir_set); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_CQS, msg_id, err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); @@ -1110,6 +1243,64 @@ static void nbl_disp_chan_get_queue_err_stats_resp(void *priv, u16 src_id, u16 m ret, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, src_id); } +static int nbl_disp_get_eth_abnormal_stats(void *priv, u8 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_abnormal_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eth_abnormal_stats); +} + +static int +nbl_disp_chan_get_eth_abnormal_stats_req(void *priv, u8 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, &eth_id, + sizeof(eth_id), eth_abnormal_stats, sizeof(*eth_abnormal_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_eth_abnormal_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_eth_abnormal_stats eth_abnormal_stats = { 0 }; + int err = NBL_CHAN_RESP_OK; + int ret; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops =
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_abnormal_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u8 *)data, + &eth_abnormal_stats); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth abnormal stats resp failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, msg_id, err, + &eth_abnormal_stats, sizeof(eth_abnormal_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, src_id); +} + static void nbl_disp_chan_get_coalesce_req(void *priv, u16 vector_id, struct nbl_chan_param_get_coalesce *ec) { @@ -1315,7 +1506,7 @@ static void nbl_disp_chan_get_rxfh_rss_key_resp(void *priv, u16 src_id, u16 msg_ kfree(rss_key); } -static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u8 *rss_alg_sel, u8 eth_id) +static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u16 vsi_id, u8 *rss_alg_sel) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops; @@ -1325,9 +1516,9 @@ static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u8 *rss_alg_sel, chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_CHAN_SEND(chan_send, common->mgt_pf, - NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, &eth_id, - sizeof(eth_id), rss_alg_sel, sizeof(u8), 1); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, &vsi_id, + sizeof(vsi_id), rss_alg_sel, sizeof(u8), 1); chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } @@ -1338,20 +1529,74 @@ static void nbl_disp_chan_get_rxfh_rss_alg_sel_resp(void *priv, u16 src_id, u16 struct nbl_resource_ops *res_ops; struct nbl_channel_ops *chan_ops; struct nbl_chan_ack_info chan_ack; - u8 rss_alg_sel, eth_id; + u16 vsi_id; + u8 rss_alg_sel; int ret = NBL_CHAN_RESP_OK; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - eth_id = *(u8 *)data; + vsi_id = *(u16 *)data; NBL_OPS_CALL(res_ops->get_rss_alg_sel, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &rss_alg_sel, eth_id)); + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, &rss_alg_sel)); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, msg_id, ret, &rss_alg_sel, sizeof(rss_alg_sel)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static int nbl_disp_set_rxfh_rss_alg_sel(void *priv, u16 vsi_id, u8 rss_alg_sel) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, rss_alg_sel)); + return ret; +} + +static int nbl_disp_chan_set_rxfh_rss_alg_sel_req(void *priv, u16 vsi_id, u8 rss_alg_sel) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_rss_alg_sel param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.rss_alg_sel = rss_alg_sel; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL, &param, + sizeof(param), NULL, 0, 1); + return
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_rxfh_rss_alg_sel_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_rss_alg_sel *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_rxfh_rss_alg_sel *)data; + + err = NBL_OPS_CALL(res_ops->set_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->rss_alg_sel)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + static void nbl_disp_chan_get_phy_caps_req(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -2272,7 +2517,7 @@ static void nbl_disp_unregister_xdp_rxq(void *priv, u8 ring_index) NBL_OPS_CALL(res_ops->unregister_xdp_rxq, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); } -static struct napi_struct *nbl_disp_get_vector_napi(void *priv, u16 index) +static struct nbl_napi_struct *nbl_disp_get_vector_napi(void *priv, u16 index) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; @@ -2418,27 +2663,25 @@ static void nbl_disp_remove_all_queues(void *priv, u16 vsi_id) NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static int nbl_disp_cfg_dsch(void *priv, u16 vsi_id, bool vld) +static int nbl_disp_remove_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; - int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vld); - return ret; + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, is_tx); } -static int nbl_disp_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) +static int nbl_disp_cfg_dsch(void *priv, u16 vsi_id, bool vld) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; int ret = 0; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, real_qps); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vld); return ret; } @@ -2497,14 +2740,14 @@ static int nbl_disp_add_multi_rule(void *priv, u16 vsi) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + u8 broadcast_mac[ETH_ALEN]; int ret = 0; - if (!disp_mgt) - return -EINVAL; - + memset(broadcast_mac, 0xFF, ETH_ALEN); res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi); + return ret; } @@ -2512,13 +2755,12 @@ static void nbl_disp_del_multi_rule(void *priv, u16 vsi) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; + u8 broadcast_mac[ETH_ALEN]; 
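/*
 * Editorial sketch (an assumption, not the driver's actual macro bodies):
 * the NBL_OPS_CALL()/NBL_OPS_CALL_LOCK() wrappers used throughout this file
 * follow a "guarded ops-table call-through" shape -- check that the op is
 * populated, optionally serialize on the dispatch lock, and forward to the
 * resource layer. A self-contained model with illustrative names:
 */
#include <pthread.h>

struct sketch_res_ops {
	int (*add_macvlan)(void *res_priv, const unsigned char *mac,
			   unsigned short vlan, unsigned short vsi);
};

struct sketch_disp_mgt {
	struct sketch_res_ops *res_ops;
	void *res_priv;
	pthread_mutex_t lock;	/* models the dispatch ops lock */
};

/* Models NBL_OPS_CALL_LOCK(): take the lock only around the forwarded call. */
static int sketch_call_add_macvlan_locked(struct sketch_disp_mgt *mgt,
					  const unsigned char *mac,
					  unsigned short vlan,
					  unsigned short vsi)
{
	int ret;

	if (!mgt->res_ops || !mgt->res_ops->add_macvlan)
		return 0;	/* unpopulated op: treated as a no-op */

	pthread_mutex_lock(&mgt->lock);
	ret = mgt->res_ops->add_macvlan(mgt->res_priv, mac, vlan, vsi);
	pthread_mutex_unlock(&mgt->lock);
	return ret;
}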
- if (!disp_mgt) - return; - + memset(broadcast_mac, 0xFF, ETH_ALEN); res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), broadcast_mac, 0, vsi); } static int nbl_disp_setup_multi_group(void *priv) @@ -2558,6 +2800,61 @@ static void nbl_disp_get_private_stat_len(void *priv, u32 *len) NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), len); } +static int nbl_disp_get_pause_stats(void *priv, u32 eth_id, + struct nbl_pause_stats *pause_stats, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pause_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pause_stats); +} + +static int nbl_disp_chan_get_pause_stats_req(void *priv, u32 eth_id, + struct nbl_pause_stats *pause_stats, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PAUSE_STATS, &eth_id, + sizeof(eth_id), pause_stats, data_len, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_pause_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_chan_ack_info chan_ack; + struct nbl_pause_stats pause_stats = {0}; + u32 *param = (u32 *)(data); + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL(res_ops->get_pause_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &pause_stats)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth pause stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PAUSE_STATS, msg_id, + err, &pause_stats, sizeof(struct nbl_pause_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_PAUSE_STATS, src_id); +} + static void nbl_disp_get_private_stat_data(void *priv, u32 eth_id, u64 *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -2565,7 +2862,7 @@ static void nbl_disp_get_private_stat_data(void *priv, u32 eth_id, u64 *data, u3 res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_data, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, data); + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, data, data_len); } static void nbl_disp_get_private_stat_data_req(void *priv, u32 eth_id, u64 *data, u32 data_len) @@ -2607,7 +2904,8 @@ static void nbl_disp_chan_get_private_stat_data_resp(void *priv, u16 src_id, u16 } NBL_OPS_CALL(res_ops->get_private_stat_data, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, recv_data)); + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, + recv_data, param->data_len));
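/*
 * Editorial sketch: several resp handlers above (register_net, setup_cqs)
 * no longer cast the channel payload in place; they zero a local param
 * struct, seed newly added fields with defaults, and copy only
 * min(data_len, sizeof(param)) bytes, so older senders with shorter
 * payloads keep working. A minimal model of that bounded-copy idiom
 * (struct and function names here are illustrative):
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct sketch_param_setup_cqs {
	unsigned short vsi_id;
	unsigned short real_qps;
	bool rss_indir_set;	/* new field, absent from old senders */
};

static void sketch_decode_setup_cqs(const void *data, size_t data_len,
				    struct sketch_param_setup_cqs *param)
{
	size_t copy_len;

	memset(param, 0, sizeof(*param));
	param->rss_indir_set = true;	/* default survives a short payload */

	/* Never read past the sender's payload or past our own struct. */
	copy_len = data_len < sizeof(*param) ? data_len : sizeof(*param);
	memcpy(param, data, copy_len);
}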
NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_STATS, msg_id, ret, recv_data, param->data_len); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); @@ -2615,27 +2913,196 @@ static void nbl_disp_chan_get_private_stat_data_resp(void *priv, u16 src_id, u16 kfree(recv_data); } -static void nbl_disp_fill_private_stat_strings(void *priv, u8 *strings) +static int nbl_disp_get_eth_ctrl_stats(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats, + u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->fill_private_stat_strings, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), strings); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_ctrl_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, + eth_ctrl_stats); } -static u16 nbl_disp_get_max_desc_num(void *priv) +static int nbl_disp_chan_get_eth_ctrl_stats_req(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats, + u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - u16 ret = 0; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; - ret = NBL_OPS_CALL(res_ops->get_max_desc_num, ()); - return ret; + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, &eth_id, + sizeof(eth_id), eth_ctrl_stats, data_len, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static u16 nbl_disp_get_min_desc_num(void *priv) +static void nbl_disp_chan_get_eth_ctrl_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_eth_ctrl_stats eth_ctrl_stats = {0}; + struct nbl_chan_ack_info chan_ack; + u32 *param = (u32 *)(data); + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL(res_ops->get_eth_ctrl_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &eth_ctrl_stats)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth ctrl stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, msg_id, + err, &eth_ctrl_stats, sizeof(struct nbl_eth_ctrl_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, src_id); +} + +static int nbl_disp_get_eth_mac_stats(void *priv, u32 eth_id, + struct nbl_eth_mac_stats *eth_mac_stats, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_eth_mac_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eth_mac_stats); +} + +static int nbl_disp_chan_get_eth_mac_stats_req(void *priv, u32 eth_id, + struct nbl_eth_mac_stats *eth_mac_stats, + u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops
*chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_MAC_STATS, &eth_id, + sizeof(eth_id), eth_mac_stats, data_len, 1); + + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_eth_mac_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_chan_ack_info chan_ack; + struct nbl_eth_mac_stats eth_mac_stats = {0}; + u32 *param = (u32 *)(data); + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL(res_ops->get_eth_mac_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &eth_mac_stats)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth mac stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_MAC_STATS, msg_id, + err, &eth_mac_stats, sizeof(struct nbl_eth_mac_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_MAC_STATS, src_id); +} + +static int nbl_disp_get_rmon_stats(void *priv, u32 eth_id, + struct nbl_rmon_stats *rmon_stats, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_rmon_stats, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, rmon_stats); +} + +static int nbl_disp_chan_get_rmon_stats_req(void *priv, u32 eth_id, + struct nbl_rmon_stats *rmon_stats, + u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_RMON_STATS, &eth_id, + sizeof(eth_id), rmon_stats, data_len, 1); + + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_rmon_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_chan_ack_info chan_ack; + struct nbl_rmon_stats rmon_stats = {0}; + u32 *param = (u32 *)(data); + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL(res_ops->get_rmon_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *param, &rmon_stats)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get rmon stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RMON_STATS, msg_id, + err, &rmon_stats, sizeof(struct nbl_rmon_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed
with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_RMON_STATS, src_id); +} + +static void nbl_disp_fill_private_stat_strings(void *priv, u8 *strings) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->fill_private_stat_strings, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), strings); +} + +static u16 nbl_disp_get_max_desc_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_max_desc_num, ()); + return ret; +} + +static u16 nbl_disp_get_min_desc_num(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); @@ -2845,6 +3312,67 @@ static void nbl_disp_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir, u32 indi NBL_OPS_CALL(res_ops->get_rxfh_indir, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, indir)); } +static int nbl_disp_set_rxfh_indir(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_rxfh_indir, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, indir, indir_size)); + return ret; +} + +static int nbl_disp_chan_set_rxfh_indir_req(void *priv, + u16 vsi_id, const u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_indir *param = NULL; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret = 0; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + return -ENOMEM; + + param->vsi_id = vsi_id; + param->indir_size = indir_size; + memcpy(param->indir, indir, indir_size * sizeof(param->indir[0])); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_SET_RXFH_INDIR, param, + sizeof(*param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + kfree(param); + return ret; +} + +static void nbl_disp_chan_set_rxfh_indir_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_rxfh_indir *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_rxfh_indir *)data; + + err = NBL_OPS_CALL(res_ops->set_rxfh_indir, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->indir, param->indir_size)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RXFH_INDIR, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + static void nbl_disp_get_rxfh_rss_key(void *priv, u8 *rss_key, u32 key_size) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -2854,14 +3382,14 @@ static void nbl_disp_get_rxfh_rss_key(void *priv, u8 *rss_key, u32 key_size) NBL_OPS_CALL(res_ops->get_rxfh_rss_key, 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rss_key)); } -static void nbl_disp_get_rxfh_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id) +static void nbl_disp_get_rxfh_rss_alg_sel(void *priv, u16 vsi_id, u8 *alg_sel) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops; res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); NBL_OPS_CALL(res_ops->get_rss_alg_sel, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), alg_sel, eth_id)); + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, alg_sel)); } static void nbl_disp_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) @@ -3727,54 +4255,6 @@ static void nbl_disp_del_lldp_flow(void *priv, u16 vsi_id) NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); } -static int nbl_disp_chan_cfg_lag_mcc_req(void *priv, u16 eth_id, u16 lag_id, bool enable) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_cfg_lag_mcc param = {0}; - struct nbl_chan_send_info chan_send; - - param.eth_id = eth_id; - param.lag_id = lag_id; - param.enable = enable; - - NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MCC, &param, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); -} - -static void nbl_disp_chan_cfg_lag_mcc_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_cfg_lag_mcc *param = NULL; - struct nbl_chan_ack_info chan_ack; - int err = NBL_CHAN_RESP_OK; - int ret = 0; - - param = (struct nbl_chan_param_cfg_lag_mcc *)data; - - err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_lag_mcc, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->eth_id, param->lag_id, param->enable); - - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MCC, msg_id, err, NULL, 0); - ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - if (ret) - dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", - ret, NBL_CHAN_MSG_CFG_LAG_MCC); -} - -static int nbl_disp_cfg_lag_mcc(void *priv, u16 eth_id, u16 lag_id, bool enable) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_lag_mcc, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - eth_id, lag_id, enable); -} - static int nbl_disp_cfg_duppkt_info(void *priv, struct nbl_lag_member_list_param *param) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -5532,6 +6012,29 @@ nbl_disp_chan_get_upcall_port_resp(void *priv, u16 src_id, u16 msg_id, chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static void nbl_disp_get_board_info(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_board_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), board_info)); +} + +static void +nbl_disp_chan_get_board_info_req(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops =
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_BOARD_INFO, NULL, + 0, board_info, sizeof(*board_info), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + static void nbl_disp_chan_get_board_info_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) @@ -5544,7 +6047,6 @@ nbl_disp_chan_get_board_info_resp(void *priv, u16 src_id, u16 msg_id, res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_board_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &board_info)); NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_INFO, @@ -5745,6 +6247,61 @@ static void nbl_disp_chan_recv_port_notify_resp(void *priv, u16 src_id, u16 msg_ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data)); } +static int nbl_disp_get_fec_stats(void *priv, u8 eth_id, + struct nbl_fec_stats *fec_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_fec_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, fec_stats)); + return ret; +} + +static int nbl_disp_chan_get_fec_stats_req(void *priv, u8 eth_id, + struct nbl_fec_stats *fec_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_FEC_STATS, &eth_id, sizeof(eth_id), + fec_stats, sizeof(*fec_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_fec_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_chan_ack_info chan_ack; + struct nbl_fec_stats info = {0}; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_fec_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u8 *)data, &info)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get eth fec stats failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FEC_STATS, msg_id, err, + &info, sizeof(info)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_FEC_STATS, src_id); +} + static int nbl_disp_get_port_state(void *priv, u8 eth_id, struct nbl_port_state *port_state) { @@ -6034,23 +6591,125 @@ static void nbl_disp_chan_get_link_state_resp(void *priv, u16 src_id, u16 msg_id chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_get_reg_dump(void *priv, u32 *data, u32 len) +static int nbl_disp_get_link_down_count(void *priv, u8 eth_id, u64 *link_down_count) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); -
NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, len)); + return NBL_OPS_CALL(res_ops->get_link_down_count, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, link_down_count)); } -static void nbl_disp_chan_get_reg_dump_req(void *priv, u32 *data, u32 len) +static int nbl_disp_chan_get_link_down_count_req(void *priv, u8 eth_id, u64 *link_down_count) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops; struct nbl_chan_send_info chan_send; struct nbl_common_info *common; - u32 *result = NULL; - result = kmalloc(len, GFP_KERNEL); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, &eth_id, + sizeof(eth_id), link_down_count, sizeof(*link_down_count), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_link_down_count_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + u64 link_down_count = 0; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = res_ops->get_link_down_count(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, &link_down_count); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, msg_id, err, + &link_down_count, sizeof(link_down_count)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_link_status_opcode(void *priv, u8 eth_id, u32 *link_status_opcode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_link_status_opcode, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, link_status_opcode)); +} + +static int nbl_disp_chan_get_link_status_opcode_req(void *priv, u8 eth_id, u32 *link_status_opcode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, &eth_id, + sizeof(eth_id), link_status_opcode, sizeof(*link_status_opcode), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_link_status_opcode_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + u32 link_status_opcode = 0; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = res_ops->get_link_status_opcode(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, &link_status_opcode); + if (ret) + err = NBL_CHAN_RESP_ERR; + +
NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, msg_id, err, + &link_status_opcode, sizeof(link_status_opcode)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_reg_dump(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, len)); +} + +static void nbl_disp_chan_get_reg_dump_req(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + u32 *result = NULL; + + result = kmalloc(len, GFP_KERNEL); if (!result) return; @@ -6064,6 +6723,57 @@ static void nbl_disp_chan_get_reg_dump_req(void *priv, u32 *data, u32 len) kfree(result); } +static int nbl_disp_set_wol(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_wol, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable)); +} + +static int nbl_disp_chan_set_wol_req(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_set_wol param = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_WOL, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_wol_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_wol *param; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_set_wol *)data; + ret = res_ops->set_wol(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->enable); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_WOL, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + static void nbl_disp_chan_get_reg_dump_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { @@ -6304,14 +7014,6 @@ nbl_disp_chan_deinit_flow_resp(void *priv, u16 src_id, u16 msg_id, chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_configure_virtio_dev_msix(void *priv, u16 vector) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - return res_ops->configure_virtio_dev_msix(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector); -} - static void nbl_disp_configure_rdma_msix_off(void *priv, u16 vector) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -6320,14 +7022,6 @@ static void
nbl_disp_configure_rdma_msix_off(void *priv, u16 vector) return res_ops->configure_rdma_msix_off(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector); } -static void nbl_disp_configure_virtio_dev_ready(void *priv) -{ - struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - - return res_ops->configure_virtio_dev_ready(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); -} - static int nbl_disp_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -6786,6 +7480,14 @@ static void nbl_disp_adapt_desc_gother(void *priv) NBL_OPS_CALL(res_ops->adapt_desc_gother, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); } +static void nbl_disp_set_desc_high_throughput(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_desc_high_throughput, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + static void nbl_disp_flr_clear_rdma(void *priv, u16 vf_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -6867,6 +7569,24 @@ static void nbl_disp_unmask_all_interrupts(void *priv) NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); } +static u32 nbl_disp_get_perf_dump_length(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_perf_dump_length, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static u32 nbl_disp_get_perf_dump_data(void *priv, u8 *buffer, u32 size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_perf_dump_data, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), buffer, size); +} + static void nbl_disp_keep_alive_req(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; @@ -7843,918 +8563,1872 @@ static void nbl_disp_register_func_mac(void *priv, u8 *mac, u16 func_id) (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, func_id)); } -static void nbl_disp_chan_register_func_mac_req(void *priv, u8 *mac, u16 func_id) +static bool nbl_disp_check_vf_is_active(void *priv, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + bool ret = false; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->check_vf_is_active, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); + return ret; +} + +static bool nbl_disp_chan_check_vf_is_active_req(void *priv, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_register_func_mac param; - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - param.func_id = func_id; - ether_addr_copy(param.mac, mac); + bool is_active = false; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_REGISTER_FUNC_MAC, &param, sizeof(param), NULL, 0, 1); + NBL_CHAN_CHECK_VF_IS_ACTIVE, &func_id, sizeof(func_id), + &is_active, sizeof(is_active), 1); chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return is_active; } -static void
nbl_disp_chan_register_func_mac_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_check_vf_is_active_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_register_func_mac *param; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; + u16 func_id; + bool is_active; + int err = NBL_CHAN_RESP_OK; + int ret = 0; - param = (struct nbl_chan_param_register_func_mac *)data; - NBL_OPS_CALL(res_ops->register_func_mac, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->func_id)); + func_id = *(u16 *)data; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_MAC, msg_id, ret, NULL, 0); - chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + is_active = NBL_OPS_CALL(res_ops->check_vf_is_active, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_CHECK_VF_IS_ACTIVE, msg_id, + err, &is_active, sizeof(is_active)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_CHECK_VF_IS_ACTIVE); } -static int nbl_disp_register_func_vlan(void *priv, u16 func_id, u16 vlan_tci, - u16 vlan_proto, bool *should_notify) +static int nbl_disp_check_vf_is_vdpa(void *priv, u16 func_id, u8 *is_vdpa) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_resource_ops *res_ops; + int ret = 0; - return NBL_OPS_CALL(res_ops->register_func_vlan, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, vlan_tci, - vlan_proto, should_notify)); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->check_vf_is_vdpa, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, is_vdpa)); + return ret; } -static int nbl_disp_chan_register_func_vlan_req(void *priv, u16 func_id, u16 vlan_tci, - u16 vlan_proto, bool *should_notify) +static int nbl_disp_chan_check_vf_is_vdpa_req(void *priv, u16 func_id, u8 *is_vdpa) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_register_vlan param; - bool result; - int ret; - - param.func_id = func_id; - param.vlan_tci = vlan_tci; - param.vlan_proto = vlan_proto; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_REGISTER_FUNC_VLAN, &param, sizeof(param), - &result, sizeof(result), 1); - ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - if (!ret) - *should_notify = result; - - return ret; + NBL_CHAN_CHECK_VF_IS_VDPA, &func_id, sizeof(func_id), + is_vdpa, sizeof(*is_vdpa), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_register_func_vlan_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_check_vf_is_vdpa_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops =
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_register_vlan *param; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; - bool notify = false; + u16 func_id; + int err = NBL_CHAN_RESP_OK; + u8 is_vdpa = 0; + int ret = 0; - param = (struct nbl_chan_param_register_vlan *)data; - ret = NBL_OPS_CALL(res_ops->register_func_vlan, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, - param->vlan_tci, param->vlan_proto, ¬ify)); + func_id = *(u16 *)data; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_VLAN, - msg_id, ret, ¬ify, sizeof(notify)); - chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + err = NBL_OPS_CALL(res_ops->check_vf_is_vdpa, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, &is_vdpa)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_CHECK_VF_IS_VDPA, msg_id, + err, &is_vdpa, sizeof(is_vdpa)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_CHECK_VF_IS_VDPA); } -static int nbl_disp_register_func_rate(void *priv, u16 func_id, int rate) +static int nbl_disp_get_vdpa_vf_stats(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_resource_ops *res_ops; + int ret = false; - return NBL_OPS_CALL(res_ops->register_func_rate, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, rate)); + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_vdpa_vf_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, vf_stats)); + return ret; } -static int nbl_disp_chan_register_func_rate_req(void *priv, u16 func_id, int tx_rate) +static int nbl_disp_chan_get_vdpa_vf_stats_req(void *priv, u16 func_id, + struct nbl_vf_stats *vf_stats) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_set_tx_rate param; - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - param.func_id = func_id; - param.tx_rate = tx_rate; - NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_REGISTER_FUNC_RATE, ¶m, sizeof(param), NULL, 0, 1); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_VDPA_VF_STATS, + &func_id, sizeof(func_id), vf_stats, sizeof(*vf_stats), 1); return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_register_func_rate_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_vdpa_vf_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_set_tx_rate *param; struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; + u16 func_id; + struct nbl_vf_stats vf_stats = {0}; + int err = NBL_CHAN_RESP_OK; - param = (struct nbl_chan_param_set_tx_rate *)data; - ret = NBL_OPS_CALL(res_ops->register_func_rate, - 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, param->tx_rate));
+	func_id = *(u16 *)data;
 
-	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_RATE, msg_id, ret, NULL, 0);
+	err = NBL_OPS_CALL(res_ops->get_vdpa_vf_stats,
+			   (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, &vf_stats));
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VDPA_VF_STATS, msg_id,
+		     err, &vf_stats, sizeof(vf_stats));
 	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
 }
 
-static int nbl_disp_set_tx_rate(void *priv, u16 func_id, int tx_rate)
+static int nbl_disp_get_uvn_pkt_drop_stats(void *priv, u16 vsi_id,
+					   u16 num_queues, u32 *uvn_stat_pkt_drop)
 {
 	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
 	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+	int ret = 0;
 
-	return NBL_OPS_CALL(res_ops->set_tx_rate,
-			    (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, tx_rate));
+	ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_uvn_pkt_drop_stats,
+				NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+				vsi_id, num_queues, uvn_stat_pkt_drop);
+	return ret;
 }
 
-static int nbl_disp_chan_set_tx_rate_req(void *priv, u16 func_id, int tx_rate)
+static int nbl_disp_chan_get_uvn_pkt_drop_stats_req(void *priv, u16 vsi_id, u16 num_queues,
+						    u32 *uvn_stat_pkt_drop)
 {
 	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
 	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
 	struct nbl_chan_send_info chan_send = {0};
-	struct nbl_chan_param_set_tx_rate param;
+	struct nbl_chan_param_get_uvn_pkt_drop_stats param = {0};
 	struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
 
-	param.func_id = func_id;
-	param.tx_rate = tx_rate;
+	param.vsi_id = vsi_id;
+	param.num_queues = num_queues;
 
-	NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common),
-		      NBL_CHAN_MSG_SET_TX_RATE, &param, sizeof(param), NULL, 0, 1);
+	NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_GET_UVN_PKT_DROP_STATS,
+		      &param, sizeof(param),
+		      uvn_stat_pkt_drop, num_queues * sizeof(*uvn_stat_pkt_drop), 1);
 	return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
 }
 
-static void nbl_disp_chan_set_tx_rate_resp(void *priv, u16 src_id, u16 msg_id,
-					   void *data, u32 data_len)
+static void nbl_disp_chan_get_uvn_pkt_drop_stats_resp(void *priv, u16 src_id, u16 msg_id,
+						      void *data, u32 data_len)
 {
 	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
 	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
 	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
-	struct nbl_chan_param_set_tx_rate *param;
+	struct nbl_chan_param_get_uvn_pkt_drop_stats *param = NULL;
 	struct nbl_chan_ack_info chan_ack;
-	int ret = NBL_CHAN_RESP_OK;
+	u32 *uvn_stat_pkt_drop = NULL;
+	int err = NBL_CHAN_RESP_OK;
 
-	param = (struct nbl_chan_param_set_tx_rate *)data;
-	ret = NBL_OPS_CALL(res_ops->set_tx_rate,
-			   (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, param->tx_rate));
+	param = (struct nbl_chan_param_get_uvn_pkt_drop_stats *)data;
+	uvn_stat_pkt_drop = kcalloc(param->num_queues, sizeof(*uvn_stat_pkt_drop), GFP_KERNEL);
+	if (!uvn_stat_pkt_drop) {
+		NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_UVN_PKT_DROP_STATS, msg_id,
+			     -ENOMEM, NULL, 0);
+		chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+		return;
+	}
 
-	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TX_RATE, msg_id, ret, NULL, 0);
+	err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_uvn_pkt_drop_stats,
+				NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+				param->vsi_id, param->num_queues, uvn_stat_pkt_drop);
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_UVN_PKT_DROP_STATS, msg_id,
+		     err, uvn_stat_pkt_drop, param->num_queues * 
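/*
 * Per-queue stats marshalling: the requester advertises a response buffer of
 * num_queues * sizeof(u32), and the responder mirrors that with a kcalloc'd
 * scratch array that the resource layer fills before it is acked back.
 * Illustrative caller sketch, assuming the op is reached through the
 * dispatch ops table (call site not taken from this patch):
 *
 *	u32 *drops = kcalloc(num_queues, sizeof(*drops), GFP_KERNEL);
 *
 *	if (drops && !get_uvn_pkt_drop_stats(priv, vsi_id, num_queues, drops))
 *		;	// num_queues per-queue drop counters are valid here
 *	kfree(drops);
 */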
sizeof(*uvn_stat_pkt_drop)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(uvn_stat_pkt_drop); } -static int nbl_disp_register_func_link_forced(void *priv, u16 func_id, u8 link_forced, - bool *should_notify) +static int nbl_disp_get_ustore_pkt_drop_stats(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; - return NBL_OPS_CALL(res_ops->register_func_link_forced, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, - link_forced, should_notify)); + ret = NBL_OPS_CALL(res_ops->get_ustore_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; } -static int nbl_disp_chan_register_func_link_forced_req(void *priv, u16 func_id, u8 link_forced, - bool *should_notify) +static int nbl_disp_chan_get_ustore_pkt_drop_stats_req(void *priv) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_register_func_link_forced param; - struct nbl_chan_param_register_func_link_forced result; - int ret = 0; - - param.func_id = func_id; - param.link_forced = link_forced; - - NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, ¶m, sizeof(param), - &result, sizeof(result), 1); - ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - if (ret) - return ret; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - *should_notify = result.should_notify; - return 0; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_GET_USTORE_PKT_DROP_STATS, + NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_register_func_link_forced_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_get_ustore_pkt_drop_stats_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_register_func_link_forced *param; - struct nbl_chan_param_register_func_link_forced result = {0}; struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; - - param = (struct nbl_chan_param_register_func_link_forced *)data; - ret = NBL_OPS_CALL(res_ops->register_func_link_forced, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->func_id, param->link_forced, &result.should_notify)); + int err = NBL_CHAN_RESP_OK; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, - msg_id, ret, &result, sizeof(result)); + err = NBL_OPS_CALL(res_ops->get_ustore_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_USTORE_PKT_DROP_STATS, msg_id, + err, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_link_forced(void *priv, u16 vsi_id) +static int nbl_disp_get_ustore_total_pkt_drop_stats(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; - return NBL_OPS_CALL(res_ops->get_link_forced, - 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + ret = NBL_OPS_CALL(res_ops->get_ustore_total_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, ustore_stats)); + + return ret; } -static int nbl_disp_chan_get_link_forced_req(void *priv, u16 vsi_id) +static int nbl_disp_chan_get_ustore_total_pkt_drop_stats_req(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - int link_forced = 0; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_GET_LINK_FORCED, &vsi_id, sizeof(vsi_id), - &link_forced, sizeof(link_forced), 1); - chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - - return link_forced; + NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, + ð_id, sizeof(eth_id), ustore_stats, sizeof(*ustore_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_link_forced_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_ustore_total_pkt_drop_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; - int ret = 0; + u8 eth_id; + struct nbl_ustore_stats ustore_stats = {0}; + int err = NBL_CHAN_RESP_OK; - ret = NBL_OPS_CALL(res_ops->get_link_forced, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data)); + eth_id = *(u8 *)data; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_FORCED, - msg_id, NBL_CHAN_RESP_OK, &ret, sizeof(ret)); + err = NBL_OPS_CALL(res_ops->get_ustore_total_pkt_drop_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &ustore_stats)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, msg_id, + err, &ustore_stats, sizeof(ustore_stats)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_get_driver_version(void *priv, char *ver, int len) +static void nbl_disp_chan_register_func_mac_req(void *priv, u8 *mac, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_register_func_mac param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - NBL_OPS_CALL(res_ops->get_driver_version, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ver, len)); + param.func_id = func_id; + ether_addr_copy(param.mac, mac); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_MAC, ¶m, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_setup_rdma_id(void *priv) +static void nbl_disp_chan_register_func_mac_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_func_mac *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - NBL_OPS_CALL(res_ops->setup_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + param = (struct nbl_chan_param_register_func_mac *)data; + NBL_OPS_CALL(res_ops->register_func_mac, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->func_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_MAC, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_remove_rdma_id(void *priv) +static int nbl_disp_register_func_trust(void *priv, u16 func_id, + bool trusted, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->remove_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return NBL_OPS_CALL(res_ops->register_func_trust, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, + trusted, should_notify)); } -static int nbl_disp_chan_get_fd_flow_req(void *priv, u16 vsi_id, u32 location, - enum nbl_chan_fdir_rule_type rule_type, - struct nbl_chan_param_fdir_replace *cmd) +static int nbl_disp_chan_register_func_trust_req(void *priv, u16 func_id, + bool trusted, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_get_fd_flow param = {0}; - int ret = 0; + struct nbl_chan_param_register_trust param; + bool result; + int ret; - param.vsi_id = vsi_id; - param.location = location; - param.rule_type = rule_type; + param.func_id = func_id; + param.trusted = trusted; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_GET_FD_FLOW, ¶m, - sizeof(param), cmd, NBL_CHAN_FDIR_FLOW_RULE_SIZE, 1); + NBL_CHAN_MSG_REGISTER_FUNC_TRUST, ¶m, sizeof(param), + &result, sizeof(result), 1); ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - if (ret) - return ret; + if (!ret) + *should_notify = result; - return 0; + return ret; } -static void nbl_disp_chan_get_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_func_trust_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_fd_flow *param = NULL; - struct nbl_chan_param_fdir_replace *result; + struct nbl_chan_param_register_trust *param; struct nbl_chan_ack_info chan_ack; - int ret = 0; + int ret = NBL_CHAN_RESP_OK; + bool notify = false; - result = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); - if (!result) { - ret = -ENOMEM; - goto send_ack; - } - param = (struct nbl_chan_param_get_fd_flow *)data; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->location, - param->rule_type, result); -send_ack: - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW, msg_id, - ret, result, sizeof(*result) + result->tlv_length); + param = (struct nbl_chan_param_register_trust *)data; + ret = 
NBL_OPS_CALL(res_ops->register_func_trust, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->trusted, ¬ify)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_TRUST, + msg_id, ret, ¬ify, sizeof(notify)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); - kfree(result); } -static int nbl_disp_get_fd_flow(void *priv, u16 vsi_id, u32 location, - enum nbl_chan_fdir_rule_type rule_type, - struct nbl_chan_param_fdir_replace *cmd) +static int nbl_disp_register_func_vlan(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, location, - rule_type, cmd); + return NBL_OPS_CALL(res_ops->register_func_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, vlan_tci, + vlan_proto, should_notify)); } -static int nbl_disp_chan_get_fd_flow_cnt_req(void *priv, enum nbl_chan_fdir_rule_type rule_type, - u16 vsi_id) +static int nbl_disp_chan_register_func_vlan_req(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_fdir_flowcnt param; - int result = 0, ret = 0; + struct nbl_chan_param_register_vlan param; + bool result; + int ret; + + param.func_id = func_id; + param.vlan_tci = vlan_tci; + param.vlan_proto = vlan_proto; - param.rule_type = rule_type; - param.vsi = vsi_id; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_GET_FD_FLOW_CNT, ¶m, - sizeof(param), &result, sizeof(result), 1); + NBL_CHAN_MSG_REGISTER_FUNC_VLAN, ¶m, sizeof(param), + &result, sizeof(result), 1); ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - if (ret) - return ret; + if (!ret) + *should_notify = result; - return result; + return ret; } -static void nbl_disp_chan_get_fd_flow_cnt_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_func_vlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_vlan *param; struct nbl_chan_ack_info chan_ack; - struct nbl_chan_param_fdir_flowcnt *param; - int result = 0, err = NBL_CHAN_RESP_OK; + int ret = NBL_CHAN_RESP_OK; + bool notify = false; - param = (struct nbl_chan_param_fdir_flowcnt *)data; - result = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->rule_type, param->vsi); - if (result < 0) { - err = result; - result = 0; - } + param = (struct nbl_chan_param_register_vlan *)data; + ret = NBL_OPS_CALL(res_ops->register_func_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->vlan_tci, param->vlan_proto, ¬ify)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_CNT, msg_id, - err, &result, sizeof(result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_VLAN, + msg_id, ret, 
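/*
 * The register_func_trust/vlan/link_forced family follows a notify-on-change
 * contract: the resource layer records the requested attribute and sets the
 * output flag when an already-running VF must be told about the change, so
 * the caller decides whether to push a follow-up event. Illustrative usage,
 * with nbl_notify_vf() as a hypothetical follow-up (not from this patch):
 *
 *	bool notify = false;
 *
 *	if (!register_func_vlan(priv, func_id, vlan_tci, proto, &notify) &&
 *	    notify)
 *		nbl_notify_vf(func_id);
 */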
¬ify, sizeof(notify)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_fd_flow_cnt(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id) +static int nbl_disp_register_func_rate(void *priv, u16 func_id, int rate) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id); + return NBL_OPS_CALL(res_ops->register_func_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, rate)); } -static int nbl_disp_chan_get_fd_flow_all_req(void *priv, - struct nbl_chan_param_get_fd_flow_all *param, - u32 *rule_locs) +static int nbl_disp_chan_register_func_rate_req(void *priv, u16 func_id, int tx_rate) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_result_get_fd_flow_all *result = NULL; - int ret = 0; + struct nbl_chan_param_set_tx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.func_id = func_id; + param.tx_rate = tx_rate; - result = (struct nbl_chan_result_get_fd_flow_all *)rule_locs; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_GET_FD_FLOW_ALL, param, - sizeof(*param), result, sizeof(*result), 1); - ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - if (ret) - goto send_fail; -send_fail: - return ret; + NBL_CHAN_MSG_REGISTER_FUNC_RATE, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_fd_flow_all_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_func_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_fd_flow_all *param = NULL; - struct nbl_chan_result_get_fd_flow_all *result = NULL; + struct nbl_chan_param_set_tx_rate *param; struct nbl_chan_ack_info chan_ack; - int ret = 0; + int ret = NBL_CHAN_RESP_OK; - result = kzalloc(sizeof(*result), GFP_KERNEL); - if (!result) { - ret = -ENOMEM; - goto send_ack; - } + param = (struct nbl_chan_param_set_tx_rate *)data; + ret = NBL_OPS_CALL(res_ops->register_func_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, param->tx_rate)); - param = (struct nbl_chan_param_get_fd_flow_all *)data; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result->rule_locs); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_RATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} -send_ack: - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_ALL, msg_id, - ret, result, sizeof(*result)); +static int nbl_disp_set_tx_rate(void *priv, u16 func_id, int tx_rate, int burst) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_tx_rate, + 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, tx_rate, burst)); +} + +static int nbl_disp_chan_set_tx_rate_req(void *priv, u16 func_id, int tx_rate, int burst) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_txrx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.func_id = func_id; + param.txrx_rate = tx_rate; + param.burst = burst; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_TX_RATE, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_tx_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_txrx_rate *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_txrx_rate *)data; + ret = NBL_OPS_CALL(res_ops->set_tx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->txrx_rate, param->burst)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TX_RATE, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} - kfree(result); +static int nbl_disp_set_rx_rate(void *priv, u16 func_id, int rx_rate, int burst) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_rx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, rx_rate, burst)); } -static int nbl_disp_get_fd_flow_all(void *priv, struct nbl_chan_param_get_fd_flow_all *param, - u32 *rule_locs) +static int nbl_disp_chan_set_rx_rate_req(void *priv, u16 func_id, int rx_rate, int burst) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_txrx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.func_id = func_id; + param.txrx_rate = rx_rate; + param.burst = burst; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_RX_RATE, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_rx_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_txrx_rate *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, rule_locs); + param = (struct nbl_chan_param_set_txrx_rate *)data; + ret = NBL_OPS_CALL(res_ops->set_rx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->txrx_rate, param->burst)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RX_RATE, msg_id, ret, NULL, 0); + 
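/*
 * Tx and Rx rate limiting share one wire format: both directions marshal
 * func_id, the rate, and the permitted burst through the same param struct
 * and differ only in message type (NBL_CHAN_MSG_SET_TX_RATE vs
 * NBL_CHAN_MSG_SET_RX_RATE), which keeps the two resp handlers symmetric.
 * Layout inferred from the fields used above (field order assumed):
 *
 *	struct nbl_chan_param_set_txrx_rate {
 *		u16 func_id;
 *		int txrx_rate;
 *		int burst;
 *	};
 */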
chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_chan_get_fd_flow_max_req(void *priv) +static int nbl_disp_register_func_link_forced(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->register_func_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, + link_forced, should_notify)); +} + +static int nbl_disp_chan_register_func_link_forced_req(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - int ret = 0, result = 0; + struct nbl_chan_param_register_func_link_forced param; + struct nbl_chan_param_register_func_link_forced result; + int ret = 0; + + param.func_id = func_id; + param.link_forced = link_forced; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_GET_FD_FLOW_MAX, NULL, 0, &result, sizeof(result), 1); + NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, ¶m, sizeof(param), + &result, sizeof(result), 1); ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); if (ret) return ret; - return result; + *should_notify = result.should_notify; + return 0; } -static void nbl_disp_chan_get_fd_flow_max_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_func_link_forced_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_func_link_forced *param; + struct nbl_chan_param_register_func_link_forced result = {0}; struct nbl_chan_ack_info chan_ack; - int result = 0, err = NBL_CHAN_RESP_OK; + int ret = NBL_CHAN_RESP_OK; - result = NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); - if (result < 0) { - err = result; - result = 0; - } + param = (struct nbl_chan_param_register_func_link_forced *)data; + ret = NBL_OPS_CALL(res_ops->register_func_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->func_id, param->link_forced, &result.should_notify)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_MAX, msg_id, - err, &result, sizeof(result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, + msg_id, ret, &result, sizeof(result)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_fd_flow_max(void *priv) +static int nbl_disp_get_link_forced(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return NBL_OPS_CALL(res_ops->get_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); } -static int nbl_disp_chan_replace_fd_flow_req(void *priv, struct nbl_chan_param_fdir_replace *info) +static int nbl_disp_chan_get_link_forced_req(void *priv, u16 vsi_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); struct nbl_chan_send_info chan_send = {0}; + int link_forced = 0; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_REPLACE_FD_FLOW, info, - sizeof(struct nbl_chan_param_fdir_replace) + info->tlv_length, NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_MSG_GET_LINK_FORCED, &vsi_id, sizeof(vsi_id), + &link_forced, sizeof(link_forced), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return link_forced; } -static void nbl_disp_chan_replace_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, +static void nbl_disp_chan_get_link_forced_resp(void *priv, u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_fdir_replace *param = NULL; struct nbl_chan_ack_info chan_ack; int ret = 0; - param = (struct nbl_chan_param_fdir_replace *)data; - - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + ret = NBL_OPS_CALL(res_ops->get_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REPLACE_FD_FLOW, msg_id, ret, NULL, 0); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_FORCED, + msg_id, NBL_CHAN_RESP_OK, &ret, sizeof(ret)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_replace_fd_flow(void *priv, struct nbl_chan_param_fdir_replace *info) +static void nbl_disp_get_driver_version(void *priv, char *ver, int len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), info); + NBL_OPS_CALL(res_ops->get_driver_version, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ver, len)); } -static int nbl_disp_chan_remove_fd_flow_req(void *priv, enum nbl_chan_fdir_rule_type rule_type, - u32 loc, u16 vsi_id) +static void nbl_disp_setup_rdma_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->setup_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_remove_rdma_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->remove_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_max_mtu(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_max_mtu, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_set_mtu(void *priv, u16 vsi_id, u16 mtu) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_mtu, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mtu)); + return ret; +} + +static int nbl_disp_chan_set_mtu_req(void 
*priv, u16 vsi_id, u16 mtu) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - struct nbl_chan_param_fdir_del param = {0}; struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_mtu param = {0}; - param.rule_type = rule_type; - param.location = loc; - param.vsi = vsi_id; + param.mtu = mtu; + param.vsi_id = vsi_id; - NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_REMOVE_FD_FLOW, ¶m, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_MTU_SET, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), + &chan_send); } -static void nbl_disp_chan_remove_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_set_mtu_resp(void *priv, + u16 src_id, u16 msg_id, void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_fdir_del *param = NULL; struct nbl_chan_ack_info chan_ack; - int ret = 0; + struct nbl_chan_param_set_mtu *param = NULL; + int err = NBL_CHAN_RESP_OK; - param = (struct nbl_chan_param_fdir_del *)data; + param = (struct nbl_chan_param_set_mtu *)data; + err = NBL_OPS_CALL(res_ops->set_mtu, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->mtu)); - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, - param->location, param->vsi); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MTU_SET, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_FD_FLOW, msg_id, ret, NULL, 0); +static int nbl_disp_chan_get_fd_flow_req(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_fd_flow param = {0}; + int ret = 0; + + param.vsi_id = vsi_id; + param.location = location; + param.rule_type = rule_type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW, ¶m, + sizeof(param), cmd, NBL_CHAN_FDIR_FLOW_RULE_SIZE, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; + + return 0; +} + +static void nbl_disp_chan_get_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_fd_flow *param = NULL; + struct nbl_chan_param_fdir_replace *result; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + result = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto send_ack; + } + param = (struct 
nbl_chan_param_get_fd_flow *)data;
+	ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow,
+				NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->location,
+				param->rule_type, result);
+send_ack:
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW, msg_id,
+		     ret, result, result ? sizeof(*result) + result->tlv_length : 0);
+	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+	kfree(result);
+}
+
+static int nbl_disp_get_fd_flow(void *priv, u16 vsi_id, u32 location,
+				enum nbl_chan_fdir_rule_type rule_type,
+				struct nbl_chan_param_fdir_replace *cmd)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+	return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow,
+				 NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, location,
+				 rule_type, cmd);
+}
+
+static int nbl_disp_chan_get_fd_flow_cnt_req(void *priv, enum nbl_chan_fdir_rule_type rule_type,
+					     u16 vsi_id)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+	struct nbl_chan_send_info chan_send = {0};
+	struct nbl_chan_param_fdir_flowcnt param;
+	int result = 0, ret = 0;
+
+	param.rule_type = rule_type;
+	param.vsi = vsi_id;
+	NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common),
+		      NBL_CHAN_MSG_GET_FD_FLOW_CNT, &param,
+		      sizeof(param), &result, sizeof(result), 1);
+	ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+	if (ret)
+		return ret;
+
+	return result;
+}
+
+static void nbl_disp_chan_get_fd_flow_cnt_resp(void *priv, u16 src_id, u16 msg_id,
+					       void *data, u32 data_len)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	struct nbl_chan_ack_info chan_ack;
+	struct nbl_chan_param_fdir_flowcnt *param;
+	int result = 0, err = NBL_CHAN_RESP_OK;
+
+	param = (struct nbl_chan_param_fdir_flowcnt *)data;
+	result = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt,
+				   NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+				   param->rule_type, param->vsi);
+	if (result < 0) {
+		err = result;
+		result = 0;
+	}
+
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_CNT, msg_id,
+		     err, &result, sizeof(result));
+	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+}
+
+static int nbl_disp_get_fd_flow_cnt(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+	return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt,
+				 NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id);
+}
+
+static int nbl_disp_chan_get_fd_flow_all_req(void *priv,
+					     struct nbl_chan_param_get_fd_flow_all *param,
+					     u32 *rule_locs)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+	struct nbl_chan_send_info chan_send = {0};
+	struct nbl_chan_result_get_fd_flow_all *result = NULL;
+
+	result = (struct nbl_chan_result_get_fd_flow_all *)rule_locs;
+	NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common),
+		      NBL_CHAN_MSG_GET_FD_FLOW_ALL, param,
+		      sizeof(*param), result, sizeof(*result), 1);
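/*
 * Flow-director rules are variable length: a rule travels as a fixed
 * nbl_chan_param_fdir_replace header followed by tlv_length bytes of
 * TLV-encoded match/action data. Senders therefore size the message as
 * sizeof(param) + tlv_length, while responders that return a rule allocate
 * the worst-case NBL_CHAN_FDIR_FLOW_RULE_SIZE buffer and ack
 * sizeof(*result) + result->tlv_length once the TLVs are filled in.
 */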
+	return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_chan_get_fd_flow_all_resp(void *priv, u16 src_id, u16 msg_id,
+					       void *data, u32 data_len)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	struct nbl_chan_param_get_fd_flow_all *param = NULL;
+	struct nbl_chan_result_get_fd_flow_all *result = NULL;
+	struct nbl_chan_ack_info chan_ack;
+	int ret = 0;
+
+	result = kzalloc(sizeof(*result), GFP_KERNEL);
+	if (!result) {
+		ret = -ENOMEM;
+		goto send_ack;
+	}
+
+	param = (struct nbl_chan_param_get_fd_flow_all *)data;
+	ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all,
+				NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result->rule_locs);
+
+send_ack:
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_ALL, msg_id,
+		     ret, result, result ? sizeof(*result) : 0);
+	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+
+	kfree(result);
+}
+
+static int nbl_disp_get_fd_flow_all(void *priv, struct nbl_chan_param_get_fd_flow_all *param,
+				    u32 *rule_locs)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+	return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all,
+				 NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, rule_locs);
+}
+
+static int nbl_disp_chan_get_fd_flow_max_req(void *priv)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+	struct nbl_chan_send_info chan_send = {0};
+	int ret = 0, result = 0;
+
+	NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common),
+		      NBL_CHAN_MSG_GET_FD_FLOW_MAX, NULL, 0, &result, sizeof(result), 1);
+	ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+	if (ret)
+		return ret;
+
+	return result;
+}
+
+static void nbl_disp_chan_get_fd_flow_max_resp(void *priv, u16 src_id, u16 msg_id,
+					       void *data, u32 data_len)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	struct nbl_chan_ack_info chan_ack;
+	int result = 0, err = NBL_CHAN_RESP_OK;
+
+	result = NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)));
+	if (result < 0) {
+		err = result;
+		result = 0;
+	}
+
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_MAX, msg_id,
+		     err, &result, sizeof(result));
+	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+}
+
+static int nbl_disp_get_fd_flow_max(void *priv)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+	return NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)));
+}
+
+static int nbl_disp_chan_replace_fd_flow_req(void *priv, struct nbl_chan_param_fdir_replace *info)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+	struct nbl_chan_send_info chan_send = {0};
+
+	NBL_CHAN_SEND(chan_send, 
NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REPLACE_FD_FLOW, info, + sizeof(struct nbl_chan_param_fdir_replace) + info->tlv_length, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_replace_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_fdir_replace *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + param = (struct nbl_chan_param_fdir_replace *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REPLACE_FD_FLOW, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_replace_fd_flow(void *priv, struct nbl_chan_param_fdir_replace *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), info); +} + +static int nbl_disp_chan_remove_fd_flow_req(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_fdir_del param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.rule_type = rule_type; + param.location = loc; + param.vsi = vsi_id; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REMOVE_FD_FLOW, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_fdir_del *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + param = (struct nbl_chan_param_fdir_del *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, + param->location, param->vsi); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_FD_FLOW, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_remove_fd_flow(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, loc, vsi_id); +} + +static int nbl_disp_chan_config_fd_flow_state_req(void *priv, + enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = 
NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_config_fd_flow_state param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.rule_type = rule_type; + param.vsi_id = vsi_id; + param.state = state; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CFG_FD_FLOW_STATE, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_config_fd_flow_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_config_fd_flow_state *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + param = (struct nbl_chan_param_config_fd_flow_state *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, + param->vsi_id, param->state); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_FD_FLOW_STATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id, state); +} + +static void nbl_disp_cfg_fd_update_event(void *priv, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_fd_update_event, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable); +} + +static void nbl_disp_cfg_mirror_outputport_event(void *priv, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_mirror_outputport_event, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable); +} + +static void nbl_disp_dump_fd_flow(void *priv, struct seq_file *m) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); +} + +static void nbl_disp_chan_get_xdp_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, + u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, + &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); + + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = result.queue_num; + *queue_size = result.queue_size; + } +} + +static void nbl_disp_chan_get_xdp_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL(res_ops->get_xdp_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, + &result.queue_size, *(u16 *)data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_xdp_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); +} + +static void nbl_disp_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_hw_status, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), hw_status); +} + +static void nbl_disp_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_active_func_bitmaps, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), bitmap, max_func); +} + +static int nbl_disp_set_tc_wgt(void *priv, u16 vsi_id, u8 *weight, u8 num_tc) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_wgt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, weight, num_tc); +} + +static int nbl_disp_chan_set_tc_wgt_req(void *priv, u16 vsi_id, u8 *weight, u8 num_tc) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_tc_wgt param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.num_tc = num_tc; + memcpy(param.weight, weight, num_tc); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_TC_WGT, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_tc_wgt_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_tc_wgt *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_tc_wgt *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_wgt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->weight, param->num_tc); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TC_WGT, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int 
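/*
 * set_tc_wgt copies num_tc one-byte scheduler weights into the fixed
 * param.weight array, so callers must keep num_tc within that array's
 * bound. Illustrative call, assuming eight traffic classes with equal
 * weight (values are examples only):
 *
 *	u8 wgt[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };
 *
 *	set_tc_wgt(priv, vsi_id, wgt, ARRAY_SIZE(wgt));
 */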
nbl_disp_configure_rdma_bw(void *priv, u8 eth_id, int rdma_bw) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_rdma_bw, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, rdma_bw); +} + +static int nbl_disp_chan_configure_rdma_bw_req(void *priv, u8 eth_id, int rdma_bw) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_configure_rdma_bw param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.rdma_bw = rdma_bw; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CONFIGURE_RDMA_BW, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_configure_rdma_bw_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_configure_rdma_bw *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_configure_rdma_bw *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_rdma_bw, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->rdma_bw); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_RDMA_BW, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc, + trust, dscp2prio_map); + if (ret) + return ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc); + + return ret; +} + +static int nbl_disp_chan_configure_qos_req(void *priv, u8 eth_id, u8 *pfc, + u8 trust, u8 *dscp2prio_map) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_configure_qos param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + memcpy(param.pfc, pfc, NBL_MAX_PFC_PRIORITIES); + memcpy(param.dscp2prio_map, dscp2prio_map, NBL_DSCP_MAX); + param.trust = trust; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CONFIGURE_QOS, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_configure_qos_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_configure_qos *param; + struct nbl_chan_ack_info chan_ack; + int ret = 
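/*
 * QoS programming is a two-step sequence on both the local and the resp
 * path: configure_qos installs the trust mode and its tables first (pfc[]
 * with NBL_MAX_PFC_PRIORITIES entries, dscp2prio_map[] with NBL_DSCP_MAX
 * entries), and only on success is set_eth_pfc called to enable
 * per-priority flow control, so a half-applied configuration is never
 * acked as OK.
 */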
NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_configure_qos *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->pfc, param->trust, param->dscp2prio_map); + if (ret) + goto send_ack; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->pfc); + +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_QOS, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); + + return ret; +} + +static int nbl_disp_chan_set_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_pfc_buffer_size param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.prio = prio; + param.xoff = xoff; + param.xon = xon; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_pfc_buffer_size *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_pfc_buffer_size *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->prio, param->xoff, param->xon); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); + + return ret; +} + +static int +nbl_disp_chan_get_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_pfc_buffer_size param = {0}; + struct nbl_chan_param_get_pfc_buffer_size_resp resp = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; + + param.eth_id = eth_id; + param.prio = prio; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + 
NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, &param, sizeof(param), + &resp, sizeof(resp), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *xoff = resp.xoff; + *xon = resp.xon; + + return ret; +} + +static void nbl_disp_chan_get_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_pfc_buffer_size *param; + struct nbl_chan_param_get_pfc_buffer_size_resp resp; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_get_pfc_buffer_size *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->prio, &resp.xoff, &resp.xon); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, msg_id, ret, + &resp, sizeof(resp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_rate_limit(void *priv, enum nbl_traffic_type type, u32 rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_rate_limit, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, type, rate); + + return ret; +} + +static int +nbl_disp_chan_set_rate_limit_req(void *priv, enum nbl_traffic_type type, u32 rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_rate_limit param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.type = type; + param.rate = rate; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_RATE_LIMIT, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_rate_limit_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_rate_limit *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_rate_limit *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_rate_limit, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + src_id, param->type, param->rate); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_RATE_LIMIT, msg_id, ret, + NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_remove_fd_flow(void *priv, enum nbl_chan_fdir_rule_type rule_type, - u32 loc, u16 vsi_id) +static void nbl_disp_register_dev_name(void *priv, u16 vsi_id, char *name) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, loc, vsi_id); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, name); } -static int 
nbl_disp_chan_config_fd_flow_state_req(void *priv, - enum nbl_chan_fdir_rule_type rule_type, - u16 vsi_id, u16 state) +static void +nbl_disp_chan_register_dev_name_req(void *priv, u16 vsi_id, char *name) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - struct nbl_chan_param_config_fd_flow_state param = {0}; struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_pf_name param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - param.rule_type = rule_type; param.vsi_id = vsi_id; - param.state = state; + strscpy(param.dev_name, name, IFNAMSIZ); NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_CFG_FD_FLOW_STATE, &param, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_MSG_REGISTER_PF_NAME, &param, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_config_fd_flow_state_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_register_dev_name_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_config_fd_flow_state *param = NULL; + struct nbl_chan_param_pf_name *param; struct nbl_chan_ack_info chan_ack; - int ret = 0; - - param = (struct nbl_chan_param_config_fd_flow_state *)data; - - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, - param->vsi_id, param->state); + int ret = NBL_CHAN_RESP_OK; - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_FD_FLOW_STATE, msg_id, ret, NULL, 0); + param = (struct nbl_chan_param_pf_name *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->dev_name); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_PF_NAME, msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type rule_type, - u16 vsi_id, u16 state) +static void nbl_disp_get_dev_name(void *priv, u16 vsi_id, char *name) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id, state); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, name); } -static void nbl_disp_cfg_fd_update_event(void *priv, bool enable) +static void +nbl_disp_chan_get_dev_name_req(void *priv, u16 vsi_id, char *name) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_pf_name param = {0}; + struct nbl_chan_param_pf_name resp = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_PF_NAME, &param, sizeof(param), 
&resp, sizeof(resp), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + strscpy(name, resp.dev_name, IFNAMSIZ); +} + +static void nbl_disp_chan_get_dev_name_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_pf_name *param; + struct nbl_chan_param_pf_name resp = {0}; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_fd_update_event, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable); + param = (struct nbl_chan_param_pf_name *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_dev_name, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, resp.dev_name); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PF_NAME, msg_id, ret, &resp, sizeof(resp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_dump_fd_flow(void *priv, struct seq_file *m) +static int nbl_disp_get_mirror_table_id(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_fd_flow, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_mirror_table_id, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, dir, mirror_en, mt_id); } -static void nbl_disp_chan_get_xdp_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, - u16 vsi_id) +static int nbl_disp_chan_get_mirror_table_id_req(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_get_queue_info result = {0}; + struct nbl_chan_param_get_mirror_table_id param = {0}; + struct nbl_chan_param_get_mirror_table_id resp = {0}; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; - NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, - &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); + param.vsi_id = vsi_id; + param.dir = dir; + param.mirror_en = mirror_en; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, &param, sizeof(param), + &resp, sizeof(resp), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { - *queue_num = result.queue_num; - *queue_size = result.queue_size; - } + *mt_id = resp.mt_id; + + return ret; } -static void nbl_disp_chan_get_xdp_queue_info_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_get_mirror_table_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_mirror_table_id *param; + struct nbl_chan_param_get_mirror_table_id resp = {0}; struct nbl_chan_ack_info chan_ack; - struct nbl_chan_param_get_queue_info result = {0}; int ret = NBL_CHAN_RESP_OK; 
- NBL_OPS_CALL(res_ops->get_xdp_queue_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, - &result.queue_size, *(u16 *)data)); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, msg_id, - ret, &result, sizeof(result)); + param = (struct nbl_chan_param_get_mirror_table_id *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_mirror_table_id, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->dir, param->mirror_en, &resp.mt_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, msg_id, ret, + &resp, sizeof(resp)); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static void nbl_disp_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +static int nbl_disp_configure_mirror(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - NBL_OPS_CALL(res_ops->get_xdp_queue_info, - (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, + mirror_en, dir, mt_id); } -static void nbl_disp_set_hw_status(void *priv, enum nbl_hw_status hw_status) +static int nbl_disp_chan_configure_mirror_req(void *priv, u16 func_id, bool mirror_en, + int dir, u8 mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_mirror param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_hw_status, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), hw_status); + param.mirror_en = mirror_en; + param.dir = dir; + param.mt_id = mt_id; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CONFIGURE_MIRROR, &param, sizeof(param), + NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return ret; } -static void nbl_disp_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +static void nbl_disp_chan_configure_mirror_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_mirror *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; - NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_active_func_bitmaps, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), bitmap, max_func); + param = (struct nbl_chan_param_mirror *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + src_id, param->mirror_en, param->dir, param->mt_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MIRROR, msg_id, ret, + NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) +static int nbl_disp_configure_mirror_table(void *priv, bool mirror_en, + u16 func_id, u8 mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int 
ret; - - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc, - trust, dscp2prio_map); - if (ret) - return ret; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc); - - return ret; + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror_table, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mirror_en, + func_id, mt_id); } -static int nbl_disp_chan_configure_qos_req(void *priv, u8 eth_id, u8 *pfc, - u8 trust, u8 *dscp2prio_map) +static int nbl_disp_chan_configure_mirror_table_req(void *priv, bool mirror_en, + u16 func_id, u8 mt_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_configure_qos param; + struct nbl_chan_param_mirror_table param = {0}; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; - param.eth_id = eth_id; - memcpy(param.pfc, pfc, NBL_MAX_PFC_PRIORITIES); - memcpy(param.dscp2prio_map, dscp2prio_map, NBL_DSCP_MAX); - param.trust = trust; + param.mirror_en = mirror_en; + param.func_id = func_id; + param.mt_id = mt_id; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_CONFIGURE_QOS, &param, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, &param, sizeof(param), + NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return ret; } -static void nbl_disp_chan_configure_qos_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_configure_mirror_table_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_configure_qos *param; + struct nbl_chan_param_mirror_table *param; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - param = (struct nbl_chan_param_configure_qos *)data; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->eth_id, param->pfc, param->trust, param->dscp2prio_map); - if (ret) - goto send_ack; - - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->pfc); - -send_ack: - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_QOS, msg_id, ret, NULL, 0); + param = (struct nbl_chan_param_mirror_table *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_mirror_table, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->mirror_en, param->func_id, param->mt_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, msg_id, ret, + NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +static int nbl_disp_clear_mirror_cfg(void *priv, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int ret; - - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); - return ret; + return NBL_OPS_CALL_LOCK(disp_mgt, 
res_ops->clear_mirror_cfg, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id); } -static int nbl_disp_chan_set_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +static int nbl_disp_chan_clear_mirror_cfg_req(void *priv, u16 func_id) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_set_pfc_buffer_size param; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - - param.eth_id = eth_id; - param.prio = prio; - param.xoff = xoff; - param.xon = xon; + int ret; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, &param, sizeof(param), NULL, 0, 1); - return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + NBL_CHAN_MSG_CLEAR_MIRROR_CFG, NULL, 0, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return ret; } -static void nbl_disp_chan_set_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void nbl_disp_chan_clear_mirror_cfg_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_set_pfc_buffer_size *param; struct nbl_chan_ack_info chan_ack; int ret = NBL_CHAN_RESP_OK; - param = (struct nbl_chan_param_set_pfc_buffer_size *)data; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->eth_id, param->prio, param->xoff, param->xon); - - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, msg_id, ret, NULL, 0); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_mirror_cfg, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_MIRROR_CFG, msg_id, ret, + NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } -static int nbl_disp_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +static int nbl_disp_check_flow_table_spec(void *priv, u16 vlan_list_cnt, + u16 unicast_mac_cnt, u16 multi_mac_cnt) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); - int ret; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); - - return ret; + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->check_flow_table_spec, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vlan_list_cnt, + unicast_mac_cnt, multi_mac_cnt); } static int -nbl_disp_chan_get_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +nbl_disp_chan_check_flow_table_spec_req(void *priv, u16 vlan_list_cnt, + u16 unicast_mac_cnt, u16 multi_mac_cnt) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); struct nbl_chan_send_info chan_send = {0}; - struct nbl_chan_param_get_pfc_buffer_size param = {0}; - struct nbl_chan_param_get_pfc_buffer_size_resp resp; + struct nbl_chan_param_check_flow_spec param = {0}; struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); - int ret; - param.eth_id = eth_id; - param.prio = prio; + param.vlan_list_cnt = vlan_list_cnt; + 
param.unicast_mac_cnt = unicast_mac_cnt; + param.multi_mac_cnt = multi_mac_cnt; NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), - NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, &param, sizeof(param), - &resp, sizeof(resp), 1); - ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); - - *xoff = resp.xoff; - *xon = resp.xon; + NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, &param, + sizeof(param), NULL, 0, 1); - return ret; + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); } -static void nbl_disp_chan_get_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, - void *data, u32 data_len) +static void +nbl_disp_chan_check_flow_table_spec_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) { struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; - struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); - struct nbl_chan_param_get_pfc_buffer_size *param; - struct nbl_chan_param_get_pfc_buffer_size_resp resp; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); struct nbl_chan_ack_info chan_ack; - int ret = NBL_CHAN_RESP_OK; + struct nbl_chan_param_check_flow_spec *param; + int ret; - param = (struct nbl_chan_param_get_pfc_buffer_size *)data; - ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, - NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), - param->eth_id, param->prio, &resp.xoff, &resp.xon); - NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, msg_id, ret, - &resp, sizeof(resp)); + param = (struct nbl_chan_param_check_flow_spec *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->check_flow_table_spec, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vlan_list_cnt, + param->unicast_mac_cnt, param->multi_mac_cnt); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, + msg_id, ret, NULL, 0); chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); } +static u32 nbl_disp_get_dvn_desc_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_dvn_desc_req, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_set_dvn_desc_req(void *priv, u32 desc_req) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_dvn_desc_req, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), desc_req)); +} + /* NBL_DISP_SET_OPS(disp_op_name, res_func, ctrl_lvl, msg_type, msg_req, msg_resp) * ctrl_lvl is to define when this disp_op should go directly to res_op, not sending a channel msg. 
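* For example, one illustrative reading of the levels based on the entries below: with ctrl_lvl NBL_DISP_CTRL_LVL_NET and msg_type -1 the op always calls the local res_op; with NBL_DISP_CTRL_LVL_MGT it calls the res_op directly only on the function that owns the managed resource, otherwise the paired _req handler packs its arguments and sends the listed NBL_CHAN_MSG_* to the management PF, whose _resp handler performs the call and acks the result.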
* @@ -8881,6 +10555,9 @@ do { \ NBL_DISP_SET_OPS(setup_queue, nbl_disp_setup_queue, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_QUEUE, \ nbl_disp_chan_setup_queue_req, nbl_disp_chan_setup_queue_resp); \ + NBL_DISP_SET_OPS(remove_queue, nbl_disp_remove_queue, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_QUEUE, \ + nbl_disp_chan_remove_queue_req, nbl_disp_chan_remove_queue_resp); \ NBL_DISP_SET_OPS(remove_all_queues, nbl_disp_remove_all_queues, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, \ nbl_disp_chan_remove_all_queues_req, \ @@ -8913,6 +10590,9 @@ do { \ NBL_DISP_SET_OPS(del_multi_rule, nbl_disp_del_multi_rule, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_MULTI_RULE, \ nbl_disp_chan_del_multi_rule_req, nbl_disp_chan_del_multi_rule_resp); \ + NBL_DISP_SET_OPS(cfg_multi_mcast, nbl_disp_cfg_multi_mcast, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, \ + nbl_disp_chan_cfg_multi_mcast_req, nbl_disp_chan_cfg_multi_mcast_resp);\ NBL_DISP_SET_OPS(setup_multi_group, nbl_disp_setup_multi_group, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_MULTI_GROUP, \ nbl_disp_chan_setup_multi_group_req, \ @@ -8966,9 +10646,6 @@ do { \ NBL_DISP_SET_OPS(cfg_duppkt_mcc, nbl_disp_cfg_duppkt_mcc, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_DUPPKT_MCC, \ nbl_disp_chan_cfg_duppkt_mcc_req, nbl_disp_chan_cfg_duppkt_mcc_resp); \ - NBL_DISP_SET_OPS(cfg_lag_mcc, nbl_disp_cfg_lag_mcc, \ - NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MCC, \ - nbl_disp_chan_cfg_lag_mcc_req, nbl_disp_chan_cfg_lag_mcc_resp); \ NBL_DISP_SET_OPS(cfg_bond_shaping, nbl_disp_cfg_bond_shaping, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_BOND_SHAPING, \ nbl_disp_chan_cfg_bond_shaping_req, \ @@ -8993,6 +10670,14 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_BASE_MAC_ADDR, \ nbl_disp_chan_get_base_mac_addr_req, \ nbl_disp_chan_get_base_mac_addr_resp); \ + NBL_DISP_SET_OPS(get_eth_mac_stats, nbl_disp_get_eth_mac_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_MAC_STATS, \ + nbl_disp_chan_get_eth_mac_stats_req, \ + nbl_disp_chan_get_eth_mac_stats_resp); \ + NBL_DISP_SET_OPS(get_rmon_stats, nbl_disp_get_rmon_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RMON_STATS, \ + nbl_disp_chan_get_rmon_stats_req, \ + nbl_disp_chan_get_rmon_stats_resp); \ NBL_DISP_SET_OPS(get_tx_headroom, nbl_disp_get_tx_headroom, \ NBL_DISP_CTRL_LVL_NET, -1, \ NULL, NULL); \ @@ -9044,8 +10729,20 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_STATS, \ nbl_disp_get_private_stat_data_req, \ nbl_disp_chan_get_private_stat_data_resp); \ + NBL_DISP_SET_OPS(get_eth_ctrl_stats, nbl_disp_get_eth_ctrl_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_CTRL_STATS, \ + nbl_disp_chan_get_eth_ctrl_stats_req, \ + nbl_disp_chan_get_eth_ctrl_stats_resp); \ + NBL_DISP_SET_OPS(get_pause_stats, nbl_disp_get_pause_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PAUSE_STATS, \ + nbl_disp_chan_get_pause_stats_req, \ + nbl_disp_chan_get_pause_stats_resp); \ NBL_DISP_SET_OPS(fill_private_stat_strings, nbl_disp_fill_private_stat_strings, \ NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_eth_abnormal_stats, nbl_disp_get_eth_abnormal_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, \ + nbl_disp_chan_get_eth_abnormal_stats_req, \ + nbl_disp_chan_get_eth_abnormal_stats_resp); \ NBL_DISP_SET_OPS(get_max_desc_num, nbl_disp_get_max_desc_num, \ NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ NBL_DISP_SET_OPS(get_min_desc_num, nbl_disp_get_min_desc_num, \ @@ -9088,6 +10785,9 @@ do { \ NBL_DISP_SET_OPS(get_rxfh_indir, 
nbl_disp_get_rxfh_indir, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_INDIR, \ nbl_disp_chan_get_rxfh_indir_req, nbl_disp_chan_get_rxfh_indir_resp); \ + NBL_DISP_SET_OPS(set_rxfh_indir, nbl_disp_set_rxfh_indir, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RXFH_INDIR, \ + nbl_disp_chan_set_rxfh_indir_req, nbl_disp_chan_set_rxfh_indir_resp); \ NBL_DISP_SET_OPS(get_rxfh_rss_key, nbl_disp_get_rxfh_rss_key, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_KEY, \ nbl_disp_chan_get_rxfh_rss_key_req, \ @@ -9096,6 +10796,10 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, \ nbl_disp_chan_get_rxfh_rss_alg_sel_req, \ nbl_disp_chan_get_rxfh_rss_alg_sel_resp); \ + NBL_DISP_SET_OPS(set_rxfh_rss_alg_sel, nbl_disp_set_rxfh_rss_alg_sel, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL, \ + nbl_disp_chan_set_rxfh_rss_alg_sel_req, \ + nbl_disp_chan_set_rxfh_rss_alg_sel_resp); \ NBL_DISP_SET_OPS(cfg_txrx_vlan, nbl_disp_cfg_txrx_vlan, \ NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ NBL_DISP_SET_OPS(setup_rdma_id, nbl_disp_setup_rdma_id, \ @@ -9411,6 +11115,10 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PORT_STATE, \ nbl_disp_chan_get_port_state_req, \ nbl_disp_chan_get_port_state_resp); \ + NBL_DISP_SET_OPS(get_fec_stats, nbl_disp_get_fec_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FEC_STATS, \ + nbl_disp_chan_get_fec_stats_req, \ + nbl_disp_chan_get_fec_stats_resp); \ NBL_DISP_SET_OPS(set_port_advertising, nbl_disp_set_port_advertising, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PORT_ADVERTISING, \ nbl_disp_chan_set_port_advertising_req, \ @@ -9427,6 +11135,17 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_STATE, \ nbl_disp_chan_get_link_state_req, \ nbl_disp_chan_get_link_state_resp); \ + NBL_DISP_SET_OPS(get_link_down_count, nbl_disp_get_link_down_count, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, \ + nbl_disp_chan_get_link_down_count_req, \ + nbl_disp_chan_get_link_down_count_resp); \ + NBL_DISP_SET_OPS(get_link_status_opcode, nbl_disp_get_link_status_opcode, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, \ + nbl_disp_chan_get_link_status_opcode_req, \ + nbl_disp_chan_get_link_status_opcode_resp); \ + NBL_DISP_SET_OPS(set_wol, nbl_disp_set_wol, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_WOL, \ + nbl_disp_chan_set_wol_req, nbl_disp_chan_set_wol_resp); \ NBL_DISP_SET_OPS(cfg_eth_bond_event, nbl_disp_cfg_eth_bond_event, \ NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ @@ -9471,15 +11190,9 @@ do { \ NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ NBL_CHAN_GET_UPCALL_PORT, NULL, \ nbl_disp_chan_get_upcall_port_resp); \ - NBL_DISP_SET_OPS(configure_virtio_dev_msix, nbl_disp_configure_virtio_dev_msix, \ - NBL_DISP_CTRL_LVL_MGT, -1, \ - NULL, NULL); \ NBL_DISP_SET_OPS(configure_rdma_msix_off, nbl_disp_configure_rdma_msix_off, \ NBL_DISP_CTRL_LVL_MGT, -1, \ NULL, NULL); \ - NBL_DISP_SET_OPS(configure_virtio_dev_ready, nbl_disp_configure_virtio_dev_ready, \ - NBL_DISP_CTRL_LVL_MGT, -1, \ - NULL, NULL); \ NBL_DISP_SET_OPS(set_eth_mac_addr, nbl_disp_set_eth_mac_addr, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, \ nbl_disp_chan_set_eth_mac_addr_req, \ @@ -9537,6 +11250,9 @@ do { \ NBL_DISP_SET_OPS(adapt_desc_gother, nbl_disp_adapt_desc_gother, \ NBL_DISP_CTRL_LVL_MGT, -1, \ NULL, NULL); \ + NBL_DISP_SET_OPS(set_desc_high_throughput, nbl_disp_set_desc_high_throughput, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ NBL_DISP_SET_OPS(flr_clear_net, 
nbl_disp_flr_clear_net, \ NBL_DISP_CTRL_LVL_MGT, -1, \ NULL, NULL); \ @@ -9582,8 +11298,8 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, \ nbl_disp_chan_get_user_queue_info_req, \ nbl_disp_chan_get_user_queue_info_resp); \ - NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ - NBL_CHAN_MSG_GET_BOARD_INFO, NULL, \ + NBL_DISP_SET_OPS(get_board_info, nbl_disp_get_board_info, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_BOARD_INFO, nbl_disp_chan_get_board_info_req, \ nbl_disp_chan_get_board_info_resp); \ NBL_DISP_SET_OPS(get_vf_base_vsi_id, nbl_disp_get_vf_base_vsi_id, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, \ @@ -9619,6 +11335,34 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_VSI_ID, \ nbl_disp_chan_get_vf_vsi_id_req, \ nbl_disp_chan_get_vf_vsi_id_resp); \ + NBL_DISP_SET_OPS(check_vf_is_active, nbl_disp_check_vf_is_active, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_CHECK_VF_IS_ACTIVE, \ + nbl_disp_chan_check_vf_is_active_req, \ + nbl_disp_chan_check_vf_is_active_resp); \ + NBL_DISP_SET_OPS(check_vf_is_vdpa, nbl_disp_check_vf_is_vdpa, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_CHECK_VF_IS_VDPA, \ + nbl_disp_chan_check_vf_is_vdpa_req, \ + nbl_disp_chan_check_vf_is_vdpa_resp); \ + NBL_DISP_SET_OPS(get_vdpa_vf_stats, nbl_disp_get_vdpa_vf_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VDPA_VF_STATS, \ + nbl_disp_chan_get_vdpa_vf_stats_req, \ + nbl_disp_chan_get_vdpa_vf_stats_resp); \ + NBL_DISP_SET_OPS(get_uvn_pkt_drop_stats, nbl_disp_get_uvn_pkt_drop_stats, \ + NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_GET_UVN_PKT_DROP_STATS, \ + nbl_disp_chan_get_uvn_pkt_drop_stats_req, \ + nbl_disp_chan_get_uvn_pkt_drop_stats_resp); \ + NBL_DISP_SET_OPS(get_ustore_pkt_drop_stats, nbl_disp_get_ustore_pkt_drop_stats, \ + NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_GET_USTORE_PKT_DROP_STATS, \ + nbl_disp_chan_get_ustore_pkt_drop_stats_req, \ + nbl_disp_chan_get_ustore_pkt_drop_stats_resp); \ + NBL_DISP_SET_OPS(get_ustore_total_pkt_drop_stats, \ + nbl_disp_get_ustore_total_pkt_drop_stats, \ + NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, \ + nbl_disp_chan_get_ustore_total_pkt_drop_stats_req, \ + nbl_disp_chan_get_ustore_total_pkt_drop_stats_resp); \ NBL_DISP_SET_OPS(set_pmd_debug, nbl_disp_set_pmd_debug, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PMD_DEBUG, \ NULL, NULL); \ @@ -9629,6 +11373,9 @@ do { \ NBL_DISP_SET_OPS(set_tx_rate, nbl_disp_set_tx_rate, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_TX_RATE, \ nbl_disp_chan_set_tx_rate_req, nbl_disp_chan_set_tx_rate_resp); \ + NBL_DISP_SET_OPS(set_rx_rate, nbl_disp_set_rx_rate, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RX_RATE, \ + nbl_disp_chan_set_rx_rate_req, nbl_disp_chan_set_rx_rate_resp); \ NBL_DISP_SET_OPS(register_func_link_forced, nbl_disp_register_func_link_forced, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, \ nbl_disp_chan_register_func_link_forced_req, \ @@ -9638,6 +11385,10 @@ do { \ nbl_disp_chan_get_link_forced_req, nbl_disp_chan_get_link_forced_resp);\ NBL_DISP_SET_OPS(get_driver_version, nbl_disp_get_driver_version, \ NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(register_func_trust, nbl_disp_register_func_trust, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_TRUST, \ + nbl_disp_chan_register_func_trust_req, \ + nbl_disp_chan_register_func_trust_resp); \ NBL_DISP_SET_OPS(register_func_vlan, nbl_disp_register_func_vlan, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_VLAN, \ nbl_disp_chan_register_func_vlan_req, \ @@ -9646,6 +11397,12 @@ do { \ 
NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_RATE, \ nbl_disp_chan_register_func_rate_req, \ nbl_disp_chan_register_func_rate_resp); \ + NBL_DISP_SET_OPS(set_mtu, nbl_disp_set_mtu, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_MTU_SET, \ + nbl_disp_chan_set_mtu_req, \ + nbl_disp_chan_set_mtu_resp); \ + NBL_DISP_SET_OPS(get_max_mtu, nbl_disp_get_max_mtu, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ NBL_DISP_SET_OPS(get_fd_flow, nbl_disp_get_fd_flow, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW, \ nbl_disp_chan_get_fd_flow_req, nbl_disp_chan_get_fd_flow_resp); \ @@ -9680,10 +11437,18 @@ do { \ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ NBL_DISP_SET_OPS(get_active_func_bitmaps, nbl_disp_get_active_func_bitmaps, \ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(configure_rdma_bw, nbl_disp_configure_rdma_bw, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_RDMA_BW, \ + nbl_disp_chan_configure_rdma_bw_req, \ + nbl_disp_chan_configure_rdma_bw_resp); \ NBL_DISP_SET_OPS(configure_qos, nbl_disp_configure_qos, \ - NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_QOS, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_QOS, \ nbl_disp_chan_configure_qos_req, \ nbl_disp_chan_configure_qos_resp); \ + NBL_DISP_SET_OPS(set_tc_wgt, nbl_disp_set_tc_wgt, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_TC_WGT, \ + nbl_disp_chan_set_tc_wgt_req, \ + nbl_disp_chan_set_tc_wgt_resp); \ NBL_DISP_SET_OPS(get_pfc_buffer_size, nbl_disp_get_pfc_buffer_size, \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, \ nbl_disp_chan_get_pfc_buffer_size_req, \ @@ -9692,6 +11457,52 @@ do { \ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, \ nbl_disp_chan_set_pfc_buffer_size_req, \ nbl_disp_chan_set_pfc_buffer_size_resp); \ + NBL_DISP_SET_OPS(set_rate_limit, nbl_disp_set_rate_limit, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_RATE_LIMIT, \ + nbl_disp_chan_set_rate_limit_req, \ + nbl_disp_chan_set_rate_limit_resp); \ + NBL_DISP_SET_OPS(get_perf_dump_length, nbl_disp_get_perf_dump_length, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_perf_dump_data, nbl_disp_get_perf_dump_data, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_dev_name, nbl_disp_register_dev_name, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_PF_NAME, \ + nbl_disp_chan_register_dev_name_req, \ + nbl_disp_chan_register_dev_name_resp); \ + NBL_DISP_SET_OPS(get_dev_name, nbl_disp_get_dev_name, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PF_NAME, \ + nbl_disp_chan_get_dev_name_req, \ + nbl_disp_chan_get_dev_name_resp); \ + NBL_DISP_SET_OPS(get_mirror_table_id, nbl_disp_get_mirror_table_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, \ + nbl_disp_chan_get_mirror_table_id_req, \ + nbl_disp_chan_get_mirror_table_id_resp); \ + NBL_DISP_SET_OPS(configure_mirror, nbl_disp_configure_mirror, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_MIRROR, \ + nbl_disp_chan_configure_mirror_req, \ + nbl_disp_chan_configure_mirror_resp); \ + NBL_DISP_SET_OPS(configure_mirror_table, nbl_disp_configure_mirror_table, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, \ + nbl_disp_chan_configure_mirror_table_req, \ + nbl_disp_chan_configure_mirror_table_resp); \ + NBL_DISP_SET_OPS(clear_mirror_cfg, nbl_disp_clear_mirror_cfg, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_MIRROR_CFG, \ + nbl_disp_chan_clear_mirror_cfg_req, \ + nbl_disp_chan_clear_mirror_cfg_resp); \ + NBL_DISP_SET_OPS(cfg_mirror_outputport_event, nbl_disp_cfg_mirror_outputport_event, \ + 
NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(check_flow_table_spec, nbl_disp_check_flow_table_spec, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, \ + nbl_disp_chan_check_flow_table_spec_req, \ + nbl_disp_chan_check_flow_table_spec_resp); \ + NBL_DISP_SET_OPS(get_dvn_desc_req, nbl_disp_get_dvn_desc_req, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_dvn_desc_req, nbl_disp_set_dvn_desc_req, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ } while (0) /* Structure starts here, adding an op should not modify anything below */ @@ -9704,7 +11515,7 @@ static int nbl_disp_setup_msg(struct nbl_dispatch_mgt *disp_mgt) NBL_CHAN_TYPE_MAILBOX)) return 0; - mutex_init(&disp_mgt->ops_muxtex_lock); + mutex_init(&disp_mgt->ops_mutex_lock); spin_lock_init(&disp_mgt->ops_spin_lock); disp_mgt->ops_lock_required = true; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h index 70d1d30c35fd..fb6dd73706bc 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -29,9 +29,9 @@ u64 ret = 0; \ \ if (_disp_mgt->ops_lock_required) \ - mutex_lock(&_disp_mgt->ops_muxtex_lock); \ + mutex_lock(&_disp_mgt->ops_mutex_lock); \ + \ __builtin_choose_expr( \ - /* Check if the func has void return value */ \ __builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \ (!_func) ? 0 : _func(__VA_ARGS__), \ ret = __builtin_choose_expr( \ @@ -42,7 +42,7 @@ ); \ \ if (_disp_mgt->ops_lock_required) \ - mutex_unlock(&_disp_mgt->ops_muxtex_lock); \ + mutex_unlock(&_disp_mgt->ops_mutex_lock); \ \ (typeof(_func(__VA_ARGS__))) ret; \ }) @@ -79,8 +79,7 @@ struct nbl_dispatch_mgt { struct nbl_channel_ops_tbl *chan_ops_tbl; struct nbl_dispatch_ops_tbl *disp_ops_tbl; DECLARE_BITMAP(ctrl_lvl, NBL_DISP_CTRL_LVL_MAX); - /* use for the caller not in interrupt */ - struct mutex ops_muxtex_lock; + struct mutex ops_mutex_lock; /* use for the caller not in interrupt */ /* use for the caller is in interrupt or other can't sleep thread */ spinlock_t ops_spin_lock; bool ops_lock_required; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c index 4eb2e539aa7c..3dfc8834afaa 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c @@ -6,6 +6,37 @@ #include "nbl_ethtool.h" +#define DIAG_BLK_SZ(data_size) (sizeof(struct nbl_diag_blk) + (data_size)) +#define DIAG_GET_NEXT_BLK(dump_hdr) \ + ({ typeof(dump_hdr) _dump_hdr = (dump_hdr); \ + (struct nbl_diag_blk *)(_dump_hdr->dump + _dump_hdr->total_length); }) + +#define NBL_DIAG_DUMP_VERSION 1 +#define NBL_DIAG_FLAG_PERFORMANCE BIT(0) + +#define NBL_DRV_VER_SZ 64 +#define NBL_DEV_NAME_SZ 64 + +enum nbl_diag_type { + NBL_DIAG_DRV_VERSION = 0, + NBL_DIAG_DEVICE_NAME, + NBL_DIAG_PERFORMANCE, +}; + +struct nbl_diag_blk { + u32 type; + u32 length; + char data[]; +} __packed; + +struct nbl_diag_dump { + u32 version; + u32 flag; + u32 num_blocks; + u32 total_length; + char dump[]; +} __packed; + enum NBL_STATS_TYPE { NBL_NETDEV_STATS, NBL_ETH_STATS, @@ -38,6 +69,8 @@ enum nbl_ethtool_test_id { NBL_ETH_TEST_MAX }; +#define NBL_LEONIS_LANE_NUM (4) + 
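The nbl_diag_blk/nbl_diag_dump pair added above describes a simple append-only layout: a fixed header followed by variable-length blocks packed back to back, with total_length always pointing at the first free byte, which is exactly the offset DIAG_GET_NEXT_BLK() computes. Below is a self-contained sketch of that layout; the types are re-declared under invented userspace names purely for illustration, so it compiles on its own and is not driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct diag_blk {		/* mirrors struct nbl_diag_blk */
	uint32_t type;
	uint32_t length;
	char data[];
} __attribute__((packed));

struct diag_dump {		/* mirrors struct nbl_diag_dump */
	uint32_t version;
	uint32_t flag;
	uint32_t num_blocks;
	uint32_t total_length;
	char dump[];
} __attribute__((packed));

/* same arithmetic as DIAG_GET_NEXT_BLK() plus DIAG_BLK_SZ(): append a block */
static void append_blk(struct diag_dump *hdr, uint32_t type, const void *src, uint32_t len)
{
	struct diag_blk *blk = (struct diag_blk *)(hdr->dump + hdr->total_length);

	blk->type = type;
	blk->length = len;
	memcpy(blk->data, src, len);
	hdr->num_blocks++;
	hdr->total_length += sizeof(*blk) + len;
}

int main(void)
{
	static char buf[256];	/* zero-initialized; large enough for this demo */
	struct diag_dump *hdr = (struct diag_dump *)buf;
	const char drv[] = "1.0.0", dev[] = "eth0";

	hdr->version = 1;			/* NBL_DIAG_DUMP_VERSION */
	append_blk(hdr, 0, drv, sizeof(drv));	/* NBL_DIAG_DRV_VERSION */
	append_blk(hdr, 1, dev, sizeof(dev));	/* NBL_DIAG_DEVICE_NAME */
	printf("%u blocks, %u bytes of block data\n",
	       (unsigned)hdr->num_blocks, (unsigned)hdr->total_length);
	return 0;
}

Appending stays O(1) because the header tracks the running length; a reader walks the same chain by advancing sizeof(struct diag_blk) + length per block.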
#define NBL_TEST_LEN (sizeof(nbl_gstrings_test) / ETH_GSTRING_LEN) #define NBL_NETDEV_STAT(_name, stat_m, stat_n) { \ @@ -74,7 +107,6 @@ static const struct nbl_ethtool_stats nbl_gstrings_stats[] = { NBL_NETDEV_STAT("tx_errors", tx_errors, tx_errors), NBL_NETDEV_STAT("rx_dropped", rx_dropped, rx_dropped), NBL_NETDEV_STAT("tx_dropped", tx_dropped, tx_dropped), - NBL_NETDEV_STAT("eth_multicast", multicast, multicast), NBL_NETDEV_STAT("collisions", collisions, collisions), NBL_NETDEV_STAT("rx_over_errors", rx_over_errors, rx_over_errors), NBL_NETDEV_STAT("rx_crc_errors", rx_crc_errors, rx_crc_errors), @@ -103,6 +135,11 @@ static const struct nbl_ethtool_stats nbl_gstrings_stats[] = { NBL_STAT("rx_cache_empty", rx_cache_empty, rx_cache_empty), NBL_STAT("rx_cache_busy", rx_cache_busy, rx_cache_busy), NBL_STAT("rx_cache_waive", rx_cache_waive, rx_cache_waive), + + NBL_STAT("xdp_tx_packets", xdp_tx_packets, xdp_tx_packets), + NBL_STAT("xdp_redirect_packets", xdp_redirect_packets, xdp_redirect_packets), + NBL_STAT("xdp_drop_packets", xdp_drop_packets, xdp_drop_packets), + NBL_STAT("xdp_oversize_packets", xdp_oversize_packets, xdp_oversize_packets), #ifdef CONFIG_TLS_DEVICE NBL_STAT("tls_encrypted_packets", tls_encrypted_packets, tls_encrypted_packets), NBL_STAT("tls_encrypted_bytes", tls_encrypted_bytes, tls_encrypted_bytes), @@ -125,6 +162,7 @@ static const struct nbl_priv_flags_info nbl_gstrings_priv_flags[NBL_ADAPTER_FLAG {1, 0, NBL_P4_CAP, "P4-default"}, {0, 1, 0, "link-down-on-close"}, {1, 1, NBL_ETH_SUPPORT_NRZ_RS_FEC_544, "nrz-rs-fec-544"}, + {1, 1, NBL_HIGH_THROUGHPUT_CAP, "high-throughput"}, }; #define NBL_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(nbl_gstrings_priv_flags) @@ -169,9 +207,9 @@ static void nbl_stats_fill_strings(struct net_device *netdev, u8 *data) struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; - char *p = (char *)data; + u8 *p = (char *)data; unsigned int i; - u16 xdp_ring_num = 0; + u32 xdp_ring_num = 0; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; @@ -202,7 +240,8 @@ static void nbl_stats_fill_strings(struct net_device *netdev, u8 *data) } if (xdp_vsi_info) - xdp_ring_num = xdp_vsi_info->ring_num; + xdp_ring_num = xdp_vsi_info->ring_num < num_online_cpus() ? + xdp_vsi_info->ring_num : num_online_cpus(); for (i = 0; i < xdp_ring_num; i++) { snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_xdp_packets", i); @@ -222,7 +261,7 @@ static void nbl_priv_flags_fill_strings(struct net_device *netdev, u8 *data) struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - char *p = (char *)data; + u8 *p = (char *)data; unsigned int i; for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { @@ -264,6 +303,7 @@ static int nbl_sset_fill_count(struct net_device *netdev) struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; u32 total_queues = 0, private_len = 0, extra_per_queue_entry = 0; + u32 xdp_queue_num = 0; vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; @@ -277,7 +317,9 @@ static int nbl_sset_fill_count(struct net_device *netdev) /* xdp queue stat */ if (xdp_vsi_info) - total_queues += xdp_vsi_info->ring_num; + xdp_queue_num = xdp_vsi_info->ring_num < num_online_cpus() ? 
+ xdp_vsi_info->ring_num : num_online_cpus(); + total_queues += xdp_queue_num; return NBL_GLOBAL_STATS_LEN + total_queues * (sizeof(struct nbl_queue_stats) / sizeof(u64)) + @@ -341,7 +383,7 @@ static void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, boo u64 last_rx_packets; u64 last_get_stats_jiffies, time_diff; u64 tx_packets, rx_packets; - u64 tx_rates, rx_rates, pkt_rates; + u64 tx_rates, rx_rates, pkt_rates, normalized_pkt_rates; u16 local_vector_id, vector_num; u16 intr_suppress_level; @@ -368,9 +410,14 @@ static void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, boo tx_rates = (tx_packets - last_tx_packets) / time_diff * HZ; rx_rates = (rx_packets - last_rx_packets) / time_diff * HZ; pkt_rates = max_t(u64, tx_rates, rx_rates); + if (netdev->mtu < ETH_DATA_LEN) + normalized_pkt_rates = pkt_rates; + else + normalized_pkt_rates = (netdev->mtu / ETH_DATA_LEN) * pkt_rates; intr_suppress_level = - disp_ops->get_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), pkt_rates, + disp_ops->get_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + normalized_pkt_rates, ring_mgt->vectors->intr_suppress_level); if (intr_suppress_level != ring_mgt->vectors->intr_suppress_level) { local_vector_id = ring_mgt->vectors[vsi_info->ring_offset].local_vector_id; @@ -382,36 +429,106 @@ static void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, boo } } -void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool) +static int nbl_serv_update_hw_stats(struct nbl_service_mgt *serv_mgt, + u64 last_rx_packets, u64 rx_packets) { - struct nbl_serv_net_resource_mgt *net_resource_mgt; - struct net_device *netdev; - struct nbl_netdev_priv *net_priv; - struct nbl_adapter *adapter; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct net_device *netdev = net_resource_mgt->netdev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + u32 *uvn_stat_pkt_drop = NULL; + u64 rx_rates; + u64 time_diff; + int i = 0; + int ret = 0; - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - netdev = net_resource_mgt->netdev; - net_priv = netdev_priv(netdev); - adapter = NBL_NETDEV_TO_ADAPTER(netdev); + if (time_after(jiffies, + net_resource_mgt->hw_stats_jiffies + net_resource_mgt->hw_stats_period)) { + time_diff = jiffies - net_resource_mgt->hw_stats_jiffies; + rx_rates = (rx_packets - last_rx_packets) / time_diff * HZ; + net_resource_mgt->hw_stats_jiffies = jiffies; + if (!common->is_vf || rx_rates > NBL_HW_STATS_RX_RATE_THRESHOLD) { + uvn_stat_pkt_drop = devm_kcalloc(dev, vsi_info->ring_num, + sizeof(*uvn_stat_pkt_drop), GFP_KERNEL); + if (!uvn_stat_pkt_drop) { + ret = -ENOMEM; + goto alloc_uvn_stat_pkt_drop_fail; + } + ret = disp_ops->get_uvn_pkt_drop_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, vsi_info->ring_num, + uvn_stat_pkt_drop); + if (ret) + goto get_uvn_pkt_drop_stats_fail; + for (i = 0; i < vsi_info->ring_num; i++) + net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop[i] += + uvn_stat_pkt_drop[i]; + } + } + + if (!common->is_vf && adapter->init_param.caps.has_ctrl) { + ret = 
disp_ops->get_ustore_pkt_drop_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + goto get_ustore_pkt_drop_stats_fail; + } + if (uvn_stat_pkt_drop) { + devm_kfree(dev, uvn_stat_pkt_drop); + uvn_stat_pkt_drop = NULL; + } + return 0; + +get_ustore_pkt_drop_stats_fail: +get_uvn_pkt_drop_stats_fail: + if (uvn_stat_pkt_drop) { + devm_kfree(dev, uvn_stat_pkt_drop); + uvn_stat_pkt_drop = NULL; + } +alloc_uvn_stat_pkt_drop_fail: + return ret; +} + +void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_eth_abnormal_stats eth_abnormal_stats = { 0 }; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct net_device *netdev = net_resource_mgt->netdev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + u64 last_rx_packets = 0; + int ret = 0; if (!test_bit(NBL_RUNNING, adapter->state) || test_bit(NBL_RESETTING, adapter->state)) return; + last_rx_packets = net_resource_mgt->stats.rx_packets; nbl_serv_adjust_interrpt_param(serv_mgt, ethtool); netdev->stats.tx_packets = net_resource_mgt->stats.tx_packets; netdev->stats.tx_bytes = net_resource_mgt->stats.tx_bytes; - netdev->stats.rx_packets = net_resource_mgt->stats.rx_packets; netdev->stats.rx_bytes = net_resource_mgt->stats.rx_bytes; + if (!common->is_vf) + disp_ops->get_eth_abnormal_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), ð_abnormal_stats); + + ret = nbl_serv_update_hw_stats(serv_mgt, last_rx_packets, + net_resource_mgt->stats.rx_packets); + /* net_device_stats */ + netdev->stats.multicast = 0; netdev->stats.rx_errors = 0; netdev->stats.tx_errors = 0; + netdev->stats.rx_length_errors = eth_abnormal_stats.rx_length_errors; + netdev->stats.rx_crc_errors = eth_abnormal_stats.rx_crc_errors; + netdev->stats.rx_frame_errors = eth_abnormal_stats.rx_frame_errors; netdev->stats.rx_dropped = 0; netdev->stats.tx_dropped = 0; - netdev->stats.multicast = 0; - netdev->stats.rx_length_errors = 0; } static void @@ -429,6 +546,7 @@ nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u6 struct nbl_queue_err_stats queue_err_stats = { 0 }; struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; u32 private_len = 0; + u32 xdp_ring_num = 0; char *p = NULL; int i, j, k; @@ -464,7 +582,11 @@ nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u6 i += 3; } - for (j = 0; j < xdp_vsi_info->ring_num; j++) { + if (xdp_vsi_info) + xdp_ring_num = xdp_vsi_info->ring_num < num_online_cpus() ? + xdp_vsi_info->ring_num : num_online_cpus(); + + for (j = 0; j < xdp_ring_num; j++) { disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ring_mgt->xdp_ring_offset + j, &queue_stats, true); data[i] = queue_stats.packets; @@ -589,7 +711,7 @@ static int nbl_set_channels(struct net_device *netdev, struct ethtool_channels * netif_set_real_num_rx_queues(netdev, queue_pairs); disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), queue_pairs); + NBL_COMMON_TO_VSI_ID(common), queue_pairs, true); return 0; } @@ -599,6 +721,168 @@ static u32 nbl_get_link(struct net_device *netdev) return netif_carrier_ok(netdev) ? 
1 : 0; } +struct nbl_ethtool_link_ext_state_opcode_mapping { + u32 status_opcode; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct nbl_ethtool_link_ext_state_opcode_mapping nbl_link_ext_state_opcode_map[] = { + /* States relating to the autonegotiation or issues therein */ + {10, ETHTOOL_LINK_EXT_STATE_AUTONEG, 0}, + {11, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED}, + {12, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + {13, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED}, + {14, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE}, + {15, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE}, + {16, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + + /* Failure during link training */ + {20, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0}, + {21, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED}, + {22, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {23, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {24, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + /* Logical mismatch in physical coding sublayer or forward error correction sublayer */ + {30, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, 0}, + {31, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {32, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {33, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS}, + {34, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {35, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + /* Signal integrity issues */ + {40, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0}, + {41, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE}, + + {43, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST}, + {44, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS}, + + /* No cable connected */ + {50, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + + /* Failure is related to cable, e.g., unsupported cable */ + {60, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0}, + {61, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {62, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE}, + + /* Failure is related to EEPROM, e.g., failure during reading or parsing the data */ + {70, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, + + /* Failure during calibration algorithm */ + {80, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0}, + + /* The hardware is not able to provide the power required from cable or module */ + {90, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0}, + + /* The module is overheated */ + {100, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0}, + + /* 
module */ + {110, ETHTOOL_LINK_EXT_STATE_MODULE, 0}, + {111, ETHTOOL_LINK_EXT_STATE_MODULE, ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY}, +}; + +static void nbl_set_link_ext_state(struct nbl_ethtool_link_ext_state_opcode_mapping + link_ext_state_mapping, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + switch (link_ext_state_mapping.link_ext_state) { + case ETHTOOL_LINK_EXT_STATE_AUTONEG: + link_ext_state_info->autoneg = link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE: + link_ext_state_info->link_training = link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH: + link_ext_state_info->link_logical_mismatch = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY: + link_ext_state_info->bad_signal_integrity = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE: + link_ext_state_info->cable_issue = link_ext_state_mapping.link_ext_substate; + break; + default: + break; + } + + link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state; +} + +static int nbl_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(netdev); + struct nbl_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping; + u32 status_opcode = 0; + int i = 0; + int ret = 0; + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + ret = disp_ops->get_link_status_opcode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), &status_opcode); + if (ret) { + netdev_err(netdev, "Get link status opcode failed %d\n", ret); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(nbl_link_ext_state_opcode_map); i++) { + link_ext_state_mapping = nbl_link_ext_state_opcode_map[i]; + if (link_ext_state_mapping.status_opcode == status_opcode) { + nbl_set_link_ext_state(link_ext_state_mapping, link_ext_state_info); + return 0; + } + } + + return -ENODATA; +} + +static void nbl_get_link_ext_stats(struct net_device *netdev, struct ethtool_link_ext_stats *stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + u64 link_down_count = 0; + int ret = 0; + + ret = disp_ops->get_link_down_count(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &link_down_count); + if (ret) + netdev_err(netdev, "Get link down count failed %d\n", ret); + else + stats->link_down_events = link_down_count; +} + static void nbl_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_map) { if (modes & BIT(NBL_PORT_CAP_AUTONEG)) @@ -627,7 +911,7 @@ static void nbl_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_ma if (modes & BIT(NBL_PORT_CAP_1000BASE_X)) __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ethtool_modes_map); if (modes & BIT(NBL_PORT_CAP_10GBASE_T)) - __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ethtool_modes_map); + __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ethtool_modes_map); if (modes & BIT(NBL_PORT_CAP_10GBASE_KR)) __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ethtool_modes_map); if (modes & BIT(NBL_PORT_CAP_10GBASE_SR)) @@ -720,13 +1004,13 @@ static int nbl_get_ksettings(struct net_device
*netdev, struct ethtool_link_kset if (port_state.link_state) { cmd->base.speed = port_state.link_speed; cmd->base.duplex = DUPLEX_FULL; + advertising_speed = port_state.link_speed; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; - } - - advertising_speed = net_resource_mgt->configured_speed ? + advertising_speed = net_resource_mgt->configured_speed ? net_resource_mgt->configured_speed : cmd->base.speed; + } switch (port_state.port_type) { case NBL_PORT_TYPE_UNKNOWN: @@ -1268,6 +1552,42 @@ static void nbl_fd_flow_type_translate(enum nbl_chan_fdir_flow_type flow_type, } } +static int nbl_get_rss_hash_opt(struct net_device *netdev, struct ethtool_rxnfc *nfc) +{ + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + break; + default: + return -EOPNOTSUPP; + } + + nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; + + return 0; +} + +static int nbl_set_rss_hash_opt(struct net_device *netdev, struct ethtool_rxnfc *nfc) +{ + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + break; + default: + return -EOPNOTSUPP; + } + + if (nfc->data != (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EOPNOTSUPP; + + return 0; +} + static int nbl_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); @@ -1343,6 +1663,9 @@ static int nbl_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u cmd->data = disp_ops->get_fd_flow_max(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); kfree(locs_tmp); break; + case ETHTOOL_GRXFH: + ret = nbl_get_rss_hash_opt(netdev, cmd); + break; default: ret = -EOPNOTSUPP; break; @@ -2049,6 +2372,8 @@ static int nbl_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: + if (common->is_vf) + return -EOPNOTSUPP; if (ring_cookie == RX_CLS_FLOW_WAKE) return -EINVAL; @@ -2086,10 +2411,15 @@ static int nbl_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) kfree(info); break; case ETHTOOL_SRXCLSRLDEL: + if (common->is_vf) + return -EOPNOTSUPP; ret = disp_ops->remove_fd_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), NBL_CHAN_FDIR_RULE_NORMAL, cmd->fs.location, vsi_id); break; + case ETHTOOL_SRXFH: + ret = nbl_set_rss_hash_opt(netdev, cmd); + break; default: break; } @@ -2151,8 +2481,51 @@ static int nbl_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfun disp_ops->get_rxfh_rss_key(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), key, rxfh_key_size); if (hfunc) disp_ops->get_rxfh_rss_alg_sel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - hfunc, NBL_COMMON_TO_ETH_ID(serv_mgt->common)); + NBL_COMMON_TO_VSI_ID(common), hfunc); + + return 0; +} + +static int nbl_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + u32 rxfh_indir_size = 0; + int ret = 0; + + if (indir) { + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + ret = disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
NBL_COMMON_TO_VSI_ID(common), + indir, rxfh_indir_size); + if (ret) { + netdev_err(netdev, "set RSS indirection table failed %d\n", ret); + return ret; + } + if (!ring_mgt->rss_indir_user) { + ring_mgt->rss_indir_user = devm_kcalloc(dev, rxfh_indir_size, + sizeof(u32), GFP_KERNEL); + if (!ring_mgt->rss_indir_user) + return -ENOMEM; + } + memcpy(ring_mgt->rss_indir_user, indir, rxfh_indir_size * sizeof(u32)); + } + if (key) { + netdev_err(netdev, "RSS key modification is not supported\n"); + return -EOPNOTSUPP; + } + if (hfunc) { + ret = disp_ops->set_rxfh_rss_alg_sel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), hfunc); + if (ret) { + netdev_err(netdev, "set RSS hash function failed %d\n", ret); + return ret; + } + } return 0; } @@ -2265,8 +2638,7 @@ static int __nbl_set_per_queue_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames != ec->rx_max_coalesced_frames || ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) { - netdev_err(netdev, "tx and rx using the same interrupt, " - "rx params should equal to tx params\n"); + netdev_err(netdev, "rx params must equal tx params\n"); return -EINVAL; } @@ -2523,11 +2895,16 @@ static u64 nbl_loopback_test(struct net_device *netdev) struct nbl_netdev_priv *priv = netdev_priv(netdev); struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); struct nbl_serv_ring_mgt *ring_mgt = &serv_mgt->ring_mgt; struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; u8 origin_num_txq, origin_num_rxq, origin_active_q; u64 result = 0; + u32 rxfh_indir_size = 0; + u32 *indir = NULL; + int i = 0; /* In loopback test, we only need one queue */ origin_num_txq = ring_mgt->tx_ring_num; @@ -2536,6 +2913,16 @@ ring_mgt->tx_ring_num = NBL_SELF_TEST_Q_NUM; ring_mgt->rx_ring_num = NBL_SELF_TEST_Q_NUM; + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + indir = devm_kcalloc(dev, rxfh_indir_size, sizeof(u32), GFP_KERNEL); + if (!indir) + return -ENOMEM; + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % NBL_SELF_TEST_Q_NUM; + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), indir, rxfh_indir_size); + if (nbl_loopback_setup_rings(adapter, netdev)) { netdev_err(netdev, "Fail to setup rings"); result |= BIT(NBL_LB_ERR_RING_SETUP); @@ -2563,6 +2950,16 @@ static u64 nbl_loopback_test(struct net_device *netdev) ring_mgt->rx_ring_num = origin_num_rxq; vsi_info->active_ring_num = origin_active_q; + if (ring_mgt->rss_indir_user) { + memcpy(indir, ring_mgt->rss_indir_user, rxfh_indir_size * sizeof(u32)); + } else { + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % vsi_info->active_ring_num; + } + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), indir, rxfh_indir_size); + devm_kfree(dev, indir); + return result; } @@ -2583,6 +2980,7 @@ static void nbl_self_test(struct net_device *netdev, struct ethtool_test *eth_te int ret; cur_time = ktime_get_real_seconds(); + /* testing too frequently causes the self-test to fail */ if (cur_time - priv->last_st_time < NBL_SELF_TEST_TIME_GAP) { /* pass by defalut */ @@ -2805,6 +3203,141 @@ static void
nbl_get_pause_param(struct net_device *netdev, struct ethtool_pausep param->tx_pause = !!(port_state.active_fc & NBL_PORT_TX_PAUSE); } +static void nbl_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *eth_ctrl_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_eth_ctrl_stats eth_ctrl_stats_info = {0}; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_eth_ctrl_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &eth_ctrl_stats_info, + sizeof(struct nbl_eth_ctrl_stats)); + if (ret) { + netdev_err(netdev, "Get eth_ctrl_stats failed %d\n", ret); + return; + } + + eth_ctrl_stats->MACControlFramesTransmitted = + eth_ctrl_stats_info.macctrl_frames_txd_ok; + eth_ctrl_stats->MACControlFramesReceived = eth_ctrl_stats_info.macctrl_frames_rxd; + eth_ctrl_stats->UnsupportedOpcodesReceived = + eth_ctrl_stats_info.unsupported_opcodes_rx; +} + +static void nbl_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *pause_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_pause_stats pause_stats_info = {0}; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_pause_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &pause_stats_info, + sizeof(struct nbl_pause_stats)); + if (ret) { + netdev_err(netdev, "Get pause_stats failed %d\n", ret); + return; + } + + pause_stats->rx_pause_frames = pause_stats_info.rx_pause_frames; + pause_stats->tx_pause_frames = pause_stats_info.tx_pause_frames; +} + +static void nbl_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *eth_mac_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops; + struct nbl_eth_mac_stats eth_mac_stats_info = {0}; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_eth_mac_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &eth_mac_stats_info, + sizeof(struct nbl_eth_mac_stats)); + if (ret) { + netdev_err(netdev, "Get eth_mac_stats failed %d\n", ret); + return; + } + + eth_mac_stats->FramesTransmittedOK = eth_mac_stats_info.frames_txd_ok; + eth_mac_stats->FramesReceivedOK = eth_mac_stats_info.frames_rxd_ok; + eth_mac_stats->OctetsTransmittedOK = eth_mac_stats_info.octets_txd_ok; + eth_mac_stats->OctetsReceivedOK = eth_mac_stats_info.octets_rxd_ok; + eth_mac_stats->MulticastFramesXmittedOK = eth_mac_stats_info.multicast_frames_txd_ok; + eth_mac_stats->BroadcastFramesXmittedOK = eth_mac_stats_info.broadcast_frames_txd_ok; + eth_mac_stats->MulticastFramesReceivedOK = eth_mac_stats_info.multicast_frames_rxd_ok; + eth_mac_stats->BroadcastFramesReceivedOK = eth_mac_stats_info.broadcast_frames_rxd_ok; +} + +static const struct ethtool_rmon_hist_range rmon_ranges[] = { + { 0, 64}, + { 65, 127}, + { 128, 255}, + { 256, 511}, + { 512, 1023}, + { 1024, 1518}, + { 1519, 2047}, + { 2048, 65535}, + {}, +}; + +static void nbl_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **range) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct
nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_rmon_stats rmon_stats_info = {0}; + struct nbl_dispatch_ops *disp_ops; + u64 *rx = rmon_stats_info.rmon_rx_range; + u64 *tx = rmon_stats_info.rmon_tx_range; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_rmon_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &rmon_stats_info, + sizeof(struct nbl_rmon_stats)); + if (ret) { + netdev_err(netdev, "Get rmon_stats failed %d\n", ret); + return; + } + rmon_stats->undersize_pkts = rmon_stats_info.undersize_frames_rxd_goodfcs; + rmon_stats->oversize_pkts = rmon_stats_info.oversize_frames_rxd_goodfcs; + rmon_stats->fragments = rmon_stats_info.undersize_frames_rxd_badfcs; + rmon_stats->jabbers = rmon_stats_info.oversize_frames_rxd_badfcs; + + rmon_stats->hist[0] = rx[ETHER_STATS_PKTS_64_OCTETS]; + rmon_stats->hist[1] = rx[ETHER_STATS_PKTS_65_TO_127_OCTETS]; + rmon_stats->hist[2] = rx[ETHER_STATS_PKTS_128_TO_255_OCTETS]; + rmon_stats->hist[3] = rx[ETHER_STATS_PKTS_256_TO_511_OCTETS]; + rmon_stats->hist[4] = rx[ETHER_STATS_PKTS_512_TO_1023_OCTETS]; + rmon_stats->hist[5] = rx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS]; + rmon_stats->hist[6] = rx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS]; + rmon_stats->hist[7] = rx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS]; + + rmon_stats->hist_tx[0] = tx[ETHER_STATS_PKTS_64_OCTETS]; + rmon_stats->hist_tx[1] = tx[ETHER_STATS_PKTS_65_TO_127_OCTETS]; + rmon_stats->hist_tx[2] = tx[ETHER_STATS_PKTS_128_TO_255_OCTETS]; + rmon_stats->hist_tx[3] = tx[ETHER_STATS_PKTS_256_TO_511_OCTETS]; + rmon_stats->hist_tx[4] = tx[ETHER_STATS_PKTS_512_TO_1023_OCTETS]; + rmon_stats->hist_tx[5] = tx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS]; + rmon_stats->hist_tx[6] = tx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS]; + rmon_stats->hist_tx[7] = tx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS]; + *range = rmon_ranges; +} + static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fec) { struct nbl_service_mgt *serv_mgt; @@ -2877,7 +3410,8 @@ static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam } if (fec_mode == ETHTOOL_FEC_RS) { - if (port_state.link_speed == 10000) { + if ((port_state.link_speed == SPEED_10000 && port_state.link_state) || + net_resource_mgt->configured_speed == SPEED_10000) { netdev_err(netdev, "speed 10G cannot set fec RS, only can set fec baseR\n"); return -EINVAL; } @@ -2949,6 +3483,31 @@ static int nbl_get_fec_param(struct net_device *netdev, struct ethtool_fecparam return 0; } +static void nbl_get_fec_stats(struct net_device *netdev, struct ethtool_fec_stats *fec_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_fec_stats fec_stats_info = {0}; + unsigned int i; + int ret; + + ret = disp_ops->get_fec_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &fec_stats_info); + if (ret) { + netdev_err(netdev, "Get fec stats failed %d\n", ret); + return; + } + fec_stats->corrected_blocks.total = fec_stats_info.corrected_blocks; + fec_stats->uncorrectable_blocks.total = fec_stats_info.uncorrectable_blocks; + fec_stats->corrected_bits.total = fec_stats_info.corrected_bits; + + for (i = 0; i < NBL_LEONIS_LANE_NUM; i++) { + fec_stats->corrected_blocks.lanes[i] = fec_stats_info.corrected_lane[i]; + fec_stats->uncorrectable_blocks.lanes[i] = fec_stats_info.uncorrectable_lane[i]; + fec_stats->corrected_bits.lanes[i] =
fec_stats_info.corrected_bits_lane[i]; + } +} + static int nbl_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct nbl_service_mgt *serv_mgt; @@ -3070,6 +3629,170 @@ nbl_rep_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats data[i] = rep_stats.dropped; } +static int nbl_flash_device(struct net_device *netdev, struct ethtool_flash *flash) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + const struct firmware *fw; + int ret = 0; + + if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) + return -EOPNOTSUPP; + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + ret = request_firmware_direct(&fw, flash->data, &netdev->dev); + if (ret) + return ret; + + dev_hold(netdev); + rtnl_unlock(); + + ret = nbl_serv_update_firmware(serv_mgt, fw, NULL); + release_firmware(fw); + + rtnl_lock(); + dev_put(netdev); + + return ret; +} + +static int nbl_diag_fill_device_name(struct nbl_service_mgt *serv_mgt, void *buff) +{ + struct nbl_common_info *info = serv_mgt->common; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + + snprintf(buff, NBL_DEV_NAME_SZ, "%s:%s", pci_name(info->pdev), + net_resource_mgt->netdev->name); + + return NBL_DEV_NAME_SZ; +} + +static int nbl_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u32 extra_len = 0; + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + dump->version = NBL_DIAG_DUMP_VERSION; + dump->flag = serv_mgt->net_resource_mgt->dump_flag; + + if (dump->flag & NBL_DIAG_FLAG_PERFORMANCE) { + u32 length = disp_ops->get_perf_dump_length(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + serv_mgt->net_resource_mgt->dump_perf_len = length; + extra_len += length ? 
DIAG_BLK_SZ(length) : 0; + } + + dump->len = sizeof(struct nbl_diag_dump) + DIAG_BLK_SZ(NBL_DRV_VER_SZ) + + DIAG_BLK_SZ(NBL_DEV_NAME_SZ) + extra_len; + + return 0; +} + +static int nbl_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_diag_dump *dump_hdr = buffer; + struct nbl_diag_blk *dump_blk; + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + memset(buffer, 0, dump->len); + dump_hdr->version = NBL_DIAG_DUMP_VERSION; + dump_hdr->flag = 0; + dump_hdr->num_blocks = 0; + dump_hdr->total_length = 0; + + /* Dump driver version */ + dump_blk = DIAG_GET_NEXT_BLK(dump_hdr); + dump_blk->type = NBL_DIAG_DRV_VERSION; + disp_ops->get_driver_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), dump_blk->data, + NBL_DRV_VER_SZ); + dump_blk->length = NBL_DRV_VER_SZ; + dump_hdr->total_length += DIAG_BLK_SZ(dump_blk->length); + dump_hdr->num_blocks++; + + /* Dump device name */ + dump_blk = DIAG_GET_NEXT_BLK(dump_hdr); + dump_blk->type = NBL_DIAG_DEVICE_NAME; + dump_blk->length = nbl_diag_fill_device_name(serv_mgt, &dump_blk->data); + dump_hdr->total_length += DIAG_BLK_SZ(dump_blk->length); + dump_hdr->num_blocks++; + + /* Dump performance registers */ + if (net_resource_mgt->dump_flag & NBL_DIAG_FLAG_PERFORMANCE) { + dump_blk = DIAG_GET_NEXT_BLK(dump_hdr); + dump_blk->type = NBL_DIAG_PERFORMANCE; + dump_blk->length = disp_ops->get_perf_dump_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + dump_blk->data, + net_resource_mgt->dump_perf_len); + dump_hdr->total_length += DIAG_BLK_SZ(dump_blk->length); + dump_hdr->num_blocks++; + dump_hdr->flag |= NBL_DIAG_FLAG_PERFORMANCE; + } + + return 0; +} + +static int nbl_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + + if (!adapter->init_param.caps.has_ctrl) + return -EOPNOTSUPP; + + serv_mgt->net_resource_mgt->dump_flag = dump->flag; + + return 0; +} + +static void nbl_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + if (adapter->init_param.caps.is_ocp) { + wol->supported = WAKE_MAGIC; + wol->wolopts = common->wol_ena ? WAKE_MAGIC : 0; + } else { + wol->supported = 0; + wol->wolopts = 0; + } +} + +static int nbl_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + if (!adapter->init_param.caps.is_ocp) + return -EOPNOTSUPP; + + if (wol->wolopts && wol->wolopts != WAKE_MAGIC) + return -EOPNOTSUPP; + + if (common->wol_ena != !!wol->wolopts) { + common->wol_ena = !!wol->wolopts; + device_set_wakeup_enable(&common->pdev->dev, common->wol_ena); + netdev_dbg(netdev, "Wol magic packet %sabled", common->wol_ena ? "en" : "dis"); + } + + return 0; +} + /* NBL_SERV_ETHTOOL_OPS_TBL(ops_name, func) * * Use X Macros to reduce setup and remove codes. 
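For readers unfamiliar with the pattern named in the comment above, here is a minimal, self-contained sketch of the X-macro technique (the example_* names are illustrative only, not the driver's real identifiers): the ops table is written exactly once, and each expansion site defines the per-entry macro to generate the code it needs.

/* Illustrative X-macro sketch; all example_* names are hypothetical. */
struct example_ops {
	int (*get_wol)(void);
	int (*set_wol)(void);
};

static int example_get_wol(void) { return 0; }
static int example_set_wol(void) { return 0; }

/* The table pairs each op name with its handler, in one place. */
#define EXAMPLE_OPS_TBL \
	EXAMPLE_SET_OP(get_wol, example_get_wol); \
	EXAMPLE_SET_OP(set_wol, example_set_wol)

static void example_setup_ops(struct example_ops *ops)
{
/* Define the per-entry macro as an assignment, expand the table, undef. */
#define EXAMPLE_SET_OP(name, func) ((ops)->name = (func))
	EXAMPLE_OPS_TBL;
#undef EXAMPLE_SET_OP
}

Redefining EXAMPLE_SET_OP (for instance, to assign NULL) and expanding the same table again yields the matching teardown code, which is why a single table can serve both the setup and remove paths the comment refers to.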
@@ -3098,6 +3821,7 @@ do { \ NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_indir_size, nbl_get_rxfh_indir_size); \ NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_key_size, nbl_get_rxfh_key_size); \ NBL_SERV_SET_ETHTOOL_OPS(get_rxfh, nbl_get_rxfh); \ + NBL_SERV_SET_ETHTOOL_OPS(set_rxfh, nbl_set_rxfh); \ NBL_SERV_SET_ETHTOOL_OPS(get_msglevel, nbl_get_msglevel); \ NBL_SERV_SET_ETHTOOL_OPS(set_msglevel, nbl_set_msglevel); \ NBL_SERV_SET_ETHTOOL_OPS(get_regs_len, nbl_get_regs_len); \ @@ -3117,6 +3841,12 @@ do { \ NBL_SERV_SET_ETHTOOL_OPS(get_rep_strings, nbl_rep_get_strings); \ NBL_SERV_SET_ETHTOOL_OPS(get_rep_sset_count, nbl_rep_get_sset_count); \ NBL_SERV_SET_ETHTOOL_OPS(get_rep_ethtool_stats, nbl_rep_get_ethtool_stats); \ + NBL_SERV_SET_ETHTOOL_OPS(flash_device, nbl_flash_device); \ + NBL_SERV_SET_ETHTOOL_OPS(get_dump_flag, nbl_get_dump_flag); \ + NBL_SERV_SET_ETHTOOL_OPS(get_dump_data, nbl_get_dump_data); \ + NBL_SERV_SET_ETHTOOL_OPS(set_dump, nbl_set_dump); \ + NBL_SERV_SET_ETHTOOL_OPS(set_wol, nbl_set_wol); \ + NBL_SERV_SET_ETHTOOL_OPS(get_wol, nbl_get_wol); \ } while (0) void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops) @@ -3124,4 +3854,11 @@ void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops) #define NBL_SERV_SET_ETHTOOL_OPS(name, func) do {serv_ops->NBL_NAME(name) = func; ; } while (0) NBL_SERV_ETHTOOL_OPS_TBL; #undef NBL_SERV_SET_ETHTOOL_OPS + serv_ops->get_eth_ctrl_stats = nbl_get_eth_ctrl_stats; + serv_ops->get_pause_stats = nbl_get_pause_stats; + serv_ops->get_eth_mac_stats = nbl_get_eth_mac_stats; + serv_ops->get_fec_stats = nbl_get_fec_stats; + serv_ops->get_link_ext_state = nbl_get_link_ext_state; + serv_ops->get_link_ext_stats = nbl_get_link_ext_stats; + serv_ops->get_rmon_stats = nbl_get_rmon_stats; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h index aa17c7aa08d5..c4f5f51ee70e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -38,5 +38,7 @@ enum nbl_ethtool_lb_test_err_code { void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool); void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops_tbl); +int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, + struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c index c47bad576573..b806ece27061 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c @@ -1,8 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_hwmon.h" + +#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) #include #include #include #include -#include "nbl_hwmon.h" static const char * const nbl_hwmon_sensor_name[] = { "Sensor0", @@ -113,8 +121,11 @@ static const struct hwmon_chip_info nbl_hwmon_chip_info = { .info = nbl_hwmon_info, }; +#endif + int nbl_dev_setup_hwmon(struct nbl_adapter *adapter) { +#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); @@ -123,13 +134,18 @@ int nbl_dev_setup_hwmon(struct nbl_adapter *adapter) &nbl_hwmon_chip_info, NULL); return PTR_ERR_OR_ZERO(common_dev->hwmon_dev); +#else + return 0; +#endif } void nbl_dev_remove_hwmon(struct nbl_adapter *adapter) { +#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); if (common_dev->hwmon_dev) hwmon_device_unregister(common_dev->hwmon_dev); +#endif } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h index 5affd6cf993b..5f22de022023 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ #ifndef _NBL_HWMON_H #define _NBL_HWMON_H diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c index bdfd10e1c1ac..6366bd4c57b5 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c @@ -1,3 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + #include "nbl_ipsec.h" #ifdef CONFIG_TLS_DEVICE static int nbl_validate_xfrm_state(struct net_device *netdev, struct xfrm_state *x) @@ -102,7 +108,8 @@ static int nbl_validate_xfrm_state(struct net_device *netdev, struct xfrm_state static void nbl_ipsec_update_esn_state(struct xfrm_state *x, struct nbl_ipsec_esn_state *esn_state) { bool esn = !!(x->props.flags & XFRM_STATE_ESN); - bool inbound = !!(x->xso.flags & XFRM_OFFLOAD_INBOUND); + bool inbound = (x->xso.dir == XFRM_DEV_OFFLOAD_IN); + u32 bottom = 0; if (!esn) { @@ -398,7 +405,7 @@ static int nbl_xfrm_add_state(struct xfrm_state *x, struct netlink_ext_ack *exta nbl_ipsec_update_esn_state(x, &sa_entry->esn_state); nbl_ipsec_build_accel_xfrm_attrs(x, &sa_entry->attrs); - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) { index = nbl_ipsec_alloc_rx_index(netdev, &sa_entry->cfg_info); if (index < 0) { netdev_err(netdev, "No enough rx session resources\n"); @@ -467,7 +474,7 @@ static void nbl_xfrm_del_state(struct xfrm_state *x) struct nbl_ipsec_sa_entry *sa_entry = (struct nbl_ipsec_sa_entry *)x->xso.offload_handle; struct net_device *netdev = x->xso.dev; - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) nbl_ipsec_del_rx_flow(netdev, sa_entry->index); else nbl_ipsec_del_tx_flow(netdev, sa_entry->index); @@ -478,7 +485,7 @@ static void nbl_xfrm_free_state(struct xfrm_state *x) struct nbl_ipsec_sa_entry *sa_entry = (struct nbl_ipsec_sa_entry *)x->xso.offload_handle; struct net_device *netdev = x->xso.dev; - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) nbl_ipsec_free_rx_index(netdev, sa_entry->index); else nbl_ipsec_free_tx_index(netdev, sa_entry->index); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h index 747dcc057bdb..09d430eb9844 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2023 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c index 7f8f005d170f..268f39fe8209 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c @@ -1,3 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_ktls.h" #ifdef CONFIG_TLS_DEVICE @@ -277,7 +283,6 @@ static int nbl_ktls_add_rx(struct net_device *netdev, struct sock *sk, ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); *ctx = priv_rx; tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ); - return 0; } @@ -346,8 +351,8 @@ static void nbl_ktls_del(struct net_device *netdev, struct tls_context *tls_ctx, nbl_ktls_del_rx(netdev, tls_ctx); } -static int nbl_ktls_rx_resync(struct net_device *netdev, struct sock *sk, - u32 tcp_seq, u8 *rec_num) +static void nbl_ktls_rx_resync(struct net_device *netdev, struct sock *sk, + u32 tcp_seq, u8 *rec_num) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct nbl_ktls_offload_context_rx **ctx = @@ -356,8 +361,6 @@ static int nbl_ktls_rx_resync(struct net_device *netdev, struct sock *sk, nbl_ktls_cfg_rx_record(netdev, priv->index, priv->tcp_seq, be64_to_cpu(*(__be64 *)rec_num), false); - - return 0; } static int nbl_ktls_resync(struct net_device *netdev, struct sock *sk, @@ -367,7 +370,8 @@ static int nbl_ktls_resync(struct net_device *netdev, struct sock *sk, if (direction != TLS_OFFLOAD_CTX_DIR_RX) return -1; - return nbl_ktls_rx_resync(netdev, sk, tcp_seq, rec_num); + nbl_ktls_rx_resync(netdev, sk, tcp_seq, rec_num); + return 0; } #define NBL_SERV_KTLS_OPS_TBL \ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h index bed7435be17b..47cc8b5328d8 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2023 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c index a67c256b0ab9..970b47a76c19 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c @@ -170,9 +170,12 @@ static void nbl_display_lag_info(struct nbl_dev_mgt *dev_mgt, u8 lag_id) static void nbl_lag_create_bond_adev(struct nbl_dev_mgt *dev_mgt, struct nbl_lag_instance *lag_info) { - struct nbl_event_rdma_bond_update event_data; + struct nbl_event_param event_data; struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_lag_member *mem_tmp, *notify_mem = NULL; + struct nbl_lag_member_list_param *list_param = &event_data.param; + struct nbl_rdma_register_param register_param = {0}; int mem_num = 0; int i = 0; @@ -191,6 +194,17 @@ static void nbl_lag_create_bond_adev(struct nbl_dev_mgt *dev_mgt, "notify to create the bond adev failed, member count %u.\n", mem_num); return; } + event_data.param.lag_num = mem_num; + + /* Check whether we can support and create the rdma bond */ + serv_ops->register_rdma_bond(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + list_param, &register_param); + + if (!register_param.has_rdma) { + nbl_warn(common, NBL_DEBUG_MAIN, + "Cannot create rdma bond, vsi %u.\n", notify_mem->vsi_id); + return; + } for (i = 0; i < mem_num; i++) { event_data.subevent = NBL_SUBEVENT_RELEASE_ADEV; @@ -216,7 +230,7 @@ static void nbl_lag_member_recover_adev(struct nbl_dev_mgt *dev_mgt, struct nbl_lag_instance *lag_info, struct nbl_lag_member *lag_mem) { - struct nbl_event_rdma_bond_update event_data; + struct nbl_event_param
event_data; struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_lag_member *mem_tmp, *adev_mem = NULL; int i = 0, has_self = 0, mem_num = 0; @@ -271,7 +285,7 @@ static void update_lag_member_list(struct nbl_dev_mgt *dev_mgt, struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); struct nbl_lag_member *mem_tmp; - struct nbl_event_rdma_bond_update event_data; + struct nbl_event_param event_data; struct nbl_lag_member_list_param mem_list_param = {0}; u16 mem_id, tx_enabled_id = U16_MAX; u8 fwd; @@ -430,7 +444,7 @@ static int del_lag_member(struct nbl_dev_mgt *dev_mgt, break; } - if (nbl_list_entry_is_head(mem_tmp, &lag_info->mem_list_head, mem_list_node)) + if (list_entry_is_head(mem_tmp, &lag_info->mem_list_head, mem_list_node)) return -ENOENT; if (mem_count == 0 || mem_count > NBL_LAG_MAX_PORTS) { @@ -1015,7 +1029,6 @@ static void nbl_unregister_lag_handler(struct nbl_dev_mgt *dev_mgt) if (notif_blk->notifier_call) { netdevice_nn = &lag_mem->netdevice_nn; unregister_netdevice_notifier_dev_net(net_dev->netdev, notif_blk, netdevice_nn); - nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, "nbl lag event handler unregistered.\n"); } @@ -1077,7 +1090,6 @@ static int nbl_lag_alloc_resource(struct nbl_dev_mgt *dev_mgt) lag_resource_tmp = kzalloc(sizeof(*lag_resource_tmp), GFP_KERNEL); if (!lag_resource_tmp) goto ret_fail; - kref_init(&lag_resource_tmp->kref); lag_resource_tmp->board_key = board_key; INIT_LIST_HEAD(&lag_resource_tmp->lag_instance_head); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h index 3d5b9426e1ea..4afa4416b9cb 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2021 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_p4_version.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_p4_version.h new file mode 100644 index 000000000000..2fbe7c764386 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_p4_version.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_P4_VERSION_H_ +#define _NBL_P4_VERSION_H_ + +#define NBL_SINGLE_VXLAN_TOE_ENHANCE_P4_MD5 "fc61c22894eb17f688dff153b7c29efe" +#define NBL_DUAL_VXLAN_TOE_ENHANCE_P4_MD5 "64fff3eeebdb53990c201ec70a430a55" +#define NBL_QUAD_VXLAN_TOE_ENHANCE_P4_MD5 "9b8ab0508834436e1df1eac537934485" + +#define NBL_SINGLE_PORT_HG_P4_MD5 "44757bab80dc985bffc04fe9a6d66bc1" +#define NBL_DUAL_PORT_HG_P4_MD5 "74e95394bc348b9cc6ebe5f9c28c2b8a" +#define NBL_QUAD_PORT_HG_P4_MD5 "009e209c4a3cab358bc76d5e06e3338b" + +#define NBL_SINGLE_PORT_LG_P4_MD5 "bfb18a8db52d82d2708920d0d3efc231" +#define NBL_DUAL_PORT_LG_P4_MD5 "32da40ac96884d520ebfe4179db2d7fb" +#define NBL_QUAD_PORT_LG_P4_MD5 "07453cc77b7c714c285038b05f5b53d7" + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c index 5aec71bcda85..2dd287b5b5a7 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c @@ -3,10 +3,13 @@ * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan */ + #include "nbl_ethtool.h" #include "nbl_ktls.h" #include "nbl_ipsec.h" +#include "nbl_p4_version.h" #include "nbl_tc.h" +#include static void nbl_serv_set_link_state(struct nbl_service_mgt *serv_mgt, struct net_device *netdev); static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vid); @@ -252,10 +255,9 @@ static int nbl_serv_set_vectors(struct nbl_service_mgt *serv_mgt, return -ENOMEM; for (i = 0; i < ring_num; i++) { - ring_mgt->vectors[i].napi = + ring_mgt->vectors[i].nbl_napi = disp_ops->get_vector_napi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); - netif_napi_add(netdev, ring_mgt->vectors[i].napi, pt_ops->napi_poll); - + netif_napi_add(netdev, &ring_mgt->vectors[i].nbl_napi->napi, pt_ops->napi_poll); ring_mgt->vectors[i].netdev = netdev; cpumask_clear(&ring_mgt->vectors[i].cpumask); } @@ -269,12 +271,46 @@ static void nbl_serv_remove_vectors(struct nbl_serv_ring_mgt *ring_mgt, struct d u16 ring_num = ring_mgt->xdp_ring_offset; for (i = 0; i < ring_num; i++) - netif_napi_del(ring_mgt->vectors[i].napi); + netif_napi_del(&ring_mgt->vectors[i].nbl_napi->napi); devm_kfree(dev, ring_mgt->vectors); ring_mgt->vectors = NULL; } +static void nbl_serv_check_flow_table_spec(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + int ret; + + if (!flow_mgt->force_promisc) + return; + + ret = disp_ops->check_flow_table_spec(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->vlan_list_cnt, + flow_mgt->unicast_mac_cnt + 1, + flow_mgt->multi_mac_cnt); + + if (!ret) { + flow_mgt->force_promisc = 0; + flow_mgt->pending_async_work = 1; + } +} + +static bool nbl_serv_check_need_flow_rule(u8 *mac, u16 promisc) +{ + if (promisc & (BIT(NBL_USER_FLOW) | BIT(NBL_MIRROR))) + return false; + + if (!is_multicast_ether_addr(mac) && (promisc & BIT(NBL_PROMISC))) + return false; + + if (is_multicast_ether_addr(mac) && (promisc & BIT(NBL_ALLMULTI))) + return false; + + return true; +} + static struct nbl_serv_vlan_node *nbl_serv_alloc_vlan_node(void) { struct nbl_serv_vlan_node *vlan_node = NULL; @@ -285,6 +321,8 @@ static struct nbl_serv_vlan_node *nbl_serv_alloc_vlan_node(void) INIT_LIST_HEAD(&vlan_node->node); vlan_node->ref_cnt = 1; + vlan_node->primary_mac_effective = 0; + vlan_node->sub_mac_effective = 0; return vlan_node; } @@ -303,6 +341,8 @@ static struct nbl_serv_submac_node *nbl_serv_alloc_submac_node(void) return NULL; INIT_LIST_HEAD(&submac_node->node); + submac_node->effective = 0; + return submac_node; } @@ -311,6 +351,226 @@ static void nbl_serv_free_submac_node(struct nbl_serv_submac_node *submac_node) kfree(submac_node); } +static int nbl_serv_update_submac_node_effective(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_submac_node *submac_node, + bool effective, + u16 vsi) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + bool force_promisc = 0; + int ret = 0; + + if (submac_node->effective == effective) + return 0; + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + if (!vlan_node->sub_mac_effective) + continue; + + if (effective) { + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, 
vlan_node->vid, vsi); + if (ret) + goto del_macvlan_node; + } else { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } + } + submac_node->effective = effective; + if (effective) + flow_mgt->active_submac_list++; + else + flow_mgt->active_submac_list--; + + return 0; + +del_macvlan_node: + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + if (vlan_node->sub_mac_effective) + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } + + if (ret) { + force_promisc = 1; + if (flow_mgt->force_promisc ^ force_promisc) { + flow_mgt->force_promisc = force_promisc; + flow_mgt->pending_async_work = 1; + netdev_info(dev, "Reached MAC filter limit, forcing promisc/allmulti mode\n"); + } + } + + return 0; +} + +static int nbl_serv_update_vlan_node_effective(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_vlan_node *vlan_node, + bool effective, + u16 vsi) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node; + bool force_promisc = 0; + int ret = 0, i = 0; + + if (vlan_node->primary_mac_effective == effective && + vlan_node->sub_mac_effective == effective) + return 0; + + if (effective && !vlan_node->primary_mac_effective) { + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, vlan_node->vid, vsi); + if (ret) + goto check_ret; + } else if (!effective && vlan_node->primary_mac_effective) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, vlan_node->vid, vsi); + } + + vlan_node->primary_mac_effective = effective; + + for (i = 0; i < NBL_SUBMAC_MAX; i++) + list_for_each_entry(submac_node, &flow_mgt->submac_list[i], node) { + if (!submac_node->effective) + continue; + + if (effective && !vlan_node->sub_mac_effective) { + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + if (ret) + goto del_macvlan_node; + } else if (!effective && vlan_node->sub_mac_effective) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } + } + + vlan_node->sub_mac_effective = effective; + + return 0; + +del_macvlan_node: + for (i = 0; i < NBL_SUBMAC_MAX; i++) + list_for_each_entry(submac_node, &flow_mgt->submac_list[i], node) { + if (submac_node->effective) + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + submac_node->mac, vlan_node->vid, vsi); + } +check_ret: + if (ret) { + force_promisc = 1; + if (flow_mgt->force_promisc ^ force_promisc) { + flow_mgt->force_promisc = force_promisc; + flow_mgt->pending_async_work = 1; + netdev_info(dev, "Reached VLAN filter limit, forcing promisc/allmulti mode\n"); + } + } + + if (vlan_node->primary_mac_effective == effective) + return 0; + + if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) + return 0; + + return ret; +} + +static void nbl_serv_del_submac_node(struct nbl_service_mgt *serv_mgt, u8 *mac, u16 vsi) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node, *submac_node_safe; + struct list_head *submac_head; + + if (is_multicast_ether_addr(mac)) + submac_head = &flow_mgt->submac_list[NBL_SUBMAC_MULTI]; + else + submac_head =
&flow_mgt->submac_list[NBL_SUBMAC_UNICAST]; + + list_for_each_entry_safe(submac_node, submac_node_safe, submac_head, node) + if (ether_addr_equal(submac_node->mac, mac)) { + if (submac_node->effective) + nbl_serv_update_submac_node_effective(serv_mgt, + submac_node, 0, vsi); + list_del(&submac_node->node); + flow_mgt->submac_list_cnt--; + if (is_multicast_ether_addr(submac_node->mac)) + flow_mgt->multi_mac_cnt--; + else + flow_mgt->unicast_mac_cnt--; + nbl_serv_free_submac_node(submac_node); + break; + } +} + +static int nbl_serv_add_submac_node(struct nbl_service_mgt *serv_mgt, u8 *mac, u16 vsi, u16 promisc) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node; + struct list_head *submac_head; + + if (is_multicast_ether_addr(mac)) + submac_head = &flow_mgt->submac_list[NBL_SUBMAC_MULTI]; + else + submac_head = &flow_mgt->submac_list[NBL_SUBMAC_UNICAST]; + + list_for_each_entry(submac_node, submac_head, node) { + if (ether_addr_equal(submac_node->mac, mac)) + return 0; + } + + submac_node = nbl_serv_alloc_submac_node(); + if (!submac_node) + return -ENOMEM; + + submac_node->effective = 0; + ether_addr_copy(submac_node->mac, mac); + if (nbl_serv_check_need_flow_rule(mac, promisc) && + (flow_mgt->trusted_en || flow_mgt->active_submac_list < NBL_NO_TRUST_MAX_MAC)) { + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, 1, vsi); + } + + list_add(&submac_node->node, submac_head); + flow_mgt->submac_list_cnt++; + if (is_multicast_ether_addr(mac)) + flow_mgt->multi_mac_cnt++; + else + flow_mgt->unicast_mac_cnt++; + + return 0; +} + +static void nbl_serv_update_mcast_submac(struct nbl_service_mgt *serv_mgt, bool multi_effective, + bool unicast_effective, u16 vsi) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_submac_node *submac_node; + + list_for_each_entry(submac_node, &flow_mgt->submac_list[NBL_SUBMAC_MULTI], node) + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, + multi_effective, vsi); + + list_for_each_entry(submac_node, &flow_mgt->submac_list[NBL_SUBMAC_UNICAST], node) + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, + unicast_effective, vsi); +} + +static void nbl_serv_update_promisc_vlan(struct nbl_service_mgt *serv_mgt, bool effective, u16 vsi) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) + nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, effective, vsi); +} + static void nbl_serv_del_all_vlans(struct nbl_service_mgt *serv_mgt) { struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); @@ -319,8 +579,9 @@ static void nbl_serv_del_all_vlans(struct nbl_service_mgt *serv_mgt) struct nbl_serv_vlan_node *vlan_node, *vlan_node_safe; list_for_each_entry_safe(vlan_node, vlan_node_safe, &flow_mgt->vlan_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, NBL_COMMON_TO_VSI_ID(common)); + if (vlan_node->primary_mac_effective) + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, NBL_COMMON_TO_VSI_ID(common)); list_del(&vlan_node->node); nbl_serv_free_vlan_node(vlan_node); @@ -330,16 +591,21 @@ static void nbl_serv_del_all_vlans(struct nbl_service_mgt *serv_mgt) static void nbl_serv_del_all_submacs(struct nbl_service_mgt *serv_mgt, u16 vsi) { struct nbl_serv_flow_mgt *flow_mgt = 
NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_submac_node *submac_node, *submac_node_safe; + int i; - list_for_each_entry_safe(submac_node, submac_node_safe, &flow_mgt->submac_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, - NBL_DEFAULT_VLAN_ID, vsi); - - list_del(&submac_node->node); - nbl_serv_free_submac_node(submac_node); - } + for (i = 0; i < NBL_SUBMAC_MAX; i++) + list_for_each_entry_safe(submac_node, submac_node_safe, + &flow_mgt->submac_list[i], node) { + nbl_serv_update_submac_node_effective(serv_mgt, submac_node, 0, vsi); + list_del(&submac_node->node); + flow_mgt->submac_list_cnt--; + if (is_multicast_ether_addr(submac_node->mac)) + flow_mgt->multi_mac_cnt--; + else + flow_mgt->unicast_mac_cnt--; + nbl_serv_free_submac_node(submac_node); + } } static int nbl_serv_validate_tc_config(struct tc_mqprio_qopt_offload *mqprio_qopt, @@ -721,7 +987,8 @@ int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, } vsi_info->active_ring_num = real_qps; - ret = disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, real_qps); + ret = disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_info->vsi_id, real_qps, false); if (ret) goto setup_cqs_fail; @@ -754,108 +1021,93 @@ int nbl_serv_vsi_stop(void *priv, u16 vsi_index) /* modify defalt action and rss configuration */ disp_ops->remove_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id); + /* clear dsch config */ + disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, false); + /* disable and rest tx/rx logic queue */ disp_ops->remove_all_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id); - /* clear dsch config */ - disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, false); /* free tx and rx bufs */ nbl_serv_stop_rings(serv_mgt, vsi_info); return 0; } -static int nbl_serv_switch_traffic_default_dest(void *priv, struct nbl_service_traffic_switch *info) +static struct nbl_mac_filter *nbl_add_filter(struct list_head *head, + const u8 *macaddr) +{ + struct nbl_mac_filter *f; + + if (!macaddr) + return NULL; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + list_add_tail(&f->list, head); + + return f; +} + +static int nbl_serv_suspend_data_vsi_traffic(struct nbl_service_mgt *serv_mgt) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); struct net_device *dev = net_resource_mgt->netdev; struct nbl_netdev_priv *net_priv = netdev_priv(dev); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_serv_vlan_node *vlan_node; - int ret; - u16 from_vsi, to_vsi; - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - if (!vlan_node->vid) { - from_vsi = net_priv->normal_vsi; - to_vsi = info->normal_vsi; - } else { - from_vsi = net_priv->other_vsi; - to_vsi = info->sync_other_vsi; - } - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, from_vsi); - ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, to_vsi); - if (ret) { - netdev_err(dev, "Fail to cfg macvlan on vid %u in vsi switch", - vlan_node->vid); - goto fail; - } - } - /* arp/nd traffic */ - 
disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->normal_vsi); - ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info->normal_vsi); - if (ret) - goto add_multi_fail; + rtnl_lock(); + disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + net_priv->data_vsi, 0); + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + net_priv->data_vsi, 0); - /* lldp/lacp switch */ - if (info->has_lldp) { - disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi); - ret = disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - info->sync_other_vsi); - if (ret) - goto add_lldp_fail; - } + disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + 0, net_priv->user_vsi); - if (info->has_lacp) { - disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi); - ret = disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - info->sync_other_vsi); - if (ret) - goto add_lacp_fail; - } + flow_mgt->promisc &= ~BIT(NBL_PROMISC); + flow_mgt->promisc &= ~BIT(NBL_ALLMULTI); + flow_mgt->promisc |= BIT(NBL_USER_FLOW); + rtnl_unlock(); + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); + + return 0; +} - net_priv->normal_vsi = info->normal_vsi; - net_priv->other_vsi = info->sync_other_vsi; - net_priv->async_pending_vsi = info->async_other_vsi; +static int nbl_serv_restore_vsi_traffic(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_netdev_priv *net_priv = netdev_priv(dev); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - /* trigger submac update */ - net_resource_mgt->user_promisc_mode = info->promisc; - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + rtnl_lock(); + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + 0, net_priv->user_vsi); + disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->user_vsi, 0); + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->user_vsi, 0); + flow_mgt->promisc &= ~BIT(NBL_USER_FLOW); + rtnl_unlock(); nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); return 0; +} -add_lacp_fail: - disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi); - if (info->has_lldp) - disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info->sync_other_vsi); -add_lldp_fail: - if (info->has_lldp) - disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi); - disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info->normal_vsi); -add_multi_fail: - disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->normal_vsi); -fail: - list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - if (!vlan_node->vid) { - from_vsi = net_priv->normal_vsi; - to_vsi = info->normal_vsi; - } else { - from_vsi = net_priv->other_vsi; - to_vsi = info->sync_other_vsi; - } - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, to_vsi); - disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, from_vsi); - } +static int nbl_serv_switch_traffic_default_dest(void *priv, int op) +{ + struct 
nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - return -EINVAL; + if (op == NBL_DEV_KERNEL_TO_USER) + nbl_serv_suspend_data_vsi_traffic(serv_mgt); + else if (op == NBL_DEV_USER_TO_KERNEL) + nbl_serv_restore_vsi_traffic(serv_mgt); + + return 0; } static int nbl_serv_abnormal_event_to_queue(int event_type) @@ -1186,6 +1438,18 @@ static void nbl_serv_register_link_forced_notify(struct nbl_service_mgt *serv_mg nbl_serv_chan_notify_link_forced_resp, serv_mgt); } +static void nbl_serv_unregister_link_forced_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_NOTIFY_LINK_FORCED); +} + static void nbl_serv_update_vlan(struct work_struct *work) { struct nbl_serv_net_resource_mgt *net_resource_mgt = @@ -1210,7 +1474,7 @@ static void nbl_serv_update_vlan(struct work_struct *work) err = nbl_serv_netdev_open(netdev); if (err) { - netdev_err(netdev, "Netdev open failed after setting ringparam\n"); + netdev_err(netdev, "Netdev open failed after update_vlan\n"); goto netdev_open_fail; } } @@ -1262,45 +1526,244 @@ static void nbl_serv_register_vlan_notify(struct nbl_service_mgt *serv_mgt) nbl_serv_chan_notify_vlan_resp, serv_mgt); } -int nbl_serv_netdev_open(struct net_device *netdev) +static void nbl_serv_unregister_vlan_notify(struct nbl_service_mgt *serv_mgt) { - struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); - struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); - struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt); - struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); - struct nbl_serv_ring_vsi_info *vsi_info; - int num_cpus, real_qps, ret = 0; - bool netdev_open = true; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); - if (!test_bit(NBL_DOWN, adapter->state)) - return -EBUSY; + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; - netdev_info(netdev, "Nbl open\n"); + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_VLAN); +} - if (ring_mgt->xdp_prog) - nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open, - NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); +static int nbl_serv_chan_notify_trust_req(struct nbl_service_mgt *serv_mgt, + u16 func_id, bool trusted) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; - netif_carrier_off(netdev); - nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), true, false); - vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_NOTIFY_TRUST, &trusted, sizeof(trusted), + NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); +} - if (tc_mgt->num_tc) { - real_qps = tc_mgt->total_qps; - } else if (vsi_info->active_ring_num) { - real_qps = vsi_info->active_ring_num; - } else { - num_cpus = num_online_cpus(); - real_qps = num_cpus > vsi_info->ring_num ? 
vsi_info->ring_num : num_cpus; - } +static void nbl_serv_chan_notify_trust_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + bool *trusted = (bool *)data; + struct nbl_chan_ack_info chan_ack; - ret = nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, real_qps, 1); - if (ret) - goto vsi_open_fail; + flow_mgt->trusted_en = *trusted; + flow_mgt->trusted_update = 1; + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); - ret = netif_set_real_num_tx_queues(netdev, real_qps); - if (ret) + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NOTIFY_TRUST, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} + +static void nbl_serv_register_trust_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_TRUST, + nbl_serv_chan_notify_trust_resp, serv_mgt); +} + +static void nbl_serv_unregister_trust_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_TRUST); +} + +static void nbl_serv_update_mirror_outputport(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, update_mirror_outputport); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + bool mirror; + + mirror = !!(flow_mgt->promisc & BIT(NBL_MIRROR)); + nbl_event_notify(NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, &mirror, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} + +static int nbl_serv_chan_notify_mirror_outputport_req(struct nbl_service_mgt *serv_mgt, u16 func_id, + bool opcode) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, + &opcode, sizeof(bool), NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); +} + +static void nbl_serv_chan_notify_mirror_outputport_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + bool opcode = *(bool *)data; + struct nbl_chan_ack_info chan_ack; + + if (!!(flow_mgt->promisc & BIT(NBL_MIRROR)) ^ opcode) { + if (opcode) + flow_mgt->promisc |= BIT(NBL_MIRROR); + else + 
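
The trust-notify pair above follows the mailbox pattern this driver uses throughout: one side sends an NBL_CHAN_MSG_* request, the receiving handler records the new state, defers the actual reconfiguration to the rx_mode_async work item, and acks immediately so the channel is never held up. A minimal user-space sketch of that handler shape (all names below are hypothetical stand-ins, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a decoded channel message. */
struct chan_msg {
        int type;
        bool payload;
};

static bool trusted_en;       /* models flow_mgt->trusted_en     */
static bool trusted_update;   /* models flow_mgt->trusted_update */

static void queue_rx_mode_work(void)
{
        /* the real driver queues rx_mode_async here */
}

static void send_ack(int type)
{
        printf("ack msg %d\n", type);
}

/* Handler side: record the state, defer the work, ack at once. */
static void notify_trust_resp(const struct chan_msg *msg)
{
        trusted_en = msg->payload;
        trusted_update = true;
        queue_rx_mode_work();
        send_ack(msg->type);
}

int main(void)
{
        struct chan_msg msg = { .type = 1, .payload = true };

        notify_trust_resp(&msg);
        return trusted_en && trusted_update ? 0 : 1;
}
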
flow_mgt->promisc &= ~BIT(NBL_MIRROR); + nbl_common_queue_work(&net_resource_mgt->update_mirror_outputport, false, false); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} + +static void nbl_serv_register_mirror_outputport_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, + nbl_serv_chan_notify_mirror_outputport_resp, serv_mgt); +} + +static void nbl_serv_unregister_mirror_outputport_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY); +} + +static int nbl_serv_chan_get_vf_stats_req(struct nbl_service_mgt *serv_mgt, + u16 func_id, struct nbl_vf_stats *vf_stats) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_GET_VF_STATS, + NULL, 0, vf_stats, sizeof(*vf_stats), 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); +} + +static void nbl_serv_chan_get_vf_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_vf_stats vf_stats = {0}; + struct nbl_stats stats = { 0 }; + int err = NBL_CHAN_RESP_OK; + + disp_ops->get_net_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &stats); + + vf_stats.rx_packets = stats.rx_packets; + vf_stats.tx_packets = stats.tx_packets; + vf_stats.rx_bytes = stats.rx_bytes; + vf_stats.tx_bytes = stats.tx_bytes; + vf_stats.multicast = stats.rx_multicast_packets; + vf_stats.rx_dropped = 0; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_STATS, msg_id, + err, &vf_stats, sizeof(vf_stats)); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} + +static void nbl_serv_register_get_vf_stats(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_GET_VF_STATS, + nbl_serv_chan_get_vf_stats_resp, serv_mgt); +} + +static void nbl_serv_unregister_get_vf_stats(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->unregister_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_GET_VF_STATS); +} + +int nbl_serv_netdev_open(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = 
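
The mirror-outputport handler only reacts to an actual edge: it XORs the currently cached bit against the requested state and queues work solely on a change. A small self-contained sketch of that edge-triggered toggle (bit position and names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define MIRROR_BIT 3            /* hypothetical bit position */

static unsigned promisc_flags;  /* models flow_mgt->promisc */

static void set_mirror(bool on)
{
        /* !!(...) folds the flag to 0/1 so the XOR is a clean edge test */
        if (!!(promisc_flags & BIT(MIRROR_BIT)) ^ on) {
                if (on)
                        promisc_flags |= BIT(MIRROR_BIT);
                else
                        promisc_flags &= ~BIT(MIRROR_BIT);
                printf("mirror -> %d, queue update work\n", on);
        }
        /* already in the requested state: nothing queued */
}

int main(void)
{
        set_mirror(true);
        set_mirror(true);    /* no-op, no duplicate work item */
        set_mirror(false);
        return 0;
}
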
NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+        struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt);
+        struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+        struct nbl_serv_ring_vsi_info *vsi_info;
+        int num_cpus, real_qps, ret = 0;
+        bool netdev_open = true;
+
+        if (!test_bit(NBL_DOWN, adapter->state))
+                return -EBUSY;
+
+        netdev_info(netdev, "Nbl open\n");
+
+        if (ring_mgt->xdp_prog)
+                nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open,
+                                 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+
+        netif_carrier_off(netdev);
+        nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), true, false);
+        vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+
+        if (tc_mgt->num_tc) {
+                real_qps = tc_mgt->total_qps;
+        } else if (vsi_info->active_ring_num) {
+                real_qps = vsi_info->active_ring_num;
+        } else {
+                num_cpus = num_online_cpus();
+                real_qps = num_cpus > vsi_info->ring_num ? vsi_info->ring_num : num_cpus;
+        }
+
+        ret = nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, real_qps, 1);
+        if (ret)
+                goto vsi_open_fail;
+
+        ret = netif_set_real_num_tx_queues(netdev, real_qps);
+        if (ret)
                 goto setup_real_qps_fail;
         ret = netif_set_real_num_rx_queues(netdev, real_qps);
         if (ret)
@@ -1345,6 +1808,7 @@ int nbl_serv_netdev_stop(struct net_device *netdev)
         netif_tx_stop_all_queues(netdev);
         netif_carrier_off(netdev);
         netif_tx_disable(netdev);
+        synchronize_net();
         nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA);
 
         if (ring_mgt->xdp_prog)
@@ -1356,12 +1820,53 @@ int nbl_serv_netdev_stop(struct net_device *netdev)
         return 0;
 }
 
-static int nbl_serv_change_mtu(struct net_device *netdev, int new_mtu)
+static int nbl_serv_change_rep_mtu(struct net_device *netdev, int new_mtu)
 {
         netdev->mtu = new_mtu;
         return 0;
 }
 
+static int nbl_serv_change_mtu(struct net_device *netdev, int new_mtu)
+{
+        struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+        struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+        struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+        struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+        int was_running = 0, err = 0;
+        int max_mtu;
+
+        max_mtu = disp_ops->get_max_mtu(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+        if (new_mtu > max_mtu)
+                netdev_notice(netdev, "Netdev already has an xdp prog bound: new_mtu(%d) > current max_mtu(%d), trying to rebuild rx buffers\n",
+                              new_mtu, max_mtu);
+
+        if (new_mtu) {
+                netdev->mtu = new_mtu;
+                nbl_event_notify(NBL_EVENT_CHANGE_MTU, &new_mtu,
+                                 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+
+                was_running = netif_running(netdev);
+                if (was_running) {
+                        err = nbl_serv_netdev_stop(netdev);
+                        if (err) {
+                                netdev_err(netdev, "Netdev stop failed while changing mtu\n");
+                                return err;
+                        }
+
+                        err = nbl_serv_netdev_open(netdev);
+                        if (err) {
+                                netdev_err(netdev, "Netdev open failed after changing mtu\n");
+                                return err;
+                        }
+                }
+        }
+
+        disp_ops->set_mtu(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+                          NBL_COMMON_TO_VSI_ID(common), new_mtu);
+
+        return 0;
+}
+
 static int nbl_serv_set_mac(struct net_device *dev, void *p)
 {
         struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev);
@@ -1372,7 +1877,6 @@ static int nbl_serv_set_mac(struct net_device *dev, void *p)
         struct nbl_serv_vlan_node *vlan_node;
         struct sockaddr *addr = p;
         struct nbl_netdev_priv *priv = netdev_priv(dev);
-        u16 vsi_id;
         int ret = 0;
 
         if (!is_valid_ether_addr(addr->sa_data)) {
@@ -1380,26 +1884,35 @@ static int nbl_serv_set_mac(struct net_device *dev, void *p)
                 return -EADDRNOTAVAIL;
         }
 
-        if (ether_addr_equal(dev->dev_addr, addr->sa_data)
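
nbl_serv_change_mtu() commits the new MTU first and then, if the interface is running, bounces it with the driver's own stop/open pair so the rx buffers are re-sized before the MTU is finally pushed to hardware. A compact sketch of that ordering, with dev_stop()/dev_open() as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int dev_stop(void)
{
        puts("stop: drain queues, free rings");
        return 0;
}

static int dev_open(void)
{
        puts("open: realloc rings sized from the new mtu");
        return 0;
}

static int change_mtu(int *mtu, int new_mtu, bool running)
{
        int err;

        *mtu = new_mtu;          /* commit first: open() sizes from it */
        if (running) {
                err = dev_stop();
                if (err)
                        return err;
                err = dev_open();
                if (err)
                        return err;
        }
        /* only now would the hardware MTU register be written */
        return 0;
}

int main(void)
{
        int mtu = 1500;

        return change_mtu(&mtu, 9000, true);
}
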
+ if (ether_addr_equal(flow_mgt->mac, addr->sa_data)) return 0; list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - if (vlan_node->vid == 0) - vsi_id = priv->normal_vsi; - else - vsi_id = priv->other_vsi; + if (!vlan_node->primary_mac_effective) + continue; disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, vsi_id); + vlan_node->vid, priv->data_vsi); ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, - vlan_node->vid, vsi_id); + vlan_node->vid, priv->data_vsi); if (ret) { netdev_err(dev, "Fail to cfg macvlan on vid %u", vlan_node->vid); goto fail; } } + if (flow_mgt->promisc & BIT(NBL_USER_FLOW)) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + 0, priv->user_vsi); + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, + 0, priv->user_vsi); + if (ret) { + netdev_err(dev, "Fail to cfg macvlan on vid %u for user", 0); + goto fail; + } + } + disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - priv->normal_vsi, addr->sa_data); + priv->data_vsi, addr->sa_data); ether_addr_copy(flow_mgt->mac, addr->sa_data); eth_hw_addr_set(dev, addr->sa_data); @@ -1411,14 +1924,12 @@ static int nbl_serv_set_mac(struct net_device *dev, void *p) return 0; fail: list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { - if (vlan_node->vid == 0) - vsi_id = priv->normal_vsi; - else - vsi_id = priv->other_vsi; + if (!vlan_node->primary_mac_effective) + continue; disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, - vlan_node->vid, vsi_id); + vlan_node->vid, priv->data_vsi); disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vlan_node->vid, vsi_id); + vlan_node->vid, priv->data_vsi); } return -EAGAIN; } @@ -1427,17 +1938,25 @@ static int nbl_serv_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); struct nbl_netdev_priv *priv = netdev_priv(dev); struct nbl_serv_vlan_node *vlan_node; - u16 vsi_id = priv->other_vsi; + bool effective = true; int ret = 0; if (vid == NBL_DEFAULT_VLAN_ID) return 0; + if (flow_mgt->vid != 0) + effective = false; + + if (!flow_mgt->unicast_flow_enable) + effective = false; + + if (!flow_mgt->trusted_en && flow_mgt->vlan_list_cnt >= NBL_NO_TRUST_MAX_VLAN) + return -ENOSPC; + nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev for proto 0x%04x, vid %u.", be16_to_cpu(proto), vid); @@ -1451,31 +1970,32 @@ static int nbl_serv_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) vlan_node = nbl_serv_alloc_vlan_node(); if (!vlan_node) - return -EAGAIN; - - ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - flow_mgt->mac, vid, vsi_id); - if (ret) { - nbl_serv_free_vlan_node(vlan_node); - return -EAGAIN; - } + return -ENOMEM; vlan_node->vid = vid; + ret = nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, effective, priv->data_vsi); + if (ret) + goto add_macvlan_failed; list_add(&vlan_node->node, &flow_mgt->vlan_list); + flow_mgt->vlan_list_cnt++; + + nbl_serv_check_flow_table_spec(serv_mgt); return 0; + +add_macvlan_failed: + nbl_serv_free_vlan_node(vlan_node); + return ret; } static int nbl_serv_rx_kill_vid(struct net_device 
*dev, __be16 proto, u16 vid)
 {
         struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev);
         struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
-        struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
         struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
         struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
         struct nbl_netdev_priv *priv = netdev_priv(dev);
         struct nbl_serv_vlan_node *vlan_node;
-        u16 vsi_id = priv->other_vsi;
 
         if (vid == NBL_DEFAULT_VLAN_ID)
                 return 0;
@@ -1488,28 +2008,30 @@ static int nbl_serv_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
                 if (vlan_node->vid == vid) {
                         vlan_node->ref_cnt--;
                         if (!vlan_node->ref_cnt) {
-                                disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
-                                                      flow_mgt->mac, vid, vsi_id);
+                                nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node,
+                                                                    0, priv->data_vsi);
                                 list_del(&vlan_node->node);
+                                flow_mgt->vlan_list_cnt--;
                                 nbl_serv_free_vlan_node(vlan_node);
                         }
                         break;
                 }
         }
 
+        nbl_serv_check_flow_table_spec(serv_mgt);
+
         return 0;
 }
 
 static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vid)
 {
         struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
-        struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
-        struct nbl_serv_vlan_node *vlan_node;
+        struct nbl_serv_vlan_node *vlan_node = NULL;
         struct nbl_serv_vlan_node *node, *tmp;
-        struct nbl_serv_submac_node *submac_node;
         struct nbl_common_info *common;
-        u16 vsi;
         int ret;
+        u16 vsi;
+        bool other_effective = false;
 
         if (flow_mgt->vid == vid)
                 return 0;
@@ -1518,68 +2040,56 @@ static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vi
         vsi = NBL_COMMON_TO_VSI_ID(common);
 
         rtnl_lock();
-        /* update mac sub-interface */
-        list_for_each_entry(submac_node, &flow_mgt->submac_list, node) {
-                disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac,
-                                      flow_mgt->vid, vsi);
-                ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac,
-                                            vid, vsi);
-                if (ret) {
-                        nbl_err(common, NBL_DEBUG_COMMON, "update vlan %u, submac %pM failed\n",
-                                vid, submac_node->mac);
-                        goto update_submac_if_failed;
+        list_for_each_entry(node, &flow_mgt->vlan_list, node) {
+                if (node->vid == vid) {
+                        node->ref_cnt++;
+                        vlan_node = node;
+                        break;
                 }
         }
 
-        list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) {
-                if (vlan_node->vid == vid) {
-                        vlan_node->ref_cnt++;
-                        goto free_old_vlan;
-                }
-        }
+        if (!vlan_node)
+                vlan_node = nbl_serv_alloc_vlan_node();
 
-        /* new vlan node */
-        vlan_node = nbl_serv_alloc_vlan_node();
         if (!vlan_node) {
-                ret = -ENOMEM;
-                goto alloc_node_failed;
+                rtnl_unlock();
+                return -ENOMEM;
         }
 
-        ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, vid, vsi);
-        if (ret)
-                goto add_macvlan_failed;
         vlan_node->vid = vid;
-        list_add(&vlan_node->node, &flow_mgt->vlan_list);
-
-free_old_vlan:
+        /* when restoring to the default vlan id 0, we need to restore the other vlan interfaces */
+        if (!vid)
+                other_effective = true;
         list_for_each_entry_safe(node, tmp, &flow_mgt->vlan_list, node) {
-                if (node->vid == flow_mgt->vid) {
+                if (node->vid == flow_mgt->vid && node != vlan_node) {
                         node->ref_cnt--;
                         if (!node->ref_cnt) {
-                                disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
-                                                      flow_mgt->mac, node->vid, vsi);
+                                nbl_serv_update_vlan_node_effective(serv_mgt, node, 0, vsi);
                                 list_del(&node->node);
                                 nbl_serv_free_vlan_node(node);
                         }
-                        break;
+                } else if (node->vid != vid) {
+                        nbl_serv_update_vlan_node_effective(serv_mgt, node,
                                                             other_effective, 
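
rx_add_vid/rx_kill_vid keep one refcounted node per vid and only program the hardware while a node is marked effective, which is what lets the untrusted-VF cap (NBL_NO_TRUST_MAX_VLAN) and the default-vid switch park entries without losing them. A toy model of the refcounted list (simplified, no "effective" hardware hook):

#include <stdlib.h>

struct vlan_node {
        unsigned vid;
        int ref;
        struct vlan_node *next;
};

static struct vlan_node *head;

static struct vlan_node *vid_get(unsigned vid)
{
        struct vlan_node *n;

        for (n = head; n; n = n->next)
                if (n->vid == vid) {
                        n->ref++;       /* second user of the same vid */
                        return n;
                }
        n = calloc(1, sizeof(*n));
        if (!n)
                return NULL;
        n->vid = vid;
        n->ref = 1;                     /* hw rule would be added here */
        n->next = head;
        head = n;
        return n;
}

static void vid_put(unsigned vid)
{
        struct vlan_node **p = &head, *n;

        for (; (n = *p); p = &n->next)
                if (n->vid == vid) {
                        if (!--n->ref) {
                                *p = n->next;   /* hw rule dropped here */
                                free(n);
                        }
                        return;
                }
}

int main(void)
{
        vid_get(100);
        vid_get(100);
        vid_put(100);
        vid_put(100);       /* last ref: node unlinked and freed */
        return head != NULL;
}
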
vsi); } } + ret = nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, 1, vsi); + if (ret) + goto free_vlan_node; + + if (vlan_node->ref_cnt == 1) + list_add(&vlan_node->node, &flow_mgt->vlan_list); + flow_mgt->vid = vid; rtnl_unlock(); return 0; -add_macvlan_failed: - nbl_serv_free_vlan_node(vlan_node); -alloc_node_failed: -update_submac_if_failed: - list_for_each_entry(submac_node, &flow_mgt->submac_list, node) { - disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, - vid, vsi); - disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, - flow_mgt->vid, vsi); - } +free_vlan_node: + vlan_node->ref_cnt--; + if (!vlan_node->ref_cnt) + nbl_serv_free_vlan_node(vlan_node); rtnl_unlock(); return ret; @@ -1587,234 +2097,183 @@ static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vi static void nbl_serv_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { - struct nbl_queue_stats queue_stats = { 0 }; struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_serv_ring_vsi_info *vsi_info; - u16 start, end; - int i; - - vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; - start = vsi_info->ring_offset; - end = vsi_info->ring_offset + vsi_info->ring_num; + struct nbl_stats net_stats = { 0 }; if (!stats) { - pr_err("get_stats64 is null\n"); + netdev_err(netdev, "get_link_stats64 stats is null\n"); return; } - for (i = start; i < end; i++) { - disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - i, &queue_stats, true); - stats->tx_packets += queue_stats.packets; - stats->tx_bytes += queue_stats.bytes; - } + disp_ops->get_net_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &net_stats); - for (i = start; i < end; i++) { - disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - i, &queue_stats, false); - stats->rx_packets += queue_stats.packets; - stats->rx_bytes += queue_stats.bytes; - } + stats->rx_packets = net_stats.rx_packets; + stats->tx_packets = net_stats.tx_packets; + stats->rx_bytes = net_stats.rx_bytes; + stats->tx_bytes = net_stats.tx_bytes; + stats->multicast = net_stats.rx_multicast_packets; - stats->multicast = 0; stats->rx_errors = 0; stats->tx_errors = 0; - stats->rx_length_errors = 0; - stats->rx_crc_errors = 0; - stats->rx_frame_errors = 0; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_frame_errors = netdev->stats.rx_frame_errors; stats->rx_dropped = 0; stats->tx_dropped = 0; } -static void nbl_modify_submacs(struct nbl_serv_net_resource_mgt *net_resource_mgt) +static int nbl_addr_unsync(struct net_device *netdev, const u8 *addr) { - struct netdev_hw_addr *ha; - struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); - struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); - struct nbl_serv_submac_node *submac_node; - int uc_count, i, ret = 0; - u8 *buf = NULL; - u16 len; + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; - spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); - uc_count = netdev_uc_count(net_resource_mgt->netdev); + adapter 
= NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - if (uc_count) { - len = uc_count * ETH_ALEN; - buf = kzalloc(len, GFP_ATOMIC); - - if (!buf) { - spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); - return; - } - - i = 0; - netdev_hw_addr_list_for_each(ha, &net_resource_mgt->netdev->uc) { - if (i >= len) - break; - memcpy(&buf[i], ha->addr, ETH_ALEN); - i += ETH_ALEN; - } - - net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_MODIFY_MAC_FILTER; - } - spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); - - nbl_serv_del_all_submacs(serv_mgt, priv->async_other_vsi); - - for (i = 0; i < uc_count; i++) { - submac_node = nbl_serv_alloc_submac_node(); - if (!submac_node) - break; - - ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &buf[i * ETH_ALEN], - flow_mgt->vid, priv->async_pending_vsi); - if (ret) { - nbl_serv_free_submac_node(submac_node); - break; - } - - ether_addr_copy(submac_node->mac, &buf[i * ETH_ALEN]); - list_add(&submac_node->node, &flow_mgt->submac_list); - } - - kfree(buf); - priv->async_other_vsi = priv->async_pending_vsi; -} - -static void nbl_modify_promisc_mode(struct nbl_serv_net_resource_mgt *net_resource_mgt) -{ - struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); - struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - u16 mode = 0; - - spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - if (net_resource_mgt->curr_promiscuout_mode & (IFF_PROMISC | IFF_ALLMULTI)) - mode = 1; - - if (net_resource_mgt->user_promisc_mode) - mode = 1; + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; - net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; - spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (!nbl_add_filter(&net_resource_mgt->tmp_del_filter_list, addr)) + return -ENOMEM; - disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - priv->async_other_vsi, mode); + net_resource_mgt->update_submac = 1; + return 0; } -static struct nbl_mac_filter *nbl_find_filter(struct nbl_adapter *adapter, const u8 *macaddr) +static int nbl_addr_sync(struct net_device *netdev, const u8 *addr) { + struct nbl_adapter *adapter; struct nbl_service_mgt *serv_mgt; struct nbl_serv_net_resource_mgt *net_resource_mgt; - struct nbl_mac_filter *f; - - if (!macaddr) - return NULL; + adapter = NBL_NETDEV_TO_ADAPTER(netdev); serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - list_for_each_entry(f, &net_resource_mgt->mac_filter_list, list) { - if (ether_addr_equal(macaddr, f->macaddr)) - return f; - } - return NULL; -} + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; -static void nbl_free_filter(struct nbl_serv_net_resource_mgt *net_resource_mgt) -{ - struct nbl_mac_filter *f; - struct list_head *pos, *n; + if (!nbl_add_filter(&net_resource_mgt->tmp_add_filter_list, addr)) + return -ENOMEM; - list_for_each_safe(pos, n, &net_resource_mgt->mac_filter_list) { - f = list_entry(pos, struct nbl_mac_filter, list); - list_del(&f->list); - kfree(f); - } + net_resource_mgt->update_submac = 1; + return 0; } -static struct nbl_mac_filter *nbl_add_filter(struct nbl_adapter *adapter, const u8 *macaddr) +static void nbl_modify_submacs(struct nbl_serv_net_resource_mgt *net_resource_mgt) { - struct nbl_mac_filter *f; - struct nbl_service_mgt *serv_mgt; - struct 
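
The sync/unsync callbacks run under the netdev address lock, so they only record the delta on the two tmp_* lists and set update_submac; the expensive hardware programming happens afterwards in nbl_modify_submacs() from process context. A minimal two-phase sketch of that split (fixed-size arrays instead of list_head, purely illustrative):

#include <stdio.h>

#define MAX_PENDING 8

static const char *pending_add[MAX_PENDING];
static const char *pending_del[MAX_PENDING];
static int n_add, n_del;

/* phase 1: called with the address lock held, must not sleep */
static int addr_sync(const char *mac)
{
        if (n_add >= MAX_PENDING)
                return -1;
        pending_add[n_add++] = mac;
        return 0;
}

static int addr_unsync(const char *mac)
{
        if (n_del >= MAX_PENDING)
                return -1;
        pending_del[n_del++] = mac;
        return 0;
}

/* phase 2: lock dropped, free to take rtnl and talk to hardware */
static void apply_pending(void)
{
        int i;

        for (i = 0; i < n_del; i++)
                printf("hw del %s\n", pending_del[i]);
        for (i = 0; i < n_add; i++)
                printf("hw add %s\n", pending_add[i]);
        n_add = n_del = 0;
}

int main(void)
{
        addr_sync("00:11:22:33:44:55");
        addr_unsync("66:77:88:99:aa:bb");
        apply_pending();
        return 0;
}
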
nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct net_device *netdev = net_resource_mgt->netdev; + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_mac_filter *filter, *safe_filter; - if (!macaddr) - return NULL; + INIT_LIST_HEAD(&net_resource_mgt->tmp_add_filter_list); + INIT_LIST_HEAD(&net_resource_mgt->tmp_del_filter_list); + net_resource_mgt->update_submac = 0; - serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + netif_addr_lock_bh(netdev); + __dev_uc_sync(net_resource_mgt->netdev, nbl_addr_sync, nbl_addr_unsync); + __dev_mc_sync(net_resource_mgt->netdev, nbl_addr_sync, nbl_addr_unsync); + netif_addr_unlock_bh(netdev); + + if (!net_resource_mgt->update_submac) + return; - f = nbl_find_filter(adapter, macaddr); - if (!f) { - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return f; + rtnl_lock(); + list_for_each_entry_safe(filter, safe_filter, + &net_resource_mgt->tmp_del_filter_list, list) { + nbl_serv_del_submac_node(serv_mgt, filter->macaddr, priv->data_vsi); + list_del(&filter->list); + kfree(filter); + } - ether_addr_copy(f->macaddr, macaddr); - list_add_tail(&f->list, &net_resource_mgt->mac_filter_list); - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; + list_for_each_entry_safe(filter, safe_filter, + &net_resource_mgt->tmp_add_filter_list, list) { + nbl_serv_add_submac_node(serv_mgt, filter->macaddr, + priv->data_vsi, flow_mgt->promisc); + list_del(&filter->list); + kfree(filter); } - return f; + nbl_serv_check_flow_table_spec(serv_mgt); + rtnl_unlock(); } -static int nbl_addr_unsync(struct net_device *netdev, const u8 *addr) +static void nbl_modify_promisc_mode(struct nbl_serv_net_resource_mgt *net_resource_mgt) { - struct nbl_adapter *adapter; - struct nbl_mac_filter *f; - struct nbl_service_mgt *serv_mgt; - struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct net_device *netdev = net_resource_mgt->netdev; + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + bool mode = 0, multi = 0; + bool need_flow = 1; + bool unicast_enable, multicast_enable; - adapter = NBL_NETDEV_TO_ADAPTER(netdev); - serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + rtnl_lock(); + net_resource_mgt->curr_promiscuout_mode = netdev->flags; - if (ether_addr_equal(addr, netdev->dev_addr)) - return 0; + if (((netdev->flags & (IFF_PROMISC)) || flow_mgt->force_promisc) && + !NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) + mode = 1; - f = nbl_find_filter(adapter, addr); - if (f) { - list_del(&f->list); - kfree(f); - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; + if ((netdev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || flow_mgt->force_promisc) + multi = 1; + + if (flow_mgt->promisc & (BIT(NBL_USER_FLOW) | BIT(NBL_MIRROR))) { + multi = 0; + mode = 0; + need_flow = 0; } - return 0; -} + if (!flow_mgt->trusted_en) + multi = 0; -static int nbl_addr_sync(struct net_device *netdev, const u8 *addr) -{ - struct nbl_adapter *adapter; + unicast_enable = !mode && need_flow; + multicast_enable = !multi && need_flow; - adapter = NBL_NETDEV_TO_ADAPTER(netdev); - if 
(ether_addr_equal(addr, netdev->dev_addr)) - return 0; + if ((flow_mgt->promisc & BIT(NBL_PROMISC)) ^ (mode << NBL_PROMISC)) + if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) { + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->data_vsi, mode); + if (mode) + flow_mgt->promisc |= BIT(NBL_PROMISC); + else + flow_mgt->promisc &= ~BIT(NBL_PROMISC); + } - if (nbl_add_filter(adapter, addr)) - return 0; - else - return -ENOMEM; -} + if ((flow_mgt->promisc & BIT(NBL_ALLMULTI)) ^ (multi << NBL_ALLMULTI)) { + disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->data_vsi, multi); + if (multi) + flow_mgt->promisc |= BIT(NBL_ALLMULTI); + else + flow_mgt->promisc &= ~BIT(NBL_ALLMULTI); + } -static bool nbl_serv_promisc_mode_changed(struct net_device *dev) -{ - struct nbl_adapter *adapter; - struct nbl_service_mgt *serv_mgt; - struct nbl_serv_net_resource_mgt *net_resource_mgt; + if (flow_mgt->multicast_flow_enable ^ multicast_enable) { + nbl_serv_update_mcast_submac(serv_mgt, multicast_enable, + unicast_enable, priv->data_vsi); + flow_mgt->multicast_flow_enable = multicast_enable; + } - adapter = NBL_NETDEV_TO_ADAPTER(dev); - serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); - net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + if (flow_mgt->unicast_flow_enable ^ unicast_enable) { + nbl_serv_update_promisc_vlan(serv_mgt, unicast_enable, priv->data_vsi); + flow_mgt->unicast_flow_enable = unicast_enable; + } - return (net_resource_mgt->curr_promiscuout_mode ^ dev->flags) - & (IFF_PROMISC | IFF_ALLMULTI); + if (flow_mgt->trusted_update) { + flow_mgt->trusted_update = 0; + if (flow_mgt->active_submac_list < flow_mgt->submac_list_cnt) + nbl_serv_update_mcast_submac(serv_mgt, flow_mgt->multicast_flow_enable, + flow_mgt->unicast_flow_enable, priv->data_vsi); + } + rtnl_unlock(); } static void nbl_serv_set_rx_mode(struct net_device *dev) @@ -1827,19 +2286,6 @@ static void nbl_serv_set_rx_mode(struct net_device *dev) serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); - __dev_uc_sync(dev, nbl_addr_sync, nbl_addr_unsync); - spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); - - if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) { /* only pf support */ - spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - if (nbl_serv_promisc_mode_changed(dev)) { - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; - net_resource_mgt->curr_promiscuout_mode = dev->flags; - } - spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - } - nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); } @@ -1853,13 +2299,6 @@ static void nbl_serv_change_rx_flags(struct net_device *dev, int flag) serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - if (nbl_serv_promisc_mode_changed(dev)) { - net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; - net_resource_mgt->curr_promiscuout_mode = dev->flags; - } - spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); - nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); } @@ -1978,6 +2417,38 @@ nbl_serv_features_check(struct sk_buff *skb, struct net_device *dev, netdev_feat NETIF_F_GSO_MASK); } +static int nbl_serv_config_rxhash(void *priv, bool enable) +{ + struct 
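
nbl_modify_promisc_mode() boils the state down to two hardware knobs (promisc, allmulti) and two flow switches (unicast/multicast filter rules), and user-flow or mirror ownership overrides everything. The decision logic, extracted into a self-contained sketch (bit names are stand-ins for the NBL_* enum):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
enum { F_PROMISC, F_ALLMULTI, F_USER_FLOW, F_MIRROR };

struct st {
        unsigned promisc;       /* models flow_mgt->promisc */
        bool trusted;
        bool force_promisc;
        bool is_vf;
};

static void derive(const struct st *s, bool if_promisc, bool if_allmulti,
                   bool *uc_flows, bool *mc_flows)
{
        bool mode = false, multi = false, need_flow = true;

        if ((if_promisc || s->force_promisc) && !s->is_vf)
                mode = true;                    /* hw promisc, PF only */
        if (if_promisc || if_allmulti || s->force_promisc)
                multi = true;

        /* user-space or mirror ownership suppresses kernel flows */
        if (s->promisc & (BIT(F_USER_FLOW) | BIT(F_MIRROR))) {
                mode = multi = false;
                need_flow = false;
        }
        if (!s->trusted)
                multi = false;                  /* untrusted VFs never allmulti */

        /* filter rules are pointless while hw receives everything */
        *uc_flows = !mode && need_flow;
        *mc_flows = !multi && need_flow;
}

int main(void)
{
        struct st s = { .trusted = true };
        bool uc, mc;

        derive(&s, true, false, &uc, &mc);
        printf("uc flows %d, mc flows %d\n", uc, mc);
        return 0;
}
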
nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + u32 rxfh_indir_size = 0; + u32 *indir = NULL; + int i = 0; + + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + indir = devm_kcalloc(dev, rxfh_indir_size, sizeof(u32), GFP_KERNEL); + if (!indir) + return -ENOMEM; + if (enable) { + if (ring_mgt->rss_indir_user) { + memcpy(indir, ring_mgt->rss_indir_user, rxfh_indir_size * sizeof(u32)); + } else { + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % vsi_info->active_ring_num; + } + } + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), + indir, rxfh_indir_size); + devm_kfree(dev, indir); + return 0; +} + static int nbl_serv_set_features(struct net_device *netdev, netdev_features_t features) { struct nbl_netdev_priv *priv = netdev_priv(netdev); @@ -1987,12 +2458,20 @@ static int nbl_serv_set_features(struct net_device *netdev, netdev_features_t fe struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); netdev_features_t changed = netdev->features ^ features; u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + bool enable = false; - if (changed & NETIF_F_NTUPLE) { - bool ena = !!(features & NETIF_F_NTUPLE); + if (!common->is_vf) { + if (changed & NETIF_F_NTUPLE) { + enable = !!(features & NETIF_F_NTUPLE); - disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, ena); + disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, enable); + } + } + + if (changed & NETIF_F_RXHASH) { + enable = !!(features & NETIF_F_RXHASH); + nbl_serv_config_rxhash(serv_mgt, enable); } return 0; @@ -2034,16 +2513,10 @@ static int nbl_serv_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); u16 function_id = U16_MAX; - if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) - return -EINVAL; - - function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), vf_id); - + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); if (function_id == U16_MAX) { netdev_info(dev, "vf id %d invalid\n", vf_id); return -EINVAL; @@ -2061,26 +2534,19 @@ static int nbl_serv_set_vf_rate(struct net_device *dev, int vf_id, int min_rate, struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); u16 function_id = U16_MAX; int ret = 0; - if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info || min_rate > 0) + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); 
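
Toggling NETIF_F_RXHASH is implemented purely through the RSS indirection table: enabled spreads hash results round-robin over the active rings (or restores a user-supplied table), disabled leaves the freshly zeroed table in place so every packet lands on queue 0. The fill logic in isolation:

#include <stdio.h>

static void fill_indir(unsigned *indir, unsigned size,
                       unsigned active_rings, int enable)
{
        unsigned i;

        for (i = 0; i < size; i++)
                indir[i] = enable ? i % active_rings : 0;
}

int main(void)
{
        unsigned indir[16];
        unsigned i;

        fill_indir(indir, 16, 6, 1);
        for (i = 0; i < 16; i++)
                printf("%u ", indir[i]);
        putchar('\n');
        return 0;
}
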
return -EINVAL; + } - if (vf_id < net_resource_mgt->num_vfs) { - function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), vf_id); - - if (function_id == U16_MAX) { - netdev_info(dev, "vf id %d invalid\n", vf_id); - return -EINVAL; - } - + if (vf_id < net_resource_mgt->num_vfs) ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - function_id, max_rate); - } + function_id, max_rate, 0); if (!ret) net_resource_mgt->vf_info[vf_id].max_tx_rate = max_rate; @@ -2118,18 +2584,12 @@ static int nbl_serv_set_vf_link_state(struct net_device *dev, int vf_id, int lin struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); u16 function_id = U16_MAX; bool should_notify = false; int ret = 0; - if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) - return -EINVAL; - - function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), vf_id); - + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); if (function_id == U16_MAX) { netdev_info(dev, "vf id %d invalid\n", vf_id); return -EINVAL; @@ -2146,27 +2606,112 @@ static int nbl_serv_set_vf_link_state(struct net_device *dev, int vf_id, int lin return ret; } +static int nbl_serv_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + bool should_notify = false; + int ret = 0; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ret = disp_ops->register_func_trust(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, trusted, &should_notify); + if (!ret && should_notify) + nbl_serv_chan_notify_trust_req(serv_mgt, function_id, trusted); + + if (!ret) + net_resource_mgt->vf_info[vf_id].trusted = trusted; + + return ret; +} + +static int __used nbl_serv_set_vf_tx_rate(struct net_device *dev, + int vf_id, int tx_rate, + int burst, bool burst_en) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + int ret = 0; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + if (!burst_en) + burst = net_resource_mgt->vf_info[vf_id].meter_tx_burst; + + if (vf_id < net_resource_mgt->num_vfs) + ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, tx_rate, burst); + + if (!ret) { + net_resource_mgt->vf_info[vf_id].meter_tx_rate = tx_rate; + if (burst_en) + net_resource_mgt->vf_info[vf_id].meter_tx_burst = burst; + } + + return ret; +} + +static int __used nbl_serv_set_vf_rx_rate(struct net_device *dev, + int vf_id, int rx_rate, + int burst, bool burst_en) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt 
*net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + int ret = 0; + + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + if (!burst_en) + burst = net_resource_mgt->vf_info[vf_id].meter_tx_burst; + + if (vf_id < net_resource_mgt->num_vfs) + ret = disp_ops->set_rx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, rx_rate, burst); + + if (!ret) { + net_resource_mgt->vf_info[vf_id].meter_rx_rate = rx_rate; + if (burst_en) + net_resource_mgt->vf_info[vf_id].meter_rx_burst = burst; + } + + return ret; +} + static int nbl_serv_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan, u8 qos, __be16 proto) { struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_notify_vlan_param param = {0}; int ret = 0; u16 function_id = U16_MAX; bool should_notify = false; - if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) - return -EINVAL; - if (vlan > 4095 || qos > 7) return -EINVAL; - function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), vf_id); - + function_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); if (function_id == U16_MAX) { netdev_info(dev, "vf id %d invalid\n", vf_id); return -EINVAL; @@ -2217,11 +2762,53 @@ static int nbl_serv_get_vf_config(struct net_device *dev, int vf_id, struct ifla ivi->vlan = vf_info[vf_id].vlan; ivi->vlan_proto = htons(vf_info[vf_id].vlan_proto); ivi->qos = vf_info[vf_id].vlan_qos; + ivi->trusted = vf_info[vf_id].trusted; ether_addr_copy(ivi->mac, vf_info[vf_id].mac); return 0; } +static int nbl_serv_get_vf_stats(struct net_device *dev, int vf_id, struct ifla_vf_stats *vf_stats) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_vf_stats stats = {0}; + u16 func_id = U16_MAX; + u8 is_vdpa = 0; + int ret = 0; + + func_id = nbl_serv_get_vf_function_id(serv_mgt, vf_id); + if (func_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ret = disp_ops->check_vf_is_active(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); + if (!ret) + return 0; + + ret = disp_ops->check_vf_is_vdpa(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, &is_vdpa); + if (!ret && is_vdpa) + ret = disp_ops->get_vdpa_vf_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, &stats); + else + ret = nbl_serv_chan_get_vf_stats_req(serv_mgt, func_id, &stats); + + if (ret) + return -EIO; + + vf_stats->rx_packets = stats.rx_packets; + vf_stats->tx_packets = stats.tx_packets; + vf_stats->rx_bytes = stats.rx_bytes; + vf_stats->tx_bytes = stats.tx_bytes; + vf_stats->broadcast = stats.broadcast; + vf_stats->multicast = stats.multicast; + vf_stats->rx_dropped = stats.rx_dropped; + vf_stats->tx_dropped = stats.tx_dropped; + + return 0; +} + static u8 nbl_get_dscp_up(struct nbl_serv_net_resource_mgt *net_resource_mgt, struct sk_buff *skb) { u8 dscp = 0; @@ -2231,7 +2818,7 @@ static u8 nbl_get_dscp_up(struct nbl_serv_net_resource_mgt *net_resource_mgt, st else if (skb->protocol == htons(ETH_P_IPV6)) dscp = 
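
nbl_serv_get_vf_stats() dispatches three ways: an inactive VF just reports zeros, a vDPA VF is read directly through the dispatch layer, and a regular VF is asked over the mailbox (the VF side answers from its own get_net_stats, as registered earlier). A condensed model of that fallback chain, with all probes as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct vf_stats {
        unsigned long rx_packets, tx_packets;
};

static bool vf_active(int vf)  { return vf >= 0; }
static bool vf_is_vdpa(int vf) { return vf == 1; }

static int read_vdpa_stats(struct vf_stats *s)
{
        s->rx_packets = 10;     /* read from device context */
        return 0;
}

static int mailbox_get_stats(struct vf_stats *s)
{
        s->rx_packets = 20;     /* VF driver answers via channel */
        return 0;
}

static int get_vf_stats(int vf, struct vf_stats *s)
{
        if (!vf_active(vf))
                return 0;       /* inactive: leave the zeros */
        if (vf_is_vdpa(vf))
                return read_vdpa_stats(s);
        return mailbox_get_stats(s);
}

int main(void)
{
        struct vf_stats s = { 0 };

        get_vf_stats(1, &s);
        printf("rx %lu\n", s.rx_packets);
        return 0;
}
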
ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; - return net_resource_mgt->dscp2prio_map[dscp]; + return net_resource_mgt->qos_info.dscp2prio_map[dscp]; } static u16 @@ -2244,9 +2831,8 @@ nbl_serv_select_queue(struct net_device *netdev, struct sk_buff *skb, struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - if (net_resource_mgt->pfc_mode == NBL_TRUST_MODE_DSCP) + if (net_resource_mgt->qos_info.trust_mode == NBL_TRUST_MODE_DSCP) skb->priority = nbl_get_dscp_up(net_resource_mgt, skb); - return netdev_pick_tx(netdev, skb, sb_dev); } @@ -2331,7 +2917,7 @@ static int nbl_serv_get_phys_port_name(struct net_device *dev, char *name, size_ pf_id = 1; if (snprintf(name, len, "p%u", pf_id) >= len) - return -EINVAL; + return -EOPNOTSUPP; return 0; } @@ -2341,8 +2927,12 @@ static int nbl_serv_get_port_parent_id(struct net_device *dev, struct netdev_phy struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(dev); u8 mac[ETH_ALEN]; + if (common->devlink_port && common->devlink_port->devlink) + return -EOPNOTSUPP; + /* return success to avoid linkwatch_do_dev report warnning */ if (test_bit(NBL_FATAL_ERR, adapter->state)) return 0; @@ -2398,9 +2988,6 @@ static int nbl_serv_setup_txrx_queues(void *priv, u16 vsi_id, u16 queue_num, u16 struct nbl_serv_vector *vector; int i, ret = 0; - /* Clear cfgs, in case this function exited abnormaly last time */ - disp_ops->clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); - /* queue_num include user&kernel queue */ ret = disp_ops->alloc_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, queue_num); if (ret) @@ -2455,7 +3042,7 @@ static int nbl_serv_init_tx_rate(void *priv, u16 vsi_id) if (net_resource_mgt->max_tx_rate) { func_id = disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - func_id, net_resource_mgt->max_tx_rate); + func_id, net_resource_mgt->max_tx_rate, 0); } return ret; @@ -2493,6 +3080,36 @@ static void nbl_serv_remove_rss(void *priv, u16 vsi_id) disp_ops->remove_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); } +static int nbl_serv_setup_rss_indir(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + u32 rxfh_indir_size = 0; + int num_cpus = 0, real_qps = 0; + u32 *indir = NULL; + int i = 0; + + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, &rxfh_indir_size); + indir = devm_kcalloc(dev, rxfh_indir_size, sizeof(u32), GFP_KERNEL); + if (!indir) + return -ENOMEM; + + num_cpus = num_online_cpus(); + real_qps = num_cpus > vsi_info->ring_num ? 
vsi_info->ring_num : num_cpus; + + for (i = 0; i < rxfh_indir_size; i++) + indir[i] = i % real_qps; + + disp_ops->set_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, indir, rxfh_indir_size); + devm_kfree(dev, indir); + return 0; +} + static int nbl_serv_alloc_rings(void *priv, struct net_device *netdev, struct nbl_ring_param *param) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; @@ -2518,7 +3135,6 @@ static int nbl_serv_alloc_rings(void *priv, struct net_device *netdev, struct nb ret = nbl_serv_set_tx_rings(ring_mgt, netdev, dev); if (ret) goto set_tx_fail; - ret = nbl_serv_set_rx_rings(ring_mgt, netdev, dev); if (ret) goto set_rx_fail; @@ -2573,7 +3189,7 @@ static int nbl_serv_enable_napis(void *priv, u16 vsi_index) int i; for (i = start; i < end; i++) - napi_enable(ring_mgt->vectors[i].napi); + napi_enable(&ring_mgt->vectors[i].nbl_napi->napi); return 0; } @@ -2587,7 +3203,7 @@ static void nbl_serv_disable_napis(void *priv, u16 vsi_index) int i; for (i = start; i < end; i++) - napi_disable(ring_mgt->vectors[i].napi); + napi_disable(&ring_mgt->vectors[i].nbl_napi->napi); } static void nbl_serv_set_mask_en(void *priv, bool enable) @@ -2600,18 +3216,24 @@ static void nbl_serv_set_mask_en(void *priv, bool enable) ring_mgt->net_msix_mask_en = enable; } -static int nbl_serv_start_net_flow(void *priv, struct net_device *netdev, u16 vsi_id, u16 vid) +static int nbl_serv_start_net_flow(void *priv, struct net_device *netdev, u16 vsi_id, u16 vid, + bool trusted) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); struct nbl_serv_vlan_node *vlan_node; + u8 mac[ETH_ALEN]; int ret = 0; + flow_mgt->unicast_flow_enable = true; + flow_mgt->multicast_flow_enable = true; /* Clear cfgs, in case this function exited abnormaly last time */ disp_ops->clear_accel_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); disp_ops->clear_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + disp_ops->set_mtu(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), netdev->mtu); if (!common->is_vf) disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, 1); @@ -2619,31 +3241,33 @@ static int nbl_serv_start_net_flow(void *priv, struct net_device *netdev, u16 vs if (!list_empty(&flow_mgt->vlan_list)) return -ECONNRESET; - ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); - if (ret) - goto add_multi_fail; - vlan_node = nbl_serv_alloc_vlan_node(); if (!vlan_node) goto alloc_fail; flow_mgt->vid = vid; + flow_mgt->trusted_en = trusted; + vlan_node->vid = vid; ether_addr_copy(flow_mgt->mac, netdev->dev_addr); - ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, - vid, vsi_id); + ret = nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, 1, vsi_id); if (ret) goto add_macvlan_fail; - vlan_node->vid = vid; list_add(&vlan_node->node, &flow_mgt->vlan_list); + flow_mgt->vlan_list_cnt++; + + memset(mac, 0xFF, ETH_ALEN); + ret = nbl_serv_add_submac_node(serv_mgt, mac, vsi_id, 0); + if (ret) + goto add_submac_failed; return 0; +add_submac_failed: + nbl_serv_update_vlan_node_effective(serv_mgt, vlan_node, 0, vsi_id); add_macvlan_fail: nbl_serv_free_vlan_node(vlan_node); alloc_fail: - disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); 
-add_multi_fail: return ret; } @@ -2657,8 +3281,9 @@ static void nbl_serv_stop_net_flow(void *priv, u16 vsi_id) struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + nbl_serv_del_all_submacs(serv_mgt, net_priv->data_vsi); nbl_serv_del_all_vlans(serv_mgt); - nbl_serv_del_all_submacs(serv_mgt, net_priv->async_other_vsi); + if (!common->is_vf) disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, 0); @@ -2670,6 +3295,30 @@ static void nbl_serv_stop_net_flow(void *priv, u16 vsi_id) memset(flow_mgt->mac, 0, sizeof(flow_mgt->mac)); } +static void nbl_serv_clear_flow(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->clear_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_set_promisc_mode(void *priv, u16 vsi_id, u16 mode) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, mode); +} + +static int nbl_serv_cfg_multi_mcast(void *priv, u16 vsi_id, u16 enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->cfg_multi_mcast(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, enable); +} + static int nbl_serv_set_lldp_flow(void *priv, u16 vsi_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; @@ -2736,31 +3385,6 @@ static bool nbl_serv_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_t cap_type); } -static int nbl_serv_init_chip_factory(void *priv) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct device *dev = NBL_COMMON_TO_DEV(common); - int ret = 0; - - ret = disp_ops->init_chip_module(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); - if (ret) { - dev_err(dev, "init_chip_module failed\n"); - goto module_init_fail; - } - - return 0; - -module_init_fail: - return ret; -} - -static int nbl_serv_destroy_chip_factory(void *p) -{ - return 0; -} - static int nbl_serv_init_chip(void *priv) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; @@ -2872,7 +3496,7 @@ static irqreturn_t nbl_serv_clean_rings(int __always_unused irq, void *data) { struct nbl_serv_vector *vector = (struct nbl_serv_vector *)data; - napi_schedule_irqoff(vector->napi); + napi_schedule_irqoff(&vector->nbl_napi->napi); return IRQ_HANDLED; } @@ -2897,13 +3521,13 @@ static int nbl_serv_request_net_irq(void *priv, struct nbl_msix_info_param *msix vector->rx_ring = rx_ring; irq_num = msix_info->msix_entries[i].vector; - snprintf(vector->name, sizeof(vector->name) - 1, "%s%03d-%s-%02u", "NBL", - NBL_COMMON_TO_VSI_ID(common), "TxRx", i); + snprintf(vector->name, sizeof(vector->name), "nbl_txrx%d@pci:%s", + i, pci_name(NBL_COMMON_TO_PDEV(common))); ret = devm_request_irq(dev, irq_num, nbl_serv_clean_rings, 0, vector->name, vector); if (ret) { - nbl_err(common, NBL_DEBUG_INTR, "TxRx Queue %u requests MSIX irq failed " - "with error %d", i, ret); + nbl_err(common, NBL_DEBUG_INTR, "TxRx Queue %u req irq with error %d", + i, ret); goto request_irq_err; } if 
(!cpumask_empty(&vector->cpumask)) @@ -3780,9 +4404,9 @@ static int nbl_serv_enable_lag_protocol(void *priv, u16 eth_id, bool lag_en) ret = disp_ops->enable_lag_protocol(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, lag_en); if (lag_en) ret = disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - net_priv->other_vsi); + net_priv->data_vsi); else - disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi); + disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->data_vsi); return ret; } @@ -3806,8 +4430,6 @@ static int nbl_serv_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fw if (net_resource_mgt->lag_info) net_resource_mgt->lag_info->lag_id = lag_id; - disp_ops->cfg_lag_mcc(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, lag_id, fwd); - return disp_ops->cfg_lag_member_fwd(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, lag_id, fwd); } @@ -3874,27 +4496,33 @@ static void nbl_serv_rx_mode_async_task(struct work_struct *work) struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = container_of(work, struct nbl_serv_net_resource_mgt, rx_mode_async); - if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_MODIFY_MAC_FILTER) - nbl_modify_submacs(serv_net_resource_mgt); - - if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE) - nbl_modify_promisc_mode(serv_net_resource_mgt); + nbl_modify_submacs(serv_net_resource_mgt); + nbl_modify_promisc_mode(serv_net_resource_mgt); } static void nbl_serv_net_task_service_timer(struct timer_list *t) { struct nbl_serv_net_resource_mgt *net_resource_mgt = from_timer(net_resource_mgt, t, serv_timer); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); mod_timer(&net_resource_mgt->serv_timer, round_jiffies(net_resource_mgt->serv_timer_period + jiffies)); nbl_common_queue_work(&net_resource_mgt->net_stats_update, false, false); + if (flow_mgt->pending_async_work) { + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); + flow_mgt->pending_async_work = 0; + } } static void nbl_serv_setup_flow_mgt(struct nbl_serv_flow_mgt *flow_mgt) { + int i = 0; + INIT_LIST_HEAD(&flow_mgt->vlan_list); - INIT_LIST_HEAD(&flow_mgt->submac_list); + for (i = 0; i < NBL_SUBMAC_MAX; i++) + INIT_LIST_HEAD(&flow_mgt->submac_list[i]); } static void nbl_serv_register_restore_netdev_queue(struct nbl_service_mgt *serv_mgt) @@ -3918,6 +4546,19 @@ static void nbl_serv_register_restore_netdev_queue(struct nbl_service_mgt *serv_ nbl_serv_chan_restart_netdev_queue_resp, serv_mgt); } +static void nbl_serv_set_wake(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt; + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + if (!common->is_vf && common->is_ocp) + disp_ops->set_wol(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, common->wol_ena); +} + static void nbl_serv_remove_net_resource_mgt(void *priv) { struct device *dev; @@ -3929,6 +4570,14 @@ static void nbl_serv_remove_net_resource_mgt(void *priv) dev = NBL_COMMON_TO_DEV(common); if (net_resource_mgt) { + if (common->is_vf) { + nbl_serv_unregister_link_forced_notify(serv_mgt); + nbl_serv_unregister_vlan_notify(serv_mgt); + nbl_serv_unregister_get_vf_stats(serv_mgt); + nbl_serv_unregister_trust_notify(serv_mgt); + 
nbl_serv_unregister_mirror_outputport_notify(serv_mgt); + } + nbl_serv_set_wake(serv_mgt); del_timer_sync(&net_resource_mgt->serv_timer); nbl_common_release_task(&net_resource_mgt->rx_mode_async); nbl_common_release_task(&net_resource_mgt->net_stats_update); @@ -3936,8 +4585,8 @@ static void nbl_serv_remove_net_resource_mgt(void *priv) if (common->is_vf) { nbl_common_release_task(&net_resource_mgt->update_link_state); nbl_common_release_task(&net_resource_mgt->update_vlan); + nbl_common_release_task(&net_resource_mgt->update_mirror_outputport); } - nbl_free_filter(net_resource_mgt); devm_kfree(dev, net_resource_mgt); NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = NULL; } @@ -3956,9 +4605,119 @@ static int nbl_serv_phy_init(struct nbl_serv_net_resource_mgt *net_resource_mgt) disp_ops->get_phy_caps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, &net_resource_mgt->phy_caps); + /* disable wol when driver init */ + if (!common->is_vf && common->is_ocp) + ret = disp_ops->set_wol(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, false); + + return ret; +} + +static void nbl_init_qos_config(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_dispatch_ops *disp_ops; + int i; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + if (common->is_vf) + return; + + qos_info->rdma_bw = NBL_MAX_BW >> 1; + qos_info->rdma_rate = NBL_COMMON_TO_ETH_MAX_SPEED(common); + qos_info->net_rate = NBL_COMMON_TO_ETH_MAX_SPEED(common); + qos_info->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE; + for (i = 0; i < NBL_DSCP_MAX; i++) + qos_info->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES; + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) + disp_ops->get_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), i, + &qos_info->buffer_sizes[i][0], + &qos_info->buffer_sizes[i][1]); + + disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), NBL_COMMON_TO_ETH_ID(common), + qos_info->pfc, qos_info->trust_mode, qos_info->dscp2prio_map); +} + +static int nbl_serv_init_hw_stats(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + struct nbl_ustore_stats ustore_stats = {0}; + int ret = 0; + + net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop = + devm_kcalloc(dev, vsi_info->ring_num, sizeof(u64), GFP_KERNEL); + if (!net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop) { + ret = -ENOMEM; + goto alloc_total_uvn_stat_pkt_drop_fail; + } + + if (!common->is_vf) { + ret = disp_ops->get_ustore_total_pkt_drop_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &ustore_stats); + if (ret) + goto get_ustore_total_pkt_drop_stats_fail; + net_resource_mgt->hw_stats.start_ustore_stats.rx_drop_packets = + ustore_stats.rx_drop_packets; + net_resource_mgt->hw_stats.start_ustore_stats.rx_trun_packets = + ustore_stats.rx_trun_packets; + } + + return 0; + 
+get_ustore_total_pkt_drop_stats_fail: + devm_kfree(dev, net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop); +alloc_total_uvn_stat_pkt_drop_fail: return ret; } +static int nbl_serv_remove_hw_stats(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + + devm_kfree(dev, net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop); + return 0; +} + +static int nbl_serv_get_rx_dropped(void *priv, u64 *rx_dropped) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_ustore_stats ustore_stats = {0}; + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + int i = 0; + + for (i = 0; i < vsi_info->active_ring_num; i++) + *rx_dropped += net_resource_mgt->hw_stats.total_uvn_stat_pkt_drop[i]; + + if (!common->is_vf) { + disp_ops->get_ustore_total_pkt_drop_stats + (NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, &ustore_stats); + *rx_dropped += ustore_stats.rx_drop_packets - + net_resource_mgt->hw_stats.start_ustore_stats.rx_drop_packets; + *rx_dropped += ustore_stats.rx_trun_packets - + net_resource_mgt->hw_stats.start_ustore_stats.rx_trun_packets; + } + return 0; +} + static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev, u16 vlan_proto, u16 vlan_tci, u32 rate) { @@ -3966,6 +4725,8 @@ static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct device *dev = NBL_COMMON_TO_DEV(common); struct nbl_serv_net_resource_mgt *net_resource_mgt; + u32 delay_time; + unsigned long hw_stats_delay_time = 0; net_resource_mgt = devm_kzalloc(dev, sizeof(struct nbl_serv_net_resource_mgt), GFP_KERNEL); if (!net_resource_mgt) @@ -3979,11 +4740,18 @@ static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = net_resource_mgt; nbl_serv_phy_init(net_resource_mgt); + nbl_init_qos_config(net_resource_mgt); nbl_serv_register_restore_netdev_queue(serv_mgt); if (common->is_vf) { nbl_serv_register_link_forced_notify(serv_mgt); nbl_serv_register_vlan_notify(serv_mgt); + nbl_serv_register_get_vf_stats(serv_mgt); + nbl_serv_register_trust_notify(serv_mgt); + nbl_serv_register_mirror_outputport_notify(serv_mgt); } + net_resource_mgt->hw_stats_period = NBL_HW_STATS_PERIOD_SECONDS * HZ; + get_random_bytes(&delay_time, sizeof(delay_time)); + hw_stats_delay_time = delay_time % net_resource_mgt->hw_stats_period; timer_setup(&net_resource_mgt->serv_timer, nbl_serv_net_task_service_timer, 0); net_resource_mgt->serv_timer_period = HZ; @@ -3995,16 +4763,18 @@ static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev nbl_serv_update_link_state); nbl_common_alloc_task(&net_resource_mgt->update_vlan, nbl_serv_update_vlan); + nbl_common_alloc_task(&net_resource_mgt->update_mirror_outputport, + nbl_serv_update_mirror_outputport); } - INIT_LIST_HEAD(&net_resource_mgt->mac_filter_list); + 
INIT_LIST_HEAD(&net_resource_mgt->tmp_add_filter_list); + INIT_LIST_HEAD(&net_resource_mgt->tmp_del_filter_list); INIT_LIST_HEAD(&net_resource_mgt->indr_dev_priv_list); - spin_lock_init(&net_resource_mgt->mac_vlan_list_lock); - spin_lock_init(&net_resource_mgt->current_netdev_promisc_flags_lock); net_resource_mgt->get_stats_jiffies = jiffies; mod_timer(&net_resource_mgt->serv_timer, - round_jiffies(jiffies + net_resource_mgt->serv_timer_period)); + jiffies + net_resource_mgt->serv_timer_period + + hw_stats_delay_time); return 0; } @@ -4290,10 +5060,10 @@ static const struct pldmfw_ops nbl_update_fw_ops = { .finalize_update = nbl_serv_finalize_update, }; -static int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, - struct netlink_ext_ack *extack) +int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, + struct netlink_ext_ack *extack) { - struct nbl_serv_update_fw_priv priv = {0}; + struct nbl_serv_update_fw_priv priv = {{0}}; int ret = 0; priv.context.ops = &nbl_update_fw_ops; @@ -4317,6 +5087,7 @@ static int nbl_serv_update_devlink_flash(struct devlink *devlink, devlink_flash_update_status_notify(devlink, "Flash start", NULL, 0, 0); ret = nbl_serv_update_firmware(serv_mgt, params->fw, extack); + if (ret) devlink_flash_update_status_notify(devlink, "Flash failed", NULL, 0, 0); else @@ -4475,14 +5246,6 @@ static void nbl_serv_init_port(void *priv) disp_ops->init_port(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); } -static void nbl_serv_configure_virtio_dev_msix(void *priv, u16 vector) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - - disp_ops->configure_virtio_dev_msix(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vector); -} - static void nbl_serv_configure_rdma_msix_off(void *priv, u16 vector) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; @@ -4491,14 +5254,6 @@ static void nbl_serv_configure_rdma_msix_off(void *priv, u16 vector) disp_ops->configure_rdma_msix_off(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vector); } -static void nbl_serv_configure_virtio_dev_ready(void *priv) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - - disp_ops->configure_virtio_dev_ready(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); -} - static int nbl_serv_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; @@ -4517,7 +5272,10 @@ static void nbl_serv_adapt_desc_gother(void *priv) struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - disp_ops->adapt_desc_gother(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (test_bit(NBL_FLAG_HIGH_THROUGHPUT, serv_mgt->flags)) + disp_ops->set_desc_high_throughput(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + else + disp_ops->adapt_desc_gother(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); } static void nbl_serv_process_flr(void *priv, u16 vfid) @@ -4580,6 +5338,13 @@ static int nbl_serv_register_vsi_info(void *priv, struct nbl_vsi_param *vsi_para NBL_ITR_DYNAMIC)) ring_mgt->vsi_info[vsi_index].itr_dynamic = true; + /* + * Clear cfgs, in case this function exited abnormally last time. + * Only for the data vsi: a VF in a VM supports only the data vsi, + * and the DPDK user vsi cannot leak resources.
+ */ + if (vsi_index == NBL_VSI_DATA) + disp_ops->clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_param->vsi_id); disp_ops->register_vsi_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_index, vsi_param->queue_offset, vsi_param->queue_num); @@ -4704,7 +5469,6 @@ static long nbl_serv_st_unlock_ioctl(struct file *file, unsigned int cmd, unsign ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); - if (ret) { nbl_err(common, NBL_DEBUG_ST, "Bad access.\n"); return ret; @@ -4831,90 +5595,337 @@ static void nbl_serv_remove_st(void *priv, void *st_table_param) devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt); } -static int nbl_serv_set_spoof_check_addr(void *priv, u8 *mac) +static void nbl_serv_form_p4_name(struct nbl_common_info *common, int type, char *name, + u16 len, u32 version) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + char eth_num[NBL_P4_NAME_LEN] = {0}; + char ver[NBL_P4_NAME_LEN] = {0}; - return disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), mac); -} + switch (NBL_COMMON_TO_ETH_MODE(common)) { + case 1: + snprintf(eth_num, sizeof(eth_num), "single"); + break; + case 2: + snprintf(eth_num, sizeof(eth_num), "dual"); + break; + case 4: + snprintf(eth_num, sizeof(eth_num), "quad"); + break; + default: + nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown eth mode %d", NBL_COMMON_TO_ETH_MODE(common)); + return; + } -static u16 nbl_serv_get_vf_base_vsi_id(void *priv, u16 func_id) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + switch (version) { + case 0: + snprintf(ver, sizeof(ver), "lg"); + break; + case 1: + snprintf(ver, sizeof(ver), "hg"); + break; + } - return disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); + switch (type) { + case NBL_P4_DEFAULT: + /* No need to load default p4 file */ + snprintf(name, len, "nbl/snic_v3r1/m181xx_%s_port_p4_%s", eth_num, ver); + break; + default: + nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown P4 type %d", type); + } } -static int nbl_serv_get_board_id(void *priv) +static int nbl_serv_calculate_md5sum(struct nbl_common_info *common, const u8 *data, + u32 data_len, char *md5_string) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct shash_desc *shash; + struct crypto_shash *tfm; + u8 md5_result[NBL_MD5SUM_LEN]; + int i; + int ret; - return disp_ops->get_board_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); -} + tfm = crypto_alloc_shash("md5", 0, 0); + if (IS_ERR(tfm)) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to allocate MD5 transform\n"); + return PTR_ERR(tfm); + } -static int nbl_serv_process_abnormal_event(void *priv) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_abnormal_event_info abnomal_info; - struct nbl_abnormal_details *detail; - u16 local_queue_id; - int type, i, ret = 0; + shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!shash) { + crypto_free_shash(tfm); + return -ENOMEM; + } - memset(&abnomal_info, 0, sizeof(abnomal_info)); + shash->tfm = tfm; - ret =
disp_ops->process_abnormal_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &abnomal_info); - if (!ret) + ret = crypto_shash_init(shash); + if (ret) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to initialize MD5\n"); + kfree(shash); + crypto_free_shash(tfm); return ret; + } - for (i = 0; i < NBL_ABNORMAL_EVENT_MAX; i++) { - detail = &abnomal_info.details[i]; + ret = crypto_shash_update(shash, data, data_len); + if (ret) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to update MD5\n"); + kfree(shash); + crypto_free_shash(tfm); + return ret; + } - if (!detail->abnormal) - continue; + ret = crypto_shash_final(shash, md5_result); + if (ret) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Failed to finalize MD5\n"); + kfree(shash); + crypto_free_shash(tfm); + return ret; + } - type = nbl_serv_abnormal_event_to_queue(i); - local_queue_id = disp_ops->get_local_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - detail->vsi_id, detail->qid); - if (local_queue_id == U16_MAX) - return 0; + for (i = 0; i < NBL_MD5SUM_LEN; i++) + sprintf(md5_string + i * 2, "%02x", md5_result[i]); - nbl_serv_restore_queue(serv_mgt, detail->vsi_id, local_queue_id, type, true); - } + md5_string[NBL_MD5SUM_LEN * 2] = '\0'; + + kfree(shash); + crypto_free_shash(tfm); return 0; } -static int nbl_serv_cfg_bond_shaping(void *priv, u8 eth_id, bool enable) +static char *nbl_serv_get_md5_verify(int type, u16 version, u8 eth_num) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + if (version == 1) { + switch (eth_num) { + case 1: return NBL_SINGLE_PORT_HG_P4_MD5; + case 2: return NBL_DUAL_PORT_HG_P4_MD5; + case 4: return NBL_QUAD_PORT_HG_P4_MD5; + default: return NULL; + } + } else if (version == 0) { + switch (eth_num) { + case 1: return NBL_SINGLE_PORT_LG_P4_MD5; + case 2: return NBL_DUAL_PORT_LG_P4_MD5; + case 4: return NBL_QUAD_PORT_LG_P4_MD5; + default: return NULL; + } + } - return disp_ops->cfg_bond_shaping(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, enable); + return NULL; } -static void nbl_serv_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, - u8 other_eth_id, bool enable) +static int nbl_serv_load_p4(struct nbl_service_mgt *serv_mgt, + const struct firmware *fw, char *verify_code, int type, u16 version) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + const struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct elf32_shdr *shdr; + struct nbl_load_p4_param param; + u8 *strtab, *name, *product_code = NULL; + int i; + char md5_result[33]; + char *md5_verify; + u32 p4_size = 0; - disp_ops->cfg_bgid_back_pressure(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), main_eth_id, - other_eth_id, enable); -} - -static void nbl_serv_cfg_eth_bond_event(void *priv, bool enable) -{ - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + if (memcmp(elf_hdr->e_ident, NBL_P4_ELF_IDENT, NBL_P4_ELF_IDENT_LEN)) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, + "Invalid ELF file, load default p4 configuration"); + return 0; + } - disp_ops->cfg_eth_bond_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); -} + md5_verify = nbl_serv_get_md5_verify(type, version, NBL_COMMON_TO_ETH_MODE(common)); + + if (nbl_serv_calculate_md5sum(common, fw->data, fw->size, md5_result)) { + nbl_warn(common,
NBL_DEBUG_CUSTOMIZED_P4, + "elf md5sum calculate failed, load default p4 configuration"); + return 0; + } + + nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "load p4 md5sum: %s\n", md5_result); + + if (!md5_verify || strncmp(md5_verify, md5_result, 33)) + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, + "elf file does not match driver version, function may be abnormal\n"); + + memset(&param, 0, sizeof(param)); + + shdr = (struct elf32_shdr *)((u8 *)elf_hdr + elf_hdr->e_shoff); + strtab = (u8 *)elf_hdr + shdr[elf_hdr->e_shstrndx].sh_offset; + + for (i = 0; i < elf_hdr->e_shnum; i++) + if (shdr[i].sh_type == SHT_NOTE) { + name = strtab + shdr[i].sh_name; + product_code = (u8 *)elf_hdr + shdr[i].sh_offset; + } + + if (!product_code) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, + "Product code does not exist, function may be abnormal"); + return 0; + } + + if (strncmp(product_code, verify_code, NBL_P4_VERIFY_CODE_LEN)) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, + "Invalid product code %.32s, function may be abnormal", product_code); + return 0; + } + + param.start = 1; + disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + + for (i = 0; i < elf_hdr->e_shnum; i++) + if (shdr[i].sh_type == SHT_PROGBITS && !(shdr[i].sh_flags & SHF_EXECINSTR)) { + memset(&param, 0, sizeof(param)); + /* param.name distinguishes configurations; it is not used for now */ + strscpy(param.name, strtab + shdr[i].sh_name, sizeof(param.name)); + param.addr = shdr[i].sh_addr; + param.size = shdr[i].sh_size; + param.section_index = i; + param.section_offset = 0; + param.data = (u8 *)elf_hdr + shdr[i].sh_offset; + p4_size += param.size; + + disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + } + + memset(&param, 0, sizeof(param)); + param.end = 1; + disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + + return 0; +} + +static __maybe_unused void nbl_serv_load_default_p4(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->load_p4_default(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_init_p4(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + const struct firmware *fw; + char name[NBL_P4_NAME_LEN] = {0}; + char verify_code[NBL_P4_NAME_LEN] = {0}; + int type, ret = 0; + u32 version; + + version = disp_ops->get_p4_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + type = disp_ops->get_p4_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), verify_code); + if (type < 0 || type > NBL_P4_TYPE_MAX) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, + "p4 type is invalid, load default p4 configuration\n"); + return 0; + } + + nbl_serv_form_p4_name(common, type, name, sizeof(name), version); + ret = firmware_request_nowarn(&fw, name, NBL_SERV_MGT_TO_DEV(serv_mgt)); + if (ret) + goto out; + + ret = nbl_serv_load_p4(serv_mgt, fw, verify_code, type, version); + + release_firmware(fw); + +out: + if (ret) + type = NBL_FLAG_P4_DEFAULT; + + nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "Load P4 %d", type); + disp_ops->set_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type); + + /* We always return OK, because at the very least we would use default P4 */ + return 0; +} + +static int nbl_serv_set_spoof_check_addr(void *priv, u8 *mac) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info
*common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + return disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), mac); +} + +static u16 nbl_serv_get_vf_base_vsi_id(void *priv, u16 func_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); +} + +static int nbl_serv_get_board_id(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_board_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_process_abnormal_event(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_abnormal_event_info abnomal_info; + struct nbl_abnormal_details *detail; + u16 local_queue_id; + int type, i, ret = 0; + + memset(&abnomal_info, 0, sizeof(abnomal_info)); + + ret = disp_ops->process_abnormal_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &abnomal_info); + if (!ret) + return ret; + + for (i = 0; i < NBL_ABNORMAL_EVENT_MAX; i++) { + detail = &abnomal_info.details[i]; + + if (!detail->abnormal) + continue; + + type = nbl_serv_abnormal_event_to_queue(i); + local_queue_id = disp_ops->get_local_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + detail->vsi_id, detail->qid); + if (local_queue_id == U16_MAX) + return 0; + + nbl_serv_restore_queue(serv_mgt, detail->vsi_id, local_queue_id, type, true); + } + + return 0; +} + +static int nbl_serv_cfg_bond_shaping(void *priv, u8 eth_id, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->cfg_bond_shaping(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, enable); +} + +static void nbl_serv_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, + u8 other_eth_id, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_bgid_back_pressure(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), main_eth_id, + other_eth_id, enable); +} + +static void nbl_serv_cfg_eth_bond_event(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_eth_bond_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); +} static ssize_t nbl_serv_vf_mac_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -4939,10 +5950,34 @@ static ssize_t nbl_serv_vf_mac_store(struct kobject *kobj, struct kobj_attribute return ret ? 
ret : count; } +static ssize_t nbl_serv_vf_trust_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "usage: write to set vf trust\n"); +} + +static ssize_t nbl_serv_vf_trust_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + bool trusted = false; + int ret = 0; + + if (sysfs_streq(buf, "ON")) + trusted = true; + else if (sysfs_streq(buf, "OFF")) + trusted = false; + else + return -EINVAL; + + ret = nbl_serv_set_vf_trust(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, trusted); + return ret ? ret : count; +} + static ssize_t nbl_serv_vf_vlan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, "usage: write to set VF Vlan," - " Qos, and optionally Vlan Protocol (default 802.1Q)\n"); + return sprintf(buf, "usage: write to set VF Vlan, Qos, and Protocol\n"); } static ssize_t nbl_serv_vf_vlan_store(struct kobject *kobj, struct kobj_attribute *attr, @@ -5053,6 +6088,166 @@ static ssize_t nbl_serv_vf_link_state_store(struct kobject *kobj, struct kobj_at return ret ? ret : count; } +static ssize_t nbl_serv_vf_stats_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct net_device *netdev = serv_mgt->net_resource_mgt->netdev; + struct ifla_vf_stats stats = { 0 }; + int ret = 0; + + ret = nbl_serv_get_vf_stats(netdev, vf_info->vf_id, &stats); + if (ret) { + netdev_info(netdev, "get_vf %d stats failed %d\n", vf_info->vf_id, ret); + return ret; + } + + return scnprintf(buf, PAGE_SIZE, + "tx_packets : %llu\n" + "tx_bytes : %llu\n" + "tx_dropped : %llu\n" + "rx_packets : %llu\n" + "rx_bytes : %llu\n" + "rx_dropped : %llu\n" + "rx_broadcast : %llu\n" + "rx_multicast : %llu\n", + stats.tx_packets, stats.tx_bytes, stats.tx_dropped, + stats.rx_packets, stats.rx_bytes, stats.rx_dropped, + stats.broadcast, stats.multicast + ); +} + +static ssize_t nbl_serv_vf_tx_rate_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_tx_rate; + + return sprintf(buf, "max tx rate(Mbps): %d\n", rate); +} + +static ssize_t nbl_serv_vf_tx_rate_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + int tx_rate = 0, ret = 0; + + ret = kstrtos32(buf, 0, &tx_rate); + if (ret) + return -EINVAL; + + ret = nbl_serv_set_vf_tx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, tx_rate, 0, false); + return ret ?
ret : count; +} + +static ssize_t nbl_serv_vf_tx_burst_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = net_resource_mgt->vf_info[vf_info->vf_id].meter_tx_burst; + + return sprintf(buf, "max burst depth %d\n", burst); +} + +static ssize_t nbl_serv_vf_tx_burst_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, tx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = 0, ret = 0; + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_tx_rate; + + ret = kstrtos32(buf, 0, &burst); + if (ret) + return -EINVAL; + if (burst >= NBL_MAX_BURST) + return -EINVAL; + + if (rate || !burst) + ret = nbl_serv_set_vf_tx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, rate, burst, true); + else + return -EINVAL; + + return ret ? ret : count; +} + +static ssize_t nbl_serv_vf_rx_rate_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_rx_rate; + + return sprintf(buf, "max rx rate(Mbps): %d\n", rate); +} + +static ssize_t nbl_serv_vf_rx_rate_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + int rx_rate = 0, ret = 0; + + ret = kstrtos32(buf, 0, &rx_rate); + if (ret) + return -EINVAL; + + ret = nbl_serv_set_vf_rx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, rx_rate, 0, false); + return ret ? 
ret : count; +} + +static ssize_t nbl_serv_vf_rx_burst_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = net_resource_mgt->vf_info[vf_info->vf_id].meter_rx_burst; + + return sprintf(buf, "max burst depth %d\n", burst); +} + +static ssize_t nbl_serv_vf_rx_burst_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, rx_bps_kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int burst = 0, ret = 0; + int rate = net_resource_mgt->vf_info[vf_info->vf_id].meter_rx_rate; + + ret = kstrtos32(buf, 0, &burst); + if (ret) + return -EINVAL; + if (burst > NBL_MAX_BURST) + return -EINVAL; + + if (rate || !burst) + ret = nbl_serv_set_vf_rx_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, rate, burst, true); + else + return -EINVAL; + + return ret ? ret : count; +} + static ssize_t nbl_serv_vf_config_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct kobj_attribute *kattr = container_of(attr, struct kobj_attribute, attr); @@ -5074,6 +6269,11 @@ static ssize_t nbl_serv_vf_config_store(struct kobject *kobj, struct attribute * return -EIO; } +static void dir_release(struct kobject *kobj) +{ + /* kobjects are embedded in struct nbl_serv_vf_info and freed with it */ +} + static struct kobj_attribute nbl_attr_vf_mac = { .attr = {.name = "mac", .mode = 0644}, @@ -5088,6 +6288,13 @@ static struct kobj_attribute nbl_attr_vf_vlan = { .store = nbl_serv_vf_vlan_store, }; +static struct kobj_attribute nbl_attr_vf_trust = { + .attr = {.name = "trust", + .mode = 0644}, + .show = nbl_serv_vf_trust_show, + .store = nbl_serv_vf_trust_store, +}; + static struct kobj_attribute nbl_attr_vf_max_tx_rate = { .attr = {.name = "max_tx_rate", .mode = 0644}, @@ -5095,13 +6302,41 @@ static struct kobj_attribute nbl_attr_vf_max_tx_rate = { .store = nbl_serv_vf_max_tx_rate_store, }; -static struct kobj_attribute nbl_attr_vf_spoofchk = { - .attr = {.name = "spoofchk", +static struct kobj_attribute nbl_attr_vf_spoofcheck = { + .attr = {.name = "spoofcheck", .mode = 0644}, .show = nbl_serv_vf_spoofchk_show, .store = nbl_serv_vf_spoofchk_store, }; +static struct kobj_attribute nbl_attr_vf_tx_rate = { + .attr = {.name = "rate", + .mode = 0644}, + .show = nbl_serv_vf_tx_rate_show, + .store = nbl_serv_vf_tx_rate_store, +}; + +static struct kobj_attribute nbl_attr_vf_tx_burst = { + .attr = {.name = "burst", + .mode = 0644}, + .show = nbl_serv_vf_tx_burst_show, + .store = nbl_serv_vf_tx_burst_store, +}; + +static struct kobj_attribute nbl_attr_vf_rx_rate = { + .attr = {.name = "rate", + .mode = 0644}, + .show = nbl_serv_vf_rx_rate_show, + .store = nbl_serv_vf_rx_rate_store, +}; + +static struct kobj_attribute nbl_attr_vf_rx_burst = { + .attr = {.name = "burst", + .mode = 0644}, + .show = nbl_serv_vf_rx_burst_show, + .store = nbl_serv_vf_rx_burst_store, +}; + static struct kobj_attribute nbl_attr_vf_link_state = { .attr = {.name = "link_state", .mode = 0644}, @@ -5109,270 +6344,1084 @@ .store = nbl_serv_vf_link_state_store, }; +static struct kobj_attribute
nbl_attr_vf_stats = { + .attr = {.name = "stats", + .mode = 0444}, + .show = nbl_serv_vf_stats_show, +}; + static struct attribute *nbl_vf_config_attrs[] = { &nbl_attr_vf_mac.attr, &nbl_attr_vf_vlan.attr, + &nbl_attr_vf_trust.attr, &nbl_attr_vf_max_tx_rate.attr, - &nbl_attr_vf_spoofchk.attr, + &nbl_attr_vf_spoofcheck.attr, &nbl_attr_vf_link_state.attr, + &nbl_attr_vf_stats.attr, NULL, }; -ATTRIBUTE_GROUPS(nbl_vf_config); +ATTRIBUTE_GROUPS(nbl_vf_config); + +static struct attribute *nbl_vf_tx_config_attrs[] = { + &nbl_attr_vf_tx_rate.attr, + &nbl_attr_vf_tx_burst.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(nbl_vf_tx_config); + +static struct attribute *nbl_vf_rx_config_attrs[] = { + &nbl_attr_vf_rx_rate.attr, + &nbl_attr_vf_rx_burst.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(nbl_vf_rx_config); + +static const struct sysfs_ops nbl_sysfs_ops_vf = { + .show = nbl_serv_vf_config_show, + .store = nbl_serv_vf_config_store, +}; + +static const struct kobj_type nbl_kobj_vf_type = { + .sysfs_ops = &nbl_sysfs_ops_vf, + .default_groups = nbl_vf_config_groups, +}; + +static const struct kobj_type nbl_kobj_dir = { + .release = dir_release, +}; + +static const struct kobj_type nbl_kobj_vf_tx_type = { + .sysfs_ops = &nbl_sysfs_ops_vf, + .default_groups = nbl_vf_tx_config_groups, +}; + +static const struct kobj_type nbl_kobj_vf_rx_type = { + .sysfs_ops = &nbl_sysfs_ops_vf, + .default_groups = nbl_vf_rx_config_groups, +}; + +static int nbl_serv_setup_vf_sysfs(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + int i = 0, ret = 0; + int index = 0; + + for (i = 0; i < net_resource_mgt->num_vfs; i++) { + index = i; + vf_info[i].priv = serv_mgt; + vf_info[i].vf_id = (u16)i; + + ret = kobject_init_and_add(&vf_info[i].kobj, &nbl_kobj_vf_type, + net_resource_mgt->sriov_kobj, "%d", i); + if (ret) + goto err; + + ret = kobject_init_and_add(&vf_info[i].meters_kobj, &nbl_kobj_dir, + &vf_info[i].kobj, "meters"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].rx_kobj, &nbl_kobj_dir, + &vf_info[i].meters_kobj, "rx"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].tx_kobj, &nbl_kobj_dir, + &vf_info[i].meters_kobj, "tx"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].rx_bps_kobj, &nbl_kobj_vf_rx_type, + &vf_info[i].rx_kobj, "bps"); + if (ret) + goto err; + ret = kobject_init_and_add(&vf_info[i].tx_bps_kobj, &nbl_kobj_vf_tx_type, + &vf_info[i].tx_kobj, "bps"); + if (ret) + goto err; + } + + return 0; + +err: + for (i = 0; i <= index; i++) { + if (vf_info[i].tx_bps_kobj.state_initialized) + kobject_put(&vf_info[i].tx_bps_kobj); + if (vf_info[i].rx_bps_kobj.state_initialized) + kobject_put(&vf_info[i].rx_bps_kobj); + if (vf_info[i].tx_kobj.state_initialized) + kobject_put(&vf_info[i].tx_kobj); + if (vf_info[i].rx_kobj.state_initialized) + kobject_put(&vf_info[i].rx_kobj); + if (vf_info[i].meters_kobj.state_initialized) + kobject_put(&vf_info[i].meters_kobj); + if (vf_info[i].kobj.state_initialized) + kobject_put(&vf_info[i].kobj); + } + + return ret; +} + +static void nbl_serv_remove_vf_sysfs(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + int i = 0; + + for (i = 0; i < net_resource_mgt->num_vfs; i++) { + kobject_put(&vf_info[i].tx_bps_kobj); + kobject_put(&vf_info[i].rx_bps_kobj); +
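+		/* Leaf "bps" kobjects go first; the parent directory kobjects below are dropped in reverse creation order. */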
kobject_put(&vf_info[i].tx_kobj); + kobject_put(&vf_info[i].rx_kobj); + kobject_put(&vf_info[i].meters_kobj); + kobject_put(&vf_info[i].kobj); + } +} + +static int nbl_serv_setup_vf_config(void *priv, int num_vfs, bool is_flush) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u16 func_id = U16_MAX; + u16 vlan_tci; + bool should_notify; + int i, ret = 0; + + net_resource_mgt->num_vfs = num_vfs; + + for (i = 0; i < net_resource_mgt->num_vfs; i++) { + func_id = nbl_serv_get_vf_function_id(serv_mgt, i); + if (func_id == U16_MAX) { + nbl_err(common, NBL_DEBUG_MAIN, "vf id %d invalid\n", i); + return -EINVAL; + } + + disp_ops->register_func_mac(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vf_info[i].mac, func_id); + + vlan_tci = vf_info[i].vlan | (u16)(vf_info[i].vlan_qos << VLAN_PRIO_SHIFT); + ret = disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, + vlan_tci, vf_info[i].vlan_proto, + &should_notify); + if (ret) + break; + + ret = disp_ops->register_func_trust(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].trusted, + &should_notify); + + if (ret) + break; + + ret = disp_ops->register_func_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, + vf_info[i].max_tx_rate); + if (ret) + break; + + ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].max_tx_rate, 0); + if (ret) + break; + + ret = disp_ops->set_rx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].meter_rx_rate, 0); + if (ret) + break; + + ret = disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), i, + vf_info[i].spoof_check); + if (ret) + break; + + /* No need to notify vf, vf will get link forced when probe, + * Here we only flush the config. 
+ */ + ret = disp_ops->register_func_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].state, + &should_notify); + if (ret) + break; + } + + if (!ret && net_resource_mgt->sriov_kobj && !is_flush) + ret = nbl_serv_setup_vf_sysfs(serv_mgt); + + if (ret) + net_resource_mgt->num_vfs = 0; + + return ret; +} + +static void nbl_serv_remove_vf_config(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + int i; + + nbl_serv_remove_vf_sysfs(serv_mgt); + + for (i = 0; i < net_resource_mgt->num_vfs; i++) + memset(&vf_info[i], 0, sizeof(vf_info[i])); + + nbl_serv_setup_vf_config(priv, net_resource_mgt->num_vfs, true); + + net_resource_mgt->num_vfs = 0; +} + +static void nbl_serv_register_dev_name(void *priv, u16 vsi_id, char *name) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->register_dev_name(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, name); +} + +static void nbl_serv_get_dev_name(void *priv, u16 vsi_id, char *name) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_dev_name(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, name); +} + +static int nbl_serv_setup_vf_resource(void *priv, int num_vfs) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info; + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + int i; + + net_resource_mgt->total_vfs = num_vfs; + + net_resource_mgt->vf_info = devm_kcalloc(dev, net_resource_mgt->total_vfs, + sizeof(struct nbl_serv_vf_info), GFP_KERNEL); + if (!net_resource_mgt->vf_info) + return -ENOMEM; + + vf_info = net_resource_mgt->vf_info; + for (i = 0; i < net_resource_mgt->total_vfs; i++) { + vf_info[i].state = IFLA_VF_LINK_STATE_AUTO; + vf_info[i].spoof_check = false; + } + + net_resource_mgt->sriov_kobj = kobject_create_and_add("sriov", &dev->kobj); + if (!net_resource_mgt->sriov_kobj) + nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_MAIN, + "Fail to create sriov sysfs"); + + return 0; +} + +static void nbl_serv_remove_vf_resource(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + + nbl_serv_remove_vf_config(priv); + + kobject_put(net_resource_mgt->sriov_kobj); + + if (net_resource_mgt->vf_info) { + devm_kfree(dev, net_resource_mgt->vf_info); + net_resource_mgt->vf_info = NULL; + } +} + +static void nbl_serv_cfg_fd_update_event(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_fd_update_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); +} + +static void nbl_serv_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_xdp_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), queue_num, 
queue_size, + vsi_id); +} + +static void nbl_serv_assign_xdp_prog(struct net_device *netdev, struct bpf_prog *prog) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct bpf_prog *old_prog; + + old_prog = xchg(&ring_mgt->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + disp_ops->set_rings_xdp_prog(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), (void *)prog); +} + +static int nbl_serv_setup_xdp_prog(struct net_device *netdev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + int was_running; + int err; + + if (prog && test_bit(NBL_USER, adapter->state)) + return -EIO; + + if (!ring_mgt->vsi_info[NBL_VSI_XDP].ring_num) + return -ENOSPC; + + was_running = netif_running(netdev); + if (was_running) { + err = nbl_serv_netdev_stop(netdev); + if (err) { + netdev_err(netdev, "Netdev stop failed while setting up XDP prog\n"); + return err; + } + } + + nbl_serv_assign_xdp_prog(netdev, prog); + + if (was_running) { + err = nbl_serv_netdev_open(netdev); + if (err) { + netdev_err(netdev, "Netdev open failed after setting up XDP prog\n"); + return err; + } + } + + if (prog) + set_bit(NBL_XDP, adapter->state); + else + clear_bit(NBL_XDP, adapter->state); + + return 0; +} + +static int nbl_serv_set_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return nbl_serv_setup_xdp_prog(netdev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + +static void nbl_serv_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->set_hw_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), hw_status); +} + +static void nbl_serv_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_active_func_bitmaps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), bitmap, max_func); +} + +static void nbl_serv_get_rdma_rate(void *priv, int *rdma_rate) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + + *rdma_rate = qos_info->rdma_rate; +} + +static void nbl_serv_get_net_rate(void *priv, int *net_rate) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + + *net_rate = qos_info->net_rate; +} + +static void nbl_serv_get_rdma_bw(void *priv, int *rdma_bw) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + + *rdma_bw = qos_info->rdma_bw; +} + +static int nbl_serv_configure_rdma_bw(void *priv, u8 eth_id, int
rdma_bw) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret; + + ret = disp_ops->configure_rdma_bw(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, rdma_bw); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure rdma bw failed ret %d\n", ret); + return ret; + } + + qos_info->rdma_bw = rdma_bw; + + return 0; +} + +static ssize_t nbl_serv_pfc_show(void *priv, u8 eth_id, char *buf) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + + return scnprintf(buf, PAGE_SIZE, "%d,%d,%d,%d,%d,%d,%d,%d\n", + qos_info->pfc[0], qos_info->pfc[1], + qos_info->pfc[2], qos_info->pfc[3], + qos_info->pfc[4], qos_info->pfc[5], + qos_info->pfc[6], qos_info->pfc[7]); +} + +static int nbl_serv_configure_pfc(void *priv, u8 eth_id, u8 *pfc) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + bool changed = false; + int ret; + int i; + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { + if (pfc[i] != qos_info->pfc[i]) { + changed = true; + break; + } + } + + if (!changed) + return 0; + + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, pfc, net_resource_mgt->qos_info.trust_mode, + net_resource_mgt->qos_info.dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure pfc failed ret %d\n", ret); + return ret; + } + + memcpy(net_resource_mgt->qos_info.pfc, pfc, NBL_MAX_PFC_PRIORITIES); + + return ret; +} + +static ssize_t nbl_serv_trust_mode_show(void *priv, u8 eth_id, char *buf) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + + return scnprintf(buf, PAGE_SIZE, "%s\n", + qos_info->trust_mode == NBL_TRUST_MODE_DSCP ? 
"dscp" : "802.1p"); +} + +static int nbl_serv_configure_trust(void *priv, u8 eth_id, u8 trust_mode) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; + + if (net_resource_mgt->qos_info.trust_mode == trust_mode) + return 0; + + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, net_resource_mgt->qos_info.pfc, trust_mode, + net_resource_mgt->qos_info.dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure trust_mode failed ret %d\n", ret); + return ret; + } + + net_resource_mgt->qos_info.trust_mode = trust_mode; + + return ret; +} + +static ssize_t nbl_serv_dscp2prio_show(void *priv, u8 eth_id, char *buf) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + int len = 0; + int i; + + len += snprintf(buf + len, PAGE_SIZE - len, "dscp2prio mapping:\n"); + for (i = 0; i < NBL_DSCP_MAX; i++) + len += snprintf(buf + len, PAGE_SIZE - len, "\tprio:%d dscp:%d,\n", + qos_info->dscp2prio_map[i], i); + + return len; +} + +static int nbl_serv_configure_dscp2prio(void *priv, u8 eth_id, const char *buf, size_t count) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + char cmd[8]; + int dscp, prio, ret; + int i; + + ret = sscanf(buf, "%7[^,], %d , %d", cmd, &dscp, &prio); + + if (strncmp(cmd, "set", 3) == 0) { + if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX || prio < 0 || prio > 7) + return -EINVAL; + qos_info->dscp2prio_map[dscp] = prio; + } else if (strncmp(cmd, "del", 3) == 0) { + if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX) + return -EINVAL; + if (qos_info->dscp2prio_map[dscp] == 0) + return -EINVAL; + qos_info->dscp2prio_map[dscp] = 0; + } else if (strncmp(cmd, "flush", 5) == 0) { + for (i = 0; i < NBL_DSCP_MAX; i++) + qos_info->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES; + } else { + return -EINVAL; + } + + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, qos_info->pfc, + qos_info->trust_mode, qos_info->dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure dscp2prio failed ret %d\n", ret); + return ret; + } + + return count; +} + +static int nbl_serv_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret; + + ret = disp_ops->set_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, prio, xoff, xon); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure pfc buffer size failed ret %d\n", ret); + return ret; + } + + qos_info->buffer_sizes[prio][0] = xoff; + 
qos_info->buffer_sizes[prio][1] = xon; + + return ret; +} + +static ssize_t nbl_serv_pfc_buffer_size_show(void *priv, u8 eth_id, char *buf) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + int prio; + ssize_t count = 0; + + for (prio = 0; prio < NBL_MAX_PFC_PRIORITIES; prio++) + count += snprintf(buf + count, PAGE_SIZE - count, "prio %d, xoff %d, xon %d\n", + prio, qos_info->buffer_sizes[prio][0], + qos_info->buffer_sizes[prio][1]); + + return count; +} + +static u8 nbl_serv_dcb_get_num_tc(struct net_device *netdev, struct ieee_ets *ets) +{ + bool tc_unused = false; + u8 num_tc = 0; + u8 ret = 0; + int i; + + /* Scan the ETS Config Priority Table to find traffic classes + * enabled and create a bitmask of enabled TCs + */ + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + num_tc |= BIT(ets->prio_tc[i]); + + /* Scan bitmask for contiguous TCs starting with TC0 */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (num_tc & BIT(i)) { + if (!tc_unused) { + ret++; + } else { + netdev_err(netdev, "Non-contiguous TCs - Disabling DCB\n"); + return 1; + } + } else { + tc_unused = true; + } + } + + /* There is always at least 1 TC */ + if (!ret) + ret = 1; + + return ret; +} + +static int nbl_serv_bwchk(struct net_device *netdev, struct ieee_ets *ets) +{ + u8 num_tc, total_bw = 0; + int i; + + num_tc = nbl_serv_dcb_get_num_tc(netdev, ets); + + /* no bandwidth checks required if there's only one TC, so assign + * all bandwidth to TC0 and return + */ + if (num_tc == 1) { + ets->tc_reco_bw[0] = NBL_TC_MAX_BW; + return 0; + } + + for (i = 0; i < num_tc; i++) + total_bw += ets->tc_reco_bw[i]; + + if (!total_bw) { + ets->tc_reco_bw[0] = NBL_TC_MAX_BW; + } else if (total_bw != NBL_TC_MAX_BW) { + netdev_err(netdev, "Invalid config, total bandwidth must equal 100\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_serv_ieee_setets(struct net_device *netdev, struct ieee_ets *ets) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct ieee_ets ets_tmp = {0}; + int bwcfg = 0, bwrec = 0; + int ret; + int i; + + memcpy(&ets_tmp, ets, sizeof(ets_tmp)); + + if (nbl_serv_bwchk(netdev, &ets_tmp)) + return -EINVAL; + + for (i = 0; i < NBL_MAX_TC_NUM; i++) { + bwcfg += ets->tc_tx_bw[i]; + bwrec += ets->tc_reco_bw[i]; + } + + if (!bwcfg) + ets_tmp.tc_tx_bw[0] = NBL_TC_MAX_BW; + + if (!bwrec) + ets_tmp.tc_reco_bw[0] = NBL_TC_MAX_BW; + + ret = disp_ops->set_tc_wgt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), + ets_tmp.tc_tx_bw, NBL_MAX_TC_NUM); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "set_tc_wgt failed ret %d\n", ret); + return ret; + } + + memcpy(&qos_info->ets, &ets_tmp, sizeof(struct ieee_ets)); + return 0; +} + +static int nbl_serv_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info 
*qos_info = &net_resource_mgt->qos_info; + + memcpy(ets, &qos_info->ets, sizeof(struct ieee_ets)); + ets->ets_cap = NBL_MAX_TC_NUM; + return 0; +} + +static int nbl_serv_ieee_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + return 0; +} + +static int nbl_serv_ieee_getpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + return 0; +} + +static int nbl_serv_ieee_delapp(struct net_device *netdev, struct dcb_app *app) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + int ret; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= NBL_DSCP_MAX) + return -EINVAL; -static const struct sysfs_ops nbl_sysfs_ops_vf = { - .show = nbl_serv_vf_config_show, - .store = nbl_serv_vf_config_store, -}; + if (qos_info->dscp2prio_map[app->protocol] != app->priority) + return -ENOENT; -static const struct kobj_type nbl_kobj_vf_type = { - .sysfs_ops = &nbl_sysfs_ops_vf, - .default_groups = nbl_vf_config_groups, -}; + ret = dcb_ieee_delapp(netdev, app); + if (ret) + return ret; -static int nbl_serv_setup_vf_sysfs(struct nbl_service_mgt *serv_mgt) + qos_info->dscp2prio_map[app->protocol] = 0; + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), qos_info->pfc, + qos_info->trust_mode, qos_info->dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "delapp configure dscp2prio failed ret %d\n", ret); + return ret; + } + + return 0; +} + +static int nbl_serv_ieee_setapp(struct net_device *netdev, struct dcb_app *app) { - struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; - int i = 0, ret = 0; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + int ret; - for (i = 0; i < net_resource_mgt->num_vfs; i++) { - vf_info[i].priv = serv_mgt; - vf_info[i].vf_id = (u16)i; + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= NBL_DSCP_MAX) + return -EINVAL; - ret = kobject_init_and_add(&vf_info[i].kobj, &nbl_kobj_vf_type, - net_resource_mgt->sriov_kobj, "vf%d", i); - if (ret) - goto err; + if (qos_info->dscp2prio_map[app->protocol] == app->priority) + return 0; + + ret = dcb_ieee_setapp(netdev, app); + if (ret) + return ret; + + qos_info->trust_mode = NBL_TRUST_MODE_DSCP; + qos_info->dscp2prio_map[app->protocol] = app->priority; + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), qos_info->pfc, + qos_info->trust_mode, qos_info->dscp2prio_map); + + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "setapp configure dscp2prio failed ret %d\n", ret); + return ret; } return 0; +} -err: - while (--i + 1) - kobject_put(&vf_info[i].kobj); +static void nbl_serv_dcbnl_getpfccfg(struct net_device *netdev, int prio, u8 *setting) +{ + struct 
nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - return ret; + if (prio >= NBL_MAX_PFC_PRIORITIES) + return; + + *setting = qos_info->pfc[prio]; } -static void nbl_serv_remove_vf_sysfs(struct nbl_service_mgt *serv_mgt) +static int nbl_serv_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { - struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; - int i = 0; + *num = NBL_MAX_TC_NUM; - for (i = 0; i < net_resource_mgt->num_vfs; i++) - kobject_put(&vf_info[i].kobj); + return 0; } -static int nbl_serv_setup_vf_config(void *priv, int num_vfs, bool is_flush) +static void nbl_serv_dcbnl_setpfccfg(struct net_device *netdev, int prio, u8 set) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; - struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - u16 func_id = U16_MAX; - u16 vlan_tci; - bool should_notify; - int i, ret = 0; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + u8 pfc[NBL_MAX_PFC_PRIORITIES] = {0}; + int ret; - net_resource_mgt->num_vfs = num_vfs; + if (prio >= NBL_MAX_PFC_PRIORITIES) + return; - for (i = 0; i < net_resource_mgt->num_vfs; i++) { - func_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), i); + if (qos_info->pfc[prio] == set) + return; - if (func_id == U16_MAX) { - nbl_err(common, NBL_DEBUG_MAIN, "vf id %d invalid\n", i); - return -EINVAL; - } + memcpy(pfc, qos_info->pfc, NBL_MAX_PFC_PRIORITIES); + pfc[prio] = set; + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(common), pfc, + net_resource_mgt->qos_info.trust_mode, + net_resource_mgt->qos_info.dscp2prio_map); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "configure pfc failed ret %d\n", ret); + return; + } - disp_ops->register_func_mac(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - vf_info[i].mac, func_id); + memcpy(qos_info->pfc, pfc, NBL_MAX_PFC_PRIORITIES); +} - vlan_tci = vf_info[i].vlan | (u16)(vf_info[i].vlan_qos << VLAN_PRIO_SHIFT); - ret = disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, - vlan_tci, vf_info[i].vlan_proto, - &should_notify); - if (ret) - break; +static u8 nbl_serv_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - ret = disp_ops->register_func_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, - vf_info[i].max_tx_rate); - if (ret) - break; + *cap = true; - ret = 
disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - func_id, vf_info[i].max_tx_rate); - if (ret) - break; + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = qos_info->dcbx_mode; + break; + default: + *cap = false; + break; + } + return 0; +} - ret = disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - NBL_COMMON_TO_VSI_ID(common), i, - vf_info[i].spoof_check); - if (ret) - break; +static u8 nbl_serv_ieee_getdcbx(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - /* No need to notify vf, vf will get link forced when probe, - * Here we only flush the config. - */ - ret = disp_ops->register_func_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - func_id, vf_info[i].state, - &should_notify); - if (ret) - break; - } + return qos_info->dcbx_mode; +} - if (!ret && net_resource_mgt->sriov_kobj && !is_flush) - ret = nbl_serv_setup_vf_sysfs(serv_mgt); +static u8 nbl_serv_ieee_setdcbx(struct net_device *netdev, u8 mode) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - if (ret) - net_resource_mgt->num_vfs = 0; + qos_info->dcbx_mode = mode; - return ret; + return 0; } -static void nbl_serv_remove_vf_config(void *priv) +static u8 nbl_serv_dcnbl_setstate(struct net_device *netdev, u8 state) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; - int i; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - nbl_serv_remove_vf_sysfs(serv_mgt); + if (qos_info->dcbx_state == state) + return NBL_DCB_NO_HW_CHG; - for (i = 0; i < net_resource_mgt->num_vfs; i++) - memset(&vf_info[i], 0, sizeof(vf_info[i])); + qos_info->dcbx_state = state; + return NBL_DCB_HW_CHG; +} - nbl_serv_setup_vf_config(priv, net_resource_mgt->num_vfs, true); +static u8 nbl_serv_dcnbl_getstate(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; - net_resource_mgt->num_vfs = 0; + return qos_info->dcbx_state; } -static int nbl_serv_setup_vf_resource(void *priv, int num_vfs) +static u8 nbl_serv_dcnbl_getpfcstate(struct net_device *netdev) { - struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - 
struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct nbl_serv_vf_info *vf_info; - struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; int i; - net_resource_mgt->total_vfs = num_vfs; - - net_resource_mgt->vf_info = devm_kcalloc(dev, net_resource_mgt->total_vfs, - sizeof(struct nbl_serv_vf_info), GFP_KERNEL); - if (!net_resource_mgt->vf_info) - return -ENOMEM; - - vf_info = net_resource_mgt->vf_info; - for (i = 0; i < net_resource_mgt->total_vfs; i++) { - vf_info[i].state = IFLA_VF_LINK_STATE_AUTO; - vf_info[i].spoof_check = false; - } - - net_resource_mgt->sriov_kobj = kobject_create_and_add("SRIOV", &dev->kobj); - if (!net_resource_mgt->sriov_kobj) - nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_MAIN, - "Fail to create sriov sysfs"); + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) + if (qos_info->pfc[i]) + return 1; return 0; } -static void nbl_serv_remove_vf_resource(void *priv) +static void nbl_serv_get_board_info(void *priv, struct nbl_board_port_info *board_info) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); - struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); - - nbl_serv_remove_vf_config(priv); - - kobject_put(net_resource_mgt->sriov_kobj); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - if (net_resource_mgt->vf_info) { - devm_kfree(dev, net_resource_mgt->vf_info); - net_resource_mgt->vf_info = NULL; - } + disp_ops->get_board_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), board_info); } -static void nbl_serv_cfg_fd_update_event(void *priv, bool enable) +static int nbl_serv_set_rate_limit(void *priv, enum nbl_traffic_type type, u32 rate) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_serv_qos_info *qos_info = &net_resource_mgt->qos_info; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret = 0; - disp_ops->cfg_fd_update_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); + ret = disp_ops->set_rate_limit(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type, rate); + if (ret) { + nbl_err(common, NBL_DEBUG_MAIN, "set_rate type %d failed ret %d\n", type, ret); + return ret; + } + + if (type == NBL_TRAFFIC_RDMA_TYPE) + qos_info->rdma_rate = rate; + else + qos_info->net_rate = rate; + + return ret; } -static void nbl_serv_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +static void nbl_serv_get_mirror_table_id(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - disp_ops->get_xdp_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), queue_num, queue_size, - vsi_id); + disp_ops->get_mirror_table_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, dir, mirror_en, mt_id); } -static void nbl_serv_set_hw_status(void *priv, enum nbl_hw_status hw_status) +static int nbl_serv_configure_mirror(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id) { 
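+	/*
+	 * Notify listeners of the mirror select-port event before programming
+	 * the mirror rule for func_id through the dispatch layer. Usage
+	 * sketch via the "mirror" sysfs group created in
+	 * nbl_netdev_add_mirror_sysfs() (path and values are illustrative):
+	 *
+	 *   echo "mirror_en: 1, mirror_port: vf0" > \
+	 *       /sys/class/net/<ifname>/mirror/configure_down_mirror
+	 */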
struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; - disp_ops->set_hw_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), hw_status); + nbl_event_notify(NBL_EVENT_MIRROR_SELECTPORT, &mirror_en, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + ret = disp_ops->configure_mirror(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, mirror_en, dir, mt_id); + return ret; } -static void nbl_serv_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +static int nbl_serv_configure_mirror_table(void *priv, bool mirror_en, + u16 func_id, u8 mt_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; - disp_ops->get_active_func_bitmaps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), bitmap, max_func); + ret = disp_ops->check_vf_is_active(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); + if (!ret) + return -EIO; + + ret = disp_ops->configure_mirror_table(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + mirror_en, func_id, mt_id); + nbl_serv_chan_notify_mirror_outputport_req(serv_mgt, func_id, mirror_en); + return ret; } -static int nbl_serv_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) +static int nbl_serv_clear_mirror_cfg(void *priv, u16 func_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; - struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); int ret; - ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - eth_id, pfc, trust, dscp2prio_map); - - net_resource_mgt->pfc_mode = trust; - memcpy(net_resource_mgt->dscp2prio_map, dscp2prio_map, NBL_DSCP_MAX); + ret = disp_ops->clear_mirror_cfg(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id); return ret; } -static int nbl_serv_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +u16 nbl_serv_get_vf_function_id(void *priv, int vf_id) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - int ret; - ret = disp_ops->set_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - eth_id, prio, xoff, xon); + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return U16_MAX; - return ret; + return disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id); } -static int nbl_serv_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +static void nbl_serv_cfg_mirror_outputport_event(void *priv, bool enable) { struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); - int ret; - - ret = disp_ops->get_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), - eth_id, prio, xoff, xon); - return ret; + disp_ops->cfg_mirror_outputport_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); } static struct nbl_service_ops serv_ops = { - .init_chip_factory = nbl_serv_init_chip_factory, - .destroy_chip_factory = nbl_serv_destroy_chip_factory, .init_chip = nbl_serv_init_chip, .destroy_chip = nbl_serv_destroy_chip, + .init_p4 
= nbl_serv_init_p4, .configure_msix_map = nbl_serv_configure_msix_map, .destroy_msix_map = nbl_serv_destroy_msix_map, @@ -5403,6 +7452,7 @@ static struct nbl_service_ops serv_ops = { .remove_q2vsi = nbl_serv_remove_q2vsi, .setup_rss = nbl_serv_setup_rss, .remove_rss = nbl_serv_remove_rss, + .setup_rss_indir = nbl_serv_setup_rss_indir, .register_vsi_info = nbl_serv_register_vsi_info, .alloc_rings = nbl_serv_alloc_rings, @@ -5413,6 +7463,9 @@ static struct nbl_service_ops serv_ops = { .set_mask_en = nbl_serv_set_mask_en, .start_net_flow = nbl_serv_start_net_flow, .stop_net_flow = nbl_serv_stop_net_flow, + .clear_flow = nbl_serv_clear_flow, + .set_promisc_mode = nbl_serv_set_promisc_mode, + .cfg_multi_mcast = nbl_serv_cfg_multi_mcast, .set_lldp_flow = nbl_serv_set_lldp_flow, .remove_lldp_flow = nbl_serv_remove_lldp_flow, .start_mgt_flow = nbl_serv_start_mgt_flow, @@ -5431,6 +7484,7 @@ static struct nbl_service_ops serv_ops = { .netdev_open = nbl_serv_netdev_open, .netdev_stop = nbl_serv_netdev_stop, .change_mtu = nbl_serv_change_mtu, + .change_rep_mtu = nbl_serv_change_rep_mtu, .set_mac = nbl_serv_set_mac, .rx_add_vid = nbl_serv_rx_add_vid, .rx_kill_vid = nbl_serv_rx_kill_vid, @@ -5440,18 +7494,20 @@ static struct nbl_service_ops serv_ops = { .set_features = nbl_serv_set_features, .features_check = nbl_serv_features_check, .setup_tc = nbl_serv_setup_tc, - .set_vf_spoofchk = nbl_serv_set_vf_spoofchk, .get_phys_port_name = nbl_serv_get_phys_port_name, .get_port_parent_id = nbl_serv_get_port_parent_id, .tx_timeout = nbl_serv_tx_timeout, .bridge_setlink = nbl_serv_bridge_setlink, .bridge_getlink = nbl_serv_bridge_getlink, + .set_vf_spoofchk = nbl_serv_set_vf_spoofchk, .set_vf_link_state = nbl_serv_set_vf_link_state, .set_vf_mac = nbl_serv_set_vf_mac, .set_vf_rate = nbl_serv_set_vf_rate, .set_vf_vlan = nbl_serv_set_vf_vlan, .get_vf_config = nbl_serv_get_vf_config, + .get_vf_stats = nbl_serv_get_vf_stats, .select_queue = nbl_serv_select_queue, + .set_vf_trust = nbl_serv_set_vf_trust, /* For rep associated */ .rep_netdev_open = nbl_serv_rep_netdev_open, @@ -5498,6 +7554,9 @@ static struct nbl_service_ops serv_ops = { .get_eth_id = nbl_serv_get_eth_id, .setup_net_resource_mgt = nbl_serv_setup_net_resource_mgt, .remove_net_resource_mgt = nbl_serv_remove_net_resource_mgt, + .init_hw_stats = nbl_serv_init_hw_stats, + .remove_hw_stats = nbl_serv_remove_hw_stats, + .get_rx_dropped = nbl_serv_get_rx_dropped, .enable_lag_protocol = nbl_serv_enable_lag_protocol, .cfg_lag_hash_algorithm = nbl_serv_cfg_lag_hash_algorithm, .cfg_lag_member_fwd = nbl_serv_cfg_lag_member_fwd, @@ -5505,6 +7564,7 @@ static struct nbl_service_ops serv_ops = { .cfg_lag_member_up_attr = nbl_serv_cfg_lag_member_up_attr, .cfg_bond_shaping = nbl_serv_cfg_bond_shaping, .cfg_bgid_back_pressure = nbl_serv_cfg_bgid_back_pressure, + .get_board_info = nbl_serv_get_board_info, .get_rdma_cap_num = nbl_serv_get_rdma_cap_num, .setup_rdma_id = nbl_serv_setup_rdma_id, @@ -5526,6 +7586,10 @@ static struct nbl_service_ops serv_ops = { .recovery_abnormal = nbl_serv_recovery_abnormal, .keep_alive = nbl_serv_keep_alive, + .get_mirror_table_id = nbl_serv_get_mirror_table_id, + .configure_mirror = nbl_serv_configure_mirror, + .configure_mirror_table = nbl_serv_configure_mirror_table, + .clear_mirror_cfg = nbl_serv_clear_mirror_cfg, .get_devlink_info = nbl_serv_get_devlink_info, .update_devlink_flash = nbl_serv_update_devlink_flash, .get_adminq_tx_buf_size = nbl_serv_get_adminq_tx_buf_size, @@ -5537,26 +7601,53 @@ static struct nbl_service_ops serv_ops = { 
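+	/*
+	 * The ieee_xxx/dcbnl_xxx entries added below are surfaced through the
+	 * kernel dcbnl interface (struct dcbnl_rtnl_ops). Usage sketch with
+	 * iproute2's dcb tool (interface name and values are illustrative):
+	 *
+	 *   dcb app add dev <ifname> dscp-prio 24:3
+	 *
+	 * lands in nbl_serv_ieee_setapp(), which switches the trust mode to
+	 * DSCP and reprograms the dscp2prio table via configure_qos.
+	 */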
.cfg_eth_bond_event = nbl_serv_cfg_eth_bond_event, .cfg_fd_update_event = nbl_serv_cfg_fd_update_event, - /* For virtio */ - .configure_virtio_dev_msix = nbl_serv_configure_virtio_dev_msix, .configure_rdma_msix_off = nbl_serv_configure_rdma_msix_off, - .configure_virtio_dev_ready = nbl_serv_configure_virtio_dev_ready, - .setup_st = nbl_serv_setup_st, .remove_st = nbl_serv_remove_st, .get_vf_base_vsi_id = nbl_serv_get_vf_base_vsi_id, .setup_vf_config = nbl_serv_setup_vf_config, .remove_vf_config = nbl_serv_remove_vf_config, + .register_dev_name = nbl_serv_register_dev_name, + .get_dev_name = nbl_serv_get_dev_name, .setup_vf_resource = nbl_serv_setup_vf_resource, .remove_vf_resource = nbl_serv_remove_vf_resource, .get_xdp_queue_info = nbl_serv_get_xdp_queue_info, + .set_xdp = nbl_serv_set_xdp, .set_hw_status = nbl_serv_set_hw_status, .get_active_func_bitmaps = nbl_serv_get_active_func_bitmaps, - .configure_qos = nbl_serv_configure_qos, + .get_net_rate = nbl_serv_get_net_rate, + .get_rdma_rate = nbl_serv_get_rdma_rate, + .get_rdma_bw = nbl_serv_get_rdma_bw, + .configure_rdma_bw = nbl_serv_configure_rdma_bw, + .configure_pfc = nbl_serv_configure_pfc, + .configure_trust = nbl_serv_configure_trust, + .configure_dscp2prio = nbl_serv_configure_dscp2prio, + .trust_mode_show = nbl_serv_trust_mode_show, + .dscp2prio_show = nbl_serv_dscp2prio_show, + .pfc_show = nbl_serv_pfc_show, + .pfc_buffer_size_show = nbl_serv_pfc_buffer_size_show, .set_pfc_buffer_size = nbl_serv_set_pfc_buffer_size, - .get_pfc_buffer_size = nbl_serv_get_pfc_buffer_size, + .set_rate_limit = nbl_serv_set_rate_limit, + + .ieee_setets = nbl_serv_ieee_setets, + .ieee_getets = nbl_serv_ieee_getets, + .ieee_setpfc = nbl_serv_ieee_setpfc, + .ieee_getpfc = nbl_serv_ieee_getpfc, + .ieee_setapp = nbl_serv_ieee_setapp, + .ieee_delapp = nbl_serv_ieee_delapp, + .dcbnl_setpfccfg = nbl_serv_dcbnl_setpfccfg, + .dcbnl_getpfccfg = nbl_serv_dcbnl_getpfccfg, + .dcbnl_getnumtcs = nbl_serv_dcbnl_getnumtcs, + .ieee_getdcbx = nbl_serv_ieee_getdcbx, + .ieee_setdcbx = nbl_serv_ieee_setdcbx, + .dcbnl_getcap = nbl_serv_dcbnl_getcap, + .dcbnl_getstate = nbl_serv_dcnbl_getstate, + .dcbnl_setstate = nbl_serv_dcnbl_setstate, + .dcbnl_getpfcstate = nbl_serv_dcnbl_getpfcstate, + .get_vf_function_id = nbl_serv_get_vf_function_id, + .cfg_mirror_outputport_event = nbl_serv_cfg_mirror_outputport_event, }; /* Structure starts here, adding an op should not modify anything below */ @@ -5579,9 +7670,11 @@ static int nbl_serv_setup_serv_mgt(struct nbl_common_info *common, static void nbl_serv_remove_serv_mgt(struct nbl_common_info *common, struct nbl_service_mgt **serv_mgt) { - struct device *dev; + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(*serv_mgt); - dev = NBL_COMMON_TO_DEV(common); + if (ring_mgt->rss_indir_user) + devm_kfree(dev, ring_mgt->rss_indir_user); devm_kfree(dev, *serv_mgt); *serv_mgt = NULL; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h index a7e4265d0954..92557c022f3f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -29,6 +29,8 @@ #define NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt) (NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt)->priv) #define NBL_DEFAULT_VLAN_ID 0 +#define NBL_HW_STATS_PERIOD_SECONDS 5 +#define NBL_HW_STATS_RX_RATE_THRESHOLD (1000) /* 1k pps */ #define NBL_REP_QUEUE_MGT_DESC_MAX (32768) #define NBL_REP_QUEUE_MGT_DESC_NUM (2048) @@ -46,10 +48,6 @@ #define NBL_TX_TSO_L2L3L4_HDR_LEN_MAX (128) #define NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX (255) -#define NBL_FLAG_AQ_MODIFY_MAC_FILTER BIT(0) -#define NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT(1) -#define NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT(1) - #define NBL_EEPROM_LENGTH (0) /* input set */ @@ -68,8 +66,15 @@ #define SET_DPORT_TYPE_ETH_LAG (2) #define SET_DPORT_TYPE_SP_PORT (3) +#define NBL_MAX_BURST 524287 + #define NBL_VLAN_PCP_SHIFT 13 +/* primary vlan in vlan list */ +#define NBL_NO_TRUST_MAX_VLAN 9 +/* primary mac not in submac list */ +#define NBL_NO_TRUST_MAX_MAC 12 + #define NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN 32 #define NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE 4 @@ -108,7 +113,7 @@ struct nbl_serv_vector { char name[32]; cpumask_t cpumask; struct net_device *netdev; - struct napi_struct *napi; + struct nbl_napi_struct *nbl_napi; struct nbl_serv_ring *tx_ring; struct nbl_serv_ring *rx_ring; u8 __iomem *irq_enable_base; @@ -135,6 +140,7 @@ struct nbl_serv_ring_mgt { struct nbl_serv_vector *vectors; void *xdp_prog; struct nbl_serv_ring_vsi_info vsi_info[NBL_VSI_MAX]; + u32 *rss_indir_user; u16 tx_desc_num; u16 rx_desc_num; u16 tx_ring_num; @@ -147,20 +153,51 @@ struct nbl_serv_ring_mgt { struct nbl_serv_vlan_node { struct list_head node; u16 vid; + // primary_mac_effective means base mac + vlan ok + u16 primary_mac_effective; + // sub_mac_effective means sub mac + vlan ok + u16 sub_mac_effective; u16 ref_cnt; }; struct nbl_serv_submac_node { struct list_head node; u8 mac[ETH_ALEN]; + // effective means this submac + allvlan flowrule effective + u16 effective; +}; + +enum { + NBL_PROMISC = 0, + NBL_ALLMULTI = 1, + NBL_USER_FLOW = 2, + NBL_MIRROR = 3, +}; + +enum { + NBL_SUBMAC_UNICAST = 0, + NBL_SUBMAC_MULTI = 1, + NBL_SUBMAC_MAX = 2 }; struct nbl_serv_flow_mgt { struct list_head vlan_list; - struct list_head submac_list; + struct list_head submac_list[NBL_SUBMAC_MAX]; u16 vid; u8 mac[ETH_ALEN]; u8 eth; + bool trusted_en; + bool trusted_update; + u16 vlan_list_cnt; + u16 active_submac_list; + u16 submac_list_cnt; + u16 unicast_mac_cnt; + u16 multi_mac_cnt; + u16 promisc; + bool force_promisc; + bool unicast_flow_enable; + bool multicast_flow_enable; + bool pending_async_work; }; struct nbl_mac_filter { @@ -180,6 +217,7 @@ enum nbl_adapter_flags { NBL_FLAG_P4_DEFAULT, NBL_FLAG_LINK_DOWN_ON_CLOSE, NBL_FLAG_NRZ_RS_FEC_544_SUPPORT, + NBL_FLAG_HIGH_THROUGHPUT, NBL_ADAPTER_FLAGS_MAX }; @@ -210,16 +248,43 @@ struct nbl_sysfs_vf_config_attr { struct nbl_serv_vf_info { struct kobject kobj; + struct kobject meters_kobj; + struct kobject rx_kobj; + struct kobject tx_kobj; + struct kobject rx_bps_kobj; + struct kobject tx_bps_kobj; void *priv; u16 vf_id; int state; int spoof_check; int max_tx_rate; + int meter_tx_rate; + int meter_rx_rate; + int meter_tx_burst; + int meter_rx_burst; u8 mac[ETH_ALEN]; u16 vlan; u16 vlan_proto; u8 vlan_qos; + bool trusted; +}; + +#define NBL_DCB_NO_HW_CHG 1 +#define NBL_DCB_HW_CHG 2 +struct nbl_serv_qos_info { + u8 dcbx_mode; + u8 dcbx_state; + u8 trust_mode; /* Trust Mode value 0:802.1p 1: dscp */ + u8 pfc[NBL_MAX_PFC_PRIORITIES]; + u8 dscp2prio_map[NBL_DSCP_MAX]; /* DSCP -> Priority map */ + int rdma_bw; 
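+	/*
+	 * rdma_bw is the RDMA share of the rdma/normal bandwidth split (the
+	 * two shares must sum to NBL_MAX_BW); rdma_rate and net_rate cache
+	 * the last limits written through set_rate_limit. Sysfs usage sketch
+	 * (path and numbers are illustrative):
+	 *
+	 *   echo "rdma:30,normal:70" > /sys/class/net/<ifname>/qos/rdma_bw
+	 */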
+ u32 rdma_rate; + u32 net_rate; + DECLARE_BITMAP(dscp_mapped, NBL_DSCP_MAX); + struct dcb_app app[NBL_DSCP_MAX]; + int buffer_sizes[NBL_MAX_PFC_PRIORITIES][2]; + struct ieee_ets ets; }; struct nbl_serv_net_resource_mgt { @@ -230,28 +295,28 @@ struct nbl_serv_net_resource_mgt { struct work_struct tx_timeout; struct work_struct update_link_state; struct work_struct update_vlan; + struct work_struct update_mirror_outputport; struct delayed_work watchdog_task; struct timer_list serv_timer; unsigned long serv_timer_period; - /* spinlock_t for rx mode submac */ - spinlock_t mac_vlan_list_lock; - /* spinlock_t for rx mode promisc */ - spinlock_t current_netdev_promisc_flags_lock; - struct list_head mac_filter_list; + struct list_head tmp_add_filter_list; + struct list_head tmp_del_filter_list; struct list_head indr_dev_priv_list; struct nbl_serv_lag_info *lag_info; struct nbl_serv_netdev_ops netdev_ops; - u32 rxmode_set_required; u16 curr_promiscuout_mode; - u16 user_promisc_mode; u16 num_net_msix; + bool update_submac; int num_vfs; int total_vfs; /* stats for netdev */ u64 get_stats_jiffies; struct nbl_stats stats; + struct nbl_hw_stats hw_stats; + unsigned long hw_stats_jiffies; + unsigned long hw_stats_period; struct nbl_priv_stats priv_stats; struct nbl_phy_caps phy_caps; struct nbl_serv_rep_drop *rep_drop; @@ -265,8 +330,9 @@ struct nbl_serv_net_resource_mgt { u16 vlan_tci; u16 vlan_proto; int max_tx_rate; - u8 pfc_mode; - u8 dscp2prio_map[NBL_DSCP_MAX]; /* DSCP -> Priority map */ + u32 dump_flag; + u32 dump_perf_len; + struct nbl_serv_qos_info qos_info; }; struct nbl_serv_rep_queue_mgt { @@ -339,7 +405,6 @@ struct nbl_serv_notify_vlan_param { u16 vlan_tci; u16 vlan_proto; }; - int nbl_serv_netdev_open(struct net_device *netdev); int nbl_serv_netdev_stop(struct net_device *netdev); int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, @@ -348,5 +413,6 @@ int nbl_serv_vsi_stop(void *priv, u16 vsi_index); void nbl_serv_get_rep_drop_stats(struct nbl_service_mgt *serv_mgt, u16 rep_vsi_id, struct nbl_rep_stats *rep_stats); void nbl_serv_cpu_affinity_init(void *priv, u16 rings_num); +u16 nbl_serv_get_vf_function_id(void *priv, int vf_id); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c index 34bbe735bd74..d1fef2c9c764 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c @@ -6,6 +6,20 @@ #include "nbl_dev.h" +#define NBL_SET_RO_ATTR(dev_name_attr, attr_name, attr_show) do { \ + typeof(dev_name_attr) _name_attr = (dev_name_attr); \ + (_name_attr)->attr.name = __stringify(attr_name); \ + (_name_attr)->attr.mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(0444); \ + (_name_attr)->show = attr_show; \ + (_name_attr)->store = NULL; \ +} while (0) + +static ssize_t net_rep_show(struct device *dev, + struct nbl_netdev_name_attr *attr, char *buf) +{ + return scnprintf(buf, IFNAMSIZ, "%s\n", attr->net_dev_name); +} + const char *const nbl_sysfs_qos_name[] = { /* rdma */ "save", @@ -25,74 +39,183 @@ const char *const nbl_sysfs_qos_name[] = { "pfc_buffer", "trust", "dscp2prio", + "rdma_bw", + "rdma_rate", + "net_rate", }; -static ssize_t dscp2prio_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +const char *const nbl_sysfs_mirror_name[] = { + "configure_down_mirror", + "configure_up_mirror", +}; + +static ssize_t rdma_rate_show(struct nbl_sysfs_qos_info *qos_info, char *buf) { struct nbl_dev_net *net_dev = 
qos_info->net_dev; - struct nbl_net_qos *qos_config = &net_dev->qos_config; - int len = 0; - int i; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + u32 rdma_rate = 0; - len += snprintf(buf + len, PAGE_SIZE - len, "dscp2prio mapping:\n"); - for (i = 0; i < NBL_DSCP_MAX; i++) - len += snprintf(buf + len, PAGE_SIZE - len, "\tprio:%d dscp:%d,\n", - qos_config->dscp2prio_map[i], i); + serv_ops->get_rdma_rate(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &rdma_rate); - return len; + return sprintf(buf, "%u\n", rdma_rate); } -static ssize_t dscp2prio_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +static ssize_t rdma_rate_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + unsigned long rate; + int ret; + + ret = kstrtoul(buf, 10, &rate); + if (ret) + return -EINVAL; + + ret = serv_ops->set_rate_limit(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TRAFFIC_RDMA_TYPE, rate); + if (ret) { + netdev_err(net_dev->netdev, "configure_rdma_rate_limit: %s failed\n", buf); + return -EIO; + } + + return count; +} + +static ssize_t net_rate_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + u32 net_rate = 0; + + serv_ops->get_net_rate(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &net_rate); + + return sprintf(buf, "%u\n", net_rate); +} + +static ssize_t net_rate_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + unsigned long rate; + int ret; + + ret = kstrtoul(buf, 10, &rate); + if (ret) + return -EINVAL; + + ret = serv_ops->set_rate_limit(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TRAFFIC_NET_TYPE, rate); + if (ret) { + netdev_err(net_dev->netdev, "configure_net_rate_limit: %s failed\n", buf); + return -EIO; + } + + return count; +} + +static ssize_t rdma_bw_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int rdma_bw = 0; + ssize_t ret; + + serv_ops->get_rdma_bw(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &rdma_bw); + + ret = snprintf(buf, PAGE_SIZE, "rdma:%d, normal:%d\n", + rdma_bw, NBL_MAX_BW - rdma_bw); + return ret; +} + +static ssize_t rdma_bw_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) { struct nbl_dev_net 
*net_dev = qos_info->net_dev;
-	struct nbl_net_qos *qos_config = &net_dev->qos_config;
 	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
 	struct nbl_adapter *adapter = net_priv->adapter;
 	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
 	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
 	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
-	char cmd[8];
-	int dscp, prio, ret;
-	int i;
+	int rdma = 0, normal = 0;
+	int ret;
 
-	ret = sscanf(buf, "%7[^,], %d , %d", cmd, &dscp, &prio);
+	if (sscanf(buf, "rdma:%d,normal:%d", &rdma, &normal) != 2) {
+		pr_err("Invalid format, expected: rdma:<num>,normal:<num>\n");
+		return -EINVAL;
+	}
 
-	if (strncmp(cmd, "set", 3) == 0) {
-		if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX || prio < 0 || prio > 7)
-			return -EINVAL;
-		qos_config->dscp2prio_map[dscp] = prio;
-	} else if (strncmp(cmd, "del", 3) == 0) {
-		if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX)
-			return -EINVAL;
-		if (qos_config->dscp2prio_map[dscp] == 0)
-			return -EINVAL;
-		qos_config->dscp2prio_map[dscp] = 0;
-	} else if (strncmp(cmd, "flush", 5) == 0) {
-		for (i = 0; i < NBL_DSCP_MAX; i++)
-			qos_config->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES;
-	} else {
+	if (rdma + normal != NBL_MAX_BW) {
+		pr_err("Invalid value: sum must be 100\n");
 		return -EINVAL;
 	}
 
-	serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_ETH_ID(common),
-				qos_config->pfc, qos_config->trust_mode, qos_config->dscp2prio_map);
+	ret = serv_ops->configure_rdma_bw(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_COMMON_TO_ETH_ID(common), rdma);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure_rdma_bw: %s failed\n", buf);
+		return -EIO;
+	}
 
 	return count;
 }
 
+static ssize_t dscp2prio_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	return serv_ops->dscp2prio_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					NBL_COMMON_TO_ETH_ID(common), buf);
+}
+
+static ssize_t dscp2prio_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	return serv_ops->configure_dscp2prio(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					     NBL_COMMON_TO_ETH_ID(common),
+					     buf, count);
+}
+
 static ssize_t trust_mode_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
 {
 	struct nbl_dev_net *net_dev = qos_info->net_dev;
-	struct nbl_net_qos *qos_config = &net_dev->qos_config;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
 
-	return scnprintf(buf, PAGE_SIZE, "%s\n",
-			 qos_config->trust_mode == NBL_TRUST_MODE_DSCP ?
"dscp" : "802.1p"); + return serv_ops->trust_mode_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), buf); } static ssize_t trust_mode_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) { struct nbl_dev_net *net_dev = qos_info->net_dev; - struct nbl_net_qos *qos_config = &net_dev->qos_config; struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); struct nbl_adapter *adapter = net_priv->adapter; struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); @@ -110,19 +233,13 @@ static ssize_t trust_mode_store(struct nbl_sysfs_qos_info *qos_info, const char return -EINVAL; } - if (qos_config->trust_mode == trust_mode) - return count; - - ret = serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_COMMON_TO_ETH_ID(common), - qos_config->pfc, trust_mode, qos_config->dscp2prio_map); + ret = serv_ops->configure_trust(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), trust_mode); if (ret) { netdev_err(net_dev->netdev, "configure_qos trust mode: %s failed\n", buf); return -EIO; } - qos_config->trust_mode = trust_mode; - netdev_info(net_dev->netdev, "Trust mode set to %s\n", buf); return count; } @@ -130,23 +247,20 @@ static ssize_t trust_mode_store(struct nbl_sysfs_qos_info *qos_info, const char static ssize_t pfc_buffer_size_show(struct nbl_sysfs_qos_info *qos_info, char *buf) { struct nbl_dev_net *net_dev = qos_info->net_dev; - struct nbl_net_qos *qos_config = &net_dev->qos_config; - int prio; - ssize_t count = 0; - - for (prio = 0; prio < NBL_MAX_PFC_PRIORITIES; prio++) - count += snprintf(buf + count, PAGE_SIZE - count, "prio %d, xoff %d, xon %d\n", - prio, qos_config->buffer_sizes[prio][0], - qos_config->buffer_sizes[prio][1]); + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - return count; + return serv_ops->pfc_buffer_size_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), buf); } static ssize_t pfc_buffer_size_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) { struct nbl_dev_net *net_dev = qos_info->net_dev; - struct nbl_net_qos *qos_config = &net_dev->qos_config; struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); struct nbl_adapter *adapter = net_priv->adapter; struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); @@ -167,8 +281,6 @@ static ssize_t pfc_buffer_size_store(struct nbl_sysfs_qos_info *qos_info, netdev_err(net_dev->netdev, "set_pfc_buffer_size failed\n"); return ret; } - qos_config->buffer_sizes[prio][0] = xoff; - qos_config->buffer_sizes[prio][1] = xon; return count; } @@ -176,19 +288,19 @@ static ssize_t pfc_buffer_size_store(struct nbl_sysfs_qos_info *qos_info, static ssize_t pfc_show(struct nbl_sysfs_qos_info *qos_info, char *buf) { struct nbl_dev_net *net_dev = qos_info->net_dev; - struct nbl_net_qos *qos_config = &net_dev->qos_config; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - return scnprintf(buf, PAGE_SIZE, "%d,%d,%d,%d,%d,%d,%d,%d\n", - qos_config->pfc[0], qos_config->pfc[1], - 
qos_config->pfc[2], qos_config->pfc[3], - qos_config->pfc[4], qos_config->pfc[5], - qos_config->pfc[6], qos_config->pfc[7]); + return serv_ops->pfc_show(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), buf); } static ssize_t pfc_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) { struct nbl_dev_net *net_dev = qos_info->net_dev; - struct nbl_net_qos *qos_config = &net_dev->qos_config; struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); struct nbl_adapter *adapter = net_priv->adapter; struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); @@ -197,7 +309,6 @@ static ssize_t pfc_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, s u8 pfc_config[NBL_MAX_PFC_PRIORITIES]; int ret, i; ssize_t len = count; - bool changed = false; while (len > 0 && (buf[len - 1] == '\n' || buf[len - 1] == ' ')) len--; @@ -221,16 +332,6 @@ static ssize_t pfc_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, s return -EINVAL; } - for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { - if (pfc_config[i] != qos_config->pfc[i]) { - changed = true; - break; - } - } - - if (!changed) - return count; - netdev_info(net_dev->netdev, "Parsed PFC configuration: %u %u %u %u %u %u %u %u\n", pfc_config[0], pfc_config[1], pfc_config[2], pfc_config[3], pfc_config[4], pfc_config[5], pfc_config[6], pfc_config[7]); @@ -239,17 +340,13 @@ static ssize_t pfc_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, s if (pfc_config[i] > 1) return -EINVAL; - ret = serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_COMMON_TO_ETH_ID(common), pfc_config, - qos_config->trust_mode, qos_config->dscp2prio_map); + ret = serv_ops->configure_pfc(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), pfc_config); if (ret) { netdev_err(net_dev->netdev, "configure_qos trust mode: %s failed\n", buf); return -EIO; } - for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) - qos_config->pfc[i] = pfc_config[i]; - return count; } @@ -271,6 +368,12 @@ static ssize_t nbl_qos_show(struct kobject *kobj, struct kobj_attribute *attr, c return dscp2prio_show(qos_info, buf); case NBL_QOS_PFC_BUFFER: return pfc_buffer_size_show(qos_info, buf); + case NBL_QOS_RDMA_BW: + return rdma_bw_show(qos_info, buf); + case NBL_QOS_RDMA_RATE: + return rdma_rate_show(qos_info, buf); + case NBL_QOS_NET_RATE: + return net_rate_show(qos_info, buf); case NBL_QOS_RDMA_SAVE: case NBL_QOS_RDMA_TC2PRI: case NBL_QOS_RDMA_SQ_PRI_MAP: @@ -308,6 +411,12 @@ static ssize_t nbl_qos_store(struct kobject *kobj, struct kobj_attribute *attr, return dscp2prio_store(qos_info, buf, count); case NBL_QOS_PFC_BUFFER: return pfc_buffer_size_store(qos_info, buf, count); + case NBL_QOS_RDMA_BW: + return rdma_bw_store(qos_info, buf, count); + case NBL_QOS_RDMA_RATE: + return rdma_rate_store(qos_info, buf, count); + case NBL_QOS_NET_RATE: + return net_rate_store(qos_info, buf, count); case NBL_QOS_RDMA_SAVE: case NBL_QOS_RDMA_TC2PRI: case NBL_QOS_RDMA_SQ_PRI_MAP: @@ -326,27 +435,95 @@ static ssize_t nbl_qos_store(struct kobject *kobj, struct kobj_attribute *attr, } } -static void nbl_init_qos_config(struct nbl_dev_net *net_dev) +static ssize_t nbl_mirror_select_port_show(struct nbl_sysfs_mirror_info *mirror_info, + char *buf) +{ + ssize_t ret; + + ret = snprintf(buf, PAGE_SIZE, "mirror_en: %d, mirror_port: vf%d\n", + mirror_info->mirror_en, mirror_info->vf_id); + return ret; +} + +static ssize_t nbl_mirror_select_port_store(struct nbl_sysfs_mirror_info *mirror_info, + const char *buf, size_t count, int dir) { + struct 
nbl_dev_net *net_dev = mirror_info->net_dev; struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); struct nbl_adapter *adapter = net_priv->adapter; struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); - struct nbl_net_qos *qos_config = &net_dev->qos_config; - int i; + int vf_id; + int mirror_en; + int ret; + u16 function_id = U16_MAX; + u8 mt_id; - for (i = 0; i < NBL_DSCP_MAX; i++) - qos_config->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES; + if (sscanf(buf, "mirror_en: %d, mirror_port: vf%d", &mirror_en, &vf_id) != 2) + return -EINVAL; - for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) - serv_ops->get_pfc_buffer_size(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), - NBL_COMMON_TO_ETH_ID(common), i, - &qos_config->buffer_sizes[i][0], - &qos_config->buffer_sizes[i][1]); + function_id = serv_ops->get_vf_function_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vf_id); + if (function_id == U16_MAX) { + netdev_info(net_dev->netdev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + serv_ops->get_mirror_table_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common), dir, !!mirror_en, &mt_id); + + if (mt_id == 8) { + netdev_err(net_dev->netdev, "The mirror table configuration is full!"); + return -EINVAL; + } + + ret = serv_ops->configure_mirror(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->mgt_pf, + !!mirror_en, dir, mt_id); + if (ret) { + netdev_err(net_dev->netdev, "configure mirror failed\n"); + return -EIO; + } + + ret = serv_ops->configure_mirror_table(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + !!mirror_en, function_id, mt_id); + if (ret) { + netdev_err(net_dev->netdev, "configure mirror table failed\n"); + return -EIO; + } - serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_ETH_ID(common), - qos_config->pfc, qos_config->trust_mode, qos_config->dscp2prio_map); + mirror_info->mirror_en = mirror_en; + mirror_info->vf_id = vf_id; + return ret ? 
ret : count; +} + +static ssize_t nbl_mirror_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct nbl_sysfs_mirror_info *mirror_info = + container_of(attr, struct nbl_sysfs_mirror_info, kobj_attr); + + switch (mirror_info->offset) { + case NBL_MIRROR_SELECT_SRC_PORT: + case NBL_MIRROR_SELECT_DST_PORT: + return nbl_mirror_select_port_show(mirror_info, buf); + default: + return -EINVAL; + } +} + +static ssize_t nbl_mirror_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_sysfs_mirror_info *mirror_info = + container_of(attr, struct nbl_sysfs_mirror_info, kobj_attr); + + switch (mirror_info->offset) { + case NBL_MIRROR_SELECT_SRC_PORT: + case NBL_MIRROR_SELECT_DST_PORT: + return nbl_mirror_select_port_store(mirror_info, buf, count, + mirror_info->offset); + default: + return -EINVAL; + } } int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev) @@ -354,7 +531,6 @@ int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev) int ret; int i; - nbl_init_qos_config(net_dev); net_dev->qos_config.qos_kobj = kobject_create_and_add("qos", &netdev->dev.kobj); if (!net_dev->qos_config.qos_kobj) return -ENOMEM; @@ -378,6 +554,36 @@ int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev) return 0; } +int nbl_netdev_add_mirror_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev) +{ + int ret; + int i; + + net_dev->mirror_config.mirror_kobj = kobject_create_and_add("mirror", &netdev->dev.kobj); + if (!net_dev->mirror_config.mirror_kobj) + return -ENOMEM; + + for (i = 0; i < NBL_MIRROR_TYPE_MAX; i++) { + net_dev->mirror_config.mirror_info[i].net_dev = net_dev; + net_dev->mirror_config.mirror_info[i].offset = i; + + sysfs_attr_init(&net_dev->mirror_config.mirror_info[i].kobj_attr.attr); + net_dev->mirror_config.mirror_info[i].kobj_attr.attr.name = + nbl_sysfs_mirror_name[i]; + net_dev->mirror_config.mirror_info[i].kobj_attr.attr.mode = 0644; + net_dev->mirror_config.mirror_info[i].kobj_attr.show = nbl_mirror_show; + net_dev->mirror_config.mirror_info[i].kobj_attr.store = nbl_mirror_store; + + ret = sysfs_create_file(net_dev->mirror_config.mirror_kobj, + &net_dev->mirror_config.mirror_info[i].kobj_attr.attr); + + if (ret) + netdev_err(netdev, "Failed to create %s sysfs file\n", + nbl_sysfs_mirror_name[i]); + } + return 0; +} + void nbl_netdev_remove_sysfs(struct nbl_dev_net *net_dev) { int i; @@ -391,3 +597,36 @@ void nbl_netdev_remove_sysfs(struct nbl_dev_net *net_dev) kobject_put(net_dev->qos_config.qos_kobj); } + +void nbl_netdev_remove_mirror_sysfs(struct nbl_dev_net *net_dev) +{ + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int i; + + serv_ops->clear_mirror_cfg(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->mgt_pf); + + if (!net_dev->mirror_config.mirror_kobj) + return; + + for (i = 0; i < NBL_MIRROR_TYPE_MAX; i++) + sysfs_remove_file(net_dev->mirror_config.mirror_kobj, + &net_dev->mirror_config.mirror_info[i].kobj_attr.attr); + + kobject_put(net_dev->mirror_config.mirror_kobj); +} + +void nbl_net_add_name_attr(struct nbl_netdev_name_attr *attr, char *rep_name) +{ + sysfs_attr_init(&attr->attr); + NBL_SET_RO_ATTR(attr, dev_name, net_rep_show); + strscpy(attr->net_dev_name, 
rep_name, IFNAMSIZ); +} + +void nbl_net_remove_dev_attr(struct nbl_dev_net *net_dev) +{ + sysfs_remove_file(&net_dev->netdev->dev.kobj, &net_dev->dev_attr.dev_name_attr.attr); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h index 3d049928f726..dd555bd10479 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -26,9 +26,18 @@ enum nbl_qos_param_types { NBL_QOS_PFC_BUFFER, NBL_QOS_TRUST, NBL_QOS_DSCP2PRIO, + NBL_QOS_RDMA_BW, + NBL_QOS_RDMA_RATE, + NBL_QOS_NET_RATE, NBL_QOS_TYPE_MAX }; +enum nbl_mirror_param_types { + NBL_MIRROR_SELECT_SRC_PORT, + NBL_MIRROR_SELECT_DST_PORT, + NBL_MIRROR_TYPE_MAX +}; + struct nbl_sysfs_qos_info { int offset; struct nbl_dev_net *net_dev; @@ -38,10 +47,19 @@ struct nbl_sysfs_qos_info { struct nbl_net_qos { struct kobject *qos_kobj; struct nbl_sysfs_qos_info qos_info[NBL_QOS_TYPE_MAX]; - u8 pfc[NBL_MAX_PFC_PRIORITIES]; - u8 trust_mode; /* Trust Mode value 0:802.1p 1: dscp */ - u8 dscp2prio_map[NBL_DSCP_MAX]; /* DSCP -> Priority map */ - int buffer_sizes[NBL_MAX_PFC_PRIORITIES][2]; +}; + +struct nbl_sysfs_mirror_info { + int offset; + int mirror_en; + u16 vf_id; + struct nbl_dev_net *net_dev; + struct kobj_attribute kobj_attr; +}; + +struct nbl_net_mirror { + struct kobject *mirror_kobj; + struct nbl_sysfs_mirror_info mirror_info[NBL_MIRROR_TYPE_MAX]; }; #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c index 72b406369161..335cafefef00 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c @@ -1,8 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + #include #include #include "nbl_tc.h" #include "nbl_tc_tun.h" +static int nbl_tc_pedit_header_offsets[] = { + [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct nbl_tc_pedit_headers, eth), + [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct nbl_tc_pedit_headers, ip4), + [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct nbl_tc_pedit_headers, ip6), + [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct nbl_tc_pedit_headers, tcp), + [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct nbl_tc_pedit_headers, udp), +}; + +#define nbl_pedit_header(_ph, _htype) ((void *)(_ph) + nbl_tc_pedit_header_offsets[_htype]) + static int nbl_tc_parse_proto(const struct flow_rule *rule, struct nbl_flow_pattern_conf *filter, const struct nbl_common_info *common) @@ -27,14 +43,15 @@ static int nbl_tc_parse_proto(const struct flow_rule *rule, filter->key_flag |= NBL_FLOW_KEY_ETHERTYPE_FLAG; } if (match.key->ip_proto & match.mask->ip_proto) { - nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow with ip proto match is not supported\n"); - return -EOPNOTSUPP; + filter->key_flag |= NBL_FLOW_KEY_PROTOCOL_FLAG; + filter->input.ip.proto = match.key->ip_proto; + filter->input.ip_mask.proto = match.mask->ip_proto; } nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow parse proto l2_data.ether_type=0x%04x, l2_mask.ether_type=0x%04x", - filter->input.l2_data.ether_type, filter->input.l2_mask.ether_type); + "tc flow parse proto (%u) l2_data.ether_type=0x%04x, l2_mask.ether_type=0x%04x", + match.key->ip_proto, filter->input.l2_data.ether_type, + filter->input.l2_mask.ether_type); return 0; } @@ -86,13 +103,15 @@ static int nbl_tc_parse_control(const struct flow_rule *rule, flow_rule_match_control(rule, &match); if (match.key->addr_type & match.mask->addr_type) { - filter->input.l2_data.ether_type = ntohs(match.key->addr_type); - filter->input.l2_mask.ether_type = ntohs(match.mask->addr_type); + if (!filter->input.l2_data.ether_type) { + filter->input.l2_data.ether_type = ntohs(match.key->addr_type); + filter->input.l2_mask.ether_type = ntohs(match.mask->addr_type); + } } nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow parse conrtol.ether_type=0x%04x", - filter->input.l2_data.ether_type); + "tc flow parse conrtol.ether_type=0x%04x, flag:%x", + filter->input.l2_data.ether_type, match.key->flags); return 0; } @@ -146,8 +165,8 @@ static int nbl_tc_parse_tunnel_ports(const struct flow_rule *rule, flow_rule_match_enc_ports(rule, &enc_ports); if (memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) { - nbl_err(common, NBL_DEBUG_FLOW, "nbl tc parse tunnel err: " - "udp tunnel decap filter must match dst_port fully.\n"); + nbl_err(common, NBL_DEBUG_FLOW, "nbl tc parse tunnel err: "); + nbl_err(common, NBL_DEBUG_FLOW, "udp tunnel decap must match dst_port fully.\n"); return -EOPNOTSUPP; } @@ -307,13 +326,11 @@ static int nbl_tc_parse_tunnel_control(const struct flow_rule *rule, filter->key_flag |= NBL_FLOW_KEY_T_DIPV4_FLAG; filter->key_flag |= NBL_FLOW_KEY_T_OPT_DATA_FLAG; filter->key_flag |= NBL_FLOW_KEY_T_OPT_CLASS_FLAG; - - nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl ctl ip: " - "sip:0x%x/0x%x, dip:0x%x/0x%x.\n", - filter->input.ip_outer.src_ip.addr, - filter->input.ip_mask_outer.src_ip.addr, - filter->input.ip_outer.dst_ip.addr, - filter->input.ip_mask_outer.dst_ip.addr); + nbl_debug(common, NBL_DEBUG_FLOW, "outer tnl ip: sip:0x%x/0x%x, dip:0x%x/0x%x.\n", + filter->input.ip_outer.src_ip.addr, + filter->input.ip_mask_outer.src_ip.addr, + filter->input.ip_outer.dst_ip.addr, + filter->input.ip_mask_outer.dst_ip.addr); 
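+	/*
+	 * Only offload the decap rule when the tunnel endpoint can be tied to
+	 * this device: a LAG input port is accepted directly, otherwise the
+	 * checks below must set dev_ok; everything else is rejected with
+	 * -EOPNOTSUPP so the rule stays in software.
+	 */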
 	if (filter->input.port & NBL_FLOW_IN_PORT_TYPE_LAG) {
 		dev_ok = true;
 	} else {
@@ -363,13 +380,145 @@ static int nbl_tc_parse_tunnel_control(const struct flow_rule *rule,
 					  ip6_addrs.key->dst);
 		}
 	}
-
 	if (dev_ok)
 		return 0;
 	else
 		return -EOPNOTSUPP;
 }
 
+static int nbl_tc_parse_ip(const struct flow_rule *rule,
+			   struct nbl_flow_pattern_conf *filter,
+			   const struct nbl_common_info *common)
+{
+	struct flow_match_ip ip;
+
+	flow_rule_match_ip(rule, &ip);
+	filter->input.ip.tos = ip.key->tos;
+	filter->input.ip.ttl = ip.key->ttl;
+	filter->input.ip_mask.tos = ip.mask->tos;
+	filter->input.ip_mask.ttl = ip.mask->ttl;
+	filter->key_flag |= NBL_FLOW_KEY_TTL_FLAG;
+	filter->key_flag |= NBL_FLOW_KEY_TOS_FLAG;
+	filter->key_flag |= NBL_FLOW_KEY_DSCP_FLAG;
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "tos is %u, ttl is %u", ip.key->tos, ip.key->ttl);
+	return 0;
+}
+
+static int nbl_tc_parse_ip4(const struct flow_rule *rule,
+			    struct nbl_flow_pattern_conf *filter,
+			    const struct nbl_common_info *common)
+{
+	struct flow_match_ipv4_addrs ip_addrs;
+
+	flow_rule_match_ipv4_addrs(rule, &ip_addrs);
+	if (ip_addrs.mask->dst == 0 || ip_addrs.key->dst == 0) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "dst ipv4:key 0x%x masked 0x%x",
+			  ip_addrs.key->dst, ip_addrs.mask->dst);
+		return 0;
+	} else if (ip_addrs.mask->dst != NBL_FLOW_TABLE_IPV4_DEFAULT_MASK) {
+		nbl_info(common, NBL_DEBUG_FLOW, "dst ipv4:0x%x mask:0x%x not support",
+			 ip_addrs.key->dst, ip_addrs.mask->dst);
+		return -EINVAL;
+	}
+
+	filter->input.ip.ip_ver = NBL_IP_VERSION_V4;
+	filter->key_flag |= NBL_FLOW_KEY_DIPV4_FLAG;
+	filter->key_flag |= NBL_FLOW_KEY_SIPV4_FLAG;
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl parse dst ipv4:0x%x mask:0x%x",
+		  ip_addrs.key->dst, ip_addrs.mask->dst);
+	filter->input.ip.src_ip.addr = be32_to_cpu(ip_addrs.key->src);
+	filter->input.ip_mask.src_ip.addr = ip_addrs.mask->src;
+	filter->input.ip.dst_ip.addr = be32_to_cpu(ip_addrs.key->dst);
+	filter->input.ip_mask.dst_ip.addr = ip_addrs.mask->dst;
+	return 0;
+}
+
+static int nbl_tc_parse_ip6(const struct flow_rule *rule,
+			    struct nbl_flow_pattern_conf *filter,
+			    const struct nbl_common_info *common)
+{
+	struct flow_match_ipv6_addrs ip6_addrs;
+	int idx = 0;
+	int max_idx = NBL_IPV6_ADDR_LEN_AS_U8 - 1;
+	u8 mask_ip6[NBL_IPV6_ADDR_LEN_AS_U8] = {0};
+	u8 exact_ip6[NBL_IPV6_ADDR_LEN_AS_U8];
+
+	memset(exact_ip6, 0xff, sizeof(exact_ip6));
+	flow_rule_match_ipv6_addrs(rule, &ip6_addrs);
+	if (!memcmp(mask_ip6, ip6_addrs.mask->dst.in6_u.u6_addr8, NBL_IPV6_ADDR_LEN_AS_U8) ||
+	    !memcmp(mask_ip6, ip6_addrs.key->dst.in6_u.u6_addr8, NBL_IPV6_ADDR_LEN_AS_U8)) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "dst ipv6:0x%x-0x%x-0x%x-0x%x masked",
+			  ip6_addrs.key->dst.in6_u.u6_addr32[0],
+			  ip6_addrs.key->dst.in6_u.u6_addr32[1],
+			  ip6_addrs.key->dst.in6_u.u6_addr32[2],
+			  ip6_addrs.key->dst.in6_u.u6_addr32[3]);
+		return 0;
+	} else if (memcmp(exact_ip6, ip6_addrs.mask->dst.in6_u.u6_addr8, sizeof(exact_ip6))) {
+		nbl_info(common, NBL_DEBUG_FLOW, "dst ipv6:0x%x-0x%x-0x%x-0x%x mask:0x%x-0x%x-0x%x-0x%x not support",
+			 ip6_addrs.key->dst.in6_u.u6_addr32[0],
+			 ip6_addrs.key->dst.in6_u.u6_addr32[1],
+			 ip6_addrs.key->dst.in6_u.u6_addr32[2],
+			 ip6_addrs.key->dst.in6_u.u6_addr32[3],
+			 ip6_addrs.mask->dst.in6_u.u6_addr32[0],
+			 ip6_addrs.mask->dst.in6_u.u6_addr32[1],
+			 ip6_addrs.mask->dst.in6_u.u6_addr32[2],
+			 ip6_addrs.mask->dst.in6_u.u6_addr32[3]);
+		return -EINVAL;
+	}
+
+	filter->input.ip.ip_ver = NBL_IP_VERSION_V6;
+	filter->key_flag |= NBL_FLOW_KEY_DIPV6_FLAG;
+	filter->key_flag |= NBL_FLOW_KEY_SIPV6_FLAG;
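+	/*
+	 * The copy loop below stores the IPv6 addresses byte-reversed
+	 * relative to struct in6_addr: v6_addr[0] receives u6_addr8[15],
+	 * and so on up to v6_addr[15] = u6_addr8[0].
+	 */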
+	filter->key_flag |= NBL_FLOW_KEY_HOPLIMIT_FLAG;
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl parse dst ipv6:0x%x-0x%x-0x%x-0x%x mask:0x%x-0x%x-0x%x-0x%x",
+		  ip6_addrs.key->dst.in6_u.u6_addr32[0], ip6_addrs.key->dst.in6_u.u6_addr32[1],
+		  ip6_addrs.key->dst.in6_u.u6_addr32[2], ip6_addrs.key->dst.in6_u.u6_addr32[3],
+		  ip6_addrs.mask->dst.in6_u.u6_addr32[0], ip6_addrs.mask->dst.in6_u.u6_addr32[1],
+		  ip6_addrs.mask->dst.in6_u.u6_addr32[2], ip6_addrs.mask->dst.in6_u.u6_addr32[3]);
+	for (idx = 0; idx < NBL_IPV6_ADDR_LEN_AS_U8; idx++) {
+		filter->input.ip.src_ip.v6_addr[idx] =
+			ip6_addrs.key->src.in6_u.u6_addr8[max_idx - idx];
+		filter->input.ip_mask.src_ip.v6_addr[idx] =
+			ip6_addrs.mask->src.in6_u.u6_addr8[max_idx - idx];
+		filter->input.ip.dst_ip.v6_addr[idx] =
+			ip6_addrs.key->dst.in6_u.u6_addr8[max_idx - idx];
+		filter->input.ip_mask.dst_ip.v6_addr[idx] =
+			ip6_addrs.mask->dst.in6_u.u6_addr8[max_idx - idx];
+	}
+
+	return 0;
+}
+
+static int nbl_tc_parse_ports(const struct flow_rule *rule,
+			      struct nbl_flow_pattern_conf *filter,
+			      const struct nbl_common_info *common)
+{
+	struct flow_match_ports port;
+
+	flow_rule_match_ports(rule, &port);
+	if (!port.mask->dst && !port.mask->src) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "src and dst port:%d-%d masked",
+			  port.key->src, port.key->dst);
+		return 0;
+	} else if (port.mask->dst != NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK ||
+		   port.mask->src != NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK) {
+		nbl_info(common, NBL_DEBUG_FLOW, "src and dst port mask:%d-%d not support",
+			 port.mask->src, port.mask->dst);
+		return -EINVAL;
+	}
+
+	filter->key_flag |= NBL_FLOW_KEY_DSTPORT_FLAG;
+	filter->key_flag |= NBL_FLOW_KEY_SRCPORT_FLAG;
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl parse src and dst port key:%d-%d, mask:%d-%d",
+		  port.key->src, port.key->dst, port.mask->src, port.mask->dst);
+	filter->input.l4.dst_port = be16_to_cpu(port.key->dst);
+	filter->input.l4_mask.dst_port = be16_to_cpu(port.mask->dst);
+	filter->input.l4.src_port = be16_to_cpu(port.key->src);
+	filter->input.l4_mask.src_port = be16_to_cpu(port.mask->src);
+	return 0;
+}
+
 static struct nbl_tc_flow_parse_pattern parse_pattern_list[] = {
 	{ FLOW_DISSECTOR_KEY_BASIC, nbl_tc_parse_proto },
 	{ FLOW_DISSECTOR_KEY_ETH_ADDRS, nbl_tc_parse_eth },
@@ -380,6 +529,10 @@ static struct nbl_tc_flow_parse_pattern parse_pattern_list[] = {
 	{ FLOW_DISSECTOR_KEY_ENC_PORTS, nbl_tc_parse_tunnel_ports },
 	{ FLOW_DISSECTOR_KEY_ENC_KEYID, nbl_tc_parse_tunnel_keyid },
 	{ FLOW_DISSECTOR_KEY_ENC_CONTROL, nbl_tc_parse_tunnel_control },
+	{ FLOW_DISSECTOR_KEY_IPV4_ADDRS, nbl_tc_parse_ip4 },
+	{ FLOW_DISSECTOR_KEY_IPV6_ADDRS, nbl_tc_parse_ip6 },
+	{ FLOW_DISSECTOR_KEY_IP, nbl_tc_parse_ip },
+	{ FLOW_DISSECTOR_KEY_PORTS, nbl_tc_parse_ports },
 };
 
 static int nbl_tc_flow_set_out_param(struct net_device *out_dev,
@@ -407,8 +560,8 @@ static int nbl_tc_flow_set_out_param(struct net_device *out_dev,
 		return -EINVAL;
 
 	if (common->tc_inst_id != dev_priv->adapter->common.tc_inst_id) {
-		nbl_info(common, NBL_DEBUG_FLOW, "tc flow rule in different nic is not supported\n");
-		return -EINVAL;
+		nbl_debug(common, NBL_DEBUG_FLOW, "tc flow rule in different nic is not supported\n");
+		return -EOPNOTSUPP;
 	}
 
 	serv_mgt = NBL_ADAPTER_TO_SERV_MGT(dev_priv->adapter);
@@ -581,6 +734,9 @@ static int nbl_tc_parse_pattern(struct nbl_service_mgt *serv_mgt,
 				BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
 				BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
 				BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+				BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+				BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { nbl_debug(common, NBL_DEBUG_FLOW, "tc flow key used: 0x%llx is not supported\n", @@ -590,6 +746,8 @@ static int nbl_tc_parse_pattern(struct nbl_service_mgt *serv_mgt, for (i = 0; i < ARRAY_SIZE(parse_pattern_list); i++) { if (flow_rule_match_key(rule, parse_pattern_list[i].pattern_type)) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow key %d\n", + parse_pattern_list[i].pattern_type); ret = parse_pattern_list[i].parse_func(rule, filter, common); if (ret != 0) @@ -661,6 +819,239 @@ static int nbl_tc_fill_encap_out_info(struct nbl_tc_flow_param *param, return 0; } +static inline bool nbl_tc_is_dmac_offset(u32 oft) +{ + return (oft < 6); +} + +static inline bool nbl_tc_is_smac_offset(u32 oft) +{ + return (oft >= 6 && oft < 12); +} + +static inline bool nbl_tc_is_sip_offset(u32 oft) +{ + return (oft >= 12 && oft < 16); +} + +static inline bool nbl_tc_is_dip_offset(u32 oft) +{ + return (oft >= 16 && oft < 20); +} + +static inline bool nbl_tc_is_sip6_offset(u32 oft) +{ + return (oft >= 8 && oft < 24); +} + +static inline bool nbl_tc_is_dip6_offset(u32 oft) +{ + return (oft >= 24 && oft < 40); +} + +static inline bool nbl_tc_is_sp_offset(u32 oft) +{ + return (oft >= 0 && oft < 2); +} + +static inline bool nbl_tc_is_dp_offset(u32 oft) +{ + return (oft >= 2 && oft < 4); +} + +static int nbl_tc_pedit_parse_eth(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dmac_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_DST_MAC; + else if (nbl_tc_is_smac_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_SRC_MAC; + else + ret = -EOPNOTSUPP; + + return ret; +} + +static int nbl_tc_pedit_parse_ip(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dip_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV4_DST_IP; + else if (nbl_tc_is_sip_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV4_SRC_IP; + else + /* we only support sip & dip field now */ + ret = -EOPNOTSUPP; + + return ret; +} + +static int nbl_tc_pedit_parse_ip6(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dip6_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV6_DST_IP; + else if (nbl_tc_is_sip6_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_IPV6_SRC_IP; + else + /* we only support sip6 & dip6 field now */ + ret = -EOPNOTSUPP; + + return ret; +} + +static int nbl_tc_pedit_parse_port(u32 offset, u64 *act_flag) +{ + int ret = 0; + + if (nbl_tc_is_dp_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_DST_PORT; + else if (nbl_tc_is_sp_offset(offset)) + *act_flag |= NBL_FLOW_ACTION_SET_SRC_PORT; + else + /* we only support src & dst port field now */ + ret = -EOPNOTSUPP; + + return ret; +} + +static int nbl_tc_pedit_check_field(const struct nbl_common_info *common, u32 offset, + u8 pedit_type, u64 *pedit_flag) +{ + int ret = 0; + + switch (pedit_type) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + ret = nbl_tc_pedit_parse_eth(offset, pedit_flag); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + ret = nbl_tc_pedit_parse_ip(offset, pedit_flag); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + ret = nbl_tc_pedit_parse_ip6(offset, pedit_flag); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + ret = nbl_tc_pedit_parse_port(offset, pedit_flag); + break; + default: + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit not support %d\n", pedit_type); + ret = -EOPNOTSUPP; + } + + if (ret) + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type(%d)-oft(%u) err\n", + 
pedit_type, offset); + return ret; +} + +static int nbl_tc_pedit_set_val(u8 htype, u32 mask, u32 val, u32 offset, + struct nbl_tc_pedit_info *pedit_info) +{ + u32 *cur_pmask = (u32 *)(nbl_pedit_header(&pedit_info->mask, htype) + offset); + u32 *cur_pval = (u32 *)(nbl_pedit_header(&pedit_info->val, htype) + offset); + + if (*cur_pmask & mask) + return -EINVAL; + + *cur_pmask |= mask; + *cur_pval |= (val & mask); + + return 0; +} + +static int nbl_tc_pedit_update_oft(u32 *oft, u32 mask) +{ + int ret = 0; + + if (NBL_TC_MASK_FORWARD_OFT0(mask)) + *oft += 0; + else if (NBL_TC_MASK_FORWARD_OFT1(mask)) + *oft += 1; + else if (NBL_TC_MASK_FORWARD_OFT2(mask)) + *oft += 2; + else if (NBL_TC_MASK_FORWARD_OFT3(mask)) + *oft += 3; + else if (NBL_TC_MASK_BACKWARD_OFT3(mask)) + *oft = *oft > 3 ? (*oft - 3) : *oft; + else if (NBL_TC_MASK_BACKWARD_OFT2(mask)) + *oft = *oft > 2 ? (*oft - 2) : *oft; + else if (NBL_TC_MASK_BACKWARD_OFT1(mask)) + *oft = *oft > 1 ? (*oft - 1) : *oft; + else + ret = -EINVAL; + return ret; +} + +static int nbl_tc_pedit_parse_edit_info(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + struct nbl_tc_flow_param *param) +{ + int ret = 0; + u8 htype = (u8)act_entry->mangle.htype; + u32 mask = act_entry->mangle.mask; + u32 val = act_entry->mangle.val; + u32 offset = act_entry->mangle.offset; + const struct nbl_common_info *common = param->common; + + if (htype == FLOW_ACT_MANGLE_UNSPEC) { + nbl_info(common, NBL_DEBUG_FLOW, "legacy pedit isn't offloaded"); + ret = -EOPNOTSUPP; + goto pedit_err; + } + + if (htype > FLOW_ACT_MANGLE_HDR_TYPE_UDP) { + nbl_info(common, NBL_DEBUG_FLOW, "pedit:%d isn't offloaded", htype); + ret = -EOPNOTSUPP; + goto pedit_err; + } + + /* try to get the located pedit value, drop it if we got a bad location */ + ret = nbl_tc_pedit_set_val(htype, ~mask, val, offset, &rule_act->tc_pedit_info); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit err: disallow edit on same location"); + goto pedit_err; + } + + ret = nbl_tc_pedit_update_oft(&offset, mask); + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type-val-mask-oft->%d-%u-%x-%u %s", + htype, val, mask, offset, ret ?
"failed" : "success"); + if (ret) + goto pedit_err; + if (htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) + NBL_TC_PEDIT_SET_NODE_RES_PRO(rule_act->tc_pedit_info.pedit_node); + + /* now set action flag if we supported it */ + ret = nbl_tc_pedit_check_field(common, offset, htype, &rule_act->flag); + if (ret) + goto pedit_err; + + NBL_TC_PEDIT_INC_NODE_RES_EDITS(rule_act->tc_pedit_info.pedit_node); +pedit_err: + return ret; +} + +static int nbl_tc_handle_action_pedit(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + return nbl_tc_pedit_parse_edit_info(rule_act, act_entry, param); +} + +static int nbl_tc_handle_action_csum(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + return 0; +} + static int nbl_tc_handle_action_port_id(struct nbl_rule_action *rule_act, const struct flow_action_entry *act_entry, @@ -850,6 +1241,14 @@ const struct nbl_tc_flow_action_driver_ops nbl_tunnel_decap_act = { .act_update = nbl_tc_handle_action_tun_decap, }; +const struct nbl_tc_flow_action_driver_ops nbl_pedit_act = { + .act_update = nbl_tc_handle_action_pedit, +}; + +const struct nbl_tc_flow_action_driver_ops nbl_csum_act = { + .act_update = nbl_tc_handle_action_csum, +}; + const struct nbl_tc_flow_action_driver_ops *nbl_act_ops[] = { [FLOW_ACTION_REDIRECT] = &nbl_port_id_act, [FLOW_ACTION_DROP] = &nbl_drop, @@ -858,6 +1257,8 @@ const struct nbl_tc_flow_action_driver_ops *nbl_act_ops[] = { [FLOW_ACTION_VLAN_POP] = &nbl_pop_vlan, [FLOW_ACTION_TUNNEL_ENCAP] = &nbl_tunnel_encap_act, [FLOW_ACTION_TUNNEL_DECAP] = &nbl_tunnel_decap_act, + [FLOW_ACTION_MANGLE] = &nbl_pedit_act, + [FLOW_ACTION_CSUM] = &nbl_csum_act, }; /** @@ -908,7 +1309,8 @@ static int nbl_tc_parse_action(struct nbl_service_mgt *serv_mgt, int ret = 0; flow_action_for_each(i, act_entry, &rule->action) { - nbl_debug(common, NBL_DEBUG_FLOW, "tc flow parse action id %d\n", act_entry->id); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow parse action id %d, act idx %d\n", + act_entry->id, i); switch (act_entry->id) { case FLOW_ACTION_REDIRECT: case FLOW_ACTION_DROP: @@ -917,6 +1319,8 @@ static int nbl_tc_parse_action(struct nbl_service_mgt *serv_mgt, case FLOW_ACTION_VLAN_POP: case FLOW_ACTION_TUNNEL_ENCAP: case FLOW_ACTION_TUNNEL_DECAP: + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_CSUM: ret = nbl_tc_parse_action_by_type(rule_act, act_entry, act_entry->id, filter, param); if (ret) @@ -939,7 +1343,7 @@ static int nbl_serv_add_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); struct nbl_flow_pattern_conf *filter = NULL; struct nbl_rule_action *act = NULL; - struct nbl_tc_flow_param param = {0}; + struct nbl_tc_flow_param *param = NULL; int ret = 0; int ret_act = 0; @@ -949,32 +1353,43 @@ static int nbl_serv_add_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops)) return -EOPNOTSUPP; - param.key.cookie = f->cookie; - ret = disp_ops->flow_index_lookup(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param.key); + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + return -ENOMEM; + param->key.cookie = f->cookie; + ret = disp_ops->flow_index_lookup(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param->key); if (!ret) { nbl_debug(common, NBL_DEBUG_FLOW, - 
"tc flow cookie %llx has already add, do not add again!\n", - param.key.cookie); - return -EEXIST; + "tc flow cookie %llx has already add, do not add again, dev %s.\n", + param->key.cookie, netdev_name(priv->netdev)); + ret = -EEXIST; + goto ret_param_fail; } - nbl_debug(common, NBL_DEBUG_FLOW, "tc flow add cls, cookie=%lx\n", f->cookie); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow add cls, cookie=%lx, dev %s.\n", + f->cookie, netdev_name(priv->netdev)); - if (nbl_tc_flow_init_param(priv, f, common, ¶m)) - return -EINVAL; + if (nbl_tc_flow_init_param(priv, f, common, param)) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init param failed, dev %s.\n", + netdev_name(priv->netdev)); + ret = -EINVAL; + goto ret_param_fail; + } filter = kzalloc(sizeof(*filter), GFP_KERNEL); - if (!filter) - return -ENOMEM; + if (!filter) { + ret = -ENOMEM; + goto ret_param_fail; + } - param.common = common; - param.serv_mgt = serv_mgt; + param->common = common; + param->serv_mgt = serv_mgt; filter->input_dev = priv->netdev; - ret = nbl_tc_parse_pattern(serv_mgt, f, filter, ¶m); + ret = nbl_tc_parse_pattern(serv_mgt, f, filter, param); if (ret) { - nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed to parse " - "pattern, ret %d.\n", ret); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed pattern, dev %s, ret %d.\n", + netdev_name(priv->netdev), ret); ret = -EINVAL; goto ret_filter_fail; } @@ -986,17 +1401,18 @@ static int nbl_serv_add_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls } act->in_port = priv->netdev; - ret = nbl_tc_parse_action(serv_mgt, f, filter, act, ¶m); + ret = nbl_tc_parse_action(serv_mgt, f, filter, act, param); if (ret) { - nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed to parse action.\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed action dev %s, ret %d.\n", + netdev_name(priv->netdev), ret); ret = -EINVAL; goto ret_act_fail; } - memcpy(¶m.filter, filter, sizeof(param.filter)); - memcpy(¶m.act, act, sizeof(param.act)); + memcpy(¶m->filter, filter, sizeof(param->filter)); + memcpy(¶m->act, act, sizeof(param->act)); - ret = disp_ops->add_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ¶m); + ret = disp_ops->add_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); ret_act_fail: /* free edit act */ @@ -1004,15 +1420,17 @@ static int nbl_serv_add_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls act->encap_parse_ok) { ret_act = disp_ops->tc_tun_encap_del(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &act->encap_key); - if (ret_act) - nbl_debug(common, NBL_DEBUG_FLOW, "encap del err, encap_idx:%d, ret:%d", - act->encap_idx, ret_act); + if (ret_act) { + nbl_debug(common, NBL_DEBUG_FLOW, "add ret %d, idx:%d, encap del ret:%d", + ret, act->encap_idx, ret_act); + } } kfree(act); ret_filter_fail: kfree(filter); - +ret_param_fail: + kfree(param); return ret; } @@ -1021,16 +1439,20 @@ static int nbl_serv_del_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter); struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); - struct nbl_tc_flow_param param = {0}; + struct nbl_tc_flow_param *param = NULL; int ret = 0; if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops)) return -EOPNOTSUPP; + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + return -ENOMEM; nbl_debug(common, NBL_DEBUG_FLOW, "tc flow del cls, cookie=%lx\n", f->cookie); - param.key.cookie = f->cookie; + param->key.cookie = 
f->cookie; - ret = disp_ops->del_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ¶m); + ret = disp_ops->del_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); + kfree(param); return ret; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h index ea8e29551ba4..0619389d8751 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h @@ -1,7 +1,7 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* - * Copyright (c) 2023 nebula-matrix Limited. - * + * Copyright (c) 2022 nebula-matrix Limited. + * Author: */ #ifndef _NBL_TC_OFFLOAD_H @@ -9,6 +9,15 @@ #include "nbl_service.h" +#define NBL_TC_MASK_FORWARD_OFT3(mask) ((mask) == 0xffffff) +#define NBL_TC_MASK_FORWARD_OFT2(mask) ((mask) == 0xffff) +#define NBL_TC_MASK_FORWARD_OFT1(mask) ((mask) == 0xff) +#define NBL_TC_MASK_FORWARD_OFT0(mask) ((mask) == 0) + +#define NBL_TC_MASK_BACKWARD_OFT3(mask) ((mask) == 0xffffff00) +#define NBL_TC_MASK_BACKWARD_OFT2(mask) ((mask) == 0xffff0000) +#define NBL_TC_MASK_BACKWARD_OFT1(mask) ((mask) == 0xff000000) + int nbl_serv_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv); int nbl_serv_indr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c index 4265da86ef30..b0b177a0c776 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c @@ -1,3 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + #include #include #include @@ -29,7 +35,7 @@ static int nbl_copy_tun_info(const struct ip_tunnel_info *tun_info, static struct nbl_tc_tunnel *nbl_tc_get_tunnel(struct net_device *tunnel_dev) { if (netif_is_vxlan(tunnel_dev)) - return &vxlan_tunnel; + return &nbl_vxlan_tunnel; else return NULL; } @@ -90,9 +96,8 @@ static int nbl_route_lookup_ipv4(const struct nbl_common_info *common, if (is_vlan_dev(out_dev)) { parent_dev = vlan_dev_priv(out_dev)->real_dev; if (is_vlan_dev(parent_dev)) { - nbl_debug(common, NBL_DEBUG_FLOW, "ipv4 encap out dev is %s, " - "parent_dev:%s is vlan, not support two vlan\n", - out_dev->name, parent_dev ? parent_dev->name : "NULL"); + nbl_debug(common, NBL_DEBUG_FLOW, "encap o_dev is %s p_dev:%s\n", + out_dev->name, parent_dev ? parent_dev->name : "NULL"); ret = -EOPNOTSUPP; goto rt_err; } @@ -117,8 +122,7 @@ static int nbl_route_lookup_ipv4(const struct nbl_common_info *common, if (!tun_route_info->ttl) tun_route_info->ttl = (u8)ip4_dst_hoplimit(&rt->dst); - nbl_debug(common, NBL_DEBUG_FLOW, "route lookup: rt->rt_type:%u, " - "rt->dst.dev:%s, rt->dst.ops:%p, real_dev:%s, ttl:%u", + nbl_debug(common, NBL_DEBUG_FLOW, "route: type:%u, dev:%s, ops:%p, real_dev:%s, ttl:%u", rt->rt_type, rt->dst.dev ? rt->dst.dev->name : "null", rt->dst.ops, real_out_dev ? 
real_out_dev->name : "NULL", tun_route_info->ttl); @@ -161,8 +165,7 @@ static char *nbl_tc_tun_gen_eth_hdr(char *buf, struct net_device *dev, eth->h_proto = vlan_dev_vlan_proto(dev); vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev)); vlan->h_vlan_encapsulated_proto = htons(proto); - nbl_debug(common, NBL_DEBUG_FLOW, "output is vlan dev: " - "vlan_TCI:0x%x, vlan_proto:0x%x, eth_proto:0x%x", + nbl_debug(common, NBL_DEBUG_FLOW, "TCI:0x%x, vlan_proto:0x%x, eth_proto:0x%x", vlan->h_vlan_TCI, vlan->h_vlan_encapsulated_proto, eth->h_proto); } else { @@ -318,9 +321,8 @@ static int nbl_route_lookup_ipv6(const struct nbl_common_info *common, parent_dev = vlan_dev_priv(out_dev)->real_dev; real_out_dev = vlan_dev_real_dev(out_dev); if (is_vlan_dev(parent_dev)) { - nbl_debug(common, NBL_DEBUG_FLOW, "ipv6 encap out dev is %s, " - "parent_dev:%s is vlan, not support two vlan\n", - out_dev->name, parent_dev ? parent_dev->name : "NULL"); + nbl_debug(common, NBL_DEBUG_FLOW, "ipv6 encap o_dev is %s, p_dev:%s\n", + out_dev->name, parent_dev ? parent_dev->name : "NULL"); ret = -EOPNOTSUPP; goto err; } @@ -549,7 +551,7 @@ int nbl_tc_tun_parse_encap_info(struct nbl_rule_action *rule_act, return ret; } -struct nbl_tc_tunnel vxlan_tunnel = { +struct nbl_tc_tunnel nbl_vxlan_tunnel = { .tunnel_type = NBL_TC_TUNNEL_TYPE_VXLAN, .generate_tunnel_hdr = nbl_tc_tun_gen_tunnel_header_vxlan, .get_tun_hlen = nbl_tc_tun_get_vxlan_hdr_len, diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h index 81a13ead91c2..f7e5164e6ab4 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + #ifndef __NBL_TC_TUN_H__ #define __NBL_TC_TUN_H__ @@ -49,7 +55,7 @@ struct nbl_tc_tunnel { int (*get_tun_hlen)(void); }; -extern struct nbl_tc_tunnel vxlan_tunnel; +extern struct nbl_tc_tunnel nbl_vxlan_tunnel; int nbl_tc_tun_parse_encap_info(struct nbl_rule_action *rule_act, struct nbl_tc_flow_param *param, diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h index 2fd12ecfadcb..ea86265e0be3 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -69,6 +69,8 @@ struct nbl_core_dev_info { /* Info */ u32 mem_type; u16 rdma_cap_num; + int (*change_mtu_notify)(struct auxiliary_device *adev, int new_mtu); + bool mirror_enable; }; struct nbl_aux_dev { @@ -82,6 +84,7 @@ struct nbl_aux_dev { ssize_t (*qos_cfg_store)(struct auxiliary_device *adev, int offset, const char *buf, size_t count); ssize_t (*qos_cfg_show)(struct auxiliary_device *adev, int offset, char *buf); + int (*mirror_enable_notify)(struct auxiliary_device *adev, bool enable); }; #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c index 65eec7f69c5b..49c6a599d558 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c @@ -349,7 +349,7 @@ static int nbl_uipsec_insert_em_ht(struct nbl_ipsec_ht_mng *ipsec_ht_mng, node = ipsec_ht_mng->hash_map[ht_index]; if (!node) { - node = kzalloc(sizeof(*node), GFP_KERNEL); + node = kzalloc(sizeof(*node), GFP_ATOMIC); if (!node) return -ENOMEM; ipsec_ht_mng->hash_map[ht_index] = node; @@ -449,7 +449,7 @@ static int nbl_res_add_ipsec_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) if (rule->index == index) return -EEXIST; - rule = kzalloc(sizeof(*rule), GFP_KERNEL); + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); if (!rule) return -ENOMEM; @@ -559,7 +559,7 @@ static void nbl_res_del_ipsec_rx_flow(void *priv, u32 index) if (rule->index == index) break; - if (nbl_list_entry_is_head(rule, &accel_mgt->uprbac_head, node)) + if (list_entry_is_head(rule, &accel_mgt->uprbac_head, node)) return; nbl_accel_del_uipsec_rule(res_mgt, &rule->uipsec_entry); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h index d8fc9fcc95d9..7bb096278f4e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2023 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c index ef9e1e3b66a9..951c4020e700 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c @@ -18,7 +18,7 @@ static int nbl_res_adminq_check_net_ring_num(struct nbl_resource_mgt *res_mgt, u32 sum = 0, pf_real_num = 0, vf_real_num = 0; int i; - pf_real_num = NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + pf_real_num = NBL_VSI_PF_LEGAL_QUEUE_NUM(param->pf_def_max_net_qp_num); vf_real_num = NBL_VSI_VF_REAL_QUEUE_NUM(param->vf_def_max_net_qp_num); if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC || vf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) @@ -27,12 +27,18 @@ static int nbl_res_adminq_check_net_ring_num(struct nbl_resource_mgt *res_mgt, /* TODO: should we consider when pf_num is 8? */ for (i = 0; i < NBL_COMMON_TO_ETH_MODE(common); i++) { pf_real_num = param->net_max_qp_num[i] ? - NBL_VSI_PF_REAL_QUEUE_NUM(param->net_max_qp_num[i]) : - NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + NBL_VSI_PF_LEGAL_QUEUE_NUM(param->net_max_qp_num[i]) : + NBL_VSI_PF_LEGAL_QUEUE_NUM(param->pf_def_max_net_qp_num); if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) return -EINVAL; + pf_real_num = param->net_max_qp_num[i] ? 
+ NBL_VSI_PF_MAX_QUEUE_NUM(param->net_max_qp_num[i]) : + NBL_VSI_PF_MAX_QUEUE_NUM(param->pf_def_max_net_qp_num); + if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + pf_real_num = NBL_MAX_TXRX_QUEUE_PER_FUNC; + sum += pf_real_num; } @@ -200,6 +206,7 @@ static int nbl_res_adminq_set_module_eeprom_info(struct nbl_resource_mgt *res_mg param.page = page; param.bank = bank; param.write = 1; + param.version = 1; param.offset = offset + byte_offset; param.length = xfer_size; memcpy(param.data, data + byte_offset, xfer_size); @@ -209,12 +216,11 @@ static int nbl_res_adminq_set_module_eeprom_info(struct nbl_resource_mgt *res_mg ¶m, sizeof(param), NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, i2c_address:%d, page:%d, bank:%d," - " offset:%d, length:%d\n", + dev_err(dev, "adminq send msg failed: %d, msg: 0x%x, eth_id:%d, addr:%d,", ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, - eth_info->logic_eth_id[eth_id], - i2c_address, page, bank, offset + byte_offset, xfer_size); + eth_info->logic_eth_id[eth_id], i2c_address); + dev_err(dev, "page:%d, bank:%d, offset:%d, length:%d\n", + page, bank, offset + byte_offset, xfer_size); } byte_offset += xfer_size; } while (!ret && data_length > 0); @@ -280,6 +286,7 @@ static int nbl_res_adminq_get_module_eeprom_info(struct nbl_resource_mgt *res_mg param.page = page; param.bank = bank; param.write = 0; + param.version = 1; param.offset = offset + byte_offset; param.length = xfer_size; @@ -288,12 +295,11 @@ static int nbl_res_adminq_get_module_eeprom_info(struct nbl_resource_mgt *res_mg ¶m, sizeof(param), data + byte_offset, xfer_size, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, i2c_address:%d, page:%d, bank:%d," - " offset:%d, length:%d\n", + dev_err(dev, "adminq send msg failed: %d, msg: 0x%x, eth_id:%d, addr:%d,", ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, - eth_info->logic_eth_id[eth_id], - i2c_address, page, bank, offset + byte_offset, xfer_size); + eth_info->logic_eth_id[eth_id], i2c_address); + dev_err(dev, "page:%d, bank:%d, offset:%d, length:%d\n", + page, bank, offset + byte_offset, xfer_size); } byte_offset += xfer_size; } while (!ret && data_length > 0); @@ -683,8 +689,7 @@ static int nbl_res_adminq_set_sfp_state(void *priv, u8 eth_id, u8 state) param, param_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, sfp %s\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, sfp %s\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id], state ? "on" : "off"); @@ -733,8 +738,7 @@ static int nbl_res_adminq_setup_loopback(void *priv, u32 eth_id, u32 enable) param, param_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, %s eth loopback\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, %s eth loopback\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id], enable ? 
"enable" : "disable"); @@ -825,8 +829,7 @@ static int nbl_res_adminq_get_port_attributes(void *priv) param, param_len, (void *)&port_caps, sizeof(port_caps), 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, get_port_caps\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, get_port_caps\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id]); kfree(param); @@ -855,8 +858,7 @@ static int nbl_res_adminq_get_port_attributes(void *priv) (void *)&port_advertising, sizeof(port_advertising), 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, port_advertising\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, port_advertising\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id]); kfree(param); @@ -921,8 +923,7 @@ static int nbl_res_adminq_enable_port(void *priv, bool enable) param, param_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, %s port\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, %s port\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id], enable ? "enable" : "disable"); kfree(param); @@ -1193,6 +1194,9 @@ static void nbl_res_adminq_recv_port_notify(void *priv, void *data) last_module_inplace = eth_info->module_inplace[eth_id]; last_link_state = eth_info->link_state[eth_id]; + if (!notify->link_state) + eth_info->link_down_count[eth_id]++; + eth_info->link_state[eth_id] = notify->link_state; eth_info->module_inplace[eth_id] = notify->module_inplace; /* when eth link down, don not update speed @@ -1294,7 +1298,6 @@ static int nbl_res_adminq_set_port_advertising(void *priv, new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); if (advertising->active_fec == NBL_PORT_FEC_AUTO) { new_advert |= NBL_PORT_CAP_FEC_MASK; - new_advert &= ~BIT(NBL_PORT_CAP_FEC_OFF); if (eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_AUTONEG)) new_advert |= BIT(NBL_PORT_CAP_FEC_AUTONEG); } @@ -1307,23 +1310,9 @@ static int nbl_res_adminq_set_port_advertising(void *priv, advertising->speed_advert; } - if (new_advert & NBL_PORT_CAP_SPEED_100G_MASK) { // 100G - if (new_advert & BIT(NBL_PORT_CAP_FEC_BASER)) { - dev_err(dev, "unsupport to set baser when speed is 100G\n"); - return -EOPNOTSUPP; - } - } else if (!(new_advert & NBL_PORT_CAP_SPEED_50G_MASK) && - !(new_advert & NBL_PORT_CAP_SPEED_25G_MASK) && - new_advert & NBL_PORT_CAP_SPEED_10G_MASK) { //10G - if (new_advert & BIT(NBL_PORT_CAP_FEC_RS)) { - new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK; - new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); - dev_notice(dev, "speed 10G cannot set fec RS, only can set baser\n"); - dev_notice(dev, "set new_advert:%llx\n", new_advert); - } - } - - if (eth_info->port_max_rate[eth_id] != NBL_PORT_MAX_RATE_100G_PAM4) + if (eth_info->port_max_rate[eth_id] != NBL_PORT_MAX_RATE_100G_PAM4 || + (!(new_advert & NBL_PORT_CAP_SPEED_100G_MASK) && + eth_info->port_max_rate[eth_id] == NBL_PORT_MAX_RATE_100G_PAM4)) new_advert &= ~NBL_PORT_CAP_PAM4_MASK; else new_advert |= NBL_PORT_CAP_PAM4_MASK; @@ -1343,8 +1332,7 @@ static int nbl_res_adminq_set_port_advertising(void *priv, param, 
param_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, set_port_advertising\n", + dev_err(dev, "adminq send msg failed: %d, msg: 0x%x, eth_id:%d,\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id]); kfree(param); @@ -1429,8 +1417,7 @@ static int nbl_res_adminq_get_module_info(void *priv, u8 eth_id, struct ethtool_ /* check if can access page 0xA2 directly, see sff-8472 */ if (addr_mode & SFF_8472_ADDRESSING_MODE) { - dev_err(dev, "Address change required to access page 0xA2" - " which is not supported\n"); + dev_err(dev, "Address change required to access page 0xA2 which is not supported\n"); page_swap = true; } @@ -1456,7 +1443,8 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); u8 module_inplace = 0; /* 1 inplace, 0 not inplace */ u32 start = eeprom->offset; - u32 length = eeprom->len; + u32 total_len = eeprom->len; + u32 length; u8 turn_page, offset; int ret; @@ -1471,12 +1459,12 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { - while (start < ETH_MODULE_SFF_8636_MAX_LEN) { - length = SFF_8638_PAGESIZE; - if (start + length > ETH_MODULE_SFF_8636_MAX_LEN) - length = ETH_MODULE_SFF_8636_MAX_LEN - start; - + while (start < ETH_MODULE_SFF_8636_MAX_LEN && total_len) { nbl_res_get_module_eeprom_page(start, &turn_page, &offset); + length = min(SFF_8638_PAGESIZE, total_len); + if (offset % SFF_8638_PAGESIZE + length > SFF_8638_PAGESIZE) + length = SFF_8638_PAGESIZE - offset % SFF_8638_PAGESIZE; + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, turn_page); if (ret) { dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", @@ -1485,7 +1473,7 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, - I2C_DEV_ADDR_A0, 0, 0, + I2C_DEV_ADDR_A0, turn_page, 0, offset, length, data); if (ret) { dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", @@ -1494,14 +1482,15 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } start += length; data += length; - length = eeprom->len - length; + total_len -= length; } return 0; } + length = total_len; /* Read A0 portion of eth EEPROM */ if (start < ETH_MODULE_SFF_8079_LEN) { - if (start + eeprom->len > ETH_MODULE_SFF_8079_LEN) + if (start + length > ETH_MODULE_SFF_8079_LEN) length = ETH_MODULE_SFF_8079_LEN - start; ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, @@ -1513,7 +1502,7 @@ static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, } start += length; data += length; - length = eeprom->len - length; + length = total_len - length; } /* Read A2 portion of eth EEPROM */ @@ -1543,6 +1532,56 @@ static int nbl_res_adminq_get_link_state(void *priv, u8 eth_id, return 0; } +static int nbl_res_adminq_get_link_down_count(void *priv, u8 eth_id, u64 *link_down_count) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + *link_down_count = eth_info->link_down_count[eth_id]; + return 0; +} + +static int nbl_res_adminq_get_link_status_opcode(void *priv, u8 eth_id, u32 *link_status_opcode) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct 
nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + u64 data = 0, key = 0, result = 0; + int param_len = 0, ret = 0; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + if (!param) + return -ENOMEM; + + key = NBL_PORT_KEY_GET_LINK_STATUS_OPCODE; + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_READ; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + *link_status_opcode = result; + + kfree(param); + return 0; +} + static int nbl_res_adminq_get_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1632,8 +1671,7 @@ static int nbl_res_adminq_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) param, param_len, NULL, 0, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) { - dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," - " eth_id:%d, reverse_mac=0x%x:%x:%x:%x:%x:%x\n", + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, reverse_mac=0x%x:%x:%x:%x:%x:%x\n", ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, eth_info->logic_eth_id[eth_id], reverse_mac[0], reverse_mac[1], reverse_mac[2], reverse_mac[3], @@ -1646,6 +1684,26 @@ static int nbl_res_adminq_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) return 0; } +static int nbl_res_adminq_get_fec_stats(void *priv, u32 eth_id, + struct nbl_fec_stats *fec_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + int data_len = sizeof(struct nbl_fec_stats); + int ret; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS, &eth_id, + sizeof(eth_id), fec_stats, data_len, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "ctrl eth %d fec stats failed", eth_id); + + return ret; +} + static int nbl_res_adminq_ctrl_port_led(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg) { @@ -1663,7 +1721,7 @@ param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); param = kzalloc(param_len, GFP_KERNEL); - key = NBL_PORT_KRY_LED_BLINK; + key = NBL_PORT_KEY_LED_BLINK; switch (led_ctrl) { case NBL_LED_REG_ACTIVE: @@ -1859,7 +1917,9 @@ static int nbl_res_adminq_update_ring_num(void *priv) goto send_fail; } - if (info->pf_def_max_net_qp_num && info->vf_def_max_net_qp_num) + if (info->pf_def_max_net_qp_num && info->vf_def_max_net_qp_num && + !nbl_res_adminq_check_net_ring_num(res_mgt, + (struct nbl_fw_cmd_net_ring_num_param *)info)) memcpy(&res_info->net_ring_num_info, info, sizeof(res_info->net_ring_num_info)); send_fail: @@ -2107,6 +2167,39 @@
static int nbl_res_adminq_init_port(void *priv) return 0; } +static int nbl_res_adminq_set_wol(void *priv, u8 eth_id, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_adminq_reg_write_param reg_write = {0}; + struct nbl_chan_adminq_reg_read_param reg_read = {0}; + u32 value; + int ret = 0; + + dev_info(dev, "set_wol ethid %d %sabled", eth_id, enable ? "en" : "dis"); + + reg_read.reg = NBL_ADMINQ_ETH_WOL_REG_OFFSET; + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_REGISTER_READ, + ®_read, sizeof(reg_read), &value, sizeof(value), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d\n", ret); + return ret; + } + + reg_write.reg = NBL_ADMINQ_ETH_WOL_REG_OFFSET; + reg_write.value = (value & ~(1 << eth_id)) | (enable << eth_id); + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_REGISTER_WRITE, + ®_write, sizeof(reg_write), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d\n", ret); + + return ret; +} + #define ADD_ETH_STATISTICS(name) {#name} static struct nbl_leonis_eth_stats_info _eth_statistics[] = { ADD_ETH_STATISTICS(eth_frames_tx), @@ -2136,8 +2229,8 @@ static struct nbl_leonis_eth_stats_info _eth_statistics[] = { ADD_ETH_STATISTICS(eth_frames_tx_128_to_255B), ADD_ETH_STATISTICS(eth_frames_tx_256_to_511B), ADD_ETH_STATISTICS(eth_frames_tx_512_to_1023B), - ADD_ETH_STATISTICS(eth_frames_tx_1024_to_1535B), - ADD_ETH_STATISTICS(eth_frames_tx_1536_to_2047B), + ADD_ETH_STATISTICS(eth_frames_tx_1024_to_1518B), + ADD_ETH_STATISTICS(eth_frames_tx_1519_to_2047B), ADD_ETH_STATISTICS(eth_frames_tx_2048_to_MAXB), ADD_ETH_STATISTICS(eth_undersize_frames_tx_goodfcs), ADD_ETH_STATISTICS(eth_oversize_frames_tx_goodfcs), @@ -2183,13 +2276,14 @@ static struct nbl_leonis_eth_stats_info _eth_statistics[] = { ADD_ETH_STATISTICS(eth_frames_rx_128_to_255B), ADD_ETH_STATISTICS(eth_frames_rx_256_to_511B), ADD_ETH_STATISTICS(eth_frames_rx_512_to_1023B), - ADD_ETH_STATISTICS(eth_frames_rx_1024_to_1535B), - ADD_ETH_STATISTICS(eth_frames_rx_1536_to_2047B), + ADD_ETH_STATISTICS(eth_frames_rx_1024_to_1518B), + ADD_ETH_STATISTICS(eth_frames_rx_1519_to_2047B), ADD_ETH_STATISTICS(eth_frames_rx_2048_to_MAXB), ADD_ETH_STATISTICS(eth_octets_rx), ADD_ETH_STATISTICS(eth_octets_rx_ok), ADD_ETH_STATISTICS(eth_octets_rx_badfcs), ADD_ETH_STATISTICS(eth_octets_rx_dropped), + ADD_ETH_STATISTICS(eth_unsupported_opcodes_rx), }; static void nbl_res_adminq_get_private_stat_len(void *priv, u32 *len) @@ -2197,25 +2291,79 @@ static void nbl_res_adminq_get_private_stat_len(void *priv, u32 *len) *len = ARRAY_SIZE(_eth_statistics); } -static void nbl_res_adminq_get_private_stat_data(void *priv, u32 eth_id, u64 *data) +static void nbl_res_adminq_get_private_stat_data(void *priv, u32 eth_id, u64 *data, u32 data_len) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); struct nbl_chan_send_info chan_send; - int data_length = sizeof(struct nbl_leonis_eth_stats); int 
ret = 0; NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, - ð_id, sizeof(eth_id), data, data_length, 1); + ð_id, sizeof(eth_id), data, data_len, 1); ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); if (ret) dev_err(dev, "adminq get eth %d stats failed ret: %d\n", eth_info->logic_eth_id[eth_id], ret); } +static int nbl_res_adminq_get_eth_ctrl_stats(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_leonis_eth_stats eth_stats = {{0}}; + int data_length = sizeof(struct nbl_leonis_eth_stats); + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, + ð_id, sizeof(eth_id), ð_stats, data_length, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq get eth %d ctrl stats failed ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); + return ret; + } + eth_ctrl_stats->macctrl_frames_txd_ok = eth_stats.tx_stats.macctrl_frames_txd_ok; + eth_ctrl_stats->macctrl_frames_rxd = eth_stats.rx_stats.macctrl_frames_rxd; + eth_ctrl_stats->unsupported_opcodes_rx = eth_stats.rx_stats.unsupported_opcodes_rx; + + return ret; +} + +static int nbl_res_adminq_get_pause_stats(void *priv, u32 eth_id, + struct nbl_pause_stats *pause_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_leonis_eth_stats eth_stats; + int data_length = sizeof(struct nbl_leonis_eth_stats); + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, + ð_id, sizeof(eth_id), (void *)ð_stats, data_length, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq get eth %d pause stats failed ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); + return ret; + } + pause_stats->rx_pause_frames = eth_stats.rx_stats.pause_macctrl_frames_rxd; + pause_stats->tx_pause_frames = eth_stats.tx_stats.pause_macctrl_frames_txd; + + return ret; +} + static void nbl_res_adminq_fill_private_stat_strings(void *priv, u8 *strings) { int i; @@ -2226,6 +2374,37 @@ static void nbl_res_adminq_fill_private_stat_strings(void *priv, u8 *strings) } } +static int +nbl_res_adminq_get_eth_abnormal_stats(void *priv, u32 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_leonis_eth_stats eth_stats = {{ 0 }}; + int data_length = sizeof(struct nbl_leonis_eth_stats); + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, + ð_id, sizeof(eth_id), (u64 *)ð_stats, data_length, 1); + ret = 
chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq get eth %d stats failed ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); + return ret; + } + + eth_abnormal_stats->rx_crc_errors = eth_stats.rx_stats.frames_rxd_badfcs; + eth_abnormal_stats->rx_frame_errors = eth_stats.rx_stats.frames_rxd_misc_error; + eth_abnormal_stats->rx_length_errors = eth_stats.rx_stats.undersize_frames_rxd_goodfcs + + eth_stats.rx_stats.oversize_frames_rxd_goodfcs; + + return 0; +} + static u32 nbl_convert_temp_type_eeprom_offset(enum nbl_hwmon_type type) { switch (type) { @@ -2289,6 +2468,93 @@ static int nbl_res_adminq_get_module_temp_common(void *priv, u8 eth_id, return temp * 1000; } +static int nbl_res_adminq_get_eth_mac_stats(void *priv, u32 eth_id, + struct nbl_eth_mac_stats *eth_mac_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + + struct nbl_leonis_eth_stats eth_stats; + int data_length = sizeof(struct nbl_leonis_eth_stats); + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, + ð_id, sizeof(eth_id), (void *)ð_stats, data_length, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq get eth %d stats failed ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); + return ret; + } + eth_mac_stats->frames_txd_ok = eth_stats.tx_stats.frames_txd_ok; + eth_mac_stats->frames_rxd_ok = eth_stats.rx_stats.frames_rxd_ok; + eth_mac_stats->octets_txd_ok = eth_stats.tx_stats.octets_txd_ok; + eth_mac_stats->octets_rxd_ok = eth_stats.rx_stats.octets_rxd_ok; + eth_mac_stats->multicast_frames_txd_ok = eth_stats.tx_stats.multicast_frames_txd_ok; + eth_mac_stats->broadcast_frames_txd_ok = eth_stats.tx_stats.broadcast_frames_txd_ok; + eth_mac_stats->multicast_frames_rxd_ok = eth_stats.rx_stats.multicast_frames_rxd_ok; + eth_mac_stats->broadcast_frames_rxd_ok = eth_stats.rx_stats.broadcast_frames_rxd_ok; + + return ret; +} + +static int nbl_res_adminq_get_rmon_stats(void *priv, u32 eth_id, + struct nbl_rmon_stats *rmon_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_leonis_eth_stats eth_stats = {{0}}; + int data_length = sizeof(struct nbl_leonis_eth_stats); + u64 *rx = rmon_stats->rmon_rx_range; + u64 *tx = rmon_stats->rmon_tx_range; + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, + ð_id, sizeof(eth_id), (void *)ð_stats, data_length, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq get eth %d rmon stats failed ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); + return ret; + } + rmon_stats->undersize_frames_rxd_goodfcs = + eth_stats.rx_stats.undersize_frames_rxd_goodfcs; + rmon_stats->oversize_frames_rxd_goodfcs = + eth_stats.rx_stats.oversize_frames_rxd_goodfcs; + rmon_stats->undersize_frames_rxd_badfcs = + eth_stats.rx_stats.undersize_frames_rxd_badfcs; + 
rmon_stats->oversize_frames_rxd_badfcs = + eth_stats.rx_stats.oversize_frames_rxd_badfcs; + + rx[ETHER_STATS_PKTS_64_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange0; + rx[ETHER_STATS_PKTS_65_TO_127_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange1; + rx[ETHER_STATS_PKTS_128_TO_255_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange2; + rx[ETHER_STATS_PKTS_256_TO_511_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange3; + rx[ETHER_STATS_PKTS_512_TO_1023_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange4; + rx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange5; + rx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange6; + rx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS] = eth_stats.rx_stats.frames_rxd_sizerange7; + + tx[ETHER_STATS_PKTS_64_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange0; + tx[ETHER_STATS_PKTS_65_TO_127_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange1; + tx[ETHER_STATS_PKTS_128_TO_255_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange2; + tx[ETHER_STATS_PKTS_256_TO_511_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange3; + tx[ETHER_STATS_PKTS_512_TO_1023_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange4; + tx[ETHER_STATS_PKTS_1024_TO_1518_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange5; + tx[ETHER_STATS_PKTS_1519_TO_2047_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange6; + tx[ETHER_STATS_PKTS_2048_TO_MAX_OCTETS] = eth_stats.tx_stats.frames_txd_sizerange7; + + return ret; +} + /* return value need to convert to Mil degree Celsius(1/1000) */ static int nbl_res_adminq_get_module_temp_special(struct nbl_resource_mgt *res_mgt, u8 eth_id, enum nbl_hwmon_type type) @@ -2312,7 +2578,7 @@ static int nbl_res_adminq_get_module_temp_special(struct nbl_resource_mgt *res_m } ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, - 0, 0, offset, 1, (u8 *)&temp); + turn_page, 0, offset, 1, (u8 *)&temp); if (ret) { dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", eth_info->logic_eth_id[eth_id], ret); @@ -2435,17 +2701,26 @@ do { \ NBL_ADMINQ_SET_OPS(get_module_info, nbl_res_adminq_get_module_info); \ NBL_ADMINQ_SET_OPS(get_module_eeprom, nbl_res_adminq_get_module_eeprom); \ NBL_ADMINQ_SET_OPS(get_link_state, nbl_res_adminq_get_link_state); \ + NBL_ADMINQ_SET_OPS(get_link_down_count, nbl_res_adminq_get_link_down_count); \ + NBL_ADMINQ_SET_OPS(get_link_status_opcode, nbl_res_adminq_get_link_status_opcode); \ NBL_ADMINQ_SET_OPS(set_eth_mac_addr, nbl_res_adminq_set_eth_mac_addr); \ + NBL_ADMINQ_SET_OPS(get_eth_ctrl_stats, nbl_res_adminq_get_eth_ctrl_stats); \ NBL_ADMINQ_SET_OPS(ctrl_port_led, nbl_res_adminq_ctrl_port_led); \ + NBL_ADMINQ_SET_OPS(set_wol, nbl_res_adminq_set_wol); \ NBL_ADMINQ_SET_OPS(nway_reset, nbl_res_adminq_nway_reset); \ NBL_ADMINQ_SET_OPS(set_eth_pfc, nbl_res_adminq_set_eth_pfc); \ NBL_ADMINQ_SET_OPS(passthrough_fw_cmd, nbl_res_adminq_passthrough); \ NBL_ADMINQ_SET_OPS(get_private_stat_len, nbl_res_adminq_get_private_stat_len); \ NBL_ADMINQ_SET_OPS(get_private_stat_data, nbl_res_adminq_get_private_stat_data); \ + NBL_ADMINQ_SET_OPS(get_pause_stats, nbl_res_adminq_get_pause_stats); \ + NBL_ADMINQ_SET_OPS(get_eth_mac_stats, nbl_res_adminq_get_eth_mac_stats); \ NBL_ADMINQ_SET_OPS(fill_private_stat_strings, nbl_res_adminq_fill_private_stat_strings);\ NBL_ADMINQ_SET_OPS(get_module_temperature, nbl_res_adminq_get_module_temperature); \ NBL_ADMINQ_SET_OPS(load_p4_default, nbl_res_adminq_load_p4_default); \ NBL_ADMINQ_SET_OPS(cfg_eth_bond_event, nbl_res_adminq_cfg_eth_bond_event); \ + 
NBL_ADMINQ_SET_OPS(get_eth_abnormal_stats, nbl_res_adminq_get_eth_abnormal_stats); \ + NBL_ADMINQ_SET_OPS(get_fec_stats, nbl_res_adminq_get_fec_stats); \ + NBL_ADMINQ_SET_OPS(get_rmon_stats, nbl_res_adminq_get_rmon_stats); \ } while (0) /* Structure starts here, adding an op should not modify anything below */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h index c8f1c03556f9..1a65f9bafa8f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -81,7 +81,8 @@ #define SFF_COPPER_8431_APPENDIX_E 1 #define SFF_COPPER_8431_LIMITING 4 #define SFF_8636_TURNPAGE_ADDR (127) -#define SFF_8638_PAGESIZE (128) +#define SFF_8638_PAGESIZE (128U) +#define SFF_8638_PAGE0_SIZE (256U) #define SFF_8636_TEMP (0x60) #define SFF_8636_TEMP_MAX (0x4) @@ -91,6 +92,8 @@ #define SFF_8636_QSFP28_TEMP_MAX (0x204) #define SFF_8636_QSFP28_TEMP_CIRT (0x200) +#define NBL_ADMINQ_ETH_WOL_REG_OFFSET (0x1604000 + 0x500) + /* Firmware version */ #define FIRMWARE_MAGIC "M181FWV0" #define BCD2BYTE(b) ({ typeof(b) _b = (b); \ @@ -100,7 +103,8 @@ (((_s) >> 8) & 0xF) * 100 + (((_s) >> 12) & 0xF) * 1000); }) /* VSI fixed number of queues*/ -#define NBL_VSI_PF_REAL_QUEUE_NUM(num) (((num) * 2) + NBL_DEFAULT_REP_HW_QUEUE_NUM) +#define NBL_VSI_PF_LEGAL_QUEUE_NUM(num) ((num) + NBL_DEFAULT_REP_HW_QUEUE_NUM) +#define NBL_VSI_PF_MAX_QUEUE_NUM(num) (((num) * 2) + NBL_DEFAULT_REP_HW_QUEUE_NUM) #define NBL_VSI_VF_REAL_QUEUE_NUM(num) (num) #define NBL_ADMINQ_PFA_TLV_VF_NUM (0x5804) @@ -211,6 +215,7 @@ struct nbl_leonis_eth_rx_stats { u64 octets_rxd_ok; u64 octets_rxd_badfcs; u64 octets_rxd_dropped; + u64 unsupported_opcodes_rx; }; struct nbl_leonis_eth_stats { @@ -222,4 +227,36 @@ struct nbl_leonis_eth_stats_info { const char *descp; }; +struct nbl_port_key { + u32 id; /* port id */ + u32 subop; /* 1: read, 2: write */ + u64 data[]; /* [47:0]: data, [55:48]: rsvd, [63:56]: key */ +}; + +#define NBL_PORT_KEY_ILLEGAL 0x0 +#define NBL_PORT_KEY_CAPABILITIES 0x1 +#define NBL_PORT_KEY_ENABLE 0x2 /* BIT(0): NBL_PORT_FLAG_ENABLE_NOTIFY */ +#define NBL_PORT_KEY_DISABLE 0x3 +#define NBL_PORT_KEY_ADVERT 0x4 +#define NBL_PORT_KEY_LOOPBACK 0x5 /* 0: disable eth loopback, 1: enable eth loopback */ +#define NBL_PORT_KEY_MODULE_SWITCH 0x6 /* 0: sfp off, 1: sfp on */ +#define NBL_PORT_KEY_MAC_ADDRESS 0x7 +#define NBL_PORT_KEY_LED_BLINK 0x8 +#define NBL_PORT_KEY_RESTORE_DEFAULTE_CFG 11 +#define NBL_PORT_KEY_SET_PFC_CFG 12 +#define NBL_PORT_KEY_GET_LINK_STATUS_OPCODE 17 + +enum { + NBL_PORT_SUBOP_READ = 1, + NBL_PORT_SUBOP_WRITE = 2, +}; + +#define NBL_PORT_FLAG_ENABLE_NOTIFY BIT(0) +#define NBL_PORT_ENABLE_LOOPBACK 1 +#define NBL_PORT_DISABLE_LOOPBCK 0 +#define NBL_PORT_SFP_ON 1 +#define NBL_PORT_SFP_OFF 0 +#define NBL_PORT_KEY_KEY_SHIFT 56 +#define NBL_PORT_KEY_DATA_MASK 0xFFFFFFFFFFFF + #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c index 2277e9e56970..aa2960fc87ce 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c @@ -62,8 +62,7 @@ static int nbl_fc_get_stats(void *priv, struct nbl_stats_param *param) nbl_fc_update_stats(¶m->f->stats, bytes, pkts, 0, counter->lastuse); 
spin_unlock(&mgt->counter_lock); - nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc %u-%lu get pkts:(%llu), bytes:(%llu)", - counter->counter_id, cookie, pkts, bytes); + return 0; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h index 1f59714c480e..8664d8a937f1 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c index 72c79e857edc..d113fd995903 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c @@ -187,7 +187,7 @@ static struct nbl_flow_direct_entry *nbl_fd_add_flow(struct nbl_flow_direct_mgt if (next->param.location >= entry->param.location) break; - if (nbl_list_entry_is_head(next, &info->list[param->rule_type], node)) + if (list_entry_is_head(next, &info->list[param->rule_type], node)) list_add(&entry->node, &info->list[param->rule_type]); else list_add(&entry->node, &list_prev_entry(next, node)->node); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h index 05020a7cb3e2..a0e6f465322d 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h index 1ee6260640de..2c75c42c2b3a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -18,19 +18,19 @@ #define NBL_KT_BYTE_LEN 40 #define NBL_KT_BYTE_HALF_LEN 20 -#define NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2 0 -#define NBL_EM0_PT_PHY_UP_LLDP_LACP 1 -#define NBL_EM0_PT_PHY_UP_UNICAST_L2 2 -#define NBL_EM0_PT_PHY_DOWN_UNICAST_L2 3 -#define NBL_EM0_PT_PHY_UP_MULTICAST_L2 4 -#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L2 5 -#define NBL_EM0_PT_PHY_UP_MULTICAST_L3 6 -#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L3 7 -#define NBL_EM0_PT_PHY_DPRBAC_IPV4 8 -#define NBL_EM0_PT_PHY_DPRBAC_IPV6 9 -#define NBL_EM0_PT_PHY_UL4S_IPV4 10 -#define NBL_EM0_PT_PHY_UL4S_IPV6 11 -#define NBL_EM0_PT_PMD_ND_UPCALL 12 +#define NBL_EM0_PT_PHY_UP_TUNNEL_L2 0 +#define NBL_EM0_PT_PHY_UP_L2 1 +#define NBL_EM0_PT_PHY_DOWN_L2 2 +#define NBL_EM0_PT_PHY_UP_LLDP_LACP 3 +#define NBL_EM0_PT_PMD_ND_UPCALL 4 +#define NBL_EM0_PT_PHY_L2_UP_MULTI_MCAST 5 +#define NBL_EM0_PT_PHY_L3_UP_MULTI_MCAST 6 +#define NBL_EM0_PT_PHY_L2_DOWN_MULTI_MCAST 7 +#define NBL_EM0_PT_PHY_L3_DOWN_MULTI_MCAST 8 +#define NBL_EM0_PT_PHY_DPRBAC_IPV4 9 +#define NBL_EM0_PT_PHY_DPRBAC_IPV6 10 +#define NBL_EM0_PT_PHY_UL4S_IPV4 11 +#define NBL_EM0_PT_PHY_UL4S_IPV6 12 #define NBL_PP0_PROFILE_ID_MIN (0) #define NBL_PP0_PROFILE_ID_MAX (15) @@ -41,9 +41,16 @@ #define NBL_PP_PROFILE_NUM (16) #define NBL_QID_MAP_TABLE_ENTRIES (4096) +#define NBL_EPRO_PF_RSS_RET_TBL_DEPTH (4096) #define NBL_EPRO_RSS_RET_TBL_DEPTH (8192 * 2) #define NBL_EPRO_RSS_ENTRY_SIZE_UNIT (16) +#define NBL_EPRO_PF_RSS_RET_TBL_COUNT (512) +#define NBL_EPRO_PF_RSS_ENTRY_SIZE (5) + +#define NBL_EPRO_RSS_ENTRY_MAX_COUNT (512) +#define NBL_EPRO_RSS_ENTRY_MAX_SIZE (4) + #define NBL_EPRO_RSS_SK_SIZE 40 #define NBL_EPRO_RSS_PER_KEY_SIZE 8 #define NBL_EPRO_RSS_KEY_NUM (NBL_EPRO_RSS_SK_SIZE / NBL_EPRO_RSS_PER_KEY_SIZE) diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h index 105f130a9024..cce5542ede7e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h @@ -1,27 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. 
// Compatible with leonis RTL tag 0710 #include "nbl_datapath_upa.h" #include "nbl_datapath_dpa.h" -#include "nbl_datapath_uqm.h" -#include "nbl_datapath_dqm.h" -#include "nbl_datapath_ustat.h" -#include "nbl_datapath_dstat.h" -#include "nbl_datapath_upmem.h" -#include "nbl_datapath_dpmem.h" -//#include "nbl_datapath_uvn.h" -//#include "nbl_datapath_dvn.h" #include "nbl_datapath_ucar.h" -// #include "nbl_datapath_dsch.h" -//#include "nbl_datapath_shaping.h" #include "nbl_datapath_uped.h" #include "nbl_datapath_dped.h" -#include "nbl_datapath_drmux.h" -#include "nbl_datapath_urmux.h" -#include "nbl_datapath_ddmux.h" -//#include "nbl_datapath_ul4s.h" -//#include "nbl_datapath_dl4s.h" -//#include "nbl_datapath_ustore.h" #include "nbl_datapath_dstore.h" -//#include "nbl_datapath_ubm.h" -//#include "nbl_datapath_dbm.h" diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h index 3d7e0bf51a59..a5e4301fd572 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h index 3ca3dbd24e70..313fe6c4bb6b 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h @@ -1,4 +1,9 @@ -// Code generated by interstellar. DO NOT EDIT. +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 #ifndef NBL_DPED_H diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h index f9fe9a617700..cf00d7eb34db 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h index 0f6add426838..2fa92013bd4b 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. 
// Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h index 16061974d449..c98c46b09ab6 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h index 1a88c44380ef..fa7ad4142dcd 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h index c8798808f93d..50984f92991f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h @@ -1,23 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 #include "nbl_intf_cmdq.h" -//#include "nbl_intf_mailbox.h" -#include "nbl_intf_pcie_ecpu.h" -#include "nbl_intf_pcie_host.h" -#include "nbl_intf_ctrlq_host.h" -#include "nbl_intf_ctrlq_ecpu.h" -#include "nbl_intf_ctrlq_emp.h" -#include "nbl_intf_vblk.h" #include "nbl_intf_vdpa.h" -#include "nbl_intf_ptlp.h" #include "nbl_intf_pcompleter_host.h" -#include "nbl_intf_ecpu_padpt.h" -#include "nbl_intf_host_padpt.h" -#include "nbl_intf_msgq_notify.h" -#include "nbl_intf_msgq_aged.h" -#include "nbl_intf_fifo_ch.h" -#include "nbl_intf_host_pcap.h" -#include "nbl_intf_ecpu_pcap.h" -#include "nbl_intf_native_host.h" -#include "nbl_intf_native_ecpu.h" diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h index 3b3442e5a5fc..5cc8dccf672c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. 
// Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h index ec0e7a309df0..93651add245f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 @@ -848,4 +853,107 @@ union pcompleter_host_emp2pcie_rdy_u { u32 data[NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_DWLEN]; } __packed; +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_ADDR (0xf0c000) +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DEPTH (520) +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_WIDTH (128) +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DWLEN (4) +union pcompleter_host_function_msix_map_table_u { + struct pcompleter_host_function_msix_map_table { + u32 msix_base_addr_l:32; /* [31:0] Default:0x0 RW */ + u32 msix_base_addr_h:32; /* [63:32] Default:0x0 RW */ + u32 bdf_id:16; /* [79:64] Default:0x0 RW */ + u32 valid:1; /* [80] Default:0x0 RW */ + u32 rsv_l:32; /* [127:81] Default:0x0 RO */ + u32 rsv_h:15; /* [127:81] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_REG(r) \ + (NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_FUNCTION_MSIX_MAP_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_ADDR (0xf18000) +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DEPTH (8192) +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_WIDTH (128) +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DWLEN (4) +union pcompleter_host_virtio_qid_map_table_u { + struct pcompleter_host_virtio_qid_map_table { + u32 local_qid:9; /* [8:0] Default:0x1ff RW */ + u32 bar_addr_l:32; /* [63:9] Default:0x7fffffffffffff RW */ + u32 bar_addr_h:23; /* [63:9] Default:0x7fffffffffffff RW */ + u32 global_qid:12; /* [75:64] Default:0xfff RW */ + u32 ctrlq_flag:1; /* [76] Default:0x1 RW */ + u32 rsv_l:32; /* [127:77] Default:0x0 RO */ + u32 rsv_h:19; /* [127:77] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_REG(r) \ + (NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_VIRTIO_QID_MAP_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_ADDR (0xf38000) +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DEPTH (128) +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_WIDTH (128) +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DWLEN (4) +union pcompleter_host_rdma_pfid_map_table_u { + struct pcompleter_host_rdma_pfid_map_table { + u32 bar_add_rsv:13; /* [12:0] Default:0x1fff RO */ + u32 bar_addr_l:32; /* [63:13] Default:0x7ffffffffffff RW */ + u32 bar_addr_h:19; /* [63:13] Default:0x7ffffffffffff RW */ + u32 pfid:6; /* [69:64] Default:0x3f RW */ + u32 rsv_l:32; /* [127:70] Default:0x0 RO */ + u32 rsv_h:26; /* [127:70] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_REG(r) \ + 
(NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_RDMA_PFID_MAP_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_ADDR (0xf40000) +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DEPTH (520) +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DWLEN (1) +union pcompleter_host_msixbar_tlp_mis_table_u { + struct pcompleter_host_msixbar_tlp_mis_table { + u32 miss:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_REG(r) \ + (NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_MSIXBAR_TLP_MIS_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_ADDR (0xf41000) +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DEPTH (520) +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DWLEN (1) +union pcompleter_host_msixbar_invld_tbl_table_u { + struct pcompleter_host_msixbar_invld_tbl_table { + u32 invld:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_REG(r) \ + (NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_MSIXBAR_INVLD_TBL_TABLE_DWLEN * 4) * (r)) + +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_ADDR (0xf42000) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DEPTH (5120) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DWLEN (1) +union pcompleter_host_msix_fid_table_u { + struct pcompleter_host_msix_fid_table { + u32 fid:10; /* [09:00] Default:0x0 RW */ + u32 valid:1; /* [10:10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DWLEN]; +} __packed; +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_REG(r) (NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_ADDR + \ + (NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_DWLEN * 4) * (r)) + #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h index 5e7892880130..1dc2181f1553 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h index ae68b8efe15a..60a9f4f5371c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. 
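Every generated table above follows one pattern: an ADDR base, a DEPTH in entries, a WIDTH in bits, a DWLEN giving the entry size in dwords, a union overlaying a __packed bitfield view on u32 data[DWLEN], and a _REG(r) macro that maps an entry index to its byte offset (ADDR + DWLEN * 4 * r). A hedged sketch of programming one msix_fid entry with that pattern; nbl_hw_wr32() stands in for whatever MMIO write helper the phy layer provides and is not a symbol from this patch:

static void nbl_msix_fid_entry_set(u8 __iomem *hw, u32 r, u16 fid, bool valid)
{
	union pcompleter_host_msix_fid_table_u entry = { .data = { 0 } };

	entry.info.fid = fid;	/* 10-bit function id */
	entry.info.valid = valid;

	/* DWLEN is 1 for this table, so one dword write covers the entry */
	nbl_hw_wr32(hw, NBL_PCOMPLETER_HOST_MSIX_FID_TABLE_REG(r), entry.data[0]);
}
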
// Compatible with leonis RTL tag 0710 @@ -9,6 +14,3 @@ #include "nbl_ppe_fem.h" #include "nbl_ppe_mcc.h" #include "nbl_ppe_acl.h" -#include "nbl_ppe_cap.h" -#include "nbl_ppe_uprbac.h" -#include "nbl_ppe_dprbac.h" diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h index 8cb5158e9497..552901be24b3 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h index 454d1480be9b..d7bb0a4d3eec 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 @@ -494,6 +499,7 @@ union epro_bp_history_u { #define NBL_EPRO_MT_DEPTH (16) #define NBL_EPRO_MT_WIDTH (64) #define NBL_EPRO_MT_DWLEN (2) +#define NBL_EPRO_MT_MAX (8) union epro_mt_u { struct epro_mt { u32 dport:16; /* [15:0] Default:0x0 RW */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h index 37fe59d6ad8a..2da3d0247734 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h index 416df1273597..43828ca99b25 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h index da3e1e6f8726..f8b2b7b4ce35 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. 
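The new NBL_EPRO_MT_MAX (8) caps how many of the 16 epro_mt entries (NBL_EPRO_MT_DEPTH) the driver hands out. A sketch of the bound check an epro_mt allocator would apply, assuming a caller-owned `used` bitmap that is not part of this patch:

static int nbl_epro_mt_alloc(unsigned long *used)
{
	int idx;

	idx = find_first_zero_bit(used, NBL_EPRO_MT_MAX);
	if (idx >= NBL_EPRO_MT_MAX)
		return -ENOSPC;	/* full at the driver's cap, not at DEPTH */

	set_bit(idx, used);
	return idx;
}
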
// Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h index 690c6ce96d84..a109c966eda5 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h index d909fd0df59a..254931a1b81e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h index 71e98b61584f..373b5f6c792d 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h @@ -1,3 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ // Code generated by interstellar. DO NOT EDIT. // Compatible with leonis RTL tag 0710 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c index 8614cd58694e..7de439a5a474 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c @@ -107,9 +107,8 @@ static int nbl_fc_update_flow_stats(struct nbl_fc_mgt *mgt, return ret; set_stat_error: - nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc set flow stats failed." - " count_id:%u, cookie: %lu, ret(%u): %d", counter_array->counter_id[idx], - counter_array->cookie[idx], idx, ret); + nbl_debug(mgt->common, NBL_DEBUG_FLOW, "set stats err.id:%u, cookie: %lu, ret(%u): %d", + counter_array->counter_id[idx], counter_array->cookie[idx], idx, ret); return ret; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h index 5b58b8bfdfa2..35945c4f7e3c 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c index 38824b8b14c0..6b64d7dc4320 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c @@ -8,6 +8,26 @@ #include "nbl_p4_actions.h" #include "nbl_resource_leonis.h" +#define NBL_FLOW_LEONIS_VSI_NUM_PER_ETH 256 + +static bool nbl_flow_is_mirror_outputport(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + u16 func_id; + int i; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + if (func_id == U16_MAX) + return false; + + for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) { + if (func_id == flow_mgt->mirror_outputport_func[i]) + return true; + } + + return false; +} + static u32 nbl_flow_cfg_action_set_dport(u16 upcall_flag, u16 port_type, u16 vsi, u16 next_stg_sel) { union nbl_action_data set_dport = {.data = 0}; @@ -46,20 +66,6 @@ static u16 nbl_flow_cfg_action_set_dport_mcc_vsi(u16 vsi) return set_dport.data; } -static u16 nbl_flow_cfg_action_set_dport_mcc_lag(u16 lag_id) -{ - union nbl_action_data set_dport = {.data = 0}; - - set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; - set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; - set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; - set_dport.dport.down.lag_vld = 1; - set_dport.dport.down.eth_vld = 0; - set_dport.dport.down.lag_id = lag_id; - - return set_dport.data; -} - static u32 nbl_flow_cfg_action_set_dport_mcc_bmc(void) { union nbl_action_data set_dport = {.data = 0}; @@ -82,7 +88,6 @@ static int nbl_flow_cfg_action_mcc(u16 mcc_id, u32 *action0, u32 *action1) set_aux_act.set_aux.sub_id = NBL_SET_AUX_SET_AUX; set_aux_act.set_aux.nstg_vld = 1; set_aux_act.set_aux.nstg_val = NBL_NEXT_STG_MCC; - *action1 = (u32)set_aux_act.data + (NBL_ACT_SET_AUX_FIELD << 16); return 0; @@ -91,8 +96,12 @@ static int nbl_flow_cfg_action_mcc(u16 mcc_id, u32 *action0, u32 *action1) static int nbl_flow_cfg_action_up_tnl(struct nbl_flow_param param, u32 *action0, u32 *action1) { *action1 = 0; - *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, - param.vsi, NEXT_STG_SEL_ACL_S0); + if (param.mcc_id == NBL_MCC_ID_INVALID) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); + else + nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); return 0; } @@ -109,8 +118,12 @@ static int nbl_flow_cfg_action_lldp_lacp_up(struct nbl_flow_param param, u32 *ac static int nbl_flow_cfg_action_up(struct nbl_flow_param param, u32 *action0, u32 *action1) { *action1 = 0; - *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, - param.vsi, NEXT_STG_SEL_NONE); + if (param.mcc_id == NBL_MCC_ID_INVALID) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_NONE); + else + nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); return 0; } @@ -118,32 +131,16 @@ static int nbl_flow_cfg_action_up(struct nbl_flow_param param, u32 *action0, u32 static int nbl_flow_cfg_action_down(struct nbl_flow_param param, u32 *action0, u32 *action1) { *action1 = 0; - *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, - param.vsi, NEXT_STG_SEL_EPRO); + if 
(param.mcc_id == NBL_MCC_ID_INVALID) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); + else + nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); return 0; } -static int nbl_flow_cfg_action_l2_up(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - -static int nbl_flow_cfg_action_l2_down(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - -static int nbl_flow_cfg_action_l3_up(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - -static int nbl_flow_cfg_action_l3_down(struct nbl_flow_param param, u32 *action0, u32 *action1) -{ - return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); -} - static int nbl_flow_cfg_up_tnl_key_value(union nbl_common_data_u *data, struct nbl_flow_param param, u8 eth_mode) { @@ -159,7 +156,7 @@ static int nbl_flow_cfg_up_tnl_key_value(union nbl_common_data_u *data, kt_data->info.dst_mac = dst_mac; kt_data->info.svlan_id = param.vid; - kt_data->info.template = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; + kt_data->info.template = NBL_EM0_PT_PHY_UP_TUNNEL_L2; kt_data->info.padding = 0; sport = param.eth; @@ -199,7 +196,7 @@ static int nbl_flow_cfg_up_key_value(union nbl_common_data_u *data, kt_data->info.dst_mac = dst_mac; kt_data->info.svlan_id = param.vid; - kt_data->info.template = NBL_EM0_PT_PHY_UP_UNICAST_L2; + kt_data->info.template = NBL_EM0_PT_PHY_UP_L2; kt_data->info.padding = 0; sport = param.eth; @@ -223,80 +220,14 @@ static int nbl_flow_cfg_down_key_value(union nbl_common_data_u *data, kt_data->info.dst_mac = dst_mac; kt_data->info.svlan_id = param.vid; - kt_data->info.template = NBL_EM0_PT_PHY_DOWN_UNICAST_L2; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_L2; kt_data->info.padding = 0; sport = param.vsi >> 8; if (eth_mode == NBL_TWO_ETHERNET_PORT) sport &= 0xFE; - kt_data->info.sport = sport; - - return 0; -} - -static int nbl_flow_cfg_l2_up_key_value(union nbl_common_data_u *data, - struct nbl_flow_param param, u8 eth_mode) -{ - union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; - u8 sport; - - kt_data->info.dst_mac = 0xFFFFFFFFFFFF; - kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L2; - kt_data->info.padding = 0; - - sport = param.eth; - kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; - - return 0; -} - -static int nbl_flow_cfg_l2_down_key_value(union nbl_common_data_u *data, - struct nbl_flow_param param, u8 eth_mode) -{ - union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; - u8 sport; - - kt_data->info.dst_mac = 0xFFFFFFFFFFFF; - kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L2; - kt_data->info.padding = 0; - - sport = param.eth; - if (eth_mode == NBL_TWO_ETHERNET_PORT) - sport &= 0xFE; - kt_data->info.sport = sport; - - return 0; -} - -static int nbl_flow_cfg_l3_up_key_value(union nbl_common_data_u *data, - struct nbl_flow_param param, u8 eth_mode) -{ - union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; - u8 sport; - - kt_data->info.dst_mac = 0x3333; - kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L3; - kt_data->info.padding = 0; - - sport = param.eth; - kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; - - return 0; -} - -static int nbl_flow_cfg_l3_down_key_value(union nbl_common_data_u *data, - 
struct nbl_flow_param param, u8 eth_mode) -{ - union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; - u8 sport; - - kt_data->info.dst_mac = 0x3333; - kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L3; - kt_data->info.padding = 0; - - sport = param.eth; - if (eth_mode == NBL_TWO_ETHERNET_PORT) - sport &= 0xFE; + if (eth_mode == NBL_ONE_ETHERNET_PORT) + sport = 0; kt_data->info.sport = sport; return 0; @@ -307,6 +238,7 @@ static void nbl_flow_cfg_kt_action_up_tnl(union nbl_common_data_u *data, u32 act union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; kt_data->info.act0 = action0; + kt_data->info.act1 = action1; } static void nbl_flow_cfg_kt_action_lldp_lacp_up(union nbl_common_data_u *data, @@ -322,43 +254,13 @@ static void nbl_flow_cfg_kt_action_up(union nbl_common_data_u *data, u32 action0 union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; kt_data->info.act0 = action0; + kt_data->info.act1 = action1; } static void nbl_flow_cfg_kt_action_down(union nbl_common_data_u *data, u32 action0, u32 action1) { union nbl_l2_phy_down_data_u *kt_data = (union nbl_l2_phy_down_data_u *)data; - kt_data->info.act0 = action0; -} - -static void nbl_flow_cfg_kt_action_l2_up(union nbl_common_data_u *data, u32 action0, u32 action1) -{ - union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; - - kt_data->info.act0 = action0; - kt_data->info.act1 = action1; -} - -static void nbl_flow_cfg_kt_action_l2_down(union nbl_common_data_u *data, u32 action0, u32 action1) -{ - union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; - - kt_data->info.act0 = action0; - kt_data->info.act1 = action1; -} - -static void nbl_flow_cfg_kt_action_l3_up(union nbl_common_data_u *data, u32 action0, u32 action1) -{ - union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; - - kt_data->info.act0 = action0; - kt_data->info.act1 = action1; -} - -static void nbl_flow_cfg_kt_action_l3_down(union nbl_common_data_u *data, u32 action0, u32 action1) -{ - union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; - kt_data->info.act0 = action0; kt_data->info.act1 = action1; } @@ -502,6 +404,89 @@ static void nbl_flow_cfg_kt_action_nd_upcall(union nbl_common_data_u *data, kt_data->info.act1 = action1; } +static int nbl_flow_cfg_action_multi_mcast(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_l2up_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_up_multi_mcast_data_u *)data; + u8 sport; + + kt_data->info.template = NBL_EM0_PT_PHY_L2_UP_MULTI_MCAST; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static void nbl_flow_cfg_kt_action_l2up_multi_mcast(union nbl_common_data_u *data, + u32 action0, u32 action1) +{ + union nbl_l2_phy_up_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_up_multi_mcast_data_u *)data; + + kt_data->info.act0 = action0; +} + +static int nbl_flow_cfg_l3up_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_up_multi_mcast_data_u *)data; + u8 sport; + + kt_data->info.template = NBL_EM0_PT_PHY_L3_UP_MULTI_MCAST; + + sport = 
param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_l2down_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_down_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_down_multi_mcast_data_u *)data; + u8 sport; + + kt_data->info.template = NBL_EM0_PT_PHY_L2_DOWN_MULTI_MCAST; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static void nbl_flow_cfg_kt_action_l2down_multi_mcast(union nbl_common_data_u *data, + u32 action0, u32 action1) +{ + union nbl_l2_phy_down_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_down_multi_mcast_data_u *)data; + + kt_data->info.act0 = action0; +} + +static int nbl_flow_cfg_l3down_multi_mcast_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_down_multi_mcast_data_u *kt_data = + (union nbl_l2_phy_down_multi_mcast_data_u *)data; + u8 sport; + + kt_data->info.template = NBL_EM0_PT_PHY_L3_DOWN_MULTI_MCAST; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + #define NBL_FLOW_OPS_ARR_ENTRY(type, action_func, kt_func, kt_action_func) \ [type] = {.cfg_action = action_func, .cfg_key = kt_func, \ .cfg_kt_action = kt_action_func} @@ -510,10 +495,6 @@ static const struct nbl_flow_rule_cfg_ops cfg_ops[] = { nbl_flow_cfg_action_up_tnl, nbl_flow_cfg_up_tnl_key_value, nbl_flow_cfg_kt_action_up_tnl), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_LLDP_LACP_UP, - nbl_flow_cfg_action_lldp_lacp_up, - nbl_flow_cfg_lldp_lacp_up_key_value, - nbl_flow_cfg_kt_action_lldp_lacp_up), NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_UP, nbl_flow_cfg_action_up, nbl_flow_cfg_up_key_value, @@ -522,22 +503,22 @@ static const struct nbl_flow_rule_cfg_ops cfg_ops[] = { nbl_flow_cfg_action_down, nbl_flow_cfg_down_key_value, nbl_flow_cfg_kt_action_down), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_UP, - nbl_flow_cfg_action_l2_up, - nbl_flow_cfg_l2_up_key_value, - nbl_flow_cfg_kt_action_l2_up), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_DOWN, - nbl_flow_cfg_action_l2_down, - nbl_flow_cfg_l2_down_key_value, - nbl_flow_cfg_kt_action_l2_down), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_UP, - nbl_flow_cfg_action_l3_up, - nbl_flow_cfg_l3_up_key_value, - nbl_flow_cfg_kt_action_l3_up), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_DOWN, - nbl_flow_cfg_action_l3_down, - nbl_flow_cfg_l3_down_key_value, - nbl_flow_cfg_kt_action_l3_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_LLDP_LACP_UP, + nbl_flow_cfg_action_lldp_lacp_up, + nbl_flow_cfg_lldp_lacp_up_key_value, + nbl_flow_cfg_kt_action_lldp_lacp_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_UP_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l2up_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2up_multi_mcast), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_UP_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l3up_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2up_multi_mcast), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_PMD_ND_UPCALL, + nbl_flow_cfg_action_nd_upcall, + nbl_flow_cfg_nd_upcall_key_value, + nbl_flow_cfg_kt_action_nd_upcall), NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_TLS_UP, nbl_flow_cfg_action_tls_up, nbl_flow_cfg_tls_up_key_value, @@ -546,10 +527,14 @@ static const struct nbl_flow_rule_cfg_ops cfg_ops[] = { nbl_flow_cfg_action_ipsec_down, nbl_flow_cfg_ipsec_down_key_value, nbl_flow_cfg_kt_action_ipsec_down), - NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_PMD_ND_UPCALL, - nbl_flow_cfg_action_nd_upcall, - nbl_flow_cfg_nd_upcall_key_value, - 
nbl_flow_cfg_kt_action_nd_upcall), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_DOWN_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l2down_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2down_multi_mcast), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_DOWN_MULTI_MCAST, + nbl_flow_cfg_action_multi_mcast, + nbl_flow_cfg_l3down_multi_mcast_key_value, + nbl_flow_cfg_kt_action_l2down_multi_mcast), }; static int nbl_flow_alloc_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_fem_entry *flow) @@ -561,6 +546,7 @@ static int nbl_flow_alloc_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow if (flow_id == NBL_MACVLAN_TABLE_LEN) return -ENOSPC; set_bit(flow_id, flow_mgt->flow_id_bitmap); + flow_mgt->flow_id_cnt--; } else { flow_id = nbl_common_find_available_idx(flow_mgt->flow_id_bitmap, NBL_MACVLAN_TABLE_LEN, 2, 2); @@ -568,6 +554,7 @@ static int nbl_flow_alloc_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow return -ENOSPC; set_bit(flow_id, flow_mgt->flow_id_bitmap); set_bit(flow_id + 1, flow_mgt->flow_id_bitmap); + flow_mgt->flow_id_cnt -= 2; } flow->flow_id = flow_id; @@ -582,10 +569,12 @@ static void nbl_flow_free_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow if (flow->flow_type == NBL_KT_HALF_MODE) { clear_bit(flow->flow_id, flow_mgt->flow_id_bitmap); flow->flow_id = 0xFFFF; + flow_mgt->flow_id_cnt++; } else { clear_bit(flow->flow_id, flow_mgt->flow_id_bitmap); clear_bit(flow->flow_id + 1, flow_mgt->flow_id_bitmap); flow->flow_id = 0xFFFF; + flow_mgt->flow_id_cnt += 2; } } @@ -611,6 +600,25 @@ static void nbl_flow_free_tcam_id(struct nbl_flow_mgt *flow_mgt, tcam_item->tcam_index = 0; } +static int nbl_flow_alloc_mcc_id(struct nbl_flow_mgt *flow_mgt) +{ + u32 mcc_id; + + mcc_id = find_first_zero_bit(flow_mgt->mcc_id_bitmap, NBL_FLOW_MCC_INDEX_SIZE); + if (mcc_id == NBL_FLOW_MCC_INDEX_SIZE) + return -ENOSPC; + + set_bit(mcc_id, flow_mgt->mcc_id_bitmap); + + return mcc_id + NBL_FLOW_MCC_INDEX_START; +} + +static void nbl_flow_free_mcc_id(struct nbl_flow_mgt *flow_mgt, u32 mcc_id) +{ + if (mcc_id >= NBL_FLOW_MCC_INDEX_START) + clear_bit(mcc_id - NBL_FLOW_MCC_INDEX_START, flow_mgt->mcc_id_bitmap); +} + static void nbl_flow_set_mt_input(struct nbl_mt_input *mt_input, union nbl_common_data_u *kt_data, u8 type, u16 flow_id) { @@ -623,7 +631,7 @@ static void nbl_flow_set_mt_input(struct nbl_mt_input *mt_input, union nbl_commo mt_input->tbl_id = flow_id + NBL_EM_PHY_KT_OFFSET; mt_input->depth = 0; - mt_input->power = 10; + mt_input->power = NBL_PP0_POWER; } static void nbl_flow_key_hash(struct nbl_flow_fem_entry *flow, struct nbl_mt_input *mt_input) @@ -651,9 +659,9 @@ static bool nbl_pp_ht0_ht1_search(struct nbl_flow_ht_mng *pp_ht0_mng, u16 ht0_ha for (i = 0; i < NBL_HASH_CFT_MAX; i++) if (node0->key[i].vid && node0->key[i].ht_other_index == ht1_hash) { is_find = true; - nbl_info(common, NBL_DEBUG_FLOW, - "Conflicted ht on vid %d and kt_index %u\n", - node0->key[i].vid, node0->key[i].kt_index); + nbl_debug(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + node0->key[i].vid, node0->key[i].kt_index); return is_find; } @@ -662,9 +670,9 @@ static bool nbl_pp_ht0_ht1_search(struct nbl_flow_ht_mng *pp_ht0_mng, u16 ht0_ha for (i = 0; i < NBL_HASH_CFT_MAX; i++) if (node1->key[i].vid && node1->key[i].ht_other_index == ht0_hash) { is_find = true; - nbl_info(common, NBL_DEBUG_FLOW, - "Conflicted ht on vid %d and kt_index %u\n", - node1->key[i].vid, node1->key[i].kt_index); + nbl_debug(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + 
node1->key[i].vid, node1->key[i].kt_index); return is_find; } @@ -843,10 +851,6 @@ static int nbl_flow_del_2hw(struct nbl_resource_mgt *res_mgt, struct nbl_ht_item phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - memset(kt_item.kt_data.hash_key, 0, sizeof(kt_item.kt_data.hash_key)); - phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key, - ht_item.key_index, key_type); - hash = ht_item.ht_table == NBL_HT0 ? ht_item.ht0_hash : ht_item.ht1_hash; phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, 0, ht_item.ht_table, ht_item.hash_bucket, 0, 0); @@ -902,7 +906,7 @@ static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_p memset(&ht_item, 0, sizeof(ht_item)); memset(&kt_item, 0, sizeof(kt_item)); - tcam_item = kzalloc(sizeof(*tcam_item), GFP_KERNEL); + tcam_item = kzalloc(sizeof(*tcam_item), GFP_ATOMIC); if (!tcam_item) return -ENOMEM; @@ -914,7 +918,7 @@ static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_p flow->type = type; flow->flow_id = 0xFFFF; - if (type >= NBL_FLOW_TYPE_MAX && type < NBL_FLOW_ACCEL_MAX) { + if (type >= NBL_FLOW_ACCEL_BEGIN && type < NBL_FLOW_ACCEL_END) { if (flow->flow_type == NBL_KT_FULL_MODE) cost = 2; else @@ -925,7 +929,6 @@ static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_p goto free_mem; } } - ret = nbl_flow_alloc_flow_id(flow_mgt, flow); if (ret) goto free_mem; @@ -1023,8 +1026,7 @@ static void nbl_flow_del_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_ } nbl_flow_free_flow_id(flow_mgt, flow); - - if (flow->type >= NBL_FLOW_TYPE_MAX && flow->type < NBL_FLOW_ACCEL_MAX) { + if (flow->type >= NBL_FLOW_ACCEL_BEGIN && flow->type < NBL_FLOW_ACCEL_END) { if (flow->flow_type == NBL_KT_FULL_MODE) flow_mgt->accel_flow_count -= 2; else @@ -1032,20 +1034,22 @@ static void nbl_flow_del_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_ } } -static int nbl_flow_add_mcc_node(struct nbl_flow_multi_group *multi_group, - struct nbl_resource_mgt *res_mgt, - int type, u16 data, u16 mcc_id, u16 head) +static struct nbl_flow_mcc_node *nbl_flow_alloc_mcc_node(struct nbl_flow_mgt *flow_mgt, + u8 type, u16 data, u16 head) { - struct nbl_flow_mcc_node *mcc_node = NULL, *mcc_head = NULL; - struct nbl_phy_ops *phy_ops; - u16 prev_mcc_id, mcc_action; - int ret = 0; + struct nbl_flow_mcc_node *node; + int mcc_id; + u16 mcc_action; - phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return NULL; - mcc_node = kzalloc(sizeof(*mcc_node), GFP_KERNEL); - if (!mcc_node) - return -ENOMEM; + mcc_id = nbl_flow_alloc_mcc_id(flow_mgt); + if (mcc_id < 0) { + kfree(node); + return NULL; + } switch (type) { case NBL_MCC_INDEX_BOND: @@ -1059,68 +1063,102 @@ static int nbl_flow_add_mcc_node(struct nbl_flow_multi_group *multi_group, mcc_action = nbl_flow_cfg_action_set_dport_mcc_bmc(); break; default: - return -EINVAL; + nbl_flow_free_mcc_id(flow_mgt, mcc_id); + kfree(node); + return NULL; } - mcc_node->mcc_id = mcc_id; - mcc_node->mcc_head = head; + INIT_LIST_HEAD(&node->node); + node->mcc_id = mcc_id; + node->mcc_head = head; + node->type = type; + node->data = data; + node->mcc_action = mcc_action; + + return node; +} + +static void nbl_flow_free_mcc_node(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_mcc_node *node) +{ + nbl_flow_free_mcc_id(flow_mgt, node->mcc_id); + kfree(node); +} + +/* not consider multicast node first change, need modify all macvlan mcc */ +static int nbl_flow_add_mcc_node(struct nbl_resource_mgt *res_mgt, + struct 
nbl_flow_mcc_node *mcc_node, + struct list_head *head, + struct list_head *list, + struct list_head *suffix) +{ + struct nbl_flow_mcc_node *mcc_head = NULL; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 prev_mcc_id, next_mcc_id = NBL_MCC_ID_INVALID; + int ret = 0; /* mcc_head must init before mcc_list */ - if (head) { - list_add_tail(&mcc_node->node, &multi_group->mcc_head); + if (mcc_node->mcc_head) { + list_add_tail(&mcc_node->node, head); prev_mcc_id = NBL_MCC_ID_INVALID; - WARN_ON(!nbl_list_empty(&multi_group->mcc_list)); - ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id, - prev_mcc_id, mcc_action); + WARN_ON(!nbl_list_empty(list)); + ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, + prev_mcc_id, NBL_MCC_ID_INVALID, mcc_node->mcc_action); goto check_ret; } - list_add_tail(&mcc_node->node, &multi_group->mcc_list); + list_add_tail(&mcc_node->node, list); - if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list)) + if (nbl_list_is_first(&mcc_node->node, list)) prev_mcc_id = NBL_MCC_ID_INVALID; else prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id; + /* not head, next mcc may point suffix */ + if (suffix && !nbl_list_empty(suffix)) + next_mcc_id = list_first_entry(suffix, struct nbl_flow_mcc_node, node)->mcc_id; + else + next_mcc_id = NBL_MCC_ID_INVALID; + /* first add mcc_list */ - if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(&multi_group->mcc_head)) { - list_for_each_entry(mcc_head, &multi_group->mcc_head, node) { + if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(head)) { + list_for_each_entry(mcc_head, head, node) { prev_mcc_id = mcc_head->mcc_id; - ret |= phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id, - prev_mcc_id, mcc_action); + ret |= phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, + prev_mcc_id, next_mcc_id, + mcc_node->mcc_action); } goto check_ret; } ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), - mcc_id, prev_mcc_id, mcc_action); + mcc_node->mcc_id, prev_mcc_id, next_mcc_id, mcc_node->mcc_action); check_ret: if (ret) { list_del(&mcc_node->node); - kfree(mcc_node); - return -EFAULT; + return -EINVAL; } return 0; } -static void nbl_flow_del_mcc_node(struct nbl_flow_multi_group *multi_group, - struct nbl_resource_mgt *res_mgt, - struct nbl_flow_mcc_node *mcc_node) +/* not consider multicast node first change, need modify all macvlan mcc */ +static void nbl_flow_del_mcc_node(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_mcc_node *mcc_node, + struct list_head *head, + struct list_head *list, + struct list_head *suffix) { - struct nbl_phy_ops *phy_ops; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_flow_mcc_node *mcc_head = NULL; u16 prev_mcc_id, next_mcc_id; - phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - - if (nbl_list_entry_is_head(mcc_node, &multi_group->mcc_list, node) || - nbl_list_entry_is_head(mcc_node, &multi_group->mcc_head, node)) + if (list_entry_is_head(mcc_node, head, node) || + list_entry_is_head(mcc_node, list, node)) return; if (mcc_node->mcc_head) { - WARN_ON(!nbl_list_empty(&multi_group->mcc_list)); + WARN_ON(!nbl_list_empty(list)); prev_mcc_id = NBL_MCC_ID_INVALID; next_mcc_id = NBL_MCC_ID_INVALID; phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, @@ -1128,18 +1166,22 @@ static void nbl_flow_del_mcc_node(struct nbl_flow_multi_group *multi_group, goto free_node; } - if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list)) + if (nbl_list_is_first(&mcc_node->node, 
list)) prev_mcc_id = NBL_MCC_ID_INVALID; else prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id; - if (nbl_list_is_last(&mcc_node->node, &multi_group->mcc_list)) + if (nbl_list_is_last(&mcc_node->node, list)) next_mcc_id = NBL_MCC_ID_INVALID; else next_mcc_id = list_next_entry(mcc_node, node)->mcc_id; - if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(&multi_group->mcc_head)) { - list_for_each_entry(mcc_head, &multi_group->mcc_head, node) { + /* not head, next mcc may point suffix */ + if (next_mcc_id == NBL_MCC_ID_INVALID && suffix && !nbl_list_empty(suffix)) + next_mcc_id = list_first_entry(suffix, struct nbl_flow_mcc_node, node)->mcc_id; + + if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(head)) { + list_for_each_entry(mcc_head, head, node) { prev_mcc_id = mcc_head->mcc_id; phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, prev_mcc_id, next_mcc_id); @@ -1151,105 +1193,554 @@ static void nbl_flow_del_mcc_node(struct nbl_flow_multi_group *multi_group, prev_mcc_id, next_mcc_id); free_node: list_del(&mcc_node->node); - kfree(mcc_node); -} - -static void nbl_flow_macvlan_node_del_action_func(void *priv, void *x_key, void *y_key, - void *data) -{ - struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; - int i; - - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) - nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); } -static int nbl_flow_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +static struct nbl_flow_mcc_group *nbl_flow_alloc_mcc_group(struct nbl_resource_mgt *res_mgt, + unsigned long *vsi_bitmap, + u16 eth_id, bool multi, u16 vsi_num) { - struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_flow_mgt *flow_mgt; - struct nbl_common_info *common; - struct nbl_flow_l2_data *rule_data; - void *mac_hash_tbl; - struct nbl_flow_param param = {0}; - int i; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_switch_res *res = &flow_mgt->switch_res[eth_id]; + struct nbl_flow_mcc_group *group; + struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe; int ret; - u16 eth_id; - u16 node_num; + int bit; - flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - common = NBL_RES_MGT_TO_COMMON(res_mgt); + /* The structure for mc macvlan list is: + * + * macvlan up + * | + * | + * BMC -> | + * VSI 0 -> VSI 1 -> -> allmulti list + * ETH -> | + * | + * | + * macvlan down + * + * So that the up mc pkts will be send to BMC, not need broadcast to eth, + * but the down mc pkts will send to eth, not send to BMC. + * + * Per mac flow entry has independent bmc/eth mcc nodes. + * All mac flow entry share all allmuti vsi nodes. 
+ */ + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return NULL; + + group->vsi_base = eth_id * NBL_FLOW_LEONIS_VSI_NUM_PER_ETH; + group->multi = multi; + group->nbits = flow_mgt->vsi_max_per_switch; + group->ref_cnt = 1; + group->vsi_num = vsi_num; + + INIT_LIST_HEAD(&group->group_node); + INIT_LIST_HEAD(&group->mcc_node); + INIT_LIST_HEAD(&group->mcc_head); + + group->vsi_bitmap = kcalloc(BITS_TO_LONGS(flow_mgt->vsi_max_per_switch), sizeof(long), + GFP_KERNEL); + if (!group->vsi_bitmap) + goto alloc_vsi_bitmap_failed; + + bitmap_copy(group->vsi_bitmap, vsi_bitmap, flow_mgt->vsi_max_per_switch); + if (!multi) + goto add_mcc_node; + + mcc_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_ETH, eth_id, 1); + if (!mcc_node) + goto free_nodes; + + ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + if (ret) { + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + goto free_nodes; + } + + group->down_mcc_id = mcc_node->mcc_id; + mcc_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_BMC, NBL_FLOW_MCC_BMC_DPORT, 1); + if (!mcc_node) + goto free_nodes; + + ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + if (ret) { + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + goto free_nodes; + } + group->up_mcc_id = mcc_node->mcc_id; + +add_mcc_node: + for_each_set_bit(bit, vsi_bitmap, flow_mgt->vsi_max_per_switch) { + mcc_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_VSI, + bit + group->vsi_base, 0); + if (!mcc_node) + goto free_nodes; + + if (multi) + ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, &res->allmulti_list); + else + ret = nbl_flow_add_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + + if (ret) { + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + goto free_nodes; + } + } + + if (nbl_list_empty(&group->mcc_head)) { + group->down_mcc_id = list_first_entry(&group->mcc_node, + struct nbl_flow_mcc_node, node)->mcc_id; + group->up_mcc_id = list_first_entry(&group->mcc_node, + struct nbl_flow_mcc_node, node)->mcc_id; + } + list_add_tail(&group->group_node, &res->mcc_group_head); + + return group; + +free_nodes: + list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_node, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + + list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_head, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + kfree(group->vsi_bitmap); +alloc_vsi_bitmap_failed: + kfree(group); + + return NULL; +} + +static void nbl_flow_free_mcc_group(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_mcc_group *group) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe; + + group->ref_cnt--; + if (group->ref_cnt) + return; + + list_del(&group->group_node); + list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_node, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + + list_for_each_entry_safe(mcc_node, mcc_node_safe, &group->mcc_head, node) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &group->mcc_head, + &group->mcc_node, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + } + + kfree(group->vsi_bitmap); + kfree(group); +} + +static struct 
nbl_flow_mcc_group *nbl_find_same_mcc_group(struct nbl_flow_switch_res *res, + unsigned long *vsi_bitmap, + bool multi) +{ + struct nbl_flow_mcc_group *group = NULL; + + list_for_each_entry(group, &res->mcc_group_head, group_node) + if (group->multi == multi && + __bitmap_equal(group->vsi_bitmap, vsi_bitmap, group->nbits)) { + group->ref_cnt++; + return group; + } + + return NULL; +} + +static void nbl_flow_macvlan_node_del_action_func(void *priv, void *x_key, void *y_key, + void *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; + int i; + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); + } + + /* delete mcc */ + if (rule_data->mcast_flow) + nbl_flow_free_mcc_group(res_mgt, rule_data->mcc_group); +} + +static u32 nbl_flow_get_reserve_macvlan_cnt(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_flow_switch_res *res; + int i; + u32 reserve_cnt = 0; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + res = &flow_mgt->switch_res[i]; + if (res->num_vfs) + reserve_cnt += (res->num_vfs - res->active_vfs) * 3; + } + + return reserve_cnt; +} + +static int nbl_flow_macvlan_node_vsi_match_func(void *condition, void *x_key, void *y_key, + void *data) +{ + u16 vsi = *(u16 *)condition; + struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; + + if (!rule_data->mcast_flow) + return rule_data->vsi == vsi ? 0 : -1; + else + return !test_bit(vsi - rule_data->mcc_group->vsi_base, + rule_data->mcc_group->vsi_bitmap); +} + +static void nbl_flow_macvlan_node_found_vsi_action(void *priv, void *x_key, void *y_key, + void *data) +{ + bool *match = (bool *)(priv); + + *match = 1; +} + +static int nbl_flow_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_hash_xy_tbl_scan_key scan_key; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_switch_res *res; + struct nbl_flow_l2_data *rule_data; + struct nbl_flow_mcc_group *mcc_group = NULL, *pend_group = NULL; + unsigned long *vsi_bitmap; + struct nbl_flow_param param = {0}; + int i; + int ret = 0; + int pf_id, vf_id; + u32 reserve_cnt; + u16 eth_id; + u16 vsi_base; + u16 vsi_num = 0; + u16 func_id; + bool alloc_rule = 0; + bool need_mcast = 0; + bool vsi_match = 0; + + if (nbl_flow_is_mirror_outputport(res_mgt, vsi)) + return 0; eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; - node_num = nbl_common_get_hash_xy_node_num(mac_hash_tbl); - if (node_num >= flow_mgt->unicast_mac_threshold) - return -ENOSPC; + res = &flow_mgt->switch_res[eth_id]; - if (nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan)) - return -EEXIST; + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id); + reserve_cnt = nbl_flow_get_reserve_macvlan_cnt(res_mgt); - rule_data = kzalloc(sizeof(*rule_data), GFP_KERNEL); - if (!rule_data) + if (flow_mgt->flow_id_cnt <= reserve_cnt && + (vf_id == U32_MAX || test_bit(vf_id, res->vf_bitmap))) + return -ENOSPC; + + vsi_bitmap = kcalloc(BITS_TO_LONGS(flow_mgt->vsi_max_per_switch), sizeof(long), GFP_KERNEL); + if (!vsi_bitmap) return -ENOMEM; + 
NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN, + false, NULL, NULL, &vsi, + &nbl_flow_macvlan_node_vsi_match_func, &vsi_match, + &nbl_flow_macvlan_node_found_vsi_action); + param.mac = mac; param.vid = vlan; param.eth = eth_id; param.vsi = vsi; + param.mcc_id = NBL_MCC_ID_INVALID; + + vsi_base = eth_id * NBL_FLOW_LEONIS_VSI_NUM_PER_ETH; + rule_data = (struct nbl_flow_l2_data *)nbl_common_get_hash_xy_node(res->mac_hash_tbl, + mac, &vlan); + if (rule_data) { + if (rule_data->mcast_flow && + test_bit(vsi - rule_data->mcc_group->vsi_base, + rule_data->mcc_group->vsi_bitmap)) + goto success; + else if (!rule_data->mcast_flow && rule_data->vsi == vsi) + goto success; + + if (!rule_data->mcast_flow) { + vsi_num = 1; + set_bit(rule_data->vsi - vsi_base, vsi_bitmap); + } else { + vsi_num = rule_data->mcc_group->vsi_num; + bitmap_copy(vsi_bitmap, rule_data->mcc_group->vsi_bitmap, + flow_mgt->vsi_max_per_switch); + } + need_mcast = 1; - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { - if (nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i])) - break; + } else { + rule_data = kzalloc(sizeof(*rule_data), GFP_KERNEL); + if (!rule_data) { + ret = -ENOMEM; + goto alloc_rule_failed; + } + alloc_rule = 1; + rule_data->multi = is_multicast_ether_addr(mac); + rule_data->mcast_flow = 0; } - if (i != NBL_FLOW_MACVLAN_MAX) { - while (--i + 1) + + if (rule_data->multi) + need_mcast = 1; + + if (need_mcast) { + set_bit(vsi - vsi_base, vsi_bitmap); + vsi_num++; + mcc_group = nbl_find_same_mcc_group(res, vsi_bitmap, rule_data->multi); + if (!mcc_group) { + mcc_group = nbl_flow_alloc_mcc_group(res_mgt, vsi_bitmap, eth_id, + rule_data->multi, vsi_num); + if (!mcc_group) { + ret = -ENOMEM; + goto alloc_mcc_group_failed; + } + } + if (rule_data->mcast_flow) + pend_group = rule_data->mcc_group; + } else { + rule_data->vsi = vsi; + } + + if (!alloc_rule) { + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); - goto rule_err; + } + + if (pend_group) + nbl_flow_free_mcc_group(res_mgt, pend_group); } - rule_data->vsi = vsi; - ret = nbl_common_alloc_hash_xy_node(mac_hash_tbl, mac, &vlan, rule_data); - if (ret) - goto node_err; + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + if (mcc_group) { + if (i <= NBL_FLOW_UP) + param.mcc_id = mcc_group->up_mcc_id; + else + param.mcc_id = mcc_group->down_mcc_id; + } + ret = nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i]); + if (ret) + goto add_flow_failed; + } - kfree(rule_data); + if (mcc_group) { + rule_data->mcast_flow = 1; + rule_data->mcc_group = mcc_group; + } else { + rule_data->mcast_flow = 0; + rule_data->vsi = vsi; + } + + if (alloc_rule) { + ret = nbl_common_alloc_hash_xy_node(res->mac_hash_tbl, mac, &vlan, rule_data); + if (ret) + goto add_flow_failed; + } + + if (alloc_rule) + kfree(rule_data); +success: + kfree(vsi_bitmap); + + if (vf_id != U32_MAX && !test_bit(vf_id, res->vf_bitmap)) { + set_bit(vf_id, res->vf_bitmap); + res->active_vfs++; + } return 0; -node_err: - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) +add_flow_failed: + while (--i + 1) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); -rule_err: - kfree(rule_data); - return -EFAULT; + } + if (!alloc_rule) + nbl_common_free_hash_xy_node(res->mac_hash_tbl, mac, &vlan); + if (mcc_group) + nbl_flow_free_mcc_group(res_mgt, mcc_group); 
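Both macvlan paths follow a share-or-create discipline for mcc groups: nbl_find_same_mcc_group() returns an existing group keyed by (vsi_bitmap, multi) with ref_cnt already bumped, nbl_flow_alloc_mcc_group() builds a fresh hardware mcc chain with ref_cnt starting at 1, and nbl_flow_free_mcc_group() only unlinks the chain once ref_cnt reaches zero. A sketch of how that pairing could be factored; the helper name is hypothetical, both callees are from this patch:

static struct nbl_flow_mcc_group *
nbl_flow_get_mcc_group(struct nbl_resource_mgt *res_mgt,
		       struct nbl_flow_switch_res *res,
		       unsigned long *vsi_bitmap, u16 eth_id,
		       bool multi, u16 vsi_num)
{
	struct nbl_flow_mcc_group *group;

	/* reuse a chain whose member set matches; lookup bumps ref_cnt */
	group = nbl_find_same_mcc_group(res, vsi_bitmap, multi);
	if (group)
		return group;

	/* otherwise build the hardware mcc chain for this vsi set */
	return nbl_flow_alloc_mcc_group(res_mgt, vsi_bitmap, eth_id,
					multi, vsi_num);
}
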
+alloc_mcc_group_failed: + if (alloc_rule) + kfree(rule_data); +alloc_rule_failed: + kfree(vsi_bitmap); + + nbl_common_scan_hash_xy_node(res->mac_hash_tbl, &scan_key); + if (vf_id != U32_MAX && test_bit(vf_id, res->vf_bitmap) && !vsi_match) { + clear_bit(vf_id, res->vf_bitmap); + res->active_vfs--; + } + + return ret; } static void nbl_flow_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_mcc_group *mcc_group = NULL, *pend_group = NULL; + unsigned long *vsi_bitmap; + struct nbl_flow_switch_res *res; struct nbl_flow_l2_data *rule_data; - void *mac_hash_tbl; + struct nbl_flow_param param = {0}; + struct nbl_hash_xy_tbl_scan_key scan_key; int i; + int ret; + int pf_id, vf_id; + u32 vsi_num; + u16 vsi_base = 0; u16 eth_id; + u16 func_id; + bool need_mcast = false; + bool add_flow = false; + bool vsi_match = 0; - flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; + res = &flow_mgt->switch_res[eth_id]; - rule_data = nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan); + rule_data = nbl_common_get_hash_xy_node(res->mac_hash_tbl, mac, &vlan); if (!rule_data) return; + if (!rule_data->mcast_flow && rule_data->vsi != vsi) + return; + else if (rule_data->mcast_flow && + !test_bit(vsi - rule_data->mcc_group->vsi_base, rule_data->mcc_group->vsi_bitmap)) + return; - if (rule_data->vsi != vsi) + vsi_bitmap = kcalloc(BITS_TO_LONGS(flow_mgt->vsi_max_per_switch), sizeof(long), GFP_KERNEL); + if (!vsi_bitmap) return; - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id); + NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN, + false, NULL, NULL, &vsi, + &nbl_flow_macvlan_node_vsi_match_func, &vsi_match, + &nbl_flow_macvlan_node_found_vsi_action); + + if (rule_data->mcast_flow) { + bitmap_copy(vsi_bitmap, rule_data->mcc_group->vsi_bitmap, + flow_mgt->vsi_max_per_switch); + vsi_num = rule_data->mcc_group->vsi_num; + clear_bit(vsi - rule_data->mcc_group->vsi_base, vsi_bitmap); + vsi_num--; + vsi_base = (u16)rule_data->mcc_group->vsi_base; + + if (rule_data->mcc_group->vsi_num > 1) + add_flow = true; + + if ((rule_data->multi && rule_data->mcc_group->vsi_num > 1) || + (!rule_data->multi && rule_data->mcc_group->vsi_num > 2)) + need_mcast = 1; + pend_group = rule_data->mcc_group; + } + + if (need_mcast) { + mcc_group = nbl_find_same_mcc_group(res, vsi_bitmap, rule_data->multi); + if (!mcc_group) { + mcc_group = nbl_flow_alloc_mcc_group(res_mgt, vsi_bitmap, eth_id, + rule_data->multi, vsi_num); + if (!mcc_group) + goto alloc_mcc_group_failed; + } + } + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); + } + + if (pend_group) + nbl_flow_free_mcc_group(res_mgt, pend_group); + + if (add_flow) { + param.mac = mac; + param.vid = vlan; + param.eth = eth_id; + param.mcc_id = NBL_MCC_ID_INVALID; + param.vsi = (u16)find_first_bit(vsi_bitmap, + flow_mgt->vsi_max_per_switch) + vsi_base; + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; + if (mcc_group) { + if (i <= NBL_FLOW_UP) + param.mcc_id = mcc_group->up_mcc_id; + else + param.mcc_id = 
mcc_group->down_mcc_id;
+			}
+			ret = nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i]);
+			if (ret)
+				goto add_flow_failed;
+		}
+
+		if (mcc_group) {
+			rule_data->mcast_flow = 1;
+			rule_data->mcc_group = mcc_group;
+		} else {
+			rule_data->mcast_flow = 0;
+			rule_data->vsi = param.vsi;
+		}
+	}
+
+	if (!add_flow)
+		nbl_common_free_hash_xy_node(res->mac_hash_tbl, mac, &vlan);
 
-	nbl_common_free_hash_xy_node(mac_hash_tbl, mac, &vlan);
+alloc_mcc_group_failed:
+	kfree(vsi_bitmap);
+
+	nbl_common_scan_hash_xy_node(res->mac_hash_tbl, &scan_key);
+	if (vf_id != U32_MAX && test_bit(vf_id, res->vf_bitmap) && !vsi_match) {
+		clear_bit(vf_id, res->vf_bitmap);
+		res->active_vfs--;
+	}
+
+	return;
+
+add_flow_failed:
+	while (--i + 1) {
+		if (i == NBL_FLOW_UP_TNL && rule_data->multi)
+			continue;
+		nbl_flow_del_flow(res_mgt, &rule_data->entry[i]);
+	}
+	/* pend_group was already released above; drop the group taken here */
+	if (mcc_group)
+		nbl_flow_free_mcc_group(res_mgt, mcc_group);
+	nbl_common_free_hash_xy_node(res->mac_hash_tbl, mac, &vlan);
+	kfree(vsi_bitmap);
+	nbl_common_scan_hash_xy_node(res->mac_hash_tbl, &scan_key);
+	if (vf_id != U32_MAX && test_bit(vf_id, res->vf_bitmap) && !vsi_match) {
+		clear_bit(vf_id, res->vf_bitmap);
+		res->active_vfs--;
+	}
 }
 
 static int nbl_flow_add_lag(void *priv, u16 vsi)
@@ -1296,7 +1787,7 @@ static void nbl_flow_del_lag(void *priv, u16 vsi)
 		if (rule->vsi == vsi)
 			break;
 
-	if (nbl_list_entry_is_head(rule, &flow_mgt->lacp_list, node))
+	if (list_entry_is_head(rule, &flow_mgt->lacp_list, node))
 		return;
 
 	nbl_flow_del_flow(res_mgt, &rule->entry);
@@ -1349,7 +1840,7 @@ static void nbl_flow_del_lldp(void *priv, u16 vsi)
 		if (rule->vsi == vsi)
 			break;
 
-	if (nbl_list_entry_is_head(rule, &flow_mgt->lldp_list, node))
+	if (list_entry_is_head(rule, &flow_mgt->lldp_list, node))
 		return;
 
 	nbl_flow_del_flow(res_mgt, &rule->entry);
@@ -1358,156 +1849,195 @@ static void nbl_flow_del_lldp(void *priv, u16 vsi)
 	kfree(rule);
 }
 
-static int nbl_flow_cfg_lag_mcc(void *priv, u16 eth_id, u16 lag_id, bool enable)
+static int nbl_flow_change_mcc_group_chain(struct nbl_resource_mgt *res_mgt, u8 eth,
+					   u16 current_mcc_id)
 {
-	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
 	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
-	struct nbl_flow_multi_group *multi_group;
+	struct nbl_flow_switch_res *switch_res = &flow_mgt->switch_res[eth];
+	struct nbl_flow_mcc_group *group;
 	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
-	struct nbl_flow_mcc_index_key index_key = {0};
-	u16 mcc_id, mcc_action;
-
-	multi_group = &flow_mgt->multi_flow[eth_id];
-	NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_ETH, eth_id);
-	mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL);
-
-	mcc_action = enable ? 
nbl_flow_cfg_action_set_dport_mcc_lag(lag_id) - : nbl_flow_cfg_action_set_dport_mcc_eth(eth_id); - - return phy_ops->cfg_lag_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id, mcc_action); + u16 node_mcc; + + list_for_each_entry(group, &switch_res->mcc_group_head, group_node) + if (group->multi && !nbl_list_empty(&group->mcc_node)) { + node_mcc = list_last_entry(&group->mcc_node, + struct nbl_flow_mcc_node, node)->mcc_id; + phy_ops->update_mcc_next_node(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + node_mcc, current_mcc_id); + } + switch_res->allmulti_first_mcc = current_mcc_id; + return 0; } -static int nbl_flow_add_multi_rule(void *priv, u16 vsi) +static int nbl_flow_add_multi_mcast(void *priv, u16 vsi) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - struct nbl_flow_multi_group *multi_group; - struct nbl_flow_mcc_index_key index_key = {0}; - u16 mcc_id; + struct nbl_flow_switch_res *switch_res; + struct nbl_flow_mcc_node *node; + int ret; + u16 current_mcc_id; u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi); - mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL); + switch_res = &flow_mgt->switch_res[eth]; + list_for_each_entry(node, &switch_res->allmulti_list, node) + if (node->data == vsi && node->type == NBL_MCC_INDEX_VSI) + return 0; + + node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_VSI, vsi, 0); + if (!node) + return -ENOSPC; + + switch_res = &flow_mgt->switch_res[eth]; + ret = nbl_flow_add_mcc_node(res_mgt, node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + if (ret) { + nbl_flow_free_mcc_node(flow_mgt, node); + return ret; + } + + if (nbl_list_empty(&switch_res->allmulti_list)) + current_mcc_id = NBL_MCC_ID_INVALID; + else + current_mcc_id = list_first_entry(&switch_res->allmulti_list, + struct nbl_flow_mcc_node, node)->mcc_id; - multi_group = &flow_mgt->multi_flow[eth]; + if (current_mcc_id != switch_res->allmulti_first_mcc) + nbl_flow_change_mcc_group_chain(res_mgt, eth, current_mcc_id); - return nbl_flow_add_mcc_node(multi_group, res_mgt, NBL_MCC_INDEX_VSI, vsi, mcc_id, 0); + return 0; } -static void nbl_flow_del_multi_rule(void *priv, u16 vsi) +static void nbl_flow_del_multi_mcast(void *priv, u16 vsi) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - struct nbl_flow_multi_group *multi_group; + struct nbl_flow_switch_res *switch_res; struct nbl_flow_mcc_node *mcc_node; - struct nbl_flow_mcc_index_key index_key = {0}; + u16 current_mcc_id; u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); - u16 mcc_id; - NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi); - mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL); - nbl_common_free_index(flow_mgt->mcc_tbl_priv, &index_key); + switch_res = &flow_mgt->switch_res[eth]; + list_for_each_entry(mcc_node, &switch_res->allmulti_list, node) + if (mcc_node->data == vsi && mcc_node->type == NBL_MCC_INDEX_VSI) { + nbl_flow_del_mcc_node(res_mgt, mcc_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + nbl_flow_free_mcc_node(flow_mgt, mcc_node); + break; + } - multi_group = &flow_mgt->multi_flow[eth]; + if (nbl_list_empty(&switch_res->allmulti_list)) + current_mcc_id = NBL_MCC_ID_INVALID; + else + current_mcc_id = list_first_entry(&switch_res->allmulti_list, + struct nbl_flow_mcc_node, node)->mcc_id; - 
list_for_each_entry(mcc_node, &multi_group->mcc_list, node) - if (mcc_node->mcc_id == mcc_id) { - nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); - return; - } + if (current_mcc_id != switch_res->allmulti_first_mcc) + nbl_flow_change_mcc_group_chain(res_mgt, eth, current_mcc_id); } static int nbl_flow_add_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth) { struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); - struct nbl_flow_multi_group *multi_group = &flow_mgt->multi_flow[eth]; - struct nbl_flow_mcc_index_key index_key = {0}; - struct nbl_flow_param param_down = {0}, param_up = {0}; - struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe; + struct nbl_flow_switch_res *switch_res = &flow_mgt->switch_res[eth]; + struct nbl_flow_param param_up = {0}; + struct nbl_flow_mcc_node *up_node; + struct nbl_flow_param param_down = {0}; + struct nbl_flow_mcc_node *down_node; int i, ret; - /* The structure for MCC list is: - * - * l2/l3_mc_up - * | - * | - * BMC -> | - * PF -> VF -> ... - * ETH -> | - * | - * | - * l2/l3_mc_down - * - * So that the up mc pkts will be send to BMC, not need broadcast to eth, - * but the down mc pkts will send to eth, not send to BMC. - */ - NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_ETH, eth); - param_down.mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL); - param_down.eth = eth; + down_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_ETH, eth, 1); + if (!down_node) + return -ENOSPC; - NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_BMC, eth); - param_up.mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL); - param_up.eth = eth; + ret = nbl_flow_add_mcc_node(res_mgt, down_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); + if (ret) + goto add_eth_mcc_node_failed; - multi_group = &flow_mgt->multi_flow[eth]; - for (i = 0; i < NBL_FLOW_TYPE_MAX - NBL_FLOW_MACVLAN_MAX; i++) { - if (i % 2) - ret = nbl_flow_add_flow(res_mgt, param_down, i + NBL_FLOW_MACVLAN_MAX, - &multi_group->entry[i]); - else - ret = nbl_flow_add_flow(res_mgt, param_up, i + NBL_FLOW_MACVLAN_MAX, - &multi_group->entry[i]); + param_down.mcc_id = down_node->mcc_id; + param_down.eth = eth; + for (i = 0; i < NBL_FLOW_DOWN_MULTI_MCAST_END - NBL_FLOW_L2_DOWN_MULTI_MCAST; i++) { + ret = nbl_flow_add_flow(res_mgt, param_down, i + NBL_FLOW_L2_DOWN_MULTI_MCAST, + &switch_res->allmulti_down[i]); if (ret) - goto add_macvlan_fail; + goto add_down_flow_failed; } - ret = nbl_flow_add_mcc_node(multi_group, res_mgt, NBL_MCC_INDEX_BMC, - NBL_FLOW_MCC_BMC_DPORT, param_up.mcc_id, 1); - if (ret) - goto add_mcc_bmc_fail; + up_node = nbl_flow_alloc_mcc_node(flow_mgt, NBL_MCC_INDEX_BMC, NBL_FLOW_MCC_BMC_DPORT, 1); + if (!up_node) { + ret = -ENOSPC; + goto alloc_bmc_node_failed; + } - ret = nbl_flow_add_mcc_node(multi_group, res_mgt, NBL_MCC_INDEX_ETH, eth, - param_down.mcc_id, 1); + ret = nbl_flow_add_mcc_node(res_mgt, up_node, &switch_res->allmulti_head, + &switch_res->allmulti_list, NULL); if (ret) - goto add_mcc_eth_fail; + goto add_bmc_mcc_node_failed; - multi_group->ether_id = eth; - multi_group->mcc_id = param_up.mcc_id; + param_up.mcc_id = up_node->mcc_id; + param_up.eth = eth; + for (i = 0; i < NBL_FLOW_UP_MULTI_MCAST_END - NBL_FLOW_L2_UP_MULTI_MCAST; i++) { + ret = nbl_flow_add_flow(res_mgt, param_up, i + NBL_FLOW_L2_UP_MULTI_MCAST, + &switch_res->allmulti_up[i]); + if (ret) + goto add_up_flow_failed; + } + + switch_res->ether_id = eth; + switch_res->allmulti_first_mcc = NBL_MCC_ID_INVALID; + switch_res->vld = 1; 
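+	/*
+	 * The chain now holds its two permanent head nodes: BMC for the up
+	 * direction and ETH for the down direction.  Per-VSI allmulti
+	 * subscribers are linked behind them later by
+	 * nbl_flow_add_multi_mcast(), which also refreshes
+	 * allmulti_first_mcc so the mcc groups keep pointing at the chain
+	 * head.
+	 */
+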
return 0;
 
-add_mcc_eth_fail:
-	list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_head, node)
-		nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node);
-add_mcc_bmc_fail:
-add_macvlan_fail:
+add_up_flow_failed:
 	while (--i >= 0)
-		nbl_flow_del_flow(res_mgt, &multi_group->entry[i]);
+		nbl_flow_del_flow(res_mgt, &switch_res->allmulti_up[i]);
+	nbl_flow_del_mcc_node(res_mgt, up_node, &switch_res->allmulti_head,
+			      &switch_res->allmulti_list, NULL);
+	/* restore i to the number of down flows, so the fallthrough below
+	 * unwinds every allmulti_down entry that was added successfully
+	 */
+	i = NBL_FLOW_DOWN_MULTI_MCAST_END - NBL_FLOW_L2_DOWN_MULTI_MCAST;
+add_bmc_mcc_node_failed:
+	nbl_flow_free_mcc_node(flow_mgt, up_node);
+alloc_bmc_node_failed:
+add_down_flow_failed:
+	while (--i >= 0)
+		nbl_flow_del_flow(res_mgt, &switch_res->allmulti_down[i]);
+	nbl_flow_del_mcc_node(res_mgt, down_node, &switch_res->allmulti_head,
+			      &switch_res->allmulti_list, NULL);
+add_eth_mcc_node_failed:
+	nbl_flow_free_mcc_node(flow_mgt, down_node);
 
 	return ret;
 }
 
 static void nbl_flow_del_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth)
 {
 	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
-	struct nbl_flow_multi_group *multi_group = &flow_mgt->multi_flow[eth];
+	struct nbl_flow_switch_res *switch_res = &flow_mgt->switch_res[eth];
 	struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe;
-	int i;
 
-	if (!multi_group->mcc_id)
+	if (!switch_res->vld)
 		return;
 
-	for (i = NBL_FLOW_MACVLAN_MAX; i < NBL_FLOW_TYPE_MAX; i++)
-		nbl_flow_del_flow(res_mgt, &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]);
+	nbl_flow_del_flow(res_mgt, &switch_res->allmulti_up[0]);
+	nbl_flow_del_flow(res_mgt, &switch_res->allmulti_up[1]);
+	nbl_flow_del_flow(res_mgt, &switch_res->allmulti_down[0]);
+	nbl_flow_del_flow(res_mgt, &switch_res->allmulti_down[1]);
 
-	list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_list, node)
-		nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node);
+	list_for_each_entry_safe(mcc_node, mcc_node_safe, &switch_res->allmulti_list, node) {
+		nbl_flow_del_mcc_node(res_mgt, mcc_node, &switch_res->allmulti_head,
+				      &switch_res->allmulti_list, NULL);
+		nbl_flow_free_mcc_node(flow_mgt, mcc_node);
+	}
 
-	list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_head, node)
-		nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node);
+	list_for_each_entry_safe(mcc_node, mcc_node_safe, &switch_res->allmulti_head, node) {
+		nbl_flow_del_mcc_node(res_mgt, mcc_node, &switch_res->allmulti_head,
+				      &switch_res->allmulti_list, NULL);
+		nbl_flow_free_mcc_node(flow_mgt, mcc_node);
+	}
 
-	memset(multi_group, 0, sizeof(*multi_group));
-	INIT_LIST_HEAD(&multi_group->mcc_list);
-	INIT_LIST_HEAD(&multi_group->mcc_head);
+	INIT_LIST_HEAD(&switch_res->allmulti_list);
+	INIT_LIST_HEAD(&switch_res->allmulti_head);
+	switch_res->vld = 0;
+	switch_res->allmulti_first_mcc = NBL_MCC_ID_INVALID;
 }
 
 static void nbl_flow_remove_multi_group(void *priv)
@@ -1544,15 +2074,6 @@ static int nbl_res_flow_cfg_duppkt_mcc(void *priv, struct nbl_lag_member_list_pa
 	return 0;
 }
 
-static int nbl_flow_macvlan_node_vsi_match_func(void *condition, void *x_key, void *y_key,
-						void *data)
-{
-	u16 vsi = *(u16 *)condition;
-	struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data;
-
-	return rule_data->vsi == vsi ? 
0 : -1; -} - static void nbl_flow_clear_accel_flow(void *priv, u16 vsi_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1575,6 +2096,31 @@ static void nbl_flow_clear_accel_flow(void *priv, u16 vsi_id) } } +static u16 nbl_vsi_mtu_index(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 index; + + index = phy_ops->get_mtu_index(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + return index - 1; +} + +static void nbl_clear_mtu_entry(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 mtu_index; + + mtu_index = nbl_vsi_mtu_index(res_mgt, vsi_id); + if (mtu_index < NBL_MAX_MTU) { + res_mgt->resource_info->mtu_list[mtu_index].ref_count--; + phy_ops->set_vsi_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, 0); + if (res_mgt->resource_info->mtu_list[mtu_index].ref_count == 0) { + phy_ops->set_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mtu_index + 1, 0); + res_mgt->resource_info->mtu_list[mtu_index].mtu_value = 0; + } + } +} + static void nbl_flow_clear_flow(void *priv, u16 vsi_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1584,25 +2130,29 @@ static void nbl_flow_clear_flow(void *priv, u16 vsi_id) u8 eth_id; eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); - mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; + mac_hash_tbl = flow_mgt->switch_res[eth_id].mac_hash_tbl; + nbl_clear_mtu_entry(res_mgt, vsi_id); NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_DELETE, NBL_HASH_TBL_ALL_SCAN, false, NULL, NULL, &vsi_id, &nbl_flow_macvlan_node_vsi_match_func, res_mgt, &nbl_flow_macvlan_node_del_action_func); nbl_common_scan_hash_xy_node(mac_hash_tbl, &scan_key); - - nbl_flow_del_multi_rule(res_mgt, vsi_id); + nbl_flow_del_multi_mcast(res_mgt, vsi_id); } char templete_name[NBL_FLOW_TYPE_MAX][16] = { "up_tnl", "up", "down", - "l2_mc_up", - "l2_mc_down", - "l3_mc_up", - "l3_mc_down" + "lldp/lacp", + "pmd_nd_upcall", + "l2_mul_up", + "l3_mul_up", + "l2_mul_down", + "l3_mul_down", + "tls_up", + "ipsec_down", }; static void nbl_flow_id_dump(struct seq_file *m, struct nbl_flow_fem_entry *entry, char *title) @@ -1612,6 +2162,28 @@ static void nbl_flow_id_dump(struct seq_file *m, struct nbl_flow_fem_entry *entr entry->hash_table, entry->hash_bucket); } +static void nbl_flow_mcc_node_dump(struct seq_file *m, struct nbl_flow_mcc_node *node) +{ + seq_printf(m, " head: %u, type: %u, id: %u, data: %u; ", node->mcc_head, + node->type, node->mcc_id, node->data); +} + +static void nbl_flow_mcc_group_dump(struct seq_file *m, struct nbl_flow_mcc_group *group) +{ + struct nbl_flow_mcc_node *mcc_node; + + seq_printf(m, "vsi_base: %u, nbits: %u, vsi_number: %u, ref_cnt %u, multi %u, up_mcc_id %u, down_mcc_id %u\n", + group->vsi_base, group->nbits, group->vsi_num, group->ref_cnt, group->multi, + group->up_mcc_id, group->down_mcc_id); + seq_puts(m, "mcc head list\n"); + list_for_each_entry(mcc_node, &group->mcc_head, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_puts(m, "\nmcc body list\n"); + list_for_each_entry(mcc_node, &group->mcc_node, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_puts(m, "\n"); +} + static void nbl_flow_macvlan_node_show_action_func(void *priv, void *x_key, void *y_key, void *data) { @@ -1621,10 +2193,18 @@ static void nbl_flow_macvlan_node_show_action_func(void *priv, void *x_key, void struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; int i; - seq_printf(m, "\nvsi %d, vlan %d MAC address 
%02X:%02X:%02X:%02X:%02X:%02X\n", - rule_data->vsi, vlan, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); - for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + seq_printf(m, "\nvlan %d MAC address %02X:%02X:%02X:%02X:%02X:%02X, multi %u, mcast %u\n", + vlan, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], rule_data->multi, + rule_data->mcast_flow); + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (i == NBL_FLOW_UP_TNL && rule_data->multi) + continue; nbl_flow_id_dump(m, &rule_data->entry[i], templete_name[i]); + } + if (!rule_data->mcast_flow) + seq_printf(m, "rule action to vsi %u\n", rule_data->vsi); + else + nbl_flow_mcc_group_dump(m, rule_data->mcc_group); } static void nbl_flow_dump_flow(void *priv, struct seq_file *m) @@ -1632,28 +2212,45 @@ static void nbl_flow_dump_flow(void *priv, struct seq_file *m) struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); - struct nbl_flow_multi_group *multi_group; + struct nbl_flow_switch_res *switch_res; + struct nbl_flow_mcc_node *mcc_node; struct nbl_flow_lldp_rule *lldp_rule; struct nbl_flow_lacp_rule *lacp_rule; + struct nbl_flow_fem_entry *entry; struct nbl_hash_xy_tbl_scan_key scan_key; int i, j; - for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { - multi_group = &flow_mgt->multi_flow[i]; - seq_printf(m, "\nether_id %d, mcc_id %d, status %u\n" + !i, - multi_group->ether_id, multi_group->mcc_id, multi_group->network_status); - for (j = NBL_FLOW_MACVLAN_MAX; j < NBL_FLOW_TYPE_MAX; j++) - nbl_flow_id_dump(m, &multi_group->entry[j - NBL_FLOW_MACVLAN_MAX], - templete_name[j]); - } - NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN, false, NULL, NULL, NULL, NULL, m, &nbl_flow_macvlan_node_show_action_func); - for (i = 0; i < NBL_MAX_ETHERNET; i++) - nbl_common_scan_hash_xy_node(flow_mgt->mac_hash_tbl[i], &scan_key); - seq_puts(m, "\n"); + seq_printf(m, "\n flow_mgt flow_id_cnt %u, pp_tcam_count %u, accel_flow_count %u, vsi_max_per_switch %u.\n", + flow_mgt->flow_id_cnt, flow_mgt->pp_tcam_count, + flow_mgt->accel_flow_count, flow_mgt->vsi_max_per_switch); + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + switch_res = &flow_mgt->switch_res[i]; + seq_printf(m, "\nether_id %d, status %u\n", + switch_res->ether_id, switch_res->network_status); + entry = &switch_res->allmulti_up[0]; + for (j = NBL_FLOW_L2_UP_MULTI_MCAST; j < NBL_FLOW_UP_MULTI_MCAST_END; j++) + nbl_flow_id_dump(m, &entry[j - NBL_FLOW_L2_UP_MULTI_MCAST], + templete_name[j]); + entry = &switch_res->allmulti_down[0]; + for (j = NBL_FLOW_L2_DOWN_MULTI_MCAST; j < NBL_FLOW_DOWN_MULTI_MCAST_END; j++) + nbl_flow_id_dump(m, &entry[j - NBL_FLOW_L2_DOWN_MULTI_MCAST], + templete_name[j]); + seq_printf(m, "\nether_id %d, mcc head list\n", switch_res->ether_id); + list_for_each_entry(mcc_node, &switch_res->allmulti_head, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_printf(m, "\n\nether_id %d, mcc body list\n", switch_res->ether_id); + list_for_each_entry(mcc_node, &switch_res->allmulti_list, node) + nbl_flow_mcc_node_dump(m, mcc_node); + seq_printf(m, "\nnumber vf %u, active vf %u, vf bitmap: %*pb\n", + switch_res->num_vfs, switch_res->active_vfs, + switch_res->num_vfs, switch_res->vf_bitmap); + nbl_common_scan_hash_xy_node(switch_res->mac_hash_tbl, &scan_key); + seq_puts(m, "\n"); + } list_for_each_entry(lldp_rule, &flow_mgt->lldp_list, node) seq_printf(m, "LLDP rule: vsi %d\n", 
lldp_rule->vsi); @@ -1678,7 +2275,7 @@ static int nbl_flow_add_ktls_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) if (rule->index == index) return -EEXIST; - rule = kzalloc(sizeof(*rule), GFP_KERNEL); + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); if (!rule) return -ENOMEM; @@ -1696,7 +2293,6 @@ static int nbl_flow_add_ktls_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) rule->index = index; rule->vsi = vsi; list_add_tail(&rule->node, &flow_mgt->ul4s_head); - return 0; } @@ -1712,7 +2308,7 @@ static void nbl_flow_del_ktls_rx_flow(void *priv, u32 index) if (rule->index == index) break; - if (nbl_list_entry_is_head(rule, &flow_mgt->ul4s_head, node)) + if (list_entry_is_head(rule, &flow_mgt->ul4s_head, node)) return; nbl_flow_del_flow(res_mgt, &rule->ul4s_entry); @@ -1735,7 +2331,7 @@ static int nbl_flow_add_ipsec_tx_flow(void *priv, u32 index, u32 *data, u16 vsi) if (rule->index == index) return -EEXIST; - rule = kzalloc(sizeof(*rule), GFP_KERNEL); + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); if (!rule) return -ENOMEM; @@ -1753,7 +2349,6 @@ static int nbl_flow_add_ipsec_tx_flow(void *priv, u32 index, u32 *data, u16 vsi) rule->index = index; rule->vsi = vsi; list_add_tail(&rule->node, &flow_mgt->dprbac_head); - return 0; } @@ -1769,7 +2364,7 @@ static void nbl_flow_del_ipsec_tx_flow(void *priv, u32 index) if (rule->index == index) break; - if (nbl_list_entry_is_head(rule, &flow_mgt->dprbac_head, node)) + if (list_entry_is_head(rule, &flow_mgt->dprbac_head, node)) return; nbl_flow_del_flow(res_mgt, &rule->dipsec_entry); @@ -1814,7 +2409,7 @@ static void nbl_res_flow_del_nd_upcall_flow(void *priv) } rule = list_entry(flow_mgt->nd_upcall_list.next, struct nbl_flow_nd_upcall_rule, node); - if (nbl_list_entry_is_head(rule, &flow_mgt->nd_upcall_list, node)) + if (list_entry_is_head(rule, &flow_mgt->nd_upcall_list, node)) return; for (i = 0; i < NBL_FLOW_PMD_ND_UPCALL_FLOW_NUM; i++) @@ -1878,6 +2473,115 @@ static int nbl_res_flow_add_nd_upcall_flow(void *priv, u16 vsi, bool for_pmd) return 0; } +static int nbl_res_flow_check_flow_table_spec(void *priv, u16 vlan_cnt, + u16 unicast_cnt, u16 multicast_cnt) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + u32 reserve_cnt = nbl_flow_get_reserve_macvlan_cnt(res_mgt); + u32 need = vlan_cnt * (3 * unicast_cnt + 2 * multicast_cnt); + + if (reserve_cnt + need > flow_mgt->flow_id_cnt) + return -ENOSPC; + + return 0; +} + +static int nbl_res_set_mtu(void *priv, u16 vsi_id, u16 mtu) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_mtu_entry *mtu_list = &res_mgt->resource_info->mtu_list[0]; + int i, found_idx = -1, first_zero_idx = -1; + u16 real_mtu = mtu + ETH_HLEN + 2 * VLAN_HLEN; + + nbl_clear_mtu_entry(res_mgt, vsi_id); + if (mtu == 0) + return 0; + + for (i = 0; i < NBL_MAX_MTU; i++) { + if (mtu_list[i].mtu_value == real_mtu) { + found_idx = i; + break; + } + + if (!mtu_list[i].mtu_value) + first_zero_idx = i; + } + + if (first_zero_idx == -1 && found_idx == -1) + return 0; + + if (found_idx != -1) { + mtu_list[found_idx].ref_count++; + phy_ops->set_vsi_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, found_idx + 1); + return 0; + } + + if (first_zero_idx != -1) { + mtu_list[first_zero_idx].ref_count++; + mtu_list[first_zero_idx].mtu_value = real_mtu; + phy_ops->set_vsi_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, first_zero_idx + 1); + 
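+		/*
+		 * The hardware MTU table is 1-based: slot 0 stands for "no
+		 * per-VSI MTU", so software index first_zero_idx maps to slot
+		 * first_zero_idx + 1 here, and nbl_vsi_mtu_index() subtracts
+		 * 1 on the way back.
+		 */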
phy_ops->set_mtu(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), first_zero_idx + 1, real_mtu);
+	}
+
+	return 0;
+}
+
+static int nbl_flow_handle_mirror_outputport_event(u16 type, void *event_data, void *callback_data)
+{
+	int i;
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_event_mirror_outputport_data *mirror_outputport =
+		(struct nbl_event_mirror_outputport_data *)event_data;
+
+	if (mirror_outputport->opcode) {
+		for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) {
+			if (flow_mgt->mirror_outputport_func[i] == mirror_outputport->func_id)
+				return 0;
+		}
+		for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) {
+			if (flow_mgt->mirror_outputport_func[i] == U16_MAX) {
+				flow_mgt->mirror_outputport_func[i] = mirror_outputport->func_id;
+				break;
+			}
+		}
+
+		/* the exceed check must sit after the loop: inside it, i is
+		 * always below NBL_MIRROR_OUTPUTPORT_MAX_FUNC and the message
+		 * could never fire
+		 */
+		if (i >= NBL_MIRROR_OUTPUTPORT_MAX_FUNC)
+			nbl_err(common, NBL_DEBUG_FLOW, "Macvlan blacklist exceed max func:%d",
+				mirror_outputport->func_id);
+	} else {
+		for (i = 0; i < NBL_MIRROR_OUTPUTPORT_MAX_FUNC; i++) {
+			if (flow_mgt->mirror_outputport_func[i] == mirror_outputport->func_id) {
+				flow_mgt->mirror_outputport_func[i] = U16_MAX;
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void nbl_flow_cfg_mirror_outputport_event(void *priv, bool enable)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_event_callback event_callback = {0};
+
+	event_callback.callback_data = res_mgt;
+	event_callback.callback = nbl_flow_handle_mirror_outputport_event;
+	if (enable)
+		nbl_event_register(NBL_EVENT_MIRROR_OUTPUTPORT, &event_callback,
+				   NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	else
+		nbl_event_unregister(NBL_EVENT_MIRROR_OUTPUTPORT, &event_callback,
+				     NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+}
+
 /* NBL_FLOW_SET_OPS(ops_name, func)
  *
  * Use X Macros to reduce setup and remove codes.
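 *
 * A minimal sketch of the pattern (illustrative shape only, not the
 * driver's exact definition):
 *
 *	#define NBL_FLOW_SET_OPS(name, func) (flow_ops->name = func)
 *
 * expands the single list below once to install every callback; re-expanding
 * the same list with a NULL-assigning definition tears them down, so the
 * setup and remove paths can never drift apart.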
@@ -1890,9 +2594,8 @@ do { \ NBL_FLOW_SET_OPS(del_lag_flow, nbl_flow_del_lag); \ NBL_FLOW_SET_OPS(add_lldp_flow, nbl_flow_add_lldp); \ NBL_FLOW_SET_OPS(del_lldp_flow, nbl_flow_del_lldp); \ - NBL_FLOW_SET_OPS(cfg_lag_mcc, nbl_flow_cfg_lag_mcc); \ - NBL_FLOW_SET_OPS(add_multi_rule, nbl_flow_add_multi_rule); \ - NBL_FLOW_SET_OPS(del_multi_rule, nbl_flow_del_multi_rule); \ + NBL_FLOW_SET_OPS(add_multi_mcast, nbl_flow_add_multi_mcast); \ + NBL_FLOW_SET_OPS(del_multi_mcast, nbl_flow_del_multi_mcast); \ NBL_FLOW_SET_OPS(setup_multi_group, nbl_flow_setup_multi_group); \ NBL_FLOW_SET_OPS(remove_multi_group, nbl_flow_remove_multi_group); \ NBL_FLOW_SET_OPS(clear_accel_flow, nbl_flow_clear_accel_flow); \ @@ -1907,6 +2610,9 @@ do { \ NBL_FLOW_SET_OPS(cfg_duppkt_mcc, nbl_res_flow_cfg_duppkt_mcc); \ NBL_FLOW_SET_OPS(add_nd_upcall_flow, nbl_res_flow_add_nd_upcall_flow); \ NBL_FLOW_SET_OPS(del_nd_upcall_flow, nbl_res_flow_del_nd_upcall_flow); \ + NBL_FLOW_SET_OPS(set_mtu, nbl_res_set_mtu); \ + NBL_FLOW_SET_OPS(cfg_mirror_outputport_event, nbl_flow_cfg_mirror_outputport_event); \ + NBL_FLOW_SET_OPS(check_flow_table_spec, nbl_res_flow_check_flow_table_spec); \ } while (0) static void nbl_flow_remove_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt) @@ -1915,69 +2621,91 @@ static void nbl_flow_remove_mgt(struct device *dev, struct nbl_resource_mgt *res int i; struct nbl_hash_xy_tbl_del_key del_key; - nbl_common_remove_index_table(flow_mgt->mcc_tbl_priv, NULL); - NBL_HASH_XY_TBL_DEL_KEY_INIT(&del_key, res_mgt, &nbl_flow_macvlan_node_del_action_func); - for (i = 0; i < NBL_MAX_ETHERNET; i++) - nbl_common_remove_hash_xy_table(flow_mgt->mac_hash_tbl[i], &del_key); + for (i = 0; i < NBL_MAX_ETHERNET; i++) { + nbl_common_remove_hash_xy_table(flow_mgt->switch_res[i].mac_hash_tbl, &del_key); + if (flow_mgt->switch_res[i].vf_bitmap) + devm_kfree(dev, flow_mgt->switch_res[i].vf_bitmap); + } - devm_kfree(dev, flow_mgt->flow_id_bitmap); + if (flow_mgt->flow_id_bitmap) + devm_kfree(dev, flow_mgt->flow_id_bitmap); + if (flow_mgt->mcc_id_bitmap) + devm_kfree(dev, flow_mgt->mcc_id_bitmap); + flow_mgt->flow_id_cnt = 0; devm_kfree(dev, flow_mgt); NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = NULL; } static int nbl_flow_setup_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt) { - struct nbl_index_tbl_key mcc_tbl_key; struct nbl_hash_xy_tbl_key macvlan_tbl_key; struct nbl_flow_mgt *flow_mgt; struct nbl_eth_info *eth_info; int i; + int vf_num = -1; + u16 pf_id; flow_mgt = devm_kzalloc(dev, sizeof(struct nbl_flow_mgt), GFP_KERNEL); if (!flow_mgt) return -ENOMEM; NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = flow_mgt; + eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); flow_mgt->flow_id_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(NBL_MACVLAN_TABLE_LEN), sizeof(long), GFP_KERNEL); if (!flow_mgt->flow_id_bitmap) - goto settup_mgt_failed; + goto setup_mgt_failed; + flow_mgt->flow_id_cnt = NBL_MACVLAN_TABLE_LEN; - NBL_INDEX_TBL_KEY_INIT(&mcc_tbl_key, dev, NBL_FLOW_MCC_INDEX_START, - NBL_FLOW_MCC_INDEX_SIZE, sizeof(struct nbl_flow_mcc_index_key)); - flow_mgt->mcc_tbl_priv = nbl_common_init_index_table(&mcc_tbl_key); - if (!flow_mgt->mcc_tbl_priv) - goto settup_mgt_failed; + flow_mgt->mcc_id_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(NBL_FLOW_MCC_INDEX_SIZE), + sizeof(long), GFP_KERNEL); + if (!flow_mgt->mcc_id_bitmap) + goto setup_mgt_failed; NBL_HASH_XY_TBL_KEY_INIT(&macvlan_tbl_key, dev, ETH_ALEN, sizeof(u16), sizeof(struct nbl_flow_l2_data), NBL_MACVLAN_TBL_BUCKET_SIZE, NBL_MACVLAN_X_AXIS_BUCKET_SIZE, NBL_MACVLAN_Y_AXIS_BUCKET_SIZE, false); for (i = 
0; i < NBL_MAX_ETHERNET; i++) {
-		(flow_mgt)->mac_hash_tbl[i] = nbl_common_init_hash_xy_table(&macvlan_tbl_key);
-		if (!flow_mgt->mac_hash_tbl[i])
-			goto settup_mgt_failed;
-	}
-
-	for (i = 0; i < NBL_MAX_ETHERNET; i++) {
-		INIT_LIST_HEAD(&flow_mgt->multi_flow[i].mcc_list);
-		INIT_LIST_HEAD(&flow_mgt->multi_flow[i].mcc_head);
+		INIT_LIST_HEAD(&flow_mgt->switch_res[i].allmulti_head);
+		INIT_LIST_HEAD(&flow_mgt->switch_res[i].allmulti_list);
+		INIT_LIST_HEAD(&flow_mgt->switch_res[i].mcc_group_head);
+
+		flow_mgt->switch_res[i].mac_hash_tbl =
+			nbl_common_init_hash_xy_table(&macvlan_tbl_key);
+		if (!flow_mgt->switch_res[i].mac_hash_tbl)
+			goto setup_mgt_failed;
+		/* reset per eth: a port without a PF must not inherit the
+		 * previous port's VF count
+		 */
+		vf_num = -1;
+		pf_id = find_first_bit((unsigned long *)&eth_info->pf_bitmap[i], 8);
+		if (pf_id != 8)
+			vf_num = nbl_res_get_pf_vf_num(res_mgt, pf_id);
+
+		if (vf_num != -1) {
+			flow_mgt->switch_res[i].num_vfs = vf_num;
+			flow_mgt->switch_res[i].vf_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(vf_num),
+									 sizeof(long), GFP_KERNEL);
+			if (!flow_mgt->switch_res[i].vf_bitmap)
+				goto setup_mgt_failed;
+		} else {
+			flow_mgt->switch_res[i].num_vfs = 0;
+			flow_mgt->switch_res[i].vf_bitmap = NULL;
+		}
+		flow_mgt->switch_res[i].active_vfs = 0;
 	}
 
+	memset(flow_mgt->mirror_outputport_func, 0xff, sizeof(flow_mgt->mirror_outputport_func));
 	INIT_LIST_HEAD(&flow_mgt->lldp_list);
 	INIT_LIST_HEAD(&flow_mgt->lacp_list);
 	INIT_LIST_HEAD(&flow_mgt->ul4s_head);
 	INIT_LIST_HEAD(&flow_mgt->dprbac_head);
 	INIT_LIST_HEAD(&flow_mgt->nd_upcall_list);
 
-	eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
-	flow_mgt->unicast_mac_threshold = NBL_TOTAL_MACVLAN_NUM / eth_info->eth_num;
+	flow_mgt->vsi_max_per_switch = NBL_VSI_MAX_ID / eth_info->eth_num;
 
 	return 0;
 
-settup_mgt_failed:
+setup_mgt_failed:
 	nbl_flow_remove_mgt(dev, res_mgt);
 	return -1;
 }
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h
index c16b90ae1c56..620ec2e4bc53 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2022 nebula-matrix Limited.
* Author: Bennie Yan @@ -10,21 +10,22 @@ #include "nbl_hw.h" #include "nbl_resource.h" -#define NBL_EM_PHY_KT_OFFSET (0x1F000) -#define NBL_TOTAL_MACVLAN_NUM 2048 -#define NBL_MAX_ACTION_NUM 16 +#define NBL_EM_PHY_KT_OFFSET (0x1E000) -#define NBL_MCC_NUM_PER_SWITCH 256 +#define NBL_TOTAL_MACVLAN_NUM 4096 +#define NBL_MAX_ACTION_NUM 16 #define NBL_FLOW_MCC_PXE_SIZE 8 -#define NBL_FLOW_MCC_INDEX_SIZE (1024 - NBL_FLOW_MCC_PXE_SIZE) -#define NBL_FLOW_MCC_INDEX_START (7 * 1024) +#define NBL_FLOW_MCC_INDEX_SIZE (4096 - NBL_FLOW_MCC_PXE_SIZE) +#define NBL_FLOW_MCC_INDEX_START (4 * 1024) #define NBL_FLOW_MCC_BMC_DPORT 0x30D #define NBL_MACVLAN_TBL_BUCKET_SIZE 64 #define NBL_MACVLAN_X_AXIS_BUCKET_SIZE 64 #define NBL_MACVLAN_Y_AXIS_BUCKET_SIZE 16 +#define NBL_PP0_POWER 11 + enum nbl_flow_mcc_index_type { NBL_MCC_INDEX_ETH, NBL_MCC_INDEX_VSI, @@ -32,29 +33,6 @@ enum nbl_flow_mcc_index_type { NBL_MCC_INDEX_BMC, }; -struct nbl_flow_mcc_index_key { - enum nbl_flow_mcc_index_type type; - union { - u8 eth_id; - u16 vsi_id; - u32 data; - }; -}; - -#define NBL_FLOW_MCC_INDEX_KEY_INIT(key, key_type_arg, value_arg) \ -do { \ - typeof(key) __key = key; \ - typeof(key_type_arg) __type = key_type_arg; \ - typeof(value_arg) __value = value_arg; \ - __key->type = __type; \ - if (__type == NBL_MCC_INDEX_ETH) \ - __key->eth_id = __value; \ - else if (__type == NBL_MCC_INDEX_VSI || __type == NBL_MCC_INDEX_BOND) \ - __key->vsi_id = __value; \ - else \ - __key->data = __value; \ -} while (0) - #pragma pack(1) #define NBL_DUPPKT_PTYPE_NA 135 @@ -62,13 +40,20 @@ do { \ struct nbl_flow_l2_data { struct nbl_flow_fem_entry entry[NBL_FLOW_MACVLAN_MAX]; - u16 vsi; + union { + struct nbl_flow_mcc_group *mcc_group; + u16 vsi; + }; + bool multi; + bool mcast_flow; + }; union nbl_l2_phy_up_data_u { struct nbl_l2_phy_up_data { u32 act0:22; - u64 rsv1:62; + u32 act1:22; + u64 rsv1:40; u32 padding:4; u32 sport:4; u32 svlan_id:16; @@ -98,95 +83,53 @@ union nbl_l2_phy_lldp_lacp_data_u { u8 hash_key[sizeof(struct nbl_l2_phy_lldp_lacp_data)]; }; -union nbl_l2_phy_down_data_u { - struct nbl_l2_phy_down_data { +union nbl_l2_phy_up_multi_mcast_data_u { + struct nbl_l2_phy_up_multi_mcast_data { u32 act0:22; - u32 rsv2:10; - u64 rsv1:52; - u32 padding:6; - u32 sport:2; - u32 svlan_id:16; - u64 dst_mac:48; - u32 template:4; - u32 rsv[5]; - } __packed info; -#define NBL_L2_PHY_DOWN_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_data) \ - / sizeof(u32)) - u32 data[NBL_L2_PHY_DOWN_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l2_phy_down_data)]; -}; - -union nbl_l2_phy_up_multi_data_u { - struct nbl_l2_phy_up_multi_data { - u32 act0:22; - u32 act1:22; - u32 rsv2:20; - u64 rsv1:36; - u32 padding:4; + u32 rsv1:2; + u8 padding[16]; u32 sport:4; - u64 dst_mac:48; u32 template:4; u32 rsv[5]; } __packed info; -#define NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_multi_data) \ +#define NBL_L2_PHY_UP_MULTI_MCAST_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_multi_mcast_data) \ / sizeof(u32)) - u32 data[NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l2_phy_up_multi_data)]; + u32 data[NBL_L2_PHY_UP_MULTI_MCAST_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_up_multi_mcast_data)]; }; -union nbl_l2_phy_down_multi_data_u { - struct nbl_l2_phy_down_multi_data { +union nbl_l2_phy_down_multi_mcast_data_u { + struct nbl_l2_phy_down_multi_mcast_data { u32 act0:22; - u32 act1:22; - u32 rsv2:20; - u64 rsv1:36; - u32 padding:6; + u32 rsv1:2; + u8 rsv2[16]; + u32 padding:2; u32 sport:2; - u64 dst_mac:48; - u32 
template:4; - u32 rsv[5]; - } __packed info; -#define NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_multi_data) \ - / sizeof(u32)) - u32 data[NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l2_phy_down_multi_data)]; -}; - -union nbl_l3_phy_up_multi_data_u { - struct nbl_l3_phy_up_multi_data { - u32 act0:22; - u32 act1:22; - u32 rsv2:20; - u64 rsv1:60; - u32 padding:12; - u32 sport:4; - u64 dst_mac:16; u32 template:4; u32 rsv[5]; } __packed info; -#define NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_up_multi_data) \ - / sizeof(u32)) - u32 data[NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l3_phy_up_multi_data)]; +#define NBL_L2_PHY_DOWN_MULTI_MCAST_DATA_TAB_WIDTH \ + (sizeof(struct nbl_l2_phy_down_multi_mcast_data) / sizeof(u32)) + u32 data[NBL_L2_PHY_DOWN_MULTI_MCAST_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_multi_mcast_data)]; }; -union nbl_l3_phy_down_multi_data_u { - struct nbl_l3_phy_down_multi_data { +union nbl_l2_phy_down_data_u { + struct nbl_l2_phy_down_data { u32 act0:22; u32 act1:22; - u32 rsv3:20; - u64 rsv2; - u64 rsv1:4; + u64 rsv2:40; u32 padding:6; u32 sport:2; - u64 dst_mac:16; + u32 svlan_id:16; + u64 dst_mac:48; u32 template:4; u32 rsv[5]; } __packed info; -#define NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_down_multi_data) \ +#define NBL_L2_PHY_DOWN_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_data) \ / sizeof(u32)) - u32 data[NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; - u8 hash_key[sizeof(struct nbl_l3_phy_down_multi_data)]; + u32 data[NBL_L2_PHY_DOWN_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_data)]; }; union nbl_phy_ul4s_data_u { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c index 1d1f05fc88f7..48776f73457b 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c @@ -10,11 +10,26 @@ #include "nbl_hw/nbl_hw_leonis/base/nbl_ppe.h" #include "nbl_hw/nbl_hw_leonis/base/nbl_intf.h" #include "nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h" +#include "nbl_phy_leonis_regs.h" static int dvn_descreq_num_cfg = DEFAULT_DVN_DESCREQ_NUMCFG; /* default 8 and 8 */ module_param(dvn_descreq_num_cfg, int, 0); -MODULE_PARM_DESC(dvn_descreq_num_cfg, "bit[31:16]:split ring,support 8/16," - " bit[15:0]:packed ring, support 8/12/16/20/24/28/32"); +/* checkpatch:ignore SPLIT_STRING */ +MODULE_PARM_DESC(dvn_descreq_num_cfg, + "bit[31:16]:split ring,support 8/16,bit[15:0]:packed ring, support 4*n,n:2-8"); + +static u32 nbl_phy_dump_registers[] = { + NBL_UVN_DIF_DELAY_REQ, + NBL_UVN_DIF_DELAY_TIME, + NBL_UVN_DIF_DELAY_MAX, + NBL_UVN_DESC_PRE_DESC_REQ_NULL, + NBL_UVN_DESC_PRE_DESC_REQ_LACK, + NBL_UVN_DESC_RD_DROP_DESC_LACK, + NBL_DVN_DESCRD_L2_UNAVAIL_CNT, + NBL_DVN_DESCRD_L2_NOAVAIL_CNT, + NBL_USTORE_BUF_TOTAL_DROP_PKT, + NBL_USTORE_BUF_TOTAL_TRUN_PKT +}; static u32 nbl_phy_get_quirks(void *priv) { @@ -185,114 +200,6 @@ static void nbl_phy_fem_clear_tcam_ad(struct nbl_phy_mgt *phy_mgt) } } -static int nbl_phy_fem_em0_pt_phy_l2_init(struct nbl_phy_mgt *phy_mgt, int pt_idx) -{ - union nbl_fem_profile_tbl_u em0_pt_tbl = {.info = {0}}; - - em0_pt_tbl.info.pt_vld = 1; - em0_pt_tbl.info.pt_hash_sel0 = 0; - em0_pt_tbl.info.pt_hash_sel1 = 3; - - switch (pt_idx) { - case NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2: - em0_pt_tbl.info.pt_key_size 
= 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_12; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UP_UNICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_12; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_DOWN_UNICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_4; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UP_MULTICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_68; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_DOWN_MULTICAST_L2: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_60; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_UP_MULTICAST_L3: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_36; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_DOWN_MULTICAST_L3: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_28; - em0_pt_tbl.info.pt_act_num = 2; - break; - case NBL_EM0_PT_PHY_DPRBAC_IPV4: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_DPRBAC_IPV6: - em0_pt_tbl.info.pt_key_size = 1; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_64 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_128; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UL4S_IPV4: - em0_pt_tbl.info.pt_key_size = 0; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_32; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - case NBL_EM0_PT_PHY_UL4S_IPV6: - em0_pt_tbl.info.pt_key_size = 1; - em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; - em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_112; - em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; - em0_pt_tbl.info.pt_act_num = 1; - break; - default: - return -EOPNOTSUPP; - } - - nbl_hw_write_regs(phy_mgt, NBL_FEM0_PROFILE_TABLE(pt_idx), em0_pt_tbl.data, - NBL_FEM_PROFILE_TBL_WIDTH); - return 0; -} - -static __maybe_unused int nbl_phy_fem_em0_pt_init(struct nbl_phy_mgt *phy_mgt) -{ - int i, ret = 0; - - for (i = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; i <= NBL_EM0_PT_PHY_UL4S_IPV6; i++) { - ret = nbl_phy_fem_em0_pt_phy_l2_init(phy_mgt, i); - if 
(ret) - return ret; - } - - return 0; -} - static int nbl_phy_set_ht(void *priv, u16 hash, u16 hash_other, u8 ht_table, u8 bucket, u32 key_index, u8 valid) { @@ -478,14 +385,20 @@ static void nbl_phy_del_tcam(void *priv, u32 index, u8 key_type, u8 pp_type) ad_table.hash_key, NBL_FLOW_AD_TOTAL_LEN); } -static int nbl_phy_add_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action) +static int nbl_phy_add_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id, u16 action) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_mcc_tbl node = {0}; node.vld = 1; - node.next_pntr = 0; - node.tail = 1; + if (next_mcc_id == NBL_MCC_ID_INVALID) { + node.next_pntr = 0; + node.tail = 1; + } else { + node.next_pntr = next_mcc_id; + node.tail = 0; + } + node.stateid_filter = 1; node.flowid_filter = 1; node.dport_act = action; @@ -527,6 +440,25 @@ static void nbl_phy_del_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mc nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); } +static void nbl_phy_update_mcc_next_node(void *priv, u16 mcc_id, u16 next_mcc_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mcc_tbl node = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), + (u8 *)&node, sizeof(node)); + if (next_mcc_id != NBL_MCC_ID_INVALID) { + node.next_pntr = next_mcc_id; + node.tail = 0; + } else { + node.next_pntr = 0; + node.tail = 1; + } + + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), + (u8 *)&node, sizeof(node)); +} + static int nbl_phy_add_tnl_encap(void *priv, const u8 encap_buf[], u16 encap_idx, union nbl_flow_encap_offset_tbl_u encap_idx_info) { @@ -668,6 +600,8 @@ static void nbl_shaping_eth_init(struct nbl_phy_mgt *phy_mgt, u8 eth_id, u8 spee static int nbl_shaping_init(struct nbl_phy_mgt *phy_mgt, u8 speed) { struct dsch_psha_en psha_en = {0}; + struct nbl_shaping_net net_shaping = {0}; + int i; for (i = 0; i < NBL_MAX_ETHERNET; i++) @@ -676,6 +610,9 @@ static int nbl_shaping_init(struct nbl_phy_mgt *phy_mgt, u8 speed) psha_en.en = 0xF; nbl_hw_write_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR, (u8 *)&psha_en, sizeof(psha_en)); + for (i = 0; i < NBL_MAX_FUNC; i++) + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET_REG(i), + (u8 *)&net_shaping, sizeof(net_shaping)); return 0; } @@ -718,6 +655,11 @@ static int nbl_ustore_init(struct nbl_phy_mgt *phy_mgt, u8 eth_num) nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_DROP_TH_REG_ARR(i), (u8 *)&drop_th, sizeof(drop_th)); + for (i = 0; i < NBL_MAX_ETHERNET; i++) { + nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_DROP_PKT(i)); + nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_TRUN_PKT(i)); + } + return 0; } @@ -772,8 +714,9 @@ static int nbl_ul4s_init(struct nbl_phy_mgt *phy_mgt) return 0; } -static void nbl_dvn_descreq_num_cfg(struct nbl_phy_mgt *phy_mgt, u32 descreq_num) +static void nbl_dvn_descreq_num_cfg(void *priv, u32 descreq_num) { + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_dvn_descreq_num_cfg descreq_num_cfg = { 0 }; u32 packet_ring_prefect_num = descreq_num & 0xffff; u32 split_ring_prefect_num = (descreq_num >> 16) & 0xffff; @@ -790,6 +733,42 @@ static void nbl_dvn_descreq_num_cfg(struct nbl_phy_mgt *phy_mgt, u32 descreq_num (u8 *)&descreq_num_cfg, sizeof(descreq_num_cfg)); } +static u32 nbl_dvn_descreq_num_get(void *priv) +{ + u16 split_req; + u16 packed_req; + struct nbl_dvn_descreq_num_cfg descreq_num_cfg = { 0 }; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_read_regs(phy_mgt, 
NBL_DVN_DESCREQ_NUM_CFG, + (u8 *)&descreq_num_cfg, sizeof(descreq_num_cfg)); + + split_req = (descreq_num_cfg.avring_cfg_num + 1) * 8; + packed_req = descreq_num_cfg.packed_l1_num * 4 + 8; + + return (split_req << 16) + packed_req; +} + +static void nbl_phy_cfg_dvn_bp_mask(struct dvn_back_pressure_mask *mask, u8 eth_id, bool enable) +{ + switch (eth_id) { + case 0: + mask->dstore_port0_flag = enable; + break; + case 1: + mask->dstore_port1_flag = enable; + break; + case 2: + mask->dstore_port2_flag = enable; + break; + case 3: + mask->dstore_port3_flag = enable; + break; + default: + return; + } +} + static int nbl_dvn_init(struct nbl_phy_mgt *phy_mgt, u8 speed) { struct nbl_dvn_desc_wr_merge_timeout timeout = {0}; @@ -821,10 +800,16 @@ static int nbl_uvn_init(struct nbl_phy_mgt *phy_mgt) struct uvn_desc_prefetch_init prefetch_init = {0}; u32 timeout = 119760; /* 200us 200000/1.67 */ u32 quirks; + struct uvn_desc_wr_timeout desc_wr_timeout = {0}; + u16 wr_timeout = 0x12c; pdev = NBL_COMMON_TO_PDEV(phy_mgt->common); nbl_hw_wr32(phy_mgt, NBL_UVN_DESC_RD_WAIT, timeout); + desc_wr_timeout.num = wr_timeout; + nbl_hw_write_regs(phy_mgt, NBL_UVN_DESC_WR_TIMEOUT, + (u8 *)&desc_wr_timeout, sizeof(desc_wr_timeout)); + flag.avail_rd = 1; flag.desc_rd = 1; flag.pkt_wr = 1; @@ -852,6 +837,7 @@ static int nbl_uvn_init(struct nbl_phy_mgt *phy_mgt) static int nbl_uqm_init(struct nbl_phy_mgt *phy_mgt) { + struct nbl_uqm_que_type que_type = {0}; u32 cnt = 0; int i; @@ -880,6 +866,9 @@ static int nbl_uqm_init(struct nbl_phy_mgt *phy_mgt) nbl_hw_write_regs(phy_mgt, NBL_UQM_DPORT_DROP_CNT + (sizeof(cnt) * i), (u8 *)&cnt, sizeof(cnt)); + que_type.bp_drop = 0; + nbl_hw_write_regs(phy_mgt, NBL_UQM_QUE_TYPE, (u8 *)&que_type, sizeof(que_type)); + return 0; } @@ -1032,17 +1021,32 @@ static int nbl_intf_init(struct nbl_phy_mgt *phy_mgt) return 0; } +static void nbl_rdma_init(struct nbl_phy_mgt *phy_mgt) +{ + u32 data; + + data = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_LB_CLK); + data |= NBL_TOP_CTRL_RDMA_LB_CLK; + nbl_hw_wr32(phy_mgt, NBL_TOP_CTRL_LB_CLK, data); + + data = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_LB_RST); + data &= ~NBL_TOP_CTRL_RDMA_LB_RST; + nbl_hw_wr32(phy_mgt, NBL_TOP_CTRL_LB_RST, data); +} + static int nbl_phy_init_chip_module(void *priv, u8 eth_speed, u8 eth_num) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY, "phy_chip_init"); + nbl_rdma_init(phy_mgt); nbl_dp_init(phy_mgt, eth_speed, eth_num); nbl_ppe_init(phy_mgt); nbl_intf_init(phy_mgt); - phy_mgt->version = nbl_hw_rd32(phy_mgt, 0x1300904); + nbl_write_all_regs(phy_mgt); + phy_mgt->version = nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); return 0; } @@ -1198,6 +1202,7 @@ static int nbl_phy_set_vnet_queue_info(void *priv, struct nbl_vnet_queue_info_pa host_vnet_qinfo.valid = param->valid; host_vnet_qinfo.msix_idx = param->msix_idx; host_vnet_qinfo.msix_idx_valid = param->msix_idx_valid; + if (phy_mgt_leonis->ro_enable) { host_vnet_qinfo.ido_en = 1; host_vnet_qinfo.rlo_en = 1; @@ -1514,7 +1519,8 @@ static void nbl_phy_deactive_shaping(void *priv, u16 func_id) (u8 *)&sha2net, sizeof(sha2net)); } -static int nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active) +static int nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u64 burst, + u8 vld, bool active) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_shaping_net shaping_net = {0}; @@ -1539,7 +1545,11 @@ static int nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u8 
vl
 	shaping_net.cir = total_tx_rate;
 	/* pir equal cir */
 	shaping_net.pir = shaping_net.cir;
-	shaping_net.depth = max(shaping_net.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH);
+	if (burst)
+		shaping_net.depth = burst;
+	else
+		shaping_net.depth = max(shaping_net.cir * 2,
+					NBL_LR_LEONIS_NET_BUCKET_DEPTH);
 	shaping_net.cbs = shaping_net.depth;
 	shaping_net.pbs = shaping_net.depth;
 	}
@@ -1593,6 +1603,64 @@ static void nbl_phy_set_offload_shaping(struct nbl_phy_mgt *phy_mgt,
 	}
 }
 
+static int nbl_phy_set_ucar(void *priv, u16 vsi_id, u64 total_rx_rate, u64 burst,
+			    u8 vld)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union ucar_flow_u ucar_flow = {.info = {0}};
+	union epro_vpt_u epro_vpt = {.info = {0}};
+	int car_id = 0;
+	int index = 0;
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id),
+			 (u8 *)&epro_vpt, sizeof(epro_vpt));
+	if (vld) {
+		if (epro_vpt.info.car_en) {
+			car_id = epro_vpt.info.car_id;
+		} else {
+			epro_vpt.info.car_en = 1;
+			for (; index < 1024; index++) {
+				nbl_hw_read_regs(phy_mgt, NBL_UCAR_FLOW_REG(index),
+						 (u8 *)&ucar_flow, sizeof(ucar_flow));
+				if (ucar_flow.info.valid == 0) {
+					car_id = index;
+					break;
+				}
+			}
+			/* index == 1024 means the scan found no free car entry */
+			if (index == 1024) {
+				nbl_err(common, NBL_DEBUG_PHY, "Car ID exceeds the valid range!");
+				return -ENOMEM;
+			}
+			epro_vpt.info.car_id = car_id;
+			nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id),
+					  (u8 *)&epro_vpt, sizeof(epro_vpt));
+		}
+	} else {
+		epro_vpt.info.car_en = 0;
+		car_id = epro_vpt.info.car_id;
+		epro_vpt.info.car_id = 0;
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id),
+				  (u8 *)&epro_vpt, sizeof(epro_vpt));
+	}
+
+	if (vld) {
+		ucar_flow.info.valid = 1;
+		ucar_flow.info.cir = total_rx_rate;
+		ucar_flow.info.pir = total_rx_rate;
+		if (burst)
+			ucar_flow.info.depth = burst;
+		else
+			ucar_flow.info.depth = NBL_UCAR_MAX_BUCKET_DEPTH;
+		ucar_flow.info.cbs = ucar_flow.info.depth;
+		ucar_flow.info.pbs = ucar_flow.info.depth;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_REG(car_id),
+			  (u8 *)&ucar_flow, sizeof(ucar_flow));
+
+	return 0;
+}
+
 static void nbl_phy_set_shaping_dport_vld(void *priv, u8 eth_id, bool vld)
 {
 	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
@@ -1639,7 +1707,8 @@ static int nbl_phy_cfg_dsch_net_to_group(void *priv, u16 func_id, u16 group_id,
 	return 0;
 }
 
-static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_num, u16 *queue_list)
+static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_num,
+				    u16 *queue_list, const u32 *indir)
 {
 	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
 	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
@@ -1647,7 +1716,7 @@ static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_n
 	u32 table_id, table_end, group_count, odd_num, queue_id = 0;
 
 	group_count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << size_type;
-	if (group_count > 256) {
+	if (group_count > NBL_EPRO_RSS_ENTRY_MAX_COUNT) {
 		nbl_err(common, NBL_DEBUG_QUEUE,
 			"Rss group entry size type %u exceed the max value %u",
 			size_type, NBL_EPRO_RSS_ENTRY_SIZE_256);
@@ -1673,34 +1742,63 @@ static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_n
 	nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id),
 			 (u8 *)&rss_ret, sizeof(rss_ret));
 
-	if (odd_num) {
-		rss_ret.vld1 = 1;
-		rss_ret.dqueue1 = queue_list[queue_id++];
-		nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id),
-				  (u8 *)&rss_ret, sizeof(rss_ret));
-		table_id++;
-	}
+	if (indir) {
+		if 
(odd_num) { + rss_ret.vld1 = 1; + rss_ret.dqueue1 = indir[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + table_id++; + } + + for (; table_id < table_end; table_id++) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = indir[queue_id++]; + rss_ret.vld1 = 1; + rss_ret.dqueue1 = indir[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + + if (odd_num) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = indir[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } + } else { + if (odd_num) { + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + table_id++; + } - queue_id = queue_id % q_num; - for (; table_id < table_end; table_id++) { - rss_ret.vld0 = 1; - rss_ret.dqueue0 = queue_list[queue_id++]; - queue_id = queue_id % q_num; - rss_ret.vld1 = 1; - rss_ret.dqueue1 = queue_list[queue_id++]; queue_id = queue_id % q_num; - nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), - (u8 *)&rss_ret, sizeof(rss_ret)); - } + for (; table_id < table_end; table_id++) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } - nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), - (u8 *)&rss_ret, sizeof(rss_ret)); + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); - if (odd_num) { - rss_ret.vld0 = 1; - rss_ret.dqueue0 = queue_list[queue_id++]; - nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), - (u8 *)&rss_ret, sizeof(rss_ret)); + if (odd_num) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } } return 0; @@ -1762,19 +1860,41 @@ static void nbl_phy_read_rss_indir(void *priv, u16 vsi_id, u32 *rss_indir, } } -static void nbl_phy_get_rss_alg_sel(void *priv, u8 eth_id, u8 *alg_sel) +static void nbl_phy_get_rss_alg_sel(void *priv, u16 vsi_id, u8 *alg_sel) { - struct nbl_epro_ept_tbl ept_tbl = {0}; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; - nbl_hw_read_regs(priv, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, - sizeof(struct nbl_epro_ept_tbl)); + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); - if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_TOEPLITZ_HASH) + if (epro_vpt_tbl.rss_alg_sel == NBL_EPRO_RSS_ALG_TOEPLITZ_HASH) *alg_sel = ETH_RSS_HASH_TOP; - else if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_CRC32) + else if (epro_vpt_tbl.rss_alg_sel == NBL_EPRO_RSS_ALG_CRC32) *alg_sel = ETH_RSS_HASH_CRC32; } +static int nbl_phy_set_rss_alg_sel(void *priv, u16 vsi_id, u8 alg_sel) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + if (alg_sel == ETH_RSS_HASH_TOP) + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_TOEPLITZ_HASH; + else if (alg_sel == 
ETH_RSS_HASH_CRC32) + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_CRC32; + else + return -EOPNOTSUPP; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), + (u8 *)&epro_vpt_tbl, + sizeof(struct nbl_epro_vpt_tbl)); + return 0; +} + static int nbl_phy_init_epro_vpt_tbl(void *priv, u16 vsi_id) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -1816,13 +1936,25 @@ static int nbl_phy_set_epro_rss_pt(void *priv, u16 vsi_id, u16 rss_ret_base, u16 struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_epro_rss_pt_tbl epro_rss_pt_tbl = {0}; struct nbl_epro_vpt_tbl epro_vpt_tbl; + u16 entry_size; + + if (rss_entry_size > NBL_EPRO_RSS_ENTRY_MAX_SIZE) + entry_size = NBL_EPRO_RSS_ENTRY_MAX_SIZE; + else + entry_size = rss_entry_size; epro_rss_pt_tbl.vld = 1; - epro_rss_pt_tbl.entry_size = rss_entry_size; + epro_rss_pt_tbl.entry_size = entry_size; epro_rss_pt_tbl.offset0_vld = 1; epro_rss_pt_tbl.offset0 = rss_ret_base; - epro_rss_pt_tbl.offset1_vld = 0; - epro_rss_pt_tbl.offset1 = 0; + if (rss_entry_size > NBL_EPRO_RSS_ENTRY_MAX_SIZE) { + epro_rss_pt_tbl.offset1_vld = 1; + epro_rss_pt_tbl.offset1 = + rss_ret_base + (NBL_EPRO_RSS_ENTRY_SIZE_UNIT << entry_size); + } else { + epro_rss_pt_tbl.offset1_vld = 0; + epro_rss_pt_tbl.offset1 = 0; + } nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_PT_TABLE(vsi_id), (u8 *)&epro_rss_pt_tbl, sizeof(epro_rss_pt_tbl)); @@ -1874,13 +2006,12 @@ static int nbl_phy_disable_uvn(void *priv, u16 queue_id) return 0; } -static bool nbl_phy_is_txq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id) +static bool nbl_phy_is_txq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id, + struct dsch_vn_tc_q_list_tbl *tc_q_list) { - struct dsch_vn_tc_q_list_tbl tc_q_list = {0}; - nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_TC_Q_LIST_TABLE_REG_ARR(queue_id), - (u8 *)&tc_q_list, sizeof(tc_q_list)); - if (!tc_q_list.regi && !tc_q_list.fly && !tc_q_list.vld) + (u8 *)tc_q_list, sizeof(*tc_q_list)); + if (!tc_q_list->regi && !tc_q_list->fly) return true; return false; @@ -1902,17 +2033,25 @@ static int nbl_phy_lso_dsch_drain(void *priv, u16 queue_id) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct dsch_vn_tc_q_list_tbl tc_q_list = {0}; + struct dsch_vn_q2tc_cfg_tbl info; int i = 0; + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + info.vld = 0; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); do { - if (nbl_phy_is_txq_drain_out(phy_mgt, queue_id)) + if (nbl_phy_is_txq_drain_out(phy_mgt, queue_id, &tc_q_list)) break; usleep_range(10, 20); } while (++i < NBL_DRAIN_WAIT_TIMES); if (i >= NBL_DRAIN_WAIT_TIMES) { - nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u lso dsch drain\n", queue_id); + nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u lso dsch drain, regi %u, fly %u, vld %u\n", + queue_id, tc_q_list.regi, tc_q_list.fly, tc_q_list.vld); return -1; } @@ -2364,6 +2503,44 @@ static void nbl_phy_configure_trust(void *priv, u8 eth_id, u8 trust, u8 *dscp2pr } } +static void nbl_phy_configure_rdma_bw(void *priv, u8 eth_id, int rdma_bw) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_dport dport = {0}; + struct nbl_shaping_dvn_dport dvn_dport = {0}; + struct nbl_shaping_rdma_dport rdma_dport = {0}; + u32 rate, rdma_rate, dvn_rate; + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u8 *)&dport, sizeof(dport)); + + rate = dport.cir; + rdma_rate = 
rate * rdma_bw / 100; + dvn_rate = rate - rdma_rate; + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id), + (u8 *)&dvn_dport, sizeof(dvn_dport)); + dvn_dport.cir = dvn_rate; + dvn_dport.pir = rate; + dvn_dport.depth = dport.depth; + dvn_dport.cbs = dvn_dport.depth; + dvn_dport.pbs = dvn_dport.depth; + dvn_dport.valid = 1; + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id), + (u8 *)&rdma_dport, sizeof(rdma_dport)); + rdma_dport.cir = rdma_rate; + rdma_dport.pir = rate; + rdma_dport.depth = dport.depth; + rdma_dport.cbs = rdma_dport.depth; + rdma_dport.pbs = rdma_dport.depth; + rdma_dport.valid = 1; + + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id), + (u8 *)&dvn_dport, sizeof(dvn_dport)); + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id), + (u8 *)&rdma_dport, sizeof(rdma_dport)); +} + static void nbl_phy_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) { nbl_phy_configure_pfc(priv, eth_id, pfc); @@ -2400,6 +2577,48 @@ static void nbl_phy_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xof *xon = ustore_cos_fc_th.xon_th; } +static void nbl_phy_set_rate_limit(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net net_shaping = {0}; + struct dsch_rdma_net2sha_map_tbl rdma_net2sha_map = {0}; + struct dsch_rdma_sha2net_map_tbl rdma_sha2net_map = {0}; + struct dsch_vn_sha2net_map_tbl sha2net = {0}; + struct dsch_vn_net2sha_map_tbl net2sha = {0}; + u64 addr; + + if (type == NBL_TRAFFIC_RDMA_TYPE) { + nbl_hw_read_regs(phy_mgt, NBL_DSCH_RDMA_NET2SHA_MAP_TBL_REG(func_id), + (u8 *)&rdma_net2sha_map, sizeof(rdma_net2sha_map)); + rdma_sha2net_map.rdma_vf_id = func_id; /* only pf */ + rdma_sha2net_map.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_RDMA_SHA2NET_MAP_TBL_REG(func_id), + (u8 *)&rdma_sha2net_map, sizeof(rdma_sha2net_map)); + if (rdma_net2sha_map.vld) + addr = NBL_SHAPING_NET_REG(rdma_net2sha_map.net_shaping_id); + else + addr = NBL_SHAPING_NET_REG(func_id + NBL_NET_SHAPING_RDMA_BASE_ID); + } else { + sha2net.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); + + net2sha.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); + addr = NBL_SHAPING_NET_REG(func_id); + } + + net_shaping.cir = rate; + net_shaping.pir = rate; + net_shaping.depth = max(net_shaping.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + net_shaping.cbs = net_shaping.depth; + net_shaping.pbs = net_shaping.depth; + net_shaping.valid = 1; + + nbl_hw_write_regs(phy_mgt, addr, (u8 *)&net_shaping, sizeof(net_shaping)); +} + static void nbl_phy_enable_mailbox_irq(void *priv, u16 func_id, bool enable_msix, u16 global_vector_id) { @@ -2555,7 +2774,7 @@ static void nbl_phy_update_mailbox_queue_tail_ptr(void *priv, u16 tail_ptr, u8 t u32 local_qid = txrx; u32 value = ((u32)tail_ptr << 16) | local_qid; - /* wmb for mbx notify */ + /* wmb for doorbell */ wmb(); nbl_mbx_wr32(priv, NBL_MAILBOX_NOTIFY_ADDR, value); } @@ -2642,7 +2861,7 @@ static u32 nbl_phy_get_host_pf_mask(void *priv) return data; } -static u32 nbl_phy_get_host_pf_fid(void *priv, u8 func_id) +static u32 nbl_phy_get_host_pf_fid(void *priv, u16 func_id) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; u32 data; @@ -2651,6 +2870,57 @@ static u32 nbl_phy_get_host_pf_fid(void *priv, u8 func_id) return data; } +static u32 nbl_phy_get_real_bus(void *priv) +{ 
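+	/*
+	 * Assumed layout of TL_CFG_BUSDEV (not spelled out elsewhere in this
+	 * patch): the register captures the requester ID as {bus, device[4:0]},
+	 * so shifting out the 5 device-number bits leaves the real bus number
+	 * as seen by the host.
+	 */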
+ struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 data; + + data = nbl_hw_rd32(phy_mgt, NBL_PCIE_HOST_TL_CFG_BUSDEV); + return data >> 5; +} + +static u64 nbl_phy_get_pf_bar_addr(void *priv, u16 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u64 addr; + u32 val; + u32 selector; + + selector = NBL_LB_PF_CONFIGSPACE_SELECT_OFFSET + + func_id * NBL_LB_PF_CONFIGSPACE_SELECT_STRIDE; + nbl_hw_wr32(phy_mgt, NBL_LB_PCIEX16_TOP_AHB, selector); + + val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR + PCI_BASE_ADDRESS_0); + addr = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + + val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR + PCI_BASE_ADDRESS_0 + 4); + addr |= ((u64)val << 32); + + return addr; +} + +static u64 nbl_phy_get_vf_bar_addr(void *priv, u16 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u64 addr; + u32 val; + u32 selector; + + selector = NBL_LB_PF_CONFIGSPACE_SELECT_OFFSET + + func_id * NBL_LB_PF_CONFIGSPACE_SELECT_STRIDE; + nbl_hw_wr32(phy_mgt, NBL_LB_PCIEX16_TOP_AHB, selector); + + val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR + + NBL_SRIOV_CAPS_OFFSET + PCI_SRIOV_BAR); + addr = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + + val = nbl_hw_rd32(phy_mgt, NBL_LB_PF_CONFIGSPACE_BASE_ADDR + + NBL_SRIOV_CAPS_OFFSET + PCI_SRIOV_BAR + 4); + addr |= ((u64)val << 32); + + return addr; +} + static void nbl_phy_cfg_mailbox_qinfo(void *priv, u16 func_id, u16 bus, u16 devid, u16 function) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -2743,6 +3013,20 @@ static int nbl_phy_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) return 0; } +static int nbl_phy_set_vsi_mtu(void *priv, u16 vsi_id, u16 mtu_sel) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + dpsport.mtu_sel = mtu_sel; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + return 0; +} + static int nbl_phy_set_spoof_check_enable(void *priv, u16 vsi_id, u8 enable) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -2829,7 +3113,7 @@ static void nbl_phy_enable_adminq_irq(void *priv, bool enable_msix, u16 global_v struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); struct nbl_adminq_qinfo_map_table adminq_qinfo_map = { 0 }; - adminq_qinfo_map.bus = common->bus; + adminq_qinfo_map.bus = common->hw_bus; adminq_qinfo_map.devid = common->devid; adminq_qinfo_map.function = NBL_COMMON_TO_PCI_FUNC_ID(common); @@ -2851,7 +3135,7 @@ static void nbl_phy_update_adminq_queue_tail_ptr(void *priv, u16 tail_ptr, u8 tx u32 local_qid = txrx; u32 value = ((u32)tail_ptr << 16) | local_qid; - /* wmb for adminq notify */ + /* wmb for doorbell */ wmb(); nbl_mbx_wr32(priv, NBL_ADMINQ_NOTIFY_ADDR, value); } @@ -3378,7 +3662,7 @@ static void nbl_phy_init_uprbac(void *priv) static u32 nbl_phy_get_fw_ping(void *priv) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; - u32 ping; + unsigned long ping; nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_HEARTBEAT_PING, (u8 *)&ping, sizeof(ping)); @@ -3414,6 +3698,26 @@ static void nbl_phy_load_p4(void *priv, u32 addr, u32 size, u8 *data) nbl_hw_write_be_regs(priv, addr, data, size); } +static void nbl_phy_ipro_chksum_err_ctrl(void *priv, u8 status) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union 
ipro_errcode_tbl_u errcode; + u8 index = NBL_ERROR_CODE_L3_CHKSUM; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index), + (u8 *)errcode.data, sizeof(errcode)); + errcode.info.vld = status; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index), + (u8 *)errcode.data, sizeof(errcode)); + + index = NBL_ERROR_CODE_L4_CHKSUM; + nbl_hw_read_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index), + (u8 *)errcode.data, sizeof(errcode)); + errcode.info.vld = status; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_ERRCODE_TBL_REG(index), + (u8 *)errcode.data, sizeof(errcode)); +} + static int nbl_phy_init_offload_fwd(void *priv, u16 vsi_id) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -3437,6 +3741,10 @@ static int nbl_phy_init_offload_fwd(void *priv, u16 vsi_id) vpt.info.rss_key_type_btm = NBL_KEY_IP4_L4_RSS_BIT | NBL_KEY_IP6_L4_RSS_BIT; nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data, NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG); + + /* drop packets with wrong chksums, to prevent PED from correcting them */ + nbl_phy_ipro_chksum_err_ctrl(phy_mgt, 1); + return 0; } @@ -4724,6 +5032,7 @@ static void nbl_read_parsed_reg(struct nbl_phy_mgt *phy_mgt, { u32 reg_len = reg_info->data_len; + /* in this mode, both or-data and and-data are sent */ if (reg_info->mode == NBL_FLOW_READ_OR_AND_WRITE_MODE) reg_len = reg_len / 2; @@ -4797,9 +5106,7 @@ static int nbl_phy_offload_flow_rule(void *priv, void *param) u8 i; nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, - "send regs: flow regs received: to parse and read/write: " - "regs info: count %u, total size %u, " - "1st reg: table %u, mode %u, size %u, depth %u, data %u", + "count %u, total size %u, 1st reg: tab %u, mode %u, size %u, depth %u, data %u", hdr_info->item_cnt, hdr_info->data_len, reg_info->tbl_name, reg_info->mode, reg_info->data_len, reg_info->depth, reg_info->data[0]); @@ -4822,9 +5129,8 @@ static int nbl_phy_offload_flow_rule(void *priv, void *param) nbl_write_parsed_reg(phy_mgt, reg_info, value); } else { nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, - "failed parsing reg info: unrecognized mode: " - "tab %u, mode %u, size %u, ", reg_info->tbl_name, - reg_info->mode, reg_info->data_len); + "failed: unrecognized mode: tab %u, mode %u, size %u, ", + reg_info->tbl_name, reg_info->mode, reg_info->data_len); } reg_info = (struct nbl_chan_regs_info *) @@ -5046,6 +5352,71 @@ static u32 nbl_phy_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u3 return temp; } +static struct nbl_phy_ped_tbl ped_tbl[NBL_FLOW_PED_RECORD_MAX] = { + [NBL_FLOW_PED_UMAC_TYPE] = {.addr = NBL_UPED_TAB_REPLACE_ADDR, + .addr_len = NBL_UPED_TAB_REPLACE_DWLEN,}, + [NBL_FLOW_PED_DMAC_TYPE] = {.addr = NBL_DPED_TAB_REPLACE_ADDR, + .addr_len = NBL_DPED_TAB_REPLACE_DWLEN,}, + [NBL_FLOW_PED_UIP_TYPE] = {.addr = NBL_UPED_TAB_REPLACE_ADDR, + .addr_len = NBL_UPED_TAB_REPLACE_DWLEN,}, + [NBL_FLOW_PED_DIP_TYPE] = {.addr = NBL_DPED_TAB_REPLACE_ADDR, + .addr_len = NBL_DPED_TAB_REPLACE_DWLEN,}, + [NBL_FLOW_PED_UIP6_TYPE] = {.addr = NBL_UPED_TAB_REPLACE_ADDR, + .addr_len = NBL_UPED_TAB_REPLACE_DWLEN,}, + [NBL_FLOW_PED_DIP6_TYPE] = {.addr = NBL_DPED_TAB_REPLACE_ADDR, + .addr_len = NBL_DPED_TAB_REPLACE_DWLEN,}, +}; + +static void nbl_phy_write_ped_tbl(void *priv, u8 *data, u16 idx, enum nbl_flow_ped_type ped_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u64 reg; + +#define NBL_PHY_PED_ADDR_REG(addr, idx, size) ((addr) + (idx) * (size) * 4) + /* if the ped type is IPv6, we also need to write ped_h */ + if (ped_type == 
NBL_FLOW_PED_UIP6_TYPE || ped_type == NBL_FLOW_PED_DIP6_TYPE) { + /* write high 64-bit first then update data and idx for common write */ + data += ped_tbl[ped_type].addr_len * 4; + reg = NBL_PHY_PED_ADDR_REG(ped_tbl[ped_type].addr, idx, + ped_tbl[ped_type].addr_len); + nbl_hw_write_regs(phy_mgt, reg, data, ped_tbl[ped_type].addr_len * 4); + idx += NBL_TC_MAX_PED_H_IDX; + data -= ped_tbl[ped_type].addr_len * 4; + } + + reg = NBL_PHY_PED_ADDR_REG(ped_tbl[ped_type].addr, idx, ped_tbl[ped_type].addr_len); + nbl_hw_write_regs(phy_mgt, reg, data, ped_tbl[ped_type].addr_len * 4); +} + +static int nbl_phy_set_mtu(void *priv, u16 mtu_index, u16 mtu) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_mtu_sel ipro_mtu_sel = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_MTU_SEL_REG(mtu_index / 2), + (u8 *)&ipro_mtu_sel, sizeof(ipro_mtu_sel)); + + if (mtu_index % 2 == 0) + ipro_mtu_sel.mtu_0 = mtu; + else + ipro_mtu_sel.mtu_1 = mtu; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_MTU_SEL_REG(mtu_index / 2), + (u8 *)&ipro_mtu_sel, sizeof(ipro_mtu_sel)); + + return 0; +} + +static u16 nbl_phy_get_mtu_index(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_dn_src_port_tbl ipro_dn_src_port_tbl = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, sizeof(ipro_dn_src_port_tbl)); + return ipro_dn_src_port_tbl.mtu_sel; +} + static int nbl_phy_process_abnormal_queue(struct nbl_phy_mgt *phy_mgt, u16 queue_id, int type, struct nbl_abnormal_details *detail) { @@ -5297,20 +5668,6 @@ static int nbl_phy_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bo return 0; } -static int nbl_phy_cfg_lag_mcc(void *priv, u16 mcc_id, u16 action) -{ - struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; - struct nbl_mcc_tbl node = {0}; - - nbl_hw_read_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); - - node.dport_act = action; - - nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); - - return 0; -} - static void nbl_phy_get_board_info(void *priv, struct nbl_board_port_info *board_info) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; @@ -5388,27 +5745,8 @@ static int nbl_phy_cfg_bond_shaping(void *priv, u8 eth_id, u8 speed, bool enable rdma_dport.valid = 1; nbl_hw_write_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id), (u8 *)&rdma_dport, sizeof(rdma_dport)); - return 0; -} -static void nbl_phy_cfg_dvn_bp_mask(struct dvn_back_pressure_mask *mask, u8 eth_id, bool enable) -{ - switch (eth_id) { - case 0: - mask->dstore_port0_flag = enable; - break; - case 1: - mask->dstore_port1_flag = enable; - break; - case 2: - mask->dstore_port2_flag = enable; - break; - case 3: - mask->dstore_port3_flag = enable; - break; - default: - return; - } + return 0; } static void nbl_phy_set_bond_fc_th(struct nbl_phy_mgt *phy_mgt, @@ -5475,6 +5813,7 @@ static void nbl_phy_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, u8 other_ bool enable, u8 speed) { struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dvn_back_pressure_mask mask = {0}; nbl_hw_read_regs(phy_mgt, NBL_DVN_BACK_PRESSURE_MASK, (u8 *)&mask, sizeof(mask)); @@ -6577,6 +6916,238 @@ static enum nbl_hw_status nbl_phy_get_hw_status(void *priv) return phy_mgt->hw_status; }; +static u32 nbl_phy_get_perf_dump_length(void *priv) +{ + return sizeof(nbl_phy_dump_registers); +}; + +static u32 nbl_phy_get_perf_dump_data(void *priv, u8 *buffer, u32 length) +{ + struct 
nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 copy_len = min_t(u32, length, sizeof(nbl_phy_dump_registers)); + int i; + + for (i = 0; i < copy_len / 4; i++) { + nbl_hw_read_regs(phy_mgt, nbl_phy_dump_registers[i], buffer, 4); + buffer += 4; + } + + return copy_len; +}; + +static int nbl_phy_get_mirror_table_id(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union ipro_dn_src_port_tbl_u ipro_dn_src_port_tbl = {{0}}; + union epro_vpt_u epro_vpt = {{0}}; + union epro_mt_u epro_mt = {{0}}; + int index = 0; + + if (dir == 0) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + if (!mirror_en && !ipro_dn_src_port_tbl.info.mirror_en) { + *mt_id = NBL_EPRO_MT_MAX; + } else if (!mirror_en && ipro_dn_src_port_tbl.info.mirror_en) { + *mt_id = ipro_dn_src_port_tbl.info.mirror_id; + } else if (mirror_en && ipro_dn_src_port_tbl.info.mirror_en) { + *mt_id = ipro_dn_src_port_tbl.info.mirror_id; + } else if (mirror_en && !ipro_dn_src_port_tbl.info.mirror_en) { + for (; index < NBL_EPRO_MT_MAX; index++) { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(index), + (u8 *)&epro_mt, sizeof(epro_mt)); + if (epro_mt.info.vld == 0) { + *mt_id = index; + return 0; + } + } + *mt_id = NBL_EPRO_MT_MAX; + } + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + if (!mirror_en && !epro_vpt.info.mirror_en) { + *mt_id = NBL_EPRO_MT_MAX; + } else if (!mirror_en && epro_vpt.info.mirror_en) { + *mt_id = epro_vpt.info.mirror_id; + } else if (mirror_en && epro_vpt.info.mirror_en) { + *mt_id = epro_vpt.info.mirror_id; + } else if (mirror_en && !epro_vpt.info.mirror_en) { + for (; index < NBL_EPRO_MT_MAX; index++) { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(index), + (u8 *)&epro_mt, sizeof(epro_mt)); + if (epro_mt.info.vld == 0) { + *mt_id = index; + return 0; + } + } + *mt_id = NBL_EPRO_MT_MAX; + } + } + + return 0; +} + +static int nbl_phy_configure_mirror(void *priv, u16 vsi_id, bool mirror_en, + int dir, u8 mt_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union ipro_dn_src_port_tbl_u ipro_dn_src_port_tbl = {{0}}; + union epro_vpt_u epro_vpt = {{0}}; + + if (!mirror_en) { + if (dir == 0) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + ipro_dn_src_port_tbl.info.mirror_en = 0; + ipro_dn_src_port_tbl.info.mirror_pr = 0; + ipro_dn_src_port_tbl.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + epro_vpt.info.mirror_en = 0; + epro_vpt.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)&epro_vpt, + sizeof(epro_vpt)); + } + } else { + if (dir == 0) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, sizeof(ipro_dn_src_port_tbl)); + ipro_dn_src_port_tbl.info.mirror_en = mirror_en; + ipro_dn_src_port_tbl.info.mirror_pr = 3; + ipro_dn_src_port_tbl.info.mirror_id = mt_id; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + epro_vpt.info.mirror_en = mirror_en; 
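+			/*
+			 * mt_id is assumed to be the entry picked earlier by
+			 * nbl_phy_get_mirror_table_id(); the matching EPRO mirror
+			 * table entry is programmed separately through
+			 * nbl_phy_configure_mirror_table().
+			 */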
+ epro_vpt.info.mirror_id = mt_id; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)&epro_vpt, + sizeof(epro_vpt)); + } + } + return 0; +} + +static int nbl_phy_configure_mirror_table(void *priv, bool mirror_en, + u16 mirror_vsi_id, u16 mirror_queue_id, u8 mt_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union epro_mt_u epro_mt = {{0}}; + + if (!mirror_en) { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + epro_mt.info.dport = 0; + epro_mt.info.dqueue = 0; + epro_mt.info.vld = mirror_en; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + } else { + nbl_hw_read_regs(phy_mgt, NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + epro_mt.info.dport = mirror_vsi_id; + epro_mt.info.dqueue = mirror_queue_id; + epro_mt.info.vld = mirror_en; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(mt_id), (u8 *)&epro_mt, + sizeof(epro_mt)); + } + + return 0; +} + +static int nbl_phy_clear_mirror_cfg(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union ipro_dn_src_port_tbl_u ipro_dn_src_port_tbl = {{0}}; + union epro_vpt_u epro_vpt = {{0}}; + union epro_mt_u epro_mt = {{0}}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, sizeof(ipro_dn_src_port_tbl)); + if (ipro_dn_src_port_tbl.info.mirror_en) { + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(ipro_dn_src_port_tbl.info.mirror_id), + (u8 *)&epro_mt, sizeof(epro_mt)); + ipro_dn_src_port_tbl.info.mirror_en = 0; + ipro_dn_src_port_tbl.info.mirror_pr = 0; + ipro_dn_src_port_tbl.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)&ipro_dn_src_port_tbl, + sizeof(ipro_dn_src_port_tbl)); + } + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), + (u8 *)&epro_vpt, sizeof(epro_vpt)); + if (epro_vpt.info.mirror_en) { + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(epro_vpt.info.mirror_id), + (u8 *)&epro_mt, sizeof(epro_mt)); + epro_vpt.info.mirror_en = 0; + epro_vpt.info.mirror_id = 0; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)&epro_vpt, + sizeof(epro_vpt)); + } + + return 0; +} + +static int nbl_phy_get_dstat_vsi_stat(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dstat_vsi_stat dstat_vsi_stat = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSTAT_VSI_STAT(vsi_id), + (u8 *)&dstat_vsi_stat, sizeof(dstat_vsi_stat)); + + *fwd_pkt = dstat_vsi_stat.fwd_pkt_cnt_low + + ((u64)(dstat_vsi_stat.fwd_pkt_cnt_high) << 32); + *fwd_byte = dstat_vsi_stat.fwd_byte_cnt_low + + ((u64)(dstat_vsi_stat.fwd_byte_cnt_high) << 32); + + return 0; +} + +static int nbl_phy_get_ustat_vsi_stat(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ustat_vsi_stat ustat_vsi_stat = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_USTAT_VSI_STAT(vsi_id), + (u8 *)&ustat_vsi_stat, sizeof(ustat_vsi_stat)); + + *fwd_pkt = ustat_vsi_stat.fwd_pkt_cnt_low + + ((u64)(ustat_vsi_stat.fwd_pkt_cnt_high) << 32); + *fwd_byte = ustat_vsi_stat.fwd_byte_cnt_low + + ((u64)(ustat_vsi_stat.fwd_byte_cnt_high) << 32); + + return 0; +} + +static int nbl_phy_get_uvn_pkt_drop_stats(void *priv, u16 global_queue_id, u32 *uvn_stat_pkt_drop) +{ + *uvn_stat_pkt_drop = nbl_hw_rd32(priv, NBL_UVN_STATIS_PKT_DROP(global_queue_id)); + return 0; +} + +static int nbl_phy_get_ustore_pkt_drop_stats(void *priv, u8 eth_id, + struct 
nbl_ustore_stats *ustore_stats) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + ustore_stats->rx_drop_packets = nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_DROP_PKT(eth_id)); + ustore_stats->rx_trun_packets = nbl_hw_rd32(phy_mgt, NBL_USTORE_BUF_PORT_TRUN_PKT(eth_id)); + + return 0; +} + static struct nbl_phy_ops phy_ops = { .init_chip_module = nbl_phy_init_chip_module, .init_qid_map_table = nbl_phy_init_qid_map_table, @@ -6602,11 +7173,13 @@ static struct nbl_phy_ops phy_ops = { .active_shaping = nbl_phy_active_shaping, .deactive_shaping = nbl_phy_deactive_shaping, .set_shaping = nbl_phy_set_shaping, + .set_ucar = nbl_phy_set_ucar, .cfg_dsch_net_to_group = nbl_phy_cfg_dsch_net_to_group, .init_epro_rss_key = nbl_phy_init_epro_rss_key, .read_rss_key = nbl_phy_read_epro_rss_key, .read_rss_indir = nbl_phy_read_rss_indir, .get_rss_alg_sel = nbl_phy_get_rss_alg_sel, + .set_rss_alg_sel = nbl_phy_set_rss_alg_sel, .init_epro_vpt_tbl = nbl_phy_init_epro_vpt_tbl, .set_epro_rss_default = nbl_phy_set_epro_rss_default, .cfg_epro_rss_ret = nbl_phy_cfg_epro_rss_ret, @@ -6626,6 +7199,10 @@ static struct nbl_phy_ops phy_ops = { .cfg_phy_flow = nbl_phy_cfg_phy_flow, .cfg_eth_port_priority_replace = nbl_phy_cfg_eth_port_priority_replace, .get_chip_temperature = nbl_phy_get_chip_temperature, + .write_ped_tbl = nbl_phy_write_ped_tbl, + .set_vsi_mtu = nbl_phy_set_vsi_mtu, + .set_mtu = nbl_phy_set_mtu, + .get_mtu_index = nbl_phy_get_mtu_index, .configure_msix_map = nbl_phy_configure_msix_map, .configure_msix_info = nbl_phy_configure_msix_info, @@ -6639,6 +7216,7 @@ static struct nbl_phy_ops phy_ops = { .del_tcam = nbl_phy_del_tcam, .add_mcc = nbl_phy_add_mcc, .del_mcc = nbl_phy_del_mcc, + .update_mcc_next_node = nbl_phy_update_mcc_next_node, .add_tnl_encap = nbl_phy_add_tnl_encap, .del_tnl_encap = nbl_phy_del_tnl_encap, .init_fem = nbl_phy_init_fem, @@ -6659,6 +7237,9 @@ static struct nbl_phy_ops phy_ops = { .check_mailbox_dma_err = nbl_phy_check_mailbox_dma_err, .get_host_pf_mask = nbl_phy_get_host_pf_mask, .get_host_pf_fid = nbl_phy_get_host_pf_fid, + .get_real_bus = nbl_phy_get_real_bus, + .get_pf_bar_addr = nbl_phy_get_pf_bar_addr, + .get_vf_bar_addr = nbl_phy_get_vf_bar_addr, .cfg_mailbox_qinfo = nbl_phy_cfg_mailbox_qinfo, .enable_mailbox_irq = nbl_phy_enable_mailbox_irq, .enable_abnormal_irq = nbl_phy_enable_abnormal_irq, @@ -6722,8 +7303,10 @@ static struct nbl_phy_ops phy_ops = { .load_p4 = nbl_phy_load_p4, .configure_qos = nbl_phy_configure_qos, + .configure_rdma_bw = nbl_phy_configure_rdma_bw, .set_pfc_buffer_size = nbl_phy_set_pfc_buffer_size, .get_pfc_buffer_size = nbl_phy_get_pfc_buffer_size, + .set_rate_limit = nbl_phy_set_rate_limit, .init_offload_fwd = nbl_phy_init_offload_fwd, .init_cmdq = nbl_phy_cmdq_init, @@ -6737,6 +7320,7 @@ static struct nbl_phy_ops phy_ops = { .offload_flow_rule = nbl_phy_offload_flow_rule, .init_rep = nbl_phy_init_rep, .clear_profile_table_action = nbl_phy_clear_profile_table_action, + .ipro_chksum_err_ctrl = nbl_phy_ipro_chksum_err_ctrl, .init_vdpaq = nbl_phy_init_vdpaq, .destroy_vdpaq = nbl_phy_destroy_vdpaq, @@ -6751,7 +7335,6 @@ static struct nbl_phy_ops phy_ops = { .cfg_lag_member_fwd = nbl_phy_cfg_lag_member_fwd, .cfg_lag_member_list = nbl_phy_cfg_lag_member_list, .cfg_lag_member_up_attr = nbl_phy_cfg_lag_member_up_attr, - .cfg_lag_mcc = nbl_phy_cfg_lag_mcc, .get_lag_fwd = nbl_phy_get_lag_fwd, .cfg_bond_shaping = nbl_phy_cfg_bond_shaping, .cfg_bgid_back_pressure = nbl_phy_cfg_bgid_back_pressure, @@ -6774,6 +7357,20 @@ static struct nbl_phy_ops 
phy_ops = { .set_fd_action_ram = nbl_phy_set_fd_action_ram, .set_hw_status = nbl_phy_set_hw_status, .get_hw_status = nbl_phy_get_hw_status, + + .get_perf_dump_length = nbl_phy_get_perf_dump_length, + .get_perf_dump_data = nbl_phy_get_perf_dump_data, + + .get_mirror_table_id = nbl_phy_get_mirror_table_id, + .configure_mirror = nbl_phy_configure_mirror, + .configure_mirror_table = nbl_phy_configure_mirror_table, + .clear_mirror_cfg = nbl_phy_clear_mirror_cfg, + .set_dvn_desc_req = nbl_dvn_descreq_num_cfg, + .get_dvn_desc_req = nbl_dvn_descreq_num_get, + .get_dstat_vsi_stat = nbl_phy_get_dstat_vsi_stat, + .get_ustat_vsi_stat = nbl_phy_get_ustat_vsi_stat, + .get_uvn_pkt_drop_stats = nbl_phy_get_uvn_pkt_drop_stats, + .get_ustore_pkt_drop_stats = nbl_phy_get_ustore_pkt_drop_stats, }; /* Structure starts here, adding an op should not modify anything below */ @@ -6827,7 +7424,7 @@ static void nbl_phy_remove_ops(struct nbl_common_info *common, struct nbl_phy_op *phy_ops_tbl = NULL; } -static void nbl_phy_disable_rx_err_report(struct pci_dev *pdev) +static void __maybe_unused nbl_phy_disable_rx_err_report(struct pci_dev *pdev) { #define NBL_RX_ERR_BIT 0 #define NBL_BAD_TLP_BIT 6 @@ -6908,7 +7505,7 @@ int nbl_phy_init_leonis(void *p, struct nbl_init_param *param) if (ret) goto setup_ops_fail; - nbl_phy_disable_rx_err_report(pdev); + /* nbl_phy_disable_rx_err_report(pdev); */ (*phy_mgt_leonis)->ro_enable = pcie_relaxed_ordering_enabled(pdev); diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h index c1f26c636773..080cc087601e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -26,20 +26,20 @@ #define KT_MASK_LEN32_ACTION_INFO (0x0) #define KT_MASK_LEN12_ACTION_INFO (0xFFFFF000) #define NBL_FEM_SEARCH_KEY_LEN 44 +#define NBL_HW_DUMMY_REG (0x1300904) -#define HT_PORT0_BANK_SEL (0b01000000) -#define HT_PORT1_BANK_SEL (0b00110000) +#define HT_PORT0_BANK_SEL (0b01100000) +#define HT_PORT1_BANK_SEL (0b00011000) #define HT_PORT2_BANK_SEL (0b00000111) -#define KT_PORT0_BANK_SEL (0b11000000) -#define KT_PORT1_BANK_SEL (0b00110000) -#define KT_PORT2_BANK_SEL (0b00001111) +#define KT_PORT0_BANK_SEL (0b11100000) +#define KT_PORT1_BANK_SEL (0b00011000) +#define KT_PORT2_BANK_SEL (0b00000111) #define AT_PORT0_BANK_SEL (0b000000000000) -#define AT_PORT1_BANK_SEL (0b111000000000) -#define AT_PORT2_BANK_SEL (0b000111111111) -#define HT_PORT0_BTM 1 -#define HT_PORT1_BTM 3 +#define AT_PORT1_BANK_SEL (0b111110000000) +#define AT_PORT2_BANK_SEL (0b000001111111) +#define HT_PORT0_BTM 2 +#define HT_PORT1_BTM 6 #define HT_PORT2_BTM 16 - #define NBL_1BIT 1 #define NBL_8BIT 8 #define NBL_16BIT 16 @@ -409,6 +409,8 @@ union nbl_fem_profile_tbl_u { #define NBL_LB_PF_CONFIGSPACE_BASE_ADDR (NBL_LB_PCIEX16_TOP_BASE + 0x00024000) #define NBL_LB_PCIEX16_TOP_AHB (NBL_LB_PCIEX16_TOP_BASE + 0x00000020) +#define NBL_SRIOV_CAPS_OFFSET (0x140) + /* -------- MAILBOX BAR2 ----- */ #define NBL_MAILBOX_NOTIFY_ADDR (0x00000000) #define NBL_MAILBOX_BAR_REG (0x00000000) @@ -489,6 +491,7 @@ struct nbl_mailbox_qinfo_map_table { #define NBL_PCIE_HOST_K_PF_MASK_REG (NBL_INTF_HOST_PCIE_BASE + 0x00001004) #define NBL_PCIE_HOST_K_PF_FID(pf_id) \ (NBL_INTF_HOST_PCIE_BASE + 0x0000106C + 4 * (pf_id)) +#define NBL_PCIE_HOST_TL_CFG_BUSDEV (NBL_INTF_HOST_PCIE_BASE + 0x11040) /* -------- HOST_PADPT -------- */ #define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN (NBL_INTF_HOST_PADPT_BASE + 0x00000160) @@ -686,6 +689,8 @@ struct nbl_ped_hw_edit_profile_cfg { #define NBL_SHAPING_DPORT_100G_RATE 0x1A400 #define NBL_SHAPING_DPORT_HALF_100G_RATE 0xD200 +#define NBL_UCAR_MAX_BUCKET_DEPTH 524287 + #define NBL_DSTORE_DROP_XOFF_TH 0xC8 #define NBL_DSTORE_DROP_XON_TH 0x64 @@ -733,6 +738,19 @@ struct dsch_vn_net2sha_map_tbl { u32 reserve:31; }; +#define NBL_NET_SHAPING_RDMA_BASE_ID (448) + +struct dsch_rdma_net2sha_map_tbl { + u32 net_shaping_id:10; + u32 reserve:21; + u32 vld:1; +}; + +struct dsch_rdma_sha2net_map_tbl { + u32 rdma_vf_id:31; + u32 vld:1; +}; + struct dsch_psha_en { u32 en:4; u32 rsv:28; @@ -906,6 +924,8 @@ struct nbl_dvn_stat_cnt { #define NBL_DVN_PKT_DIF_ERR_CNT (NBL_DP_DVN_BASE + 0x00000034) #define NBL_DVN_ERR_QUEUE_ID_GET (NBL_DP_DVN_BASE + 0x0000040C) #define NBL_DVN_BACK_PRESSURE_MASK (NBL_DP_DVN_BASE + 0x00000464) +#define NBL_DVN_DESCRD_L2_UNAVAIL_CNT (NBL_DP_DVN_BASE + 0x00000A1C) +#define NBL_DVN_DESCRD_L2_NOAVAIL_CNT (NBL_DP_DVN_BASE + 0x00000A20) #define DEFAULT_DVN_DESCREQ_NUMCFG (0x00080014) #define DEFAULT_DVN_100G_DESCREQ_NUMCFG (0x00080020) @@ -1035,7 +1055,13 @@ struct dvn_back_pressure_mask { #define NBL_UVN_QUEUE_ERR_MASK (NBL_DP_UVN_BASE + 0x00000224) #define NBL_UVN_ECPU_QUEUE_NUM (NBL_DP_UVN_BASE + 0x0000023C) #define NBL_UVN_DESC_WR_TIMEOUT (NBL_DP_UVN_BASE + 0x00000214) +#define NBL_UVN_DIF_DELAY_REQ (NBL_DP_UVN_BASE + 0x000010D0) +#define NBL_UVN_DIF_DELAY_TIME (NBL_DP_UVN_BASE + 0x000010D4) +#define NBL_UVN_DIF_DELAY_MAX (NBL_DP_UVN_BASE + 0x000010D8) +#define NBL_UVN_DESC_PRE_DESC_REQ_NULL (NBL_DP_UVN_BASE + 0x000012C8) +#define NBL_UVN_DESC_PRE_DESC_REQ_LACK (NBL_DP_UVN_BASE + 0x000012CC) #define NBL_UVN_DESC_RD_ENTRY (NBL_DP_UVN_BASE + 0x000012D0) +#define 
NBL_UVN_DESC_RD_DROP_DESC_LACK (NBL_DP_UVN_BASE + 0x000012E0) #define NBL_UVN_DIF_REQ_RO_FLAG (NBL_DP_UVN_BASE + 0x00000250) #define NBL_UVN_DESC_PREFETCH_INIT (NBL_DP_UVN_BASE + 0x00000204) #define NBL_UVN_DESC_WR_TIMEOUT_4US (0x960) @@ -1144,12 +1170,16 @@ struct uvn_desc_prefetch_init { #define NBL_USTORE_PKT_LEN_ADDR (NBL_DP_USTORE_BASE + 0x00000108) #define NBL_USTORE_PORT_FC_TH_REG_ARR(port_id) \ (NBL_DP_USTORE_BASE + 0x00000134 + (port_id) * sizeof(struct nbl_ustore_port_fc_th)) - #define NBL_USTORE_COS_FC_TH_REG_ARR(cos_id) \ (NBL_DP_USTORE_BASE + 0x00000200 + (cos_id) * sizeof(struct nbl_ustore_cos_fc_th)) - #define NBL_USTORE_PORT_DROP_TH_REG_ARR(port_id) \ (NBL_DP_USTORE_BASE + 0x00000150 + (port_id) * sizeof(struct nbl_ustore_port_drop_th)) +#define NBL_USTORE_BUF_TOTAL_DROP_PKT (NBL_DP_USTORE_BASE + 0x000010A8) +#define NBL_USTORE_BUF_TOTAL_TRUN_PKT (NBL_DP_USTORE_BASE + 0x000010AC) +#define NBL_USTORE_BUF_PORT_DROP_PKT(eth_id) \ + (NBL_DP_USTORE_BASE + 0x00002500 + (eth_id) * sizeof(u32)) +#define NBL_USTORE_BUF_PORT_TRUN_PKT(eth_id) \ + (NBL_DP_USTORE_BASE + 0x00002540 + (eth_id) * sizeof(u32)) #define NBL_USTORE_SIGNLE_ETH_DROP_TH 0xC80 #define NBL_USTORE_DUAL_ETH_DROP_TH 0x640 @@ -1204,6 +1234,28 @@ struct ul4s_sch_pad { u32 rsv:30; }; +/* --------- DSTAT --------- */ +#define NBL_DSTAT_VSI_STAT(vsi_id) \ + (NBL_DP_DSTAT_BASE + 0x00008000 + (vsi_id) * sizeof(struct nbl_dstat_vsi_stat)) + +struct nbl_dstat_vsi_stat { + u32 fwd_byte_cnt_low; + u32 fwd_byte_cnt_high; + u32 fwd_pkt_cnt_low; + u32 fwd_pkt_cnt_high; +}; + +/* --------- USTAT --------- */ +#define NBL_USTAT_VSI_STAT(vsi_id) \ + (NBL_DP_USTAT_BASE + 0x00008000 + (vsi_id) * sizeof(struct nbl_ustat_vsi_stat)) + +struct nbl_ustat_vsi_stat { + u32 fwd_byte_cnt_low; + u32 fwd_byte_cnt_high; + u32 fwd_pkt_cnt_low; + u32 fwd_pkt_cnt_high; +}; + /* ---------- IPRO ---------- */ /* ipro module related macros */ #define NBL_IPRO_MODULE (0xB04000) @@ -1235,7 +1287,7 @@ struct nbl_ipro_dn_src_port_tbl { u32 mirror_id:4; u32 vlan_layer_num_1:2; u32 phy_flow:1; - u32 not_used_0:4; + u32 mtu_sel:4; u32 addr_check_en:1; u32 smac_low:16; u32 smac_high; @@ -1274,6 +1326,11 @@ struct nbl_ipro_upsport_tbl { u32 rsv:1; }; +struct nbl_ipro_mtu_sel { + u32 mtu_1:16; /* [15:0] Default:0x0 RW */ + u32 mtu_0:16; /* [31:16] Default:0x0 RW */ +}; + /* ---------- EPRO ---------- */ #define NBL_EPRO_INT_STATUS (NBL_PPE_EPRO_BASE + 0x00000000) #define NBL_EPRO_INT_MASK (NBL_PPE_EPRO_BASE + 0x00000004) @@ -1538,6 +1595,7 @@ struct nbl_dqm_rxmac_tx_cos_bp_en_cfg { u32 eth3:8; }; +#define NBL_UQM_QUE_TYPE (NBL_DP_UQM_BASE + 0x0000013c) #define NBL_UQM_RX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000614) #define NBL_UQM_TX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000604) @@ -1557,6 +1615,11 @@ struct nbl_dqm_rxmac_tx_cos_bp_en_cfg { #define NBL_UQM_PORT_DROP_DEPTH 6 #define NBL_UQM_DPORT_DROP_DEPTH 16 +struct nbl_uqm_que_type { + u32 bp_drop:1; + u32 rsv:31; +}; + /* UQM rx_cos_bp_en */ struct nbl_uqm_rx_cos_bp_en_cfg { u32 vld_l; @@ -1919,6 +1982,8 @@ union nbl_ipsec_lifetime_diff { #define NBL_TOP_CTRL_MODULE (0x01300000) #define NBL_TOP_CTRL_INT_STATUS (NBL_TOP_CTRL_MODULE + 0X0000) #define NBL_TOP_CTRL_INT_MASK (NBL_TOP_CTRL_MODULE + 0X0004) +#define NBL_TOP_CTRL_LB_CLK (NBL_TOP_CTRL_MODULE + 0X0100) +#define NBL_TOP_CTRL_LB_RST (NBL_TOP_CTRL_MODULE + 0X0104) #define NBL_TOP_CTRL_TVSENSOR0 (NBL_TOP_CTRL_MODULE + 0X0254) #define NBL_TOP_CTRL_SOFT_DEF0 (NBL_TOP_CTRL_MODULE + 0x0430) #define NBL_TOP_CTRL_SOFT_DEF1 (NBL_TOP_CTRL_MODULE + 0x0434) @@ -1931,6 +1996,9 
@@ union nbl_ipsec_lifetime_diff { #define NBL_FW_HEARTBEAT_PONG NBL_TOP_CTRL_SOFT_DEF1 +#define NBL_TOP_CTRL_RDMA_LB_RST BIT(10) +#define NBL_TOP_CTRL_RDMA_LB_CLK BIT(10) + /* temperature threshold1 */ #define NBL_LEONIS_TEMP_MAX (105) /* temperature threshold2 */ @@ -1977,7 +2045,7 @@ union nbl_ipsec_lifetime_diff { #define NBL_CMDQ_HI_DWORD(x) ((u32)(((x) >> 32) & 0xFFFFFFFF)) #define NBL_CMDQ_LO_DWORD(x) ((u32)(x) & 0xFFFFFFFF) #define NBL_FEM_INIT_START_KERN (0xFE) -#define NBL_FEM_INIT_START_VALUE (0x7E) +#define NBL_FEM_INIT_START_VALUE (0x3E) #define NBL_PED_VSI_TYPE_ETH_BASE (1027) #define NBL_DPED_VLAN_TYPE_PORT_NUM (1031) #define NBL_CHAN_REG_MAX_LEN (32) @@ -2023,6 +2091,15 @@ union nbl_ipsec_lifetime_diff { #define NBL_DSCH_VN_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR + \ (NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_ADDR (0x49c000) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DWLEN (1) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_REG(r) (NBL_DSCH_RDMA_SHA2NET_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_ADDR (0x494000) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DWLEN (1) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_RDMA_NET2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DWLEN * 4) * (r)) + /* Mailbox bar phy register offset begin */ #define NBL_FW_HEARTBEAT_PING 0x84 #define NBL_FW_BOARD_CONFIG 0x200 diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.c new file mode 100644 index 000000000000..811507f25fdd --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.c @@ -0,0 +1,3863 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_phy.h" +#include "nbl_phy_leonis.h" +#include "nbl_phy_leonis_regs.h" + +#define NBL_SEC_BLOCK_SIZE (0x100) +#define NBL_SEC000_SIZE (1) +#define NBL_SEC000_ADDR (0x114150) +#define NBL_SEC001_SIZE (1) +#define NBL_SEC001_ADDR (0x15c190) +#define NBL_SEC002_SIZE (1) +#define NBL_SEC002_ADDR (0x10417c) +#define NBL_SEC003_SIZE (1) +#define NBL_SEC003_ADDR (0x714154) +#define NBL_SEC004_SIZE (1) +#define NBL_SEC004_ADDR (0x75c190) +#define NBL_SEC005_SIZE (1) +#define NBL_SEC005_ADDR (0x70417c) +#define NBL_SEC006_SIZE (512) +#define NBL_SEC006_ADDR (0x8f000) +#define NBL_SEC006_REGI(i) (0x8f000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC007_SIZE (256) +#define NBL_SEC007_ADDR (0x8f800) +#define NBL_SEC007_REGI(i) (0x8f800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC008_SIZE (1024) +#define NBL_SEC008_ADDR (0x90000) +#define NBL_SEC008_REGI(i) (0x90000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC009_SIZE (2048) +#define NBL_SEC009_ADDR (0x94000) +#define NBL_SEC009_REGI(i) (0x94000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC010_SIZE (256) +#define NBL_SEC010_ADDR (0x96000) +#define NBL_SEC010_REGI(i) (0x96000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC011_SIZE (1024) +#define NBL_SEC011_ADDR (0x91000) +#define NBL_SEC011_REGI(i) (0x91000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC012_SIZE (128) +#define NBL_SEC012_ADDR (0x92000) +#define NBL_SEC012_REGI(i) (0x92000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC013_SIZE (64) +#define NBL_SEC013_ADDR (0x92200) +#define NBL_SEC013_REGI(i) (0x92200 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC014_SIZE (64) +#define NBL_SEC014_ADDR (0x92300) +#define NBL_SEC014_REGI(i) (0x92300 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC015_SIZE (1) +#define NBL_SEC015_ADDR (0x8c214) +#define NBL_SEC016_SIZE (1) +#define NBL_SEC016_ADDR (0x8c220) +#define NBL_SEC017_SIZE (1) +#define NBL_SEC017_ADDR (0x8c224) +#define NBL_SEC018_SIZE (1) +#define NBL_SEC018_ADDR (0x8c228) +#define NBL_SEC019_SIZE (1) +#define NBL_SEC019_ADDR (0x8c22c) +#define NBL_SEC020_SIZE (1) +#define NBL_SEC020_ADDR (0x8c1f0) +#define NBL_SEC021_SIZE (1) +#define NBL_SEC021_ADDR (0x8c1f8) +#define NBL_SEC022_SIZE (256) +#define NBL_SEC022_ADDR (0x85f000) +#define NBL_SEC022_REGI(i) (0x85f000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC023_SIZE (128) +#define NBL_SEC023_ADDR (0x85f800) +#define NBL_SEC023_REGI(i) (0x85f800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC024_SIZE (512) +#define NBL_SEC024_ADDR (0x860000) +#define NBL_SEC024_REGI(i) (0x860000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC025_SIZE (1024) +#define NBL_SEC025_ADDR (0x864000) +#define NBL_SEC025_REGI(i) (0x864000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC026_SIZE (256) +#define NBL_SEC026_ADDR (0x866000) +#define NBL_SEC026_REGI(i) (0x866000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC027_SIZE (512) +#define NBL_SEC027_ADDR (0x861000) +#define NBL_SEC027_REGI(i) (0x861000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC028_SIZE (64) +#define NBL_SEC028_ADDR (0x862000) +#define NBL_SEC028_REGI(i) (0x862000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC029_SIZE (32) +#define NBL_SEC029_ADDR (0x862200) +#define NBL_SEC029_REGI(i) (0x862200 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC030_SIZE (32) +#define NBL_SEC030_ADDR (0x862300) +#define NBL_SEC030_REGI(i) (0x862300 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC031_SIZE (1) +#define NBL_SEC031_ADDR (0x85c214) +#define NBL_SEC032_SIZE (1) +#define NBL_SEC032_ADDR (0x85c220) +#define NBL_SEC033_SIZE (1) +#define NBL_SEC033_ADDR (0x85c224) +#define NBL_SEC034_SIZE (1) +#define 
NBL_SEC034_ADDR (0x85c228) +#define NBL_SEC035_SIZE (1) +#define NBL_SEC035_ADDR (0x85c22c) +#define NBL_SEC036_SIZE (1) +#define NBL_SEC036_ADDR (0xb04200) +#define NBL_SEC037_SIZE (1) +#define NBL_SEC037_ADDR (0xb04230) +#define NBL_SEC038_SIZE (1) +#define NBL_SEC038_ADDR (0xb04234) +#define NBL_SEC039_SIZE (64) +#define NBL_SEC039_ADDR (0xb05800) +#define NBL_SEC039_REGI(i) (0xb05800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC040_SIZE (32) +#define NBL_SEC040_ADDR (0xb05400) +#define NBL_SEC040_REGI(i) (0xb05400 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC041_SIZE (16) +#define NBL_SEC041_ADDR (0xb05500) +#define NBL_SEC041_REGI(i) (0xb05500 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC042_SIZE (1) +#define NBL_SEC042_ADDR (0xb14148) +#define NBL_SEC043_SIZE (1) +#define NBL_SEC043_ADDR (0xb14104) +#define NBL_SEC044_SIZE (1) +#define NBL_SEC044_ADDR (0xb1414c) +#define NBL_SEC045_SIZE (1) +#define NBL_SEC045_ADDR (0xb14150) +#define NBL_SEC046_SIZE (256) +#define NBL_SEC046_ADDR (0xb15000) +#define NBL_SEC046_REGI(i) (0xb15000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC047_SIZE (32) +#define NBL_SEC047_ADDR (0xb15800) +#define NBL_SEC047_REGI(i) (0xb15800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC048_SIZE (1) +#define NBL_SEC048_ADDR (0xb24148) +#define NBL_SEC049_SIZE (1) +#define NBL_SEC049_ADDR (0xb24104) +#define NBL_SEC050_SIZE (1) +#define NBL_SEC050_ADDR (0xb2414c) +#define NBL_SEC051_SIZE (1) +#define NBL_SEC051_ADDR (0xb24150) +#define NBL_SEC052_SIZE (256) +#define NBL_SEC052_ADDR (0xb25000) +#define NBL_SEC052_REGI(i) (0xb25000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC053_SIZE (32) +#define NBL_SEC053_ADDR (0xb25800) +#define NBL_SEC053_REGI(i) (0xb25800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC054_SIZE (1) +#define NBL_SEC054_ADDR (0xb34148) +#define NBL_SEC055_SIZE (1) +#define NBL_SEC055_ADDR (0xb34104) +#define NBL_SEC056_SIZE (1) +#define NBL_SEC056_ADDR (0xb3414c) +#define NBL_SEC057_SIZE (1) +#define NBL_SEC057_ADDR (0xb34150) +#define NBL_SEC058_SIZE (256) +#define NBL_SEC058_ADDR (0xb35000) +#define NBL_SEC058_REGI(i) (0xb35000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC059_SIZE (32) +#define NBL_SEC059_ADDR (0xb35800) +#define NBL_SEC059_REGI(i) (0xb35800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC060_SIZE (1) +#define NBL_SEC060_ADDR (0xe74630) +#define NBL_SEC061_SIZE (1) +#define NBL_SEC061_ADDR (0xe74634) +#define NBL_SEC062_SIZE (64) +#define NBL_SEC062_ADDR (0xe75000) +#define NBL_SEC062_REGI(i) (0xe75000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC063_SIZE (32) +#define NBL_SEC063_ADDR (0xe75480) +#define NBL_SEC063_REGI(i) (0xe75480 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC064_SIZE (16) +#define NBL_SEC064_ADDR (0xe75980) +#define NBL_SEC064_REGI(i) (0xe75980 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC065_SIZE (32) +#define NBL_SEC065_ADDR (0x15f000) +#define NBL_SEC065_REGI(i) (0x15f000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC066_SIZE (32) +#define NBL_SEC066_ADDR (0x75f000) +#define NBL_SEC066_REGI(i) (0x75f000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC067_SIZE (1) +#define NBL_SEC067_ADDR (0xb64108) +#define NBL_SEC068_SIZE (1) +#define NBL_SEC068_ADDR (0xb6410c) +#define NBL_SEC069_SIZE (1) +#define NBL_SEC069_ADDR (0xb64140) +#define NBL_SEC070_SIZE (1) +#define NBL_SEC070_ADDR (0xb64144) +#define NBL_SEC071_SIZE (512) +#define NBL_SEC071_ADDR (0xb65000) +#define NBL_SEC071_REGI(i) (0xb65000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC072_SIZE (32) +#define NBL_SEC072_ADDR (0xb65800) +#define NBL_SEC072_REGI(i) (0xb65800 + NBL_BYTES_IN_REG * (i)) +#define 
NBL_SEC073_SIZE (1) +#define NBL_SEC073_ADDR (0x8c210) +#define NBL_SEC074_SIZE (1) +#define NBL_SEC074_ADDR (0x85c210) +#define NBL_SEC075_SIZE (4) +#define NBL_SEC075_ADDR (0x8c1b0) +#define NBL_SEC075_REGI(i) (0x8c1b0 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC076_SIZE (4) +#define NBL_SEC076_ADDR (0x8c1c0) +#define NBL_SEC076_REGI(i) (0x8c1c0 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC077_SIZE (4) +#define NBL_SEC077_ADDR (0x85c1b0) +#define NBL_SEC077_REGI(i) (0x85c1b0 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC078_SIZE (1) +#define NBL_SEC078_ADDR (0x85c1ec) +#define NBL_SEC079_SIZE (1) +#define NBL_SEC079_ADDR (0x8c1ec) +#define NBL_SEC080_SIZE (1) +#define NBL_SEC080_ADDR (0xb04440) +#define NBL_SEC081_SIZE (1) +#define NBL_SEC081_ADDR (0xb04448) +#define NBL_SEC082_SIZE (1) +#define NBL_SEC082_ADDR (0xb14450) +#define NBL_SEC083_SIZE (1) +#define NBL_SEC083_ADDR (0xb24450) +#define NBL_SEC084_SIZE (1) +#define NBL_SEC084_ADDR (0xb34450) +#define NBL_SEC085_SIZE (1) +#define NBL_SEC085_ADDR (0xa04188) +#define NBL_SEC086_SIZE (1) +#define NBL_SEC086_ADDR (0xe74218) +#define NBL_SEC087_SIZE (1) +#define NBL_SEC087_ADDR (0xe7421c) +#define NBL_SEC088_SIZE (1) +#define NBL_SEC088_ADDR (0xe74220) +#define NBL_SEC089_SIZE (1) +#define NBL_SEC089_ADDR (0xe74224) +#define NBL_SEC090_SIZE (1) +#define NBL_SEC090_ADDR (0x75c22c) +#define NBL_SEC091_SIZE (1) +#define NBL_SEC091_ADDR (0x75c230) +#define NBL_SEC092_SIZE (1) +#define NBL_SEC092_ADDR (0x75c238) +#define NBL_SEC093_SIZE (1) +#define NBL_SEC093_ADDR (0x75c244) +#define NBL_SEC094_SIZE (1) +#define NBL_SEC094_ADDR (0x75c248) +#define NBL_SEC095_SIZE (1) +#define NBL_SEC095_ADDR (0x75c250) +#define NBL_SEC096_SIZE (1) +#define NBL_SEC096_ADDR (0x15c230) +#define NBL_SEC097_SIZE (1) +#define NBL_SEC097_ADDR (0x15c234) +#define NBL_SEC098_SIZE (1) +#define NBL_SEC098_ADDR (0x15c238) +#define NBL_SEC099_SIZE (1) +#define NBL_SEC099_ADDR (0x15c23c) +#define NBL_SEC100_SIZE (1) +#define NBL_SEC100_ADDR (0x15c244) +#define NBL_SEC101_SIZE (1) +#define NBL_SEC101_ADDR (0x15c248) +#define NBL_SEC102_SIZE (1) +#define NBL_SEC102_ADDR (0xb6432c) +#define NBL_SEC103_SIZE (1) +#define NBL_SEC103_ADDR (0xb64220) +#define NBL_SEC104_SIZE (1) +#define NBL_SEC104_ADDR (0xb44804) +#define NBL_SEC105_SIZE (1) +#define NBL_SEC105_ADDR (0xb44a00) +#define NBL_SEC106_SIZE (1) +#define NBL_SEC106_ADDR (0xe84210) +#define NBL_SEC107_SIZE (1) +#define NBL_SEC107_ADDR (0xe84214) +#define NBL_SEC108_SIZE (1) +#define NBL_SEC108_ADDR (0xe64228) +#define NBL_SEC109_SIZE (1) +#define NBL_SEC109_ADDR (0x65413c) +#define NBL_SEC110_SIZE (1) +#define NBL_SEC110_ADDR (0x984144) +#define NBL_SEC111_SIZE (1) +#define NBL_SEC111_ADDR (0x114130) +#define NBL_SEC112_SIZE (1) +#define NBL_SEC112_ADDR (0x714138) +#define NBL_SEC113_SIZE (1) +#define NBL_SEC113_ADDR (0x114134) +#define NBL_SEC114_SIZE (1) +#define NBL_SEC114_ADDR (0x71413c) +#define NBL_SEC115_SIZE (1) +#define NBL_SEC115_ADDR (0x90437c) +#define NBL_SEC116_SIZE (32) +#define NBL_SEC116_ADDR (0xb05000) +#define NBL_SEC116_REGI(i) (0xb05000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC117_SIZE (1) +#define NBL_SEC117_ADDR (0xb043e0) +#define NBL_SEC118_SIZE (1) +#define NBL_SEC118_ADDR (0xb043f0) +#define NBL_SEC119_SIZE (5) +#define NBL_SEC119_ADDR (0x8c230) +#define NBL_SEC119_REGI(i) (0x8c230 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC120_SIZE (1) +#define NBL_SEC120_ADDR (0x8c1f4) +#define NBL_SEC121_SIZE (1) +#define NBL_SEC121_ADDR (0x2046c4) +#define NBL_SEC122_SIZE (1) +#define NBL_SEC122_ADDR (0x85c1f4) 
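+/*
+ * The NBL_SECnnn defines above and below appear to follow one convention:
+ * _ADDR is the base offset of a register block, _SIZE its length in 32-bit
+ * words, and _REGI(i) the address of the i-th word, NBL_BYTES_IN_REG bytes
+ * apart.
+ */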
+#define NBL_SEC123_SIZE (1) +#define NBL_SEC123_ADDR (0x75c194) +#define NBL_SEC124_SIZE (256) +#define NBL_SEC124_ADDR (0xa05000) +#define NBL_SEC124_REGI(i) (0xa05000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC125_SIZE (256) +#define NBL_SEC125_ADDR (0xa06000) +#define NBL_SEC125_REGI(i) (0xa06000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC126_SIZE (256) +#define NBL_SEC126_ADDR (0xa07000) +#define NBL_SEC126_REGI(i) (0xa07000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC127_SIZE (1) +#define NBL_SEC127_ADDR (0x75c204) +#define NBL_SEC128_SIZE (1) +#define NBL_SEC128_ADDR (0x15c204) +#define NBL_SEC129_SIZE (1) +#define NBL_SEC129_ADDR (0x75c208) +#define NBL_SEC130_SIZE (1) +#define NBL_SEC130_ADDR (0x15c208) +#define NBL_SEC131_SIZE (1) +#define NBL_SEC131_ADDR (0x75c20c) +#define NBL_SEC132_SIZE (1) +#define NBL_SEC132_ADDR (0x15c20c) +#define NBL_SEC133_SIZE (1) +#define NBL_SEC133_ADDR (0x75c210) +#define NBL_SEC134_SIZE (1) +#define NBL_SEC134_ADDR (0x15c210) +#define NBL_SEC135_SIZE (1) +#define NBL_SEC135_ADDR (0x75c214) +#define NBL_SEC136_SIZE (1) +#define NBL_SEC136_ADDR (0x15c214) +#define NBL_SEC137_SIZE (32) +#define NBL_SEC137_ADDR (0x15d000) +#define NBL_SEC137_REGI(i) (0x15d000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC138_SIZE (32) +#define NBL_SEC138_ADDR (0x75d000) +#define NBL_SEC138_REGI(i) (0x75d000 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC139_SIZE (1) +#define NBL_SEC139_ADDR (0x75c310) +#define NBL_SEC140_SIZE (1) +#define NBL_SEC140_ADDR (0x75c314) +#define NBL_SEC141_SIZE (1) +#define NBL_SEC141_ADDR (0x75c340) +#define NBL_SEC142_SIZE (1) +#define NBL_SEC142_ADDR (0x75c344) +#define NBL_SEC143_SIZE (1) +#define NBL_SEC143_ADDR (0x75c348) +#define NBL_SEC144_SIZE (1) +#define NBL_SEC144_ADDR (0x75c34c) +#define NBL_SEC145_SIZE (32) +#define NBL_SEC145_ADDR (0xb15800) +#define NBL_SEC145_REGI(i) (0xb15800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC146_SIZE (32) +#define NBL_SEC146_ADDR (0xb25800) +#define NBL_SEC146_REGI(i) (0xb25800 + NBL_BYTES_IN_REG * (i)) +#define NBL_SEC147_SIZE (32) +#define NBL_SEC147_ADDR (0xb35800) +#define NBL_SEC147_REGI(i) (0xb35800 + NBL_BYTES_IN_REG * (i)) + +static u32 nbl_sec046_1p_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00077c2b, 0x005c0000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x70000000, 0x00000020, 0x24140000, 0x00000020, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00002100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x20140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x20140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x38430000, + 0x70000006, 0x00000020, 0x24140000, 0x00000020, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x98cb1180, 0x6e36d469, + 0x9d8eb91c, 0x87e3ef47, 0xa2931288, 0x08405c5a, + 0x73865086, 0x00000080, 0x30140000, 0x00000080, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x000b3849, 0x38430000, + 0x00000006, 0x0000c100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x4c016100, 0x00000014, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec071_1p_data[] = { + 0x00000000, 0x00000000, 0x00113d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7029b00, 0x00000000, + 0x00000000, 0x43000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x51e00000, 0x00000c9c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00293d00, 0x00000000, + 0x00000000, 0x00000000, 0x67089b00, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80000000, 0x00000000, 0xb1e00000, 0x0000189c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x014b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x015b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 
0xe6d29a00, 0x000149c4, + 0x00000000, 0x4b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d2c000, 0x000149c4, + 0x00000000, 0x5b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x64d49200, 0x5e556945, + 0xc666d89a, 0x4b0001a9, 0x00004c84, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x6ed4ba00, 0x5ef56bc5, + 0xc666d8c0, 0x5b0001a9, 0x00004dc4, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x08028000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec046_2p_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00077c2b, 0x005c0000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x70000000, 0x00000020, 0x04140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00002100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x38430000, + 0x70000006, 0x00000020, 0x04140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x98cb1180, 0x6e36d469, + 0x9d8eb91c, 0x87e3ef47, 0xa2931288, 0x08405c5a, + 0x73865086, 0x00000080, 0x10140000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x000b3849, 0x38430000, + 0x00000006, 0x0000c100, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x4c016100, 0x00000014, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec071_2p_data[] = { + 0x00000000, 0x00000000, 0x00113d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7029b00, 0x00000000, + 0x00000000, 0x43000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x51e00000, 0x00000c9c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00293d00, 0x00000000, + 0x00000000, 0x00000000, 0x67089b00, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80000000, 0x00000000, 0xb1e00000, 0x0000189c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x014b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x015b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d29a00, 0x000149c4, + 0x00000000, 0x4b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d2c000, 0x000149c4, + 0x00000000, 0x5b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x64d49200, 0x5e556945, + 0xc666d89a, 0x4b0001a9, 0x00004c84, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x6ed4ba00, 0x5ef56bc5, + 0xc666d8c0, 0x5b0001a9, 0x00004dc4, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x00028000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec006_data[] = { + 0x81008100, 0x00000001, 0x88a88100, 0x00000001, + 0x810088a8, 0x00000001, 0x88a888a8, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004000, 0x00000001, 0x86dd6000, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x08060000, 0x00000001, 0x80350000, 0x00000001, + 0x88080000, 0x00000001, 0x88f70000, 0x00000001, + 0x88cc0000, 0x00000001, 0x88090000, 0x00000001, + 0x89150000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x11006000, 0x00000001, 0x06006000, 0x00000001, + 0x02006000, 0x00000001, 0x3a006000, 0x00000001, + 0x2f006000, 0x00000001, 0x84006000, 0x00000001, + 0x32006000, 0x00000001, 0x2c006000, 0x00000001, + 0x3c006000, 0x00000001, 0x2b006000, 0x00000001, + 0x00006000, 0x00000001, 0x00004000, 0x00000001, + 0x00004000, 0x00000001, 0x20004000, 0x00000001, + 0x40004000, 0x00000001, 0x00000000, 0x00000001, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x2c000000, 0x00000001, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x00000000, 0x00000000, + 0x2c000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x06001072, 0x00000001, 0x06000000, 0x00000001, + 0x110017c1, 0x00000001, 0x110012b7, 0x00000001, + 0x110012b5, 0x00000001, 0x01000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x11000043, 0x00000001, 0x11000044, 0x00000001, + 0x11000222, 0x00000001, 0x11000000, 0x00000001, + 0x2f006558, 0x00000001, 0x32000000, 0x00000001, + 0x84000000, 0x00000001, 0x00000000, 0x00000001, + 0x65582000, 0x00000001, 0x65583000, 0x00000001, + 0x6558a000, 0x00000001, 0x6558b000, 0x00000001, + 0x65580000, 0x00000001, 0x12b50000, 0x00000001, + 0x02000102, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x65580000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x81008100, 0x00000001, 0x88a88100, 0x00000001, + 0x810088a8, 0x00000001, 0x88a888a8, 0x00000001, + 0x81000000, 
0x00000001, 0x88a80000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004000, 0x00000001, 0x86dd6000, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x08060000, 0x00000001, 0x80350000, 0x00000001, + 0x88080000, 0x00000001, 0x88f70000, 0x00000001, + 0x88cc0000, 0x00000001, 0x88090000, 0x00000001, + 0x89150000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x11006000, 0x00000001, 0x06006000, 0x00000001, + 0x02006000, 0x00000001, 0x3a006000, 0x00000001, + 0x2f006000, 0x00000001, 0x84006000, 0x00000001, + 0x32006000, 0x00000001, 0x2c006000, 0x00000001, + 0x3c006000, 0x00000001, 0x2b006000, 0x00000001, + 0x00006000, 0x00000001, 0x00004000, 0x00000001, + 0x00004000, 0x00000001, 0x20004000, 0x00000001, + 0x40004000, 0x00000001, 0x00000000, 0x00000001, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x2c000000, 0x00000001, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x00000000, 0x00000000, + 0x2c000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x06001072, 0x00000001, 0x06000000, 0x00000001, + 0x110012b7, 0x00000001, 0x01000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x32000000, 0x00000001, 0x84000000, 0x00000001, + 0x11000043, 0x00000001, 0x11000044, 0x00000001, + 0x11000222, 0x00000001, 0x11000000, 0x00000001, + 0x2f006558, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec007_data[] = { + 0x10001000, 0x00001000, 0x10000000, 0x00000000, + 0x1000ffff, 0x0000ffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x1000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ff0fff, 0x00ff0fff, 
0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x10ff0fff, 0xffff0fff, 0x00000fff, + 0x1fff0fff, 0x1fff0fff, 0x1fff0fff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ff0000, 0x00ffffff, 0x00ff0000, 0x00ff0000, + 0x00ff0000, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ff0000, 0x00ff0000, 0x00ff0001, 0x00ffffff, + 0x00ff0000, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x00000fff, 0x00000fff, + 0x00000fff, 0x0000ffff, 0xc0ff0000, 0xc0ffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x0000ffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x10001000, 0x00001000, 0x10000000, 0x00000000, + 0x1000ffff, 0x0000ffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x1000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x10ff0fff, 0xffff0fff, 0x00000fff, + 0x1fff0fff, 0x1fff0fff, 0x1fff0fff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ff0000, 0x00ffffff, 0x00ff0000, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ff0000, 0x00ff0000, 0x00ff0001, 0x00ffffff, + 0x00ff0000, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec008_data[] = { + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800000, 0x0e008c8e, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08909581, 0x00008680, 0x00000200, 0x00000000, + 0x10900082, 0x28008680, 0x00000200, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000300, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0000, 0x00000000, 0x00000200, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x02a00084, 0x08008890, 0x00000600, 0x00000000, + 0x02ab848a, 0x08000000, 0x00000500, 0x00000000, + 0x02a00084, 0x10008200, 0x00000600, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 
0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab0084, 0x08000000, 0x00000500, 0x00000000, + 0x00a00000, 0x04008280, 0x00000600, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x04ab8e84, 0x0c000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00000000, 0x0400ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0800ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0800ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0c00ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x10008200, 0x00000700, 0x00000000, + 0x00000000, 0x08008200, 0x00000700, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x0000ccd0, 0x00000800, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00808786, 0x16009496, 0x00000900, 0x00000000, + 0x00800086, 0x12009092, 0x00000900, 0x00000000, + 0x00800086, 0x12009092, 0x00000900, 0x00000000, + 0x00800000, 0x0e008c8e, 0x00000900, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08908192, 0x00008680, 0x00000a00, 0x00000000, + 0x10908292, 0x28008680, 0x00000a00, 0x00000000, + 0x809b9392, 0x00000000, 0x00000900, 0x00000000, + 0x809b9392, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b8f92, 0x00000000, 0x00000900, 0x00000000, + 0x009b0092, 0x00000000, 0x00000900, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x009b0092, 0x00000000, 0x00000900, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000a00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000a00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000a00, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000b00, 0x00000000, + 0x00000000, 0x00a089c2, 0x00000df0, 0x00000000, + 0x000b0085, 0x00a00000, 0x00000af0, 0x00000000, + 0x000b0085, 0x00a00000, 0x00000af0, 0x00000000, + 0x00000000, 0x00a089c2, 0x00000df0, 0x00000000, + 0x000b0000, 0x00000000, 0x00000a00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000b00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000b00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000b00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000b00, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000c00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000082, 0x00000d00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000c00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000c00, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x00000d0f, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x00000d0f, 0x00000000, + 0x02ab8a84, 0x08000000, 0x00000d00, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000d00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000d00, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000d00, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000d00, 0x00000000, + 0x04ab8e84, 0x0c000000, 0x00000d00, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000d00, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000d00, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000d00, 0x00000000, + 0x02ab0084, 0x08000000, 0x00000d00, 0x00000000, + 0x00ab0000, 0x04000000, 0x00000d00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000d00, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec009_data[] = { + 0x00000000, 0x00000060, 0x00000000, 0x00000090, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000050, 0x00000000, 0x000000a0, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000000a0, 0x00000000, 0x00000050, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000800, 0x00000000, 0x00000700, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000900, 0x00000000, 0x00000600, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00008000, 0x00000000, 0x00007000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00009000, 0x00000000, 0x00006000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x0000a000, 0x00000000, 0x00005000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000c0000, 0x00000000, 0x00030000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000d0000, 0x00000000, 0x00020000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000e0000, 0x00000000, 0x00010000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000040, 0x00000000, 0x000000b0, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000070, 0x00000000, 0x00000080, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000090, 0x00000000, 0x00000060, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000080, 0x00000000, 0x00000070, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000700, 0x00000000, 0x00000800, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00007000, 0x00000000, 0x00008000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00080000, 0x00000000, 0x00070000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000c00, 0x00000000, 0x00000300, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000d00, 0x00000000, 0x00000200, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00600000, 0x00000000, 0x00900000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00d00000, 0x00000000, 0x00200000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00500000, 0x00000000, 0x00a00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x00800000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00e00000, 0x00000000, 0x00100000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00f00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00f00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00100000, 0x00000000, 0x00e00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00300000, 0x00000000, 0x00c00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00800000, 0x00000000, 0x00700000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00900000, 0x00000000, 0x00600000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00a00000, 0x00000000, 0x00500000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00b00000, 0x00000000, 0x00400000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x00400000, 0x00000090, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x00400000, 0x000000a0, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x00400000, 0x00000050, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x00400000, 0x00000700, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x00400000, 0x00000600, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x00400000, 0x00007000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x00400000, 0x00006000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x00400000, 0x00005000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x00400000, 0x00030000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x00400000, 0x00020000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x00400000, 0x00010000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x00400000, 0x00000080, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x00400000, 0x00000800, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x00400000, 0x00008000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x00400000, 0x00070000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x00400000, 0x00000300, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x00400000, 0x00000200, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00400000, 0x000000b0, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x00400000, 0x00000060, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000080, 0x00400000, 0x00000070, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x06000000, 0x00000090, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x07000000, 0x00000090, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x06000000, 0x000000a0, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x07000000, 0x000000a0, 0x08000000, + 
0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x06000000, 0x00000050, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x07000000, 0x00000050, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x06000000, 0x00000700, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x06000000, 0x00000600, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x06000000, 0x00007000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x06000000, 0x00006000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x06000000, 0x00005000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x06000000, 0x00030000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x06000000, 0x00020000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x06000000, 0x00010000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x07000000, 0x00000700, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x07000000, 0x00000600, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x07000000, 0x00007000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x07000000, 0x00006000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x07000000, 0x00005000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x07000000, 0x00030000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x07000000, 0x00020000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x07000000, 0x00010000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x06000000, 0x00000080, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x07000000, 0x00000080, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x06000000, 0x00000800, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x06000000, 0x00008000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x06000000, 0x00070000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x06000000, 0x00000300, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x06000000, 0x00000200, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x07000000, 0x00000800, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x07000000, 0x00008000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x07000000, 0x00070000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x07000000, 0x00000300, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x07000000, 0x00000200, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x06000000, 0x000000b0, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x07000000, 0x000000b0, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x06000000, 0x00000060, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x07000000, 0x00000060, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000080, 0x06000000, 0x00000070, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 
0x00000080, 0x07000000, 0x00000070, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000060, 0x00c00000, 0x00000090, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000050, 0x00c00000, 0x000000a0, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000000a0, 0x00c00000, 0x00000050, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000800, 0x00c00000, 0x00000700, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000900, 0x00c00000, 0x00000600, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00008000, 0x00c00000, 0x00007000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00009000, 0x00c00000, 0x00006000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x0000a000, 0x00c00000, 0x00005000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000c0000, 0x00c00000, 0x00030000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000d0000, 0x00c00000, 0x00020000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000e0000, 0x00c00000, 0x00010000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000070, 0x00c00000, 0x00000080, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000700, 0x00c00000, 0x00000800, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00007000, 0x00c00000, 0x00008000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00080000, 0x00c00000, 0x00070000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000c00, 0x00c00000, 0x00000300, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000d00, 0x00c00000, 0x00000200, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00c00000, 0x000000b0, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000090, 0x00c00000, 0x00000060, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000080, 0x00c00000, 0x00000070, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x00400000, 0x00b00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x00400000, 0x00900000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x00400000, 0x00c00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00400000, 0x00a00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x00400000, 0x00800000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x00400000, 0x00d00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x00400000, 0x00700000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x00400000, 0x00600000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x00400000, 0x00500000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x00400000, 0x00400000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00400000, 0x00f00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00400000, 0x00f00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x00400000, 0x00e00000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x06000000, 0x00b00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x07000000, 0x00b00000, 0x08000000, + 
0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x06000000, 0x00900000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x07000000, 0x00900000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x06000000, 0x00c00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x07000000, 0x00c00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x06000000, 0x00a00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x07000000, 0x00a00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x06000000, 0x00800000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x07000000, 0x00800000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x06000000, 0x00d00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x07000000, 0x00d00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x06000000, 0x00700000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x06000000, 0x00600000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x06000000, 0x00500000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x06000000, 0x00400000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x07000000, 0x00700000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x07000000, 0x00600000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x07000000, 0x00500000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x07000000, 0x00400000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x06000000, 0x00f00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x07000000, 0x00f00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x06000000, 0x00f00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x06000000, 0x00e00000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x07000000, 0x00f00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x07000000, 0x00e00000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x00c00000, 0x00b00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00600000, 0x00c00000, 0x00900000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00300000, 0x00c00000, 0x00c00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00c00000, 0x00a00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00700000, 0x00c00000, 0x00800000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00200000, 0x00c00000, 0x00d00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00800000, 0x00c00000, 0x00700000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00900000, 0x00c00000, 0x00600000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00a00000, 0x00c00000, 0x00500000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00b00000, 0x00c00000, 0x00400000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00c00000, 0x00f00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00c00000, 0x00f00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00100000, 0x00c00000, 0x00e00000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x00400000, 0x00000000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x00400000, 0x00000000, 0x00b00000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x06000000, 0x00000000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x06000000, 0x00000000, 0x09000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x07000000, 0x00000000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x07000000, 0x00000000, 0x08000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x000f0000, 0x00c00000, 0x00000000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00f00000, 0x00c00000, 0x00000000, 0x00300000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000f0000, 0x00000000, 0x00000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00f00000, 0x00000000, 0x00000000, + 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec010_data[] = { + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x00000000, + 0x0000000b, 0x00000008, 0x00000009, 0x0000000f, + 0x0000000f, 0x0000000f, 0x0000000f, 0x0000000f, + 0x0000000c, 0x0000000d, 0x00000001, 0x00000001, + 0x0000000e, 0x00000005, 0x00000002, 0x00000002, + 0x00000004, 0x00000003, 0x00000003, 0x00000003, + 0x00000003, 0x00000040, 0x00000040, 0x00000040, + 0x00000040, 0x00000040, 0x00000040, 0x00000040, + 0x00000040, 0x00000040, 0x00000040, 0x00000040, + 0x00000045, 0x00000044, 0x00000044, 0x00000044, + 0x00000044, 0x00000044, 0x00000041, 0x00000042, + 0x00000043, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x0000004b, + 0x0000004b, 0x0000004a, 0x0000004a, 0x0000004a, + 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a, + 0x0000004a, 0x0000004a, 0x0000004a, 0x00000047, + 0x00000047, 0x00000048, 0x00000048, 0x00000049, + 0x00000049, 0x0000004c, 0x0000004c, 0x0000004c, + 0x0000004c, 0x0000004c, 0x0000004c, 0x0000004c, + 0x0000004c, 0x0000004c, 0x0000004c, 0x0000004c, + 0x00000051, 
0x00000050, 0x00000050, 0x00000050, + 0x00000050, 0x00000050, 0x0000004d, 0x0000004e, + 0x0000004f, 0x00000052, 0x00000053, 0x00000054, + 0x00000054, 0x00000055, 0x00000056, 0x00000057, + 0x00000057, 0x00000057, 0x00000057, 0x00000058, + 0x00000059, 0x00000059, 0x0000005a, 0x0000005a, + 0x0000005b, 0x0000005b, 0x0000005c, 0x0000005c, + 0x0000005c, 0x0000005c, 0x0000005d, 0x0000005d, + 0x0000005e, 0x0000005e, 0x0000005f, 0x0000005f, + 0x0000005f, 0x0000005f, 0x0000005f, 0x0000005f, + 0x0000005f, 0x0000005f, 0x00000060, 0x00000060, + 0x00000061, 0x00000061, 0x00000061, 0x00000061, + 0x00000062, 0x00000063, 0x00000064, 0x00000064, + 0x00000065, 0x00000066, 0x00000067, 0x00000067, + 0x00000067, 0x00000067, 0x00000068, 0x00000069, + 0x00000069, 0x00000040, 0x00000040, 0x00000046, + 0x00000046, 0x00000046, 0x00000046, 0x0000004c, + 0x0000004c, 0x0000000a, 0x0000000a, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec011_data[] = { + 0x0008002c, 0x00080234, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080230, + 0x00080332, 0x0008063c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0008002c, 0x00080234, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080230, + 0x00080332, 0x00080738, 0x0008083c, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0008002c, 0x00080234, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080230, + 0x00080332, 0x00080738, 0x0008093a, 0x00080a3c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080634, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080834, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080932, 0x00080a34, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 
0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x00090644, 0x00000000, 0x000d8045, 0x000d4145, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090145, 0x00090944, 0x00000000, 0x00000000, + 0x0009061c, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x0009033a, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x0009063d, 0x00090740, 0x000d803f, 0x000d413f, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009013f, 0x00090840, 0x000dc93d, 0x000d093d, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0324, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a003e, + 0x000a0140, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0324, 0x000a0520, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a003e, + 0x000a0140, 0x000a0842, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0124, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0224, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a003c, 0x000a0037, 0x000ec139, 0x000e0139, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x000a0742, 0x00000000, 0x00000000, + 0x000a0d41, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x000a0d3e, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0037, 0x000a0139, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080634, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080834, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080932, 0x00080a34, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009061c, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x0009033a, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x0009063d, 0x00090740, 0x000d803f, 0x000d413f, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009013f, 0x00090840, 0x000dc93d, 0x000d093d, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a003c, 0x000a0037, 0x000ec139, 0x000e0139, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x000a0742, 0x00000000, 0x00000000, + 0x000a0d41, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x000a0d3e, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0037, 0x000a0139, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec012_data[] = { + 0x00000006, 0x00000001, 0x00000004, 0x00000001, + 0x00000006, 0x00000001, 0x00000000, 0x00000001, + 0x00000004, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000010, 0x00000001, 0x00000000, 0x00000001, + 0x00000040, 0x00000001, 0x00000010, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x06200000, 0x00000001, 0x00c00000, 0x00000001, + 0x02c00000, 0x00000001, 0x00200000, 0x00000001, + 0x00400000, 0x00000001, 0x00700000, 0x00000001, + 0x00300000, 0x00000001, 0x00000000, 0x00000001, + 0x00a00000, 0x00000001, 0x00b00000, 0x00000001, + 0x00e00000, 0x00000001, 0x00500000, 0x00000001, + 0x00800000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000004, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000010, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00000001, 0x00700000, 0x00000001, + 0x00a00000, 0x00000001, 0x00b00000, 0x00000001, + 0x00200000, 0x00000001, 0x00000000, 0x00000001, + 0x00300000, 0x00000001, 0x00800000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec013_data[] = { + 0xf7fffff0, 0xf7fffff1, 0xfffffff0, 0xf7fffff3, + 0xfffffff1, 0xfffffff3, 0xffffffff, 0xffffffff, + 0xf7ffff0f, 0xf7ffff0f, 0xffffff0f, 0xffffff0f, + 0xffffff0f, 0xffffffff, 0xffffffff, 0xffffffff, + 0x100fffff, 0xf10fffff, 0xf10fffff, 0xf70fffff, + 0xf70fffff, 0xff0fffff, 0xff0fffff, 0xff1fffff, + 0xff0fffff, 0xff0fffff, 0xff0fffff, 0xff0fffff, + 0xff1fffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xfffffff1, 0xfffffff3, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffff0f, 0xffffff0f, 0xffffff0f, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xff0fffff, 0xff0fffff, 0xff0fffff, 0xff0fffff, + 0xff0fffff, 0xff1fffff, 0xff0fffff, 0xff1fffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec014_data[] = { + 0x00000000, 0x00000001, 0x00000003, 0x00000002, + 0x00000004, 0x00000005, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000002, 0x00000003, + 0x00000004, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000002, 0x00000003, 0x00000000, + 0x00000000, 0x00000004, 0x00000005, 0x00000006, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000002, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000001, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000001, 0x00000001, + 0x00000002, 0x00000003, 0x00000004, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec022_data[] = { + 0x81008100, 0x00000001, 0x88a88100, 0x00000001, + 0x810088a8, 0x00000001, 0x88a888a8, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004000, 0x00000001, 0x86dd6000, 0x00000001, + 0x81000000, 0x00000001, 0x88a80000, 0x00000001, + 0x08060000, 0x00000001, 0x80350000, 0x00000001, + 0x88080000, 0x00000001, 0x88f70000, 0x00000001, + 0x88cc0000, 0x00000001, 0x88090000, 0x00000001, + 0x89150000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x11006000, 0x00000001, 0x06006000, 0x00000001, + 0x02006000, 0x00000001, 0x3a006000, 0x00000001, + 0x2f006000, 0x00000001, 0x84006000, 0x00000001, + 0x32006000, 0x00000001, 0x2c006000, 0x00000001, + 0x3c006000, 0x00000001, 0x2b006000, 0x00000001, + 0x00006000, 0x00000001, 0x00004000, 0x00000001, + 0x00004000, 0x00000001, 0x20004000, 0x00000001, + 0x40004000, 0x00000001, 0x00000000, 0x00000001, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x2c000000, 0x00000001, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x11000000, 0x00000001, 0x06000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x2f000000, 0x00000001, 0x84000000, 0x00000001, + 0x32000000, 0x00000001, 0x00000000, 0x00000000, + 0x2c000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2b000000, 0x00000001, 0x3c000000, 0x00000001, + 0x3b000000, 0x00000001, 0x00000000, 0x00000001, + 0x06001072, 0x00000001, 0x06000000, 0x00000001, + 0x110012b7, 0x00000001, 0x01000000, 0x00000001, + 0x02000000, 0x00000001, 0x3a000000, 0x00000001, + 0x32000000, 0x00000001, 0x84000000, 0x00000001, + 0x11000043, 0x00000001, 0x11000044, 0x00000001, + 0x11000222, 0x00000001, 0x11000000, 0x00000001, + 0x2f006558, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static 
u32 nbl_sec023_data[] = { + 0x10001000, 0x00001000, 0x10000000, 0x00000000, + 0x1000ffff, 0x0000ffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000fff, 0x00000fff, 0x1000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, + 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, 0x00ff0fff, + 0x00ff0fff, 0x10ff0fff, 0xffff0fff, 0x00000fff, + 0x1fff0fff, 0x1fff0fff, 0x1fff0fff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0xffffffff, + 0x00ff0000, 0x00ffffff, 0x00ff0000, 0x00ffffff, + 0x00ffffff, 0x00ffffff, 0x00ffffff, 0x00ffffff, + 0x00ff0000, 0x00ff0000, 0x00ff0001, 0x00ffffff, + 0x00ff0000, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec024_data[] = { + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00809190, 0x16009496, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800090, 0x12009092, 0x00000100, 0x00000000, + 0x00800000, 0x0e008c8e, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08900081, 0x00008680, 0x00000200, 0x00000000, + 0x10900082, 0x28008680, 0x00000200, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x809b0093, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b008f, 0x00000000, 0x00000100, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x009b0000, 0x00000000, 0x00000100, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 
0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000200, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000300, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x000b0085, 0x00a00000, 0x000002f0, 0x00000000, + 0x00000000, 0x00a089c2, 0x000005f0, 0x00000000, + 0x000b0000, 0x00000000, 0x00000200, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000300, 0x00000000, + 0x40000000, 0x01c180c2, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000082, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0085, 0x08000000, 0x00000400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000400, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x01ab0083, 0x0ca00000, 0x0000050f, 0x00000000, + 0x02ab848a, 0x08000000, 0x00000500, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00ab8f8e, 0x04000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x04ab8e84, 0x0c000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab848f, 0x08000000, 0x00000500, 0x00000000, + 0x02ab0084, 0x08000000, 0x00000500, 0x00000000, + 0x00ab0000, 0x04000000, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00ab0000, 0x00000000, 0x00000500, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec025_data[] = { + 0x00000060, 0x00000090, 0x00000001, 0x00000000, + 0x00000050, 0x000000a0, 0x00000001, 0x00000000, + 0x000000a0, 0x00000050, 0x00000001, 0x00000000, + 0x00000800, 0x00000700, 0x00000001, 0x00000000, + 0x00000900, 0x00000600, 0x00000001, 0x00000000, + 0x00008000, 0x00007000, 0x00000001, 0x00000000, + 0x00009000, 0x00006000, 0x00000001, 0x00000000, + 0x0000a000, 0x00005000, 0x00000001, 0x00000000, + 0x000c0000, 0x00030000, 0x00000001, 0x00000000, + 0x000d0000, 0x00020000, 0x00000001, 0x00000000, + 0x000e0000, 0x00010000, 0x00000001, 0x00000000, + 0x00000040, 0x000000b0, 0x00000001, 0x00000000, + 0x00000070, 0x00000080, 0x00000001, 0x00000000, + 0x00000090, 0x00000060, 0x00000001, 0x00000000, + 0x00000080, 0x00000070, 0x00000001, 0x00000000, + 0x00000700, 0x00000800, 0x00000001, 0x00000000, + 0x00007000, 0x00008000, 0x00000001, 0x00000000, + 0x00080000, 0x00070000, 0x00000001, 0x00000000, + 0x00000c00, 0x00000300, 0x00000001, 0x00000000, + 0x00000d00, 0x00000200, 0x00000001, 0x00000000, + 0x00400000, 0x00b00000, 0x00000001, 0x00000000, + 0x00600000, 0x00900000, 0x00000001, 0x00000000, + 0x00300000, 0x00c00000, 0x00000001, 0x00000000, + 0x00500000, 0x00a00000, 0x00000001, 0x00000000, + 0x00700000, 0x00800000, 0x00000001, 0x00000000, + 0x00000000, 0x00f00000, 0x00000001, 0x00000000, + 0x00000000, 0x00f00000, 0x00000001, 0x00000000, + 0x00100000, 0x00e00000, 0x00000001, 0x00000000, + 0x00200000, 0x00d00000, 0x00000001, 0x00000000, + 0x00800000, 0x00700000, 0x00000001, 0x00000000, + 0x00900000, 0x00600000, 0x00000001, 0x00000000, + 0x00a00000, 0x00500000, 0x00000001, 0x00000000, + 0x00b00000, 0x00400000, 0x00000001, 0x00000000, + 0x000f0000, 0x00000000, 0x00000001, 0x00000000, + 0x00f00000, 0x00000000, 0x00000001, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
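+ /* The non-zero nbl_sec025_data entries above pair complementary
+  * per-nibble masks (0x00000060/0x00000090, 0x000c0000/0x00030000,
+  * 0x00400000/0x00b00000, ...) with a 0x00000001 valid flag; their
+  * exact meaning is not documented in this patch and is inferred
+  * from the bit patterns only.
+  */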
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec026_data[] = { + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, + 0x0000000a, 0x0000000a, 0x0000000a, 0x00000000, + 0x0000000b, 0x00000008, 0x00000009, 0x0000000f, + 0x0000000f, 0x0000000f, 0x0000000f, 0x0000000f, + 0x0000000c, 0x0000000d, 0x00000001, 0x00000001, + 0x0000000e, 0x00000005, 0x00000002, 0x00000002, + 0x00000004, 
0x00000003, 0x00000003, 0x00000003, + 0x00000003, 0x0000000a, 0x0000000a, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec027_data[] = { + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080634, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080834, 0x0008082e, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00080020, 0x00080228, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00080224, + 0x00080326, 0x00080730, 0x00080932, 0x00080a34, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009061c, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x0009033a, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00090200, 0x00090304, 0x00090408, 0x0009050c, + 0x00090610, 0x00090714, 0x00090818, 0x0009121c, + 0x0009131e, 0x00000000, 0x00000000, 0x00000000, + 0x0009063d, 0x00090740, 0x000d803f, 0x000d413f, + 0x0009030c, 0x0009041c, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0009013f, 0x00090840, 0x000dc93d, 0x000d093d, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a003c, 0x000a0037, 0x000ec139, 0x000e0139, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x000a0742, 0x00000000, 0x00000000, + 0x000a0d41, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000a0036, + 0x000a0138, 0x00000000, 0x00000000, 0x00000000, + 0x000a0d3e, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000a0037, 0x000a0139, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec028_data[] = { + 0x00000006, 0x00000001, 0x00000004, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000010, 0x00000001, + 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00500000, 0x00000001, 0x00700000, 0x00000001, + 0x00a00000, 0x00000001, 0x00b00000, 0x00000001, + 0x00200000, 0x00000001, 0x00000000, 0x00000001, + 0x00300000, 0x00000001, 0x00800000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec029_data[] = { + 0xfffffff0, 0xfffffff1, 0xfffffff3, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffff0f, 0xffffff0f, 0xffffff0f, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xff0fffff, 0xff0fffff, 0xff0fffff, 0xff0fffff, + 0xff0fffff, 0xff1fffff, 0xff0fffff, 0xff1fffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, +}; + +static u32 nbl_sec030_data[] = { + 0x00000000, 0x00000001, 0x00000002, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000001, 0x00000001, 0x00000001, + 0x00000002, 0x00000003, 0x00000004, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec039_data[] = { + 0xfef80000, 0x00000002, 0x000002e0, 0x00000000, + 0xfef8013e, 0x00000002, 0x000002e0, 0x00000000, + 0x6660013e, 0x726e6802, 0x02224e42, 0x00000000, + 0x6660013e, 0x726e6802, 0x02224e42, 0x00000000, + 0x66600000, 0x726e6802, 0x02224e42, 0x00000000, + 0x66600000, 0x726e6802, 0x02224e42, 0x00000000, + 0x66600000, 0x00026802, 0x02224e40, 0x00000000, + 0x66627800, 0x00026802, 0x02224e40, 0x00000000, + 0x66600000, 0x00026a76, 0x02224e40, 0x00000000, + 0x66600000, 0x00026802, 0x00024e40, 0x00000000, + 0x66600000, 0x00026802, 0x00024e40, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec040_data[] = { + 0x0040fb3f, 0x00000001, 0x0440fb3f, 0x00000001, + 0x0502fa00, 0x00000001, 0x0602f900, 0x00000001, + 0x0903e600, 0x00000001, 0x0a03e500, 0x00000001, + 0x1101e600, 0x00000001, 0x1201e500, 0x00000001, + 0x0000ff00, 0x00000001, 0x0008ff07, 0x00000001, + 0x00ffff00, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec046_4p_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00077c2b, 0x005c0000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x00000000, 0x00008100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00073029, 0x00480000, + 0x70000000, 0x00000020, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xa0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00002100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00000009, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x70000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x38430000, + 0x70000006, 0x00000020, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x98cb1180, 0x6e36d469, + 0x9d8eb91c, 0x87e3ef47, 0xa2931288, 0x08405c5a, + 0x73865086, 0x00000080, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x000b3849, 0x38430000, + 0x00000006, 0x0000c100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x4c016100, 0x00000014, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec047_data[] = { + 0x2040dc3f, 0x00000001, 0x2000dcff, 0x00000001, + 0x2200dcff, 0x00000001, 0x0008dc01, 0x00000001, + 0x0001de00, 0x00000001, 0x2900c4ff, 0x00000001, + 0x3100c4ff, 0x00000001, 0x2b00c4ff, 0x00000001, + 0x3300c4ff, 0x00000001, 0x2700d8ff, 0x00000001, + 0x2300d8ff, 0x00000001, 0x2502d800, 0x00000001, + 0x2102d800, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec052_data[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x30000000, 0x000b844c, 0xc8580000, + 0x00000006, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0xb0d3668b, 0xb0555e12, + 0x03b055c6, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0xa64b3449, 0x405a3cc1, + 0x00000006, 0x3d2d3300, 0x00000010, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x26473429, 0x00482cc1, + 0x00000000, 0x00ccd300, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec053_data[] = { + 0x0840f03f, 0x00000001, 0x0040f03f, 0x00000001, + 0x0140fa3f, 0x00000001, 0x0100fa0f, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec058_data[] = { + 0x00000000, 0x00000000, 0x59f89400, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00470000, + 0x00000000, 0x3c000000, 0xa2e40006, 0x00000017, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x19fa1400, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x28440000, + 0x038e5186, 0x3c000000, 0xa8e40012, 0x00000047, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x0001f3d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x38c30000, + 0x0000000a, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x0001f3d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x38c30000, + 0x0000000a, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x000113d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00073829, 0x00430000, + 0x00000000, 0x3c000000, 0x0000000a, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x000293d0, 0x00000000, + 0x00000000, 0xb0000000, 0x00133889, 0x08400000, + 0x03865086, 0x3c000000, 0x00000016, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec059_data[] = { + 0x0200e4ff, 0x00000001, 0x0400e2ff, 0x00000001, + 0x1300ecff, 0x00000001, 0x1500eaff, 0x00000001, + 0x0300e4ff, 0x00000001, 0x0500e2ff, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec062_data[] = { + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x90939899, 0x88809c9b, 0x0000013d, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec063_data[] = { + 0x0500e2ff, 0x00000001, 0x0900e2ff, 0x00000001, + 0x1900e2ff, 0x00000001, 0x1100e2ff, 0x00000001, + 0x0100e2ff, 0x00000001, 0x0600e1ff, 0x00000001, + 0x0a00e1ff, 0x00000001, 0x1a00e1ff, 0x00000001, + 0x1200e1ff, 0x00000001, 0x0200e1ff, 0x00000001, + 0x0000fcff, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec065_data[] = { + 0x006e120c, 0x006e1210, 0x006e4208, 0x006e4218, + 0x00200b02, 0x00200b00, 0x000e1900, 0x000e1906, + 0x00580208, 0x00580204, 0x004c0208, 0x004c0207, + 0x0002110c, 0x0002110c, 0x0012010c, 0x00100110, + 
0x0010010c, 0x000a010c, 0x0008010c, 0x00060000, + 0x00160000, 0x00140000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, +}; + +static u32 nbl_sec066_data[] = { + 0x006e120c, 0x006e1210, 0x006e4208, 0x006e4218, + 0x00200b02, 0x00200b00, 0x000e1900, 0x000e1906, + 0x00580208, 0x00580204, 0x004c0208, 0x004c0207, + 0x0002110c, 0x0002110c, 0x0012010c, 0x00100110, + 0x0010010c, 0x000a010c, 0x0008010c, 0x00060000, + 0x00160000, 0x00140000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, + 0x001e0000, 0x001e0000, 0x001e0000, 0x001e0000, +}; + +static u32 nbl_sec071_4p_data[] = { + 0x00000000, 0x00000000, 0x00113d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7029b00, 0x00000000, + 0x00000000, 0x43000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x51e00000, 0x00000c9c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00293d00, 0x00000000, + 0x00000000, 0x00000000, 0x67089b00, 0x00000002, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x80000000, 0x00000000, 0xb1e00000, 0x0000189c, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x014b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00213d00, 0x00000000, + 0x00000000, 0x00000000, 0xe7069b00, 0x00000001, + 0x00000000, 0x43000000, 0x015b0c70, 0x00000000, + 0x00000000, 0x00000000, 0x92600000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d29a00, 0x000149c4, + 0x00000000, 0x4b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00553d00, 0x00000000, + 0x00000000, 0x00000000, 0xe6d2c000, 0x000149c4, + 0x00000000, 0x5b000000, 0x00000004, 0x00000000, + 0x80000000, 0x00022200, 0x62600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 0x00000000, 0x64d49200, 0x5e556945, + 0xc666d89a, 0x4b0001a9, 0x00004c84, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x006d3d00, 0x00000000, + 0x00000000, 
0x00000000, 0x6ed4ba00, 0x5ef56bc5, + 0xc666d8c0, 0x5b0001a9, 0x00004dc4, 0x00000000, + 0x80000000, 0x00022200, 0xc2600000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000002, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00700000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 
nbl_sec072_data[] = { + 0x84006aff, 0x00000001, 0x880066ff, 0x00000001, + 0x140040ff, 0x00000001, 0x70000cff, 0x00000001, + 0x180040ff, 0x00000001, 0x30000cff, 0x00000001, + 0x10004cff, 0x00000001, 0x30004cff, 0x00000001, + 0x0100ecff, 0x00000001, 0x0300ecff, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec116_data[] = { + 0x00000000, 0x00000000, 0x3fff8000, 0x00000007, + 0x3fff8000, 0x00000007, 0x3fff8000, 0x00000007, + 0x3fff8000, 0x00000003, 0x3fff8000, 0x00000003, + 0x3fff8000, 0x00000007, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec124_data[] = { + 0xfffffffc, 0xffffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300010, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300010, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300fff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00301fff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x0030ffff, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0xffffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000fffe, 0x00000000, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0x00ffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x0000000f, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000580, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec125_data[] = { + 0xfffffffc, 0x01ffffff, 0x00300000, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x00000001, 0x00300000, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000540, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x011003ff, 0x00300000, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000005c0, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0x103fffff, 0x00300001, 0x70000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec126_data[] = { + 0xfffffffc, 0xffffffff, 0x00300001, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000500, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x000001ff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000005c0, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00002013, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000400, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00002013, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000400, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffc, 0x01ffffff, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000480, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfffffffe, 0x00000001, 0x00300000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000540, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static u32 nbl_sec137_data[] = { + 0x0000017a, 0x000000f2, 0x00000076, 
0x0000017a, + 0x0000017a, 0x00000080, 0x00000024, 0x0000017a, + 0x0000017a, 0x00000191, 0x00000035, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000d2, 0x00000066, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000f2, 0x00000076, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, +}; + +static u32 nbl_sec138_data[] = { + 0x0000017a, 0x000000f2, 0x00000076, 0x0000017a, + 0x0000017a, 0x00000080, 0x00000024, 0x0000017a, + 0x0000017a, 0x00000191, 0x00000035, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000d2, 0x00000066, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, + 0x0000017a, 0x000000f2, 0x00000076, 0x0000017a, + 0x0000017a, 0x0000017a, 0x0000017a, 0x0000017a, +}; + +void nbl_write_all_regs(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + u32 *nbl_sec046_data; + u32 *nbl_sec071_data; + u8 eth_mode = NBL_COMMON_TO_ETH_MODE(common); + u32 i = 0; + + switch (eth_mode) { + case 1: + nbl_sec046_data = nbl_sec046_1p_data; + nbl_sec071_data = nbl_sec071_1p_data; + break; + case 2: + nbl_sec046_data = nbl_sec046_2p_data; + nbl_sec071_data = nbl_sec071_2p_data; + break; + case 4: + nbl_sec046_data = nbl_sec046_4p_data; + nbl_sec071_data = nbl_sec071_4p_data; + break; + default: + nbl_sec046_data = nbl_sec046_2p_data; + nbl_sec071_data = nbl_sec071_2p_data; + } + + for (i = 0; i < NBL_SEC006_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC006_REGI(i), nbl_sec006_data[i]); + } + + for (i = 0; i < NBL_SEC007_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC007_REGI(i), nbl_sec007_data[i]); + + for (i = 0; i < NBL_SEC008_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC008_REGI(i), nbl_sec008_data[i]); + } + + for (i = 0; i < NBL_SEC009_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC009_REGI(i), nbl_sec009_data[i]); + } + + for (i = 0; i < NBL_SEC010_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC010_REGI(i), nbl_sec010_data[i]); + + for (i = 0; i < NBL_SEC011_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC011_REGI(i), nbl_sec011_data[i]); + } + + for (i = 0; i < NBL_SEC012_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC012_REGI(i), nbl_sec012_data[i]); + + for (i = 0; i < NBL_SEC013_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC013_REGI(i), nbl_sec013_data[i]); + + for (i = 0; i < NBL_SEC014_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC014_REGI(i), nbl_sec014_data[i]); + + for (i = 0; i < NBL_SEC022_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC022_REGI(i), nbl_sec022_data[i]); + + for (i = 0; i < NBL_SEC023_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC023_REGI(i), nbl_sec023_data[i]); + + for (i = 0; i < NBL_SEC024_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC024_REGI(i), nbl_sec024_data[i]); + } + + for (i = 0; i < NBL_SEC025_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC025_REGI(i), nbl_sec025_data[i]); + } + + for (i = 0; i < NBL_SEC026_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC026_REGI(i), nbl_sec026_data[i]); + + for (i = 0; i < 
NBL_SEC027_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC027_REGI(i), nbl_sec027_data[i]); + } + + for (i = 0; i < NBL_SEC028_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC028_REGI(i), nbl_sec028_data[i]); + + for (i = 0; i < NBL_SEC029_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC029_REGI(i), nbl_sec029_data[i]); + + for (i = 0; i < NBL_SEC030_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC030_REGI(i), nbl_sec030_data[i]); + + for (i = 0; i < NBL_SEC039_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC039_REGI(i), nbl_sec039_data[i]); + + for (i = 0; i < NBL_SEC040_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC040_REGI(i), nbl_sec040_data[i]); + + for (i = 0; i < NBL_SEC046_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC046_REGI(i), nbl_sec046_data[i]); + + for (i = 0; i < NBL_SEC047_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC047_REGI(i), nbl_sec047_data[i]); + + for (i = 0; i < NBL_SEC052_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC052_REGI(i), nbl_sec052_data[i]); + + for (i = 0; i < NBL_SEC053_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC053_REGI(i), nbl_sec053_data[i]); + + for (i = 0; i < NBL_SEC058_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC058_REGI(i), nbl_sec058_data[i]); + + for (i = 0; i < NBL_SEC059_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC059_REGI(i), nbl_sec059_data[i]); + + for (i = 0; i < NBL_SEC062_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC062_REGI(i), nbl_sec062_data[i]); + + for (i = 0; i < NBL_SEC063_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC063_REGI(i), nbl_sec063_data[i]); + + for (i = 0; i < NBL_SEC065_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC065_REGI(i), nbl_sec065_data[i]); + + for (i = 0; i < NBL_SEC066_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC066_REGI(i), nbl_sec066_data[i]); + + for (i = 0; i < NBL_SEC071_SIZE; i++) { + if ((i + 1) % NBL_SEC_BLOCK_SIZE == 0) + nbl_hw_rd32(phy_mgt, NBL_HW_DUMMY_REG); + + nbl_hw_wr32(phy_mgt, NBL_SEC071_REGI(i), nbl_sec071_data[i]); + } + + for (i = 0; i < NBL_SEC072_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC072_REGI(i), nbl_sec072_data[i]); + + for (i = 0; i < NBL_SEC116_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC116_REGI(i), nbl_sec116_data[i]); + + for (i = 0; i < NBL_SEC124_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC124_REGI(i), nbl_sec124_data[i]); + + for (i = 0; i < NBL_SEC125_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC125_REGI(i), nbl_sec125_data[i]); + + for (i = 0; i < NBL_SEC126_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC126_REGI(i), nbl_sec126_data[i]); + + for (i = 0; i < NBL_SEC137_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC137_REGI(i), nbl_sec137_data[i]); + + for (i = 0; i < NBL_SEC138_SIZE; i++) + nbl_hw_wr32(phy_mgt, NBL_SEC138_REGI(i), nbl_sec138_data[i]); + + nbl_hw_wr32(phy_mgt, NBL_SEC000_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC001_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC002_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC003_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC004_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC005_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC015_ADDR, 0x000f0908); + nbl_hw_wr32(phy_mgt, NBL_SEC016_ADDR, 0x10110607); + nbl_hw_wr32(phy_mgt, NBL_SEC017_ADDR, 0x383a3032); + nbl_hw_wr32(phy_mgt, NBL_SEC018_ADDR, 0x0201453f); + nbl_hw_wr32(phy_mgt, NBL_SEC019_ADDR, 0x00000a41); + nbl_hw_wr32(phy_mgt, NBL_SEC020_ADDR, 0x000000c8); + nbl_hw_wr32(phy_mgt, NBL_SEC021_ADDR, 0x00000400); + nbl_hw_wr32(phy_mgt, NBL_SEC031_ADDR, 0x000f0908); + nbl_hw_wr32(phy_mgt, NBL_SEC032_ADDR, 0x00001011); + nbl_hw_wr32(phy_mgt, NBL_SEC033_ADDR, 
0x00003032); + nbl_hw_wr32(phy_mgt, NBL_SEC034_ADDR, 0x0201003f); + nbl_hw_wr32(phy_mgt, NBL_SEC035_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC036_ADDR, 0x00001701); + nbl_hw_wr32(phy_mgt, NBL_SEC037_ADDR, 0x009238a1); + nbl_hw_wr32(phy_mgt, NBL_SEC038_ADDR, 0x0000002e); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(0), 0x00000200); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(1), 0x00000300); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(2), 0x00000105); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(3), 0x00000106); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(4), 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(5), 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(6), 0x00000041); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(7), 0x00000082); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(8), 0x00000020); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(9), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(10), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(11), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(12), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(13), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(14), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC041_REGI(15), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC042_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC043_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC044_ADDR, 0x28212000); + nbl_hw_wr32(phy_mgt, NBL_SEC045_ADDR, 0x00002b29); + nbl_hw_wr32(phy_mgt, NBL_SEC048_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC049_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC050_ADDR, 0x352b2000); + nbl_hw_wr32(phy_mgt, NBL_SEC051_ADDR, 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC054_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC055_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC056_ADDR, 0x2b222100); + nbl_hw_wr32(phy_mgt, NBL_SEC057_ADDR, 0x00000038); + nbl_hw_wr32(phy_mgt, NBL_SEC060_ADDR, 0x24232221); + nbl_hw_wr32(phy_mgt, NBL_SEC061_ADDR, 0x0000002e); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(0), 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(1), 0x00000005); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(2), 0x00000011); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(3), 0x00000005); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(4), 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(5), 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(6), 0x00000006); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(7), 0x00000012); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(8), 0x00000006); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(9), 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(10), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(11), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(12), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(13), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(14), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC064_REGI(15), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC067_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC068_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC069_ADDR, 0x22212000); + nbl_hw_wr32(phy_mgt, NBL_SEC070_ADDR, 0x3835322b); + nbl_hw_wr32(phy_mgt, NBL_SEC073_ADDR, 0x0316a5ff); + nbl_hw_wr32(phy_mgt, NBL_SEC074_ADDR, 0x0316a5ff); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(0), 0x08802080); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(1), 0x12a05080); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(2), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC075_REGI(3), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(0), 0x08802080); + nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(1), 0x12a05080); + nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(2), 0xffffffff); + 
nbl_hw_wr32(phy_mgt, NBL_SEC076_REGI(3), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(0), 0x08802080); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(1), 0x12a05080); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(2), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC077_REGI(3), 0xffffffff); + nbl_hw_wr32(phy_mgt, NBL_SEC078_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC079_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC080_ADDR, 0x0014a248); + nbl_hw_wr32(phy_mgt, NBL_SEC081_ADDR, 0x00000d33); + nbl_hw_wr32(phy_mgt, NBL_SEC082_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC083_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC084_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC085_ADDR, 0x000144d2); + nbl_hw_wr32(phy_mgt, NBL_SEC086_ADDR, 0x31322e2f); + nbl_hw_wr32(phy_mgt, NBL_SEC087_ADDR, 0x0a092d2c); + nbl_hw_wr32(phy_mgt, NBL_SEC088_ADDR, 0x33050804); + nbl_hw_wr32(phy_mgt, NBL_SEC089_ADDR, 0x14131535); + nbl_hw_wr32(phy_mgt, NBL_SEC090_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC091_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC092_ADDR, 0x00000008); + nbl_hw_wr32(phy_mgt, NBL_SEC093_ADDR, 0x0000000e); + nbl_hw_wr32(phy_mgt, NBL_SEC094_ADDR, 0x0000000f); + nbl_hw_wr32(phy_mgt, NBL_SEC095_ADDR, 0x00000015); + nbl_hw_wr32(phy_mgt, NBL_SEC096_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC097_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC098_ADDR, 0x00000008); + nbl_hw_wr32(phy_mgt, NBL_SEC099_ADDR, 0x00000011); + nbl_hw_wr32(phy_mgt, NBL_SEC100_ADDR, 0x00000013); + nbl_hw_wr32(phy_mgt, NBL_SEC101_ADDR, 0x00000014); + nbl_hw_wr32(phy_mgt, NBL_SEC102_ADDR, 0x00000010); + nbl_hw_wr32(phy_mgt, NBL_SEC103_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC104_ADDR, 0x0000004d); + nbl_hw_wr32(phy_mgt, NBL_SEC105_ADDR, 0x08020a09); + nbl_hw_wr32(phy_mgt, NBL_SEC106_ADDR, 0x00000005); + nbl_hw_wr32(phy_mgt, NBL_SEC107_ADDR, 0x00000006); + nbl_hw_wr32(phy_mgt, NBL_SEC108_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC109_ADDR, 0x00110a09); + nbl_hw_wr32(phy_mgt, NBL_SEC110_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC111_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC112_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC113_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC114_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC115_ADDR, 0x00000009); + nbl_hw_wr32(phy_mgt, NBL_SEC117_ADDR, 0x0000000a); + nbl_hw_wr32(phy_mgt, NBL_SEC118_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(0), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(1), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(2), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(3), 0x00000000); + nbl_hw_wr32(phy_mgt, NBL_SEC119_REGI(4), 0x00000100); + nbl_hw_wr32(phy_mgt, NBL_SEC120_ADDR, 0x0000003c); + nbl_hw_wr32(phy_mgt, NBL_SEC121_ADDR, 0x00000003); + nbl_hw_wr32(phy_mgt, NBL_SEC122_ADDR, 0x000000bc); + nbl_hw_wr32(phy_mgt, NBL_SEC123_ADDR, 0x0000023b); + nbl_hw_wr32(phy_mgt, NBL_SEC127_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC128_ADDR, 0x00000001); + nbl_hw_wr32(phy_mgt, NBL_SEC129_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC130_ADDR, 0x00000002); + nbl_hw_wr32(phy_mgt, NBL_SEC131_ADDR, 0x00000003); + nbl_hw_wr32(phy_mgt, NBL_SEC132_ADDR, 0x00000003); + nbl_hw_wr32(phy_mgt, NBL_SEC133_ADDR, 0x00000004); + nbl_hw_wr32(phy_mgt, NBL_SEC134_ADDR, 0x00000004); + nbl_hw_wr32(phy_mgt, NBL_SEC135_ADDR, 0x0000000e); + nbl_hw_wr32(phy_mgt, NBL_SEC136_ADDR, 0x0000000e); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.h new file mode 100644 index 000000000000..6b416b9b1ab5 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis_regs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_PHY_LEONIS_REGS_H_ +#define _NBL_PHY_LEONIS_REGS_H_ + +void nbl_write_all_regs(void *priv); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c index dbdfc1035139..522cdb54fbfe 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c @@ -7,6 +7,8 @@ #include "nbl_queue_leonis.h" #include "nbl_resource_leonis.h" +static int nbl_res_queue_reset_uvn_pkt_drop_stats(void *priv, u16 func_id, u16 global_queue_id); + static struct nbl_queue_vsi_info * nbl_res_queue_get_vsi_info(struct nbl_resource_mgt *res_mgt, u16 vsi_id) { @@ -54,6 +56,7 @@ static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; struct nbl_event_queue_update_data event_data; u16 *txrx_queues, *queues_context; + u32 *uvn_stat_pkt_drop; u16 queue_index; int i, ret = 0; @@ -72,14 +75,20 @@ static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 goto alloc_queue_contex_fail; } + uvn_stat_pkt_drop = kcalloc(num_queues, sizeof(*uvn_stat_pkt_drop), GFP_ATOMIC); + if (!uvn_stat_pkt_drop) { + ret = -ENOMEM; + goto alloc_uvn_stat_pkt_drop_fail; + } + queue_info->num_txrx_queues = num_queues; queue_info->txrx_queues = txrx_queues; queue_info->queues_context = queues_context; + queue_info->uvn_stat_pkt_drop = uvn_stat_pkt_drop; for (i = 0; i < num_queues; i++) { queue_index = find_first_zero_bit(queue_mgt->txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); if (queue_index == NBL_MAX_TXRX_QUEUE) { - nbl_err(common, NBL_DEBUG_QUEUE, "There is no available txrx queues left\n"); ret = -ENOSPC; goto get_txrx_queue_fail; } @@ -96,12 +105,15 @@ static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 return 0; get_txrx_queue_fail: + kfree(uvn_stat_pkt_drop); while (--i + 1) { queue_index = txrx_queues[i]; clear_bit(queue_index, queue_mgt->txrx_queue_bitmap); } queue_info->num_txrx_queues = 0; queue_info->txrx_queues = NULL; +alloc_uvn_stat_pkt_drop_fail: + kfree(queues_context); alloc_queue_contex_fail: kfree(txrx_queues); alloc_txrx_queues_fail: @@ -119,8 +131,10 @@ static void nbl_res_queue_remove_queue_info(struct nbl_resource_mgt *res_mgt, u1 kfree(queue_info->txrx_queues); kfree(queue_info->queues_context); + kfree(queue_info->uvn_stat_pkt_drop); queue_info->txrx_queues = NULL; queue_info->queues_context = NULL; + queue_info->uvn_stat_pkt_drop = NULL; queue_info->num_txrx_queues = 0; } @@ -285,25 +299,45 @@ void nbl_res_queue_remove_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, nbl_res_queue_set_qid_map_table(res_mgt, tail); } -static int nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 count, u16 *result) +static int +nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 count, u16 rss_entry_size, + struct nbl_queue_vsi_info *vsi_info) { struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_queue_mgt *queue_mgt =
NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + u32 rss_ret_base_start; + u32 rss_ret_base_end; + u16 func_id; + u16 rss_entry_count; u16 index, i, j, k; int success = 1; int ret = -EFAULT; - for (i = 0; i < NBL_EPRO_RSS_RET_TBL_DEPTH;) { + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_info->vsi_id); + if (func_id < NBL_MAX_ETHERNET && + (vsi_info->vsi_index == NBL_VSI_DATA || vsi_info->vsi_index == NBL_VSI_USER)) { + rss_ret_base_start = 0; + rss_ret_base_end = NBL_EPRO_PF_RSS_RET_TBL_DEPTH; + vsi_info->rss_entry_size = NBL_EPRO_PF_RSS_ENTRY_SIZE; + rss_entry_count = NBL_EPRO_PF_RSS_RET_TBL_COUNT; + } else { + rss_ret_base_start = NBL_EPRO_PF_RSS_RET_TBL_DEPTH; + rss_ret_base_end = NBL_EPRO_RSS_RET_TBL_DEPTH; + vsi_info->rss_entry_size = rss_entry_size; + rss_entry_count = count; + } + + for (i = rss_ret_base_start; i < rss_ret_base_end;) { index = find_next_zero_bit(queue_mgt->rss_ret_bitmap, - NBL_EPRO_RSS_RET_TBL_DEPTH, i); - if (index == NBL_EPRO_RSS_RET_TBL_DEPTH) { + rss_ret_base_end, i); + if (index == rss_ret_base_end) { nbl_err(common, NBL_DEBUG_QUEUE, "There is no available rss ret left"); break; } success = 1; - for (j = index + 1; j < (index + count); j++) { - if (j >= NBL_EPRO_RSS_RET_TBL_DEPTH) { + for (j = index + 1; j < (index + rss_entry_count); j++) { + if (j >= rss_ret_base_end) { success = 0; break; } @@ -314,9 +348,9 @@ static int nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 } } if (success) { - for (k = index; k < (index + count); k++) + for (k = index; k < (index + rss_entry_count); k++) set_bit(k, queue_mgt->rss_ret_bitmap); - *result = index; + vsi_info->rss_ret_base = index; ret = 0; break; } @@ -396,14 +430,14 @@ static int nbl_res_queue_setup_rss(void *priv, u16 vsi_id) rss_entry_size = (vsi_info->queue_num + NBL_EPRO_RSS_ENTRY_SIZE_UNIT - 1) / NBL_EPRO_RSS_ENTRY_SIZE_UNIT; + rss_entry_size = ilog2(roundup_pow_of_two(rss_entry_size)); count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size; - ret = nbl_res_queue_get_rss_ret_base(res_mgt, count, &vsi_info->rss_ret_base); + ret = nbl_res_queue_get_rss_ret_base(res_mgt, count, rss_entry_size, vsi_info); if (ret) return -ENOSPC; - vsi_info->rss_entry_size = rss_entry_size; vsi_info->rss_vld = true; return 0; @@ -460,16 +494,46 @@ static void nbl_res_queue_setup_queue_cfg(struct nbl_queue_mgt *queue_mgt, cfg_param->half_offload_en = queue_param->half_offload_en; } +static void nbl_res_queue_update_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id, bool add) +{ + if (net_id >= NBL_MAX_NET_ID) + return; + + if (add) { + queue_mgt->net_id_ref_vsinum[net_id]++; + } else { + /* probe calls clear_queue first, so check for non-zero to support + * disabling dsch more than once + */ + if (queue_mgt->net_id_ref_vsinum[net_id]) + queue_mgt->net_id_ref_vsinum[net_id]--; + } +} + +static u16 nbl_res_queue_get_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id) +{ + if (net_id >= NBL_MAX_NET_ID) + return 0; + + return queue_mgt->net_id_ref_vsinum[net_id]; +} + static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt, - struct nbl_queue_cfg_param *queue_cfg, u16 func_id) + struct nbl_queue_cfg_param *queue_cfg, + u16 func_id, u16 vsi_id) { struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_queue_vsi_info *vsi_info; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_vnet_queue_info_param param = {0}; u16 global_queue_id = queue_cfg->global_queue_id; u8 bus, dev, func; +
vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func); queue_info->split = queue_cfg->split; queue_info->queue_size = queue_cfg->size; @@ -494,11 +558,15 @@ static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt, queue_cfg->last_avail_idx); phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_cfg, global_queue_id); + if (nbl_res_queue_get_netid_refnum(queue_mgt, vsi_info->net_id)) + phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, vsi_info->net_id, 1); } else { phy_ops->set_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), ¶m, NBL_PAIR_ID_GET_RX(global_queue_id)); phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue_id); if (!queue_cfg->extend_header) phy_ops->restore_uvn_context(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id, queue_cfg->split, @@ -508,6 +576,46 @@ static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt, } } +static void nbl_res_queue_remove_hw_dq(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_cfg_param *queue_cfg, u16 func_id) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 global_queue_id = queue_cfg->global_queue_id; + int ret = 0; + + if (queue_cfg->tx) { + ret = phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + if (ret) { + pr_err("lso_dsch_drain failed\n"); + return; + } + phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_TX(global_queue_id)); + + phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + + queue_info->queues_context[NBL_PAIR_ID_GET_TX(global_queue_id)] = + phy_ops->save_dvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_info->split); + phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + } else { + phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_RX(global_queue_id)); + phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + + queue_info->queues_context[NBL_PAIR_ID_GET_RX(global_queue_id)] = + phy_ops->save_uvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_info->split, + queue_info->queue_size); + + phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue_id); + } +} + static void nbl_res_queue_remove_all_hw_dq(struct nbl_resource_mgt *res_mgt, u16 func_id, struct nbl_queue_vsi_info *vsi_info) { @@ -546,6 +654,7 @@ static void nbl_res_queue_remove_all_hw_dq(struct nbl_resource_mgt *res_mgt, u16 for (i = start; i < end; i++) { global_queue = queue_info->txrx_queues[i]; phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue); phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); } @@ -600,12 +709,12 @@ static int nbl_res_queue_init_epro_vpt_table(struct nbl_resource_mgt *res_mgt, u u16 vsi_id, vf_vsi_id; u16 i; - vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); 
nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); if (sriov_info->bdf != 0) { /* init pf vsi */ - for (i = 0; i < NBL_VSI_MAX; i++) { + for (i = NBL_VSI_SERV_PF_DATA_TYPE; i <= NBL_VSI_SERV_PF_XDP_TYPE; i++) { vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, i); phy_ops->init_epro_vpt_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); } @@ -632,7 +741,7 @@ static int nbl_res_queue_init_ipro_dn_sport_tbl(struct nbl_resource_mgt *res_mgt u16 eth_id, vsi_id, vf_vsi_id; int i; - vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); if (sriov_info->bdf != 0) { @@ -717,7 +826,19 @@ static int nbl_res_queue_setup_queue(void *priv, struct nbl_txrx_queue_param *pa nbl_res_queue_setup_queue_cfg(NBL_RES_MGT_TO_QUEUE_MGT(res_mgt), &cfg_param, param, is_tx, func_id); - nbl_res_queue_setup_hw_dq(res_mgt, &cfg_param, func_id); + nbl_res_queue_setup_hw_dq(res_mgt, &cfg_param, func_id, param->vsi_id); + return 0; +} + +static int nbl_res_queue_remove_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_cfg_param cfg_param = {0}; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, param->vsi_id); + + nbl_res_queue_setup_queue_cfg(NBL_RES_MGT_TO_QUEUE_MGT(res_mgt), + &cfg_param, param, is_tx, func_id); + nbl_res_queue_remove_hw_dq(res_mgt, &cfg_param, func_id); return 0; } @@ -758,30 +879,6 @@ static int nbl_res_queue_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id, return 0; } -static void nbl_res_queue_update_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id, bool add) -{ - if (net_id >= NBL_MAX_NET_ID) - return; - - if (add) { - queue_mgt->net_id_ref_vsinum[net_id]++; - } else { - /* probe call clear_queue first, so judge nor zero to support disable dsch more than - * once - */ - if (queue_mgt->net_id_ref_vsinum[net_id]) - queue_mgt->net_id_ref_vsinum[net_id]--; - } -} - -static u16 nbl_res_queue_get_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id) -{ - if (net_id >= NBL_MAX_NET_ID) - return 0; - - return queue_mgt->net_id_ref_vsinum[net_id]; -} - static int nbl_res_queue_cfg_dsch(void *priv, u16 vsi_id, bool vld) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -828,7 +925,7 @@ static int nbl_res_queue_cfg_dsch(void *priv, u16 vsi_id, bool vld) return 0; } -static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) +static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); @@ -847,11 +944,11 @@ static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) if (real_qps == vsi_info->curr_qps) return 0; - if (real_qps) + if (real_qps && rss_indir_set) phy_ops->cfg_epro_rss_ret(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->rss_ret_base, vsi_info->rss_entry_size, real_qps, - queue_info->txrx_queues + vsi_info->queue_offset); + queue_info->txrx_queues + vsi_info->queue_offset, NULL); if (!vsi_info->curr_qps) phy_ops->set_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, @@ -977,6 +1074,9 @@ static int nbl_res_queue_cfg_qdisc_mqprio(void *priv, struct nbl_tc_qidsc_param case NBL_FW_PORT_SPEED_25G: max_rate = NBL_RATE_MBPS_25G; break; + case NBL_FW_PORT_SPEED_10G: + max_rate = NBL_RATE_MBPS_10G; + break; default: 
return -EOPNOTSUPP; } @@ -1023,17 +1123,96 @@ static int nbl_res_queue_cfg_qdisc_mqprio(void *priv, struct nbl_tc_qidsc_param for (i = 0; i < param->num_tc; i++) weight[i] = param->info[i].max_tx_rate / gravity; + phy_ops->set_tc_wgt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, weight, param->num_tc); /* Config shaping */ - phy_ops->set_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, total_tx_rate, + phy_ops->set_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, total_tx_rate, 0, param->enable && total_tx_rate, is_active); kfree(weight); return 0; } +static int nbl_res_queue_set_tc_wgt(void *priv, u16 vsi_id, u8 *weight, u8 num_tc) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + u8 *weight_to_set; + int i; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + weight_to_set = kcalloc(num_tc, sizeof(*weight_to_set), GFP_KERNEL); + if (!weight_to_set) + return -ENOMEM; + + for (i = 0; i < num_tc; i++) + weight_to_set[i] = weight[i] * NBL_SHAPING_WGT_MAX / NBL_TC_MAX_BW; + phy_ops->set_tc_wgt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, + weight_to_set, num_tc); + + kfree(weight_to_set); + return 0; +} + +static void nbl_res_restore_tc_mgt(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + u8 *weight; + int i; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + weight = kcalloc(NBL_MAX_TC_NUM, sizeof(*weight), GFP_KERNEL); + if (!weight) + return; + + for (i = 0; i < NBL_MAX_TC_NUM; i++) + weight[i] = 1; + phy_ops->set_tc_wgt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, + weight, NBL_MAX_TC_NUM); + + kfree(weight); +} + +static u16 nbl_res_queue_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + int i; + + queue_info = &queue_mgt->queue_info[func_id]; + + if (queue_info->txrx_queues) + for (i = 0; i < queue_info->num_txrx_queues; i++) + if (global_queue_id == queue_info->txrx_queues[i]) + return i; + + return U16_MAX; +} + +static u16 nbl_res_queue_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + + if (!queue_info->num_txrx_queues) + return 0xffff; + + return queue_info->txrx_queues[local_qid]; +} + static void nbl_res_queue_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1050,25 +1229,64 @@ static void nbl_res_queue_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_queue_vsi_info *vsi_info = NULL; - int i, j; - u32 rxfh_indir_size; - u16 queue_num; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 i, indir_size; vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); if (!vsi_info) return; -
queue_num = vsi_info->curr_qps_static ? vsi_info->curr_qps_static : vsi_info->queue_num; - rxfh_indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size; + phy_ops->read_rss_indir(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, indir, + vsi_info->rss_ret_base, vsi_info->rss_entry_size); - for (i = 0, j = 0; i < rxfh_indir_size; i++) { - indir[i] = j; - j++; - if (j == queue_num) - j = 0; + indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size; + for (i = 0; i < indir_size; i++) { + indir[i] = nbl_res_queue_get_local_queue_id(res_mgt, vsi_id, indir[i]); + indir[i] -= vsi_info->queue_offset; } } +static int nbl_res_queue_set_rxfh_indir(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + struct nbl_queue_info *queue_info = NULL; + u32 *rss_ret; + u16 func_id = 0; + int i = 0; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + if (indir) { + rss_ret = kcalloc(indir_size, sizeof(indir[0]), GFP_KERNEL); + if (!rss_ret) + return -ENOMEM; + func_id = NBL_COMMON_TO_MGT_PF(common); + queue_info = &queue_mgt->queue_info[func_id]; + /* local queue to global queue */ + for (i = 0; i < indir_size; i++) + rss_ret[i] = nbl_res_queue_get_vsi_global_qid(res_mgt, vsi_id, + vsi_info->queue_offset + + indir[i]); + phy_ops->cfg_epro_rss_ret(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_info->rss_ret_base, + vsi_info->rss_entry_size, 0, + NULL, rss_ret); + kfree(rss_ret); + } + + if (!vsi_info->curr_qps) + phy_ops->set_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + vsi_info->rss_ret_base, vsi_info->rss_entry_size); + + return 0; +} + static void nbl_res_queue_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size) { *rxfh_rss_key_size = NBL_EPRO_RSS_SK_SIZE; @@ -1098,12 +1314,22 @@ static void nbl_res_queue_get_rss_key(void *priv, u8 *rss_key) nbl_res_rss_key_reverse_order(rss_key + i * NBL_EPRO_RSS_PER_KEY_SIZE); } -static void nbl_res_queue_get_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id) +static void nbl_res_queue_get_rss_alg_sel(void *priv, u16 vsi_id, u8 *alg_sel) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->get_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, alg_sel); +} + +static int nbl_res_queue_set_rss_alg_sel(void *priv, u16 vsi_id, u8 alg_sel) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int ret = 0; - phy_ops->get_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, alg_sel); + ret = phy_ops->set_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, alg_sel); + return ret; } static void nbl_res_queue_clear_queues(void *priv, u16 vsi_id) @@ -1116,6 +1342,7 @@ static void nbl_res_queue_clear_queues(void *priv, u16 vsi_id) nbl_res_queue_remove_rss(priv, vsi_id); nbl_res_queue_remove_q2vsi(priv, vsi_id); + nbl_res_restore_tc_mgt(priv, vsi_id); if (!queue_info->num_txrx_queues) return; @@ -1130,20 +1357,6 @@ static void nbl_res_queue_clear_queues(void *priv, u16 vsi_id) nbl_res_queue_free_txrx_queues(res_mgt, vsi_id); } -/* for pmd driver */ -static u16 nbl_res_queue_get_vsi_global_qid(void *priv, u16 
vsi_id, u16 local_qid) -{ - struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); - struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); - struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; - - if (!queue_info->num_txrx_queues) - return 0xffff; - - return queue_info->txrx_queues[local_qid]; -} - static int nbl_res_queue_cfg_log(void *priv, u16 vsi_id, u16 qps, bool vld) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1238,6 +1451,20 @@ static void nbl_res_queue_adapt_desc_gother(void *priv) } } +static void nbl_res_queue_set_desc_high_throughput(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_adapt_desc_gother *adapt_desc_gother = &queue_mgt->adapt_desc_gother; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (adapt_desc_gother->level != NBL_ADAPT_DESC_GOTHER_LEVEL1) { + phy_ops->set_uvn_desc_wr_timeout(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_ADAPT_DESC_GOTHER_LEVEL1_TIMEOUT); + adapt_desc_gother->level = NBL_ADAPT_DESC_GOTHER_LEVEL1; + } +} + static void nbl_res_flr_clear_queues(void *priv, u16 vf_id) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1254,9 +1481,13 @@ static int nbl_res_queue_restore_tx_queue(struct nbl_resource_mgt *res_mgt, u16 struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_queue_info *queue_info; + struct nbl_queue_vsi_info *vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); struct nbl_queue_cfg_param queue_cfg = {0}; u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + if (!vsi_info) + return -ENOSPC; + queue_info = &queue_mgt->queue_info[func_id]; global_queue = queue_info->txrx_queues[local_queue_id]; @@ -1272,6 +1503,8 @@ static int nbl_res_queue_restore_tx_queue(struct nbl_resource_mgt *res_mgt, u16 phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); + phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue, vsi_info->net_id, 1); return 0; } @@ -1298,6 +1531,7 @@ static int nbl_res_queue_restore_rx_queue(struct nbl_resource_mgt *res_mgt, u16 phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue); phy_ops->cfg_rx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); @@ -1344,6 +1578,7 @@ nbl_res_queue_stop_abnormal_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + nbl_res_queue_reset_uvn_pkt_drop_stats(res_mgt, func_id, global_queue); return 0; default: break; @@ -1352,25 +1587,7 @@ nbl_res_queue_stop_abnormal_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, return -EINVAL; } -static u16 nbl_res_queue_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) -{ - struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; - struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); - struct nbl_queue_info *queue_info; - u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); - 
int i; - - queue_info = &queue_mgt->queue_info[func_id]; - - if (queue_info->txrx_queues) - for (i = 0; i < queue_info->num_txrx_queues; i++) - if (global_queue_id == queue_info->txrx_queues[i]) - return i; - - return U16_MAX; -} - -static int nbl_res_queue_set_tx_rate(void *priv, u16 func_id, int tx_rate) +static int nbl_res_queue_set_tx_rate(void *priv, u16 func_id, int tx_rate, int burst) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); @@ -1382,7 +1599,7 @@ static int nbl_res_queue_set_tx_rate(void *priv, u16 func_id, int tx_rate) bool is_active = false; int max_rate = 0, i; - vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); if (!vsi_info) @@ -1395,6 +1612,9 @@ static int nbl_res_queue_set_tx_rate(void *priv, u16 func_id, int tx_rate) case NBL_FW_PORT_SPEED_25G: max_rate = NBL_RATE_MBPS_25G; break; + case NBL_FW_PORT_SPEED_10G: + max_rate = NBL_RATE_MBPS_10G; + break; default: return -EOPNOTSUPP; } @@ -1411,7 +1631,44 @@ static int nbl_res_queue_set_tx_rate(void *priv, u16 func_id, int tx_rate) /* Config shaping */ return phy_ops->set_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, tx_rate, - !!(tx_rate), is_active); + burst, !!(tx_rate), is_active); +} + +static int nbl_res_queue_set_rx_rate(void *priv, u16 func_id, int rx_rate, int burst) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 vsi_id; + int max_rate = 0; + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + + if (!vsi_info) + return 0; + + switch (res_info->board_info.eth_speed) { + case NBL_FW_PORT_SPEED_100G: + max_rate = NBL_RATE_MBPS_100G; + break; + case NBL_FW_PORT_SPEED_25G: + max_rate = NBL_RATE_MBPS_25G; + break; + case NBL_FW_PORT_SPEED_10G: + max_rate = NBL_RATE_MBPS_10G; + break; + default: + return -EOPNOTSUPP; + } + + if (rx_rate > max_rate) + return -EINVAL; + + /* Config ucar */ + return phy_ops->set_ucar(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, rx_rate, + burst, !!(rx_rate)); } static void nbl_res_queue_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) @@ -1429,6 +1686,96 @@ static void nbl_res_queue_get_active_func_bitmaps(void *priv, unsigned long *bit } } +static int nbl_res_queue_configure_mirror_table(void *priv, bool mirror_en, u16 func_id, u8 mt_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + struct nbl_event_mirror_outputport_data blacklist_data = {0}; + u16 vsi_id; + u16 queue_id = 0; + + queue_info = &queue_mgt->queue_info[func_id]; + if (!queue_info->num_txrx_queues) { + dev_err(dev, "func:%d num_txrx_queues is 0!", func_id); + return -EINVAL; + } + + blacklist_data.opcode = mirror_en; + blacklist_data.func_id = func_id; + nbl_event_notify(NBL_EVENT_MIRROR_OUTPUTPORT, &blacklist_data, 
NBL_COMMON_TO_VSI_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + + queue_id = queue_info->txrx_queues[0]; + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + + return phy_ops->configure_mirror_table(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + mirror_en, vsi_id, queue_id, mt_id); +} + +static void nbl_res_queue_set_dvn_desc_req(void *priv, u32 desc_req) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_dvn_desc_req(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), desc_req); +} + +static u32 nbl_res_queue_get_dvn_desc_req(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_dvn_desc_req(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_res_queue_reset_uvn_pkt_drop_stats(void *priv, u16 func_id, u16 global_queue_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); + u16 local_queue_id; + + local_queue_id = nbl_res_queue_get_local_queue_id(res_mgt, vsi_id, global_queue_id); + queue_info->uvn_stat_pkt_drop[local_queue_id] = 0; + return 0; +} + +static int nbl_res_queue_get_uvn_pkt_drop_stats(void *priv, u16 vsi_id, + u16 num_queues, u32 *uvn_stat_pkt_drop) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = NULL; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id = 0; + u32 pkt_drop_num = 0; + int i = 0; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + for (i = vsi_info->queue_offset; + i < vsi_info->queue_offset + num_queues && + i < queue_info->num_txrx_queues; i++) { + phy_ops->get_uvn_pkt_drop_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], &pkt_drop_num); + *uvn_stat_pkt_drop = pkt_drop_num - queue_info->uvn_stat_pkt_drop[i]; + uvn_stat_pkt_drop++; + queue_info->uvn_stat_pkt_drop[i] = pkt_drop_num; + } + + return 0; +} + /* NBL_QUEUE_SET_OPS(ops_name, func) * * Use X Macros to reduce setup and remove codes. 
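 *
 * Illustrative sketch only (the NBL_QUEUE_SET_OPS body lives next to this
 * comment and is not reproduced here): assuming a definition along the
 * lines of "#define NBL_QUEUE_SET_OPS(ops_name, func) (res_ops->ops_name = func)",
 * each entry in the list below expands to a plain ops-table assignment,
 * e.g. NBL_QUEUE_SET_OPS(setup_rss, nbl_res_queue_setup_rss) becomes
 * res_ops->setup_rss = nbl_res_queue_setup_rss, so adding or removing an
 * op is a one-line change in the list.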
@@ -1443,6 +1790,7 @@ do { \
	NBL_QUEUE_SET_OPS(setup_rss, nbl_res_queue_setup_rss); \
	NBL_QUEUE_SET_OPS(remove_rss, nbl_res_queue_remove_rss); \
	NBL_QUEUE_SET_OPS(setup_queue, nbl_res_queue_setup_queue); \
+	NBL_QUEUE_SET_OPS(remove_queue, nbl_res_queue_remove_queue); \
	NBL_QUEUE_SET_OPS(remove_all_queues, nbl_res_queue_remove_all_queues); \
	NBL_QUEUE_SET_OPS(cfg_dsch, nbl_res_queue_cfg_dsch); \
	NBL_QUEUE_SET_OPS(setup_cqs, nbl_res_queue_setup_cqs); \
@@ -1452,21 +1800,30 @@ do { \
	NBL_QUEUE_SET_OPS(cfg_qdisc_mqprio, nbl_res_queue_cfg_qdisc_mqprio); \
	NBL_QUEUE_SET_OPS(get_rxfh_indir_size, nbl_res_queue_get_rxfh_indir_size); \
	NBL_QUEUE_SET_OPS(get_rxfh_indir, nbl_res_queue_get_rxfh_indir); \
+	NBL_QUEUE_SET_OPS(set_rxfh_indir, nbl_res_queue_set_rxfh_indir); \
	NBL_QUEUE_SET_OPS(get_rxfh_rss_key_size, nbl_res_queue_get_rxfh_rss_key_size); \
	NBL_QUEUE_SET_OPS(get_rxfh_rss_key, nbl_res_queue_get_rss_key); \
	NBL_QUEUE_SET_OPS(get_rss_alg_sel, nbl_res_queue_get_rss_alg_sel); \
+	NBL_QUEUE_SET_OPS(set_rss_alg_sel, nbl_res_queue_set_rss_alg_sel); \
	NBL_QUEUE_SET_OPS(clear_queues, nbl_res_queue_clear_queues); \
	NBL_QUEUE_SET_OPS(get_vsi_global_queue_id, nbl_res_queue_get_vsi_global_qid); \
	NBL_QUEUE_SET_OPS(cfg_queue_log, nbl_res_queue_cfg_log); \
	NBL_QUEUE_SET_OPS(get_queue_ctx, nbl_req_queue_get_ctx); \
	NBL_QUEUE_SET_OPS(adapt_desc_gother, nbl_res_queue_adapt_desc_gother); \
+	NBL_QUEUE_SET_OPS(set_desc_high_throughput, nbl_res_queue_set_desc_high_throughput); \
	NBL_QUEUE_SET_OPS(flr_clear_queues, nbl_res_flr_clear_queues); \
	NBL_QUEUE_SET_OPS(restore_hw_queue, nbl_res_queue_restore_hw_queue); \
	NBL_QUEUE_SET_OPS(get_local_queue_id, nbl_res_queue_get_local_queue_id); \
	NBL_QUEUE_SET_OPS(set_bridge_mode, nbl_res_queue_set_bridge_mode); \
	NBL_QUEUE_SET_OPS(set_tx_rate, nbl_res_queue_set_tx_rate); \
+	NBL_QUEUE_SET_OPS(set_rx_rate, nbl_res_queue_set_rx_rate); \
	NBL_QUEUE_SET_OPS(stop_abnormal_hw_queue, nbl_res_queue_stop_abnormal_hw_queue); \
	NBL_QUEUE_SET_OPS(get_active_func_bitmaps, nbl_res_queue_get_active_func_bitmaps); \
+	NBL_QUEUE_SET_OPS(set_tc_wgt, nbl_res_queue_set_tc_wgt); \
+	NBL_QUEUE_SET_OPS(configure_mirror_table, nbl_res_queue_configure_mirror_table); \
+	NBL_QUEUE_SET_OPS(get_dvn_desc_req, nbl_res_queue_get_dvn_desc_req); \
+	NBL_QUEUE_SET_OPS(set_dvn_desc_req, nbl_res_queue_set_dvn_desc_req); \
+	NBL_QUEUE_SET_OPS(get_uvn_pkt_drop_stats, nbl_res_queue_get_uvn_pkt_drop_stats); \
} while (0)

 int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *res_ops)
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h
index 5e2620ca7836..efbb19c705ce 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 * Copyright (c) 2022 nebula-matrix Limited.
* Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c index 889edf2749b0..26a3d0d5a8e4 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c @@ -62,12 +62,14 @@ static void nbl_res_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_s struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + u16 default_queue; if (num_info->net_max_qp_num[func_id] != 0) - *queue_num = num_info->net_max_qp_num[func_id]; + default_queue = num_info->net_max_qp_num[func_id]; else - *queue_num = num_info->pf_def_max_net_qp_num; + default_queue = num_info->pf_def_max_net_qp_num; + *queue_num = min_t(u16, default_queue, NBL_VSI_PF_LEGACY_QUEUE_NUM_MAX - default_queue); *queue_size = NBL_DEFAULT_DESC_NUM; if (*queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) { @@ -94,43 +96,40 @@ static int nbl_res_save_vf_bar_info(struct nbl_resource_mgt *res_mgt, u16 func_id, struct nbl_register_net_param *register_param) { struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; u64 pf_bar_start; - u16 pf_bdf; u64 vf_bar_start; + u16 pf_bdf; u64 vf_bar_size; u16 total_vfs; u16 offset; u16 stride; - pf_bar_start = register_param->pf_bar_start; - if (pf_bar_start) { + if (func_id < NBL_RES_MGT_TO_PF_NUM(res_mgt)) { + pf_bar_start = phy_ops->get_pf_bar_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); sriov_info->pf_bar_start = pf_bar_start; dev_info(dev, "sriov_info, pf_bar_start:%llx\n", sriov_info->pf_bar_start); } - pf_bdf = register_param->pf_bdf; - vf_bar_start = register_param->vf_bar_start; + pf_bdf = (u16)sriov_info->bdf; vf_bar_size = register_param->vf_bar_size; total_vfs = register_param->total_vfs; offset = register_param->offset; stride = register_param->stride; if (total_vfs) { - if (pf_bdf != sriov_info->bdf) { - dev_err(dev, "PF bdf donot equal, af record = %u, real pf bdf: %u\n", - sriov_info->bdf, pf_bdf); - return -EIO; - } sriov_info->offset = offset; sriov_info->stride = stride; + vf_bar_start = phy_ops->get_vf_bar_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); sriov_info->vf_bar_start = vf_bar_start; sriov_info->vf_bar_len = vf_bar_size / total_vfs; - dev_info(dev, "sriov_info, bdf:%x:%x.%x, num_vfs:%d, start_vf_func_id:%d," - "offset:%d, stride:%d,", - PCI_BUS_NUM(pf_bdf), PCI_SLOT(pf_bdf & 0xff), PCI_FUNC(pf_bdf & 0xff), - sriov_info->num_vfs, sriov_info->start_vf_func_id, offset, stride); + dev_info(dev, "sriov_info, bdf:%x:%x.%x, num_vfs:%d, start_vf_func_id:%d,", + PCI_BUS_NUM(pf_bdf), PCI_SLOT(pf_bdf & 0xff), PCI_FUNC(pf_bdf & 0xff), + sriov_info->num_vfs, sriov_info->start_vf_func_id); + dev_info(dev, "offset:%d, stride:%d, vf_bar_start: %llx", + offset, stride, sriov_info->vf_bar_start); } return 0; @@ -140,9 +139,7 @@ static int nbl_res_prepare_vf_chan(struct nbl_resource_mgt *res_mgt, u16 func_id, struct nbl_register_net_param *register_param) { struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); - struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; - u16 
pf_bdf;
	u16 total_vfs;
	u16 offset;
	u16 stride;
@@ -155,18 +152,11 @@
	u8 function;
	u16 vf_func_id;

-	pf_bdf = register_param->pf_bdf;
	total_vfs = register_param->total_vfs;
	offset = register_param->offset;
	stride = register_param->stride;

	if (total_vfs) {
-		if (pf_bdf != sriov_info->bdf) {
-			dev_err(dev, "PF bdf donot equal, af record = %u, real pf bdf: %u\n",
-				sriov_info->bdf, pf_bdf);
-			return -EIO;
-		}
-
		/* Configure mailbox qinfo_map_table for the pf's all vf,
		 * so vf's mailbox is ready, vf can use mailbox.
		 */
@@ -236,19 +226,27 @@ static int nbl_res_register_net(void *priv, u16 func_id,
	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
	struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt);
+	struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_vdpa_status **vf_status = NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
	netdev_features_t csumo_features = 0;
	netdev_features_t tso_features = 0;
	netdev_features_t pf_features = 0;
+	netdev_features_t vlano_features = 0;
	u16 tx_queue_num, rx_queue_num;
	u8 mac[ETH_ALEN] = {0};
	u32 quirks;
+	u16 vsi_id;
	int ret = 0;

	if (func_id < NBL_MAX_PF) {
		nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, func_id));
		pf_features = NBL_FEATURE(NETIF_F_NTUPLE);
+		register_result->trusted = 1;
	} else {
		ether_addr_copy(mac, vsi_info->mac_info[func_id].mac);
+		register_result->trusted = vsi_info->mac_info[func_id].trusted;
	}

	ether_addr_copy(register_result->mac, mac);
@@ -263,16 +261,27 @@ static int nbl_res_register_net(void *priv, u16 func_id,
			NBL_FEATURE(NETIF_F_GSO_UDP_L4);
	}
+	if (func_id < NBL_MAX_PF) /* VLAN offload is not supported on VFs */
+		vlano_features = NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_TX) |
+				 NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_RX) |
+				 NBL_FEATURE(NETIF_F_HW_VLAN_STAG_TX) |
+				 NBL_FEATURE(NETIF_F_HW_VLAN_STAG_RX);
+
	register_result->hw_features |= pf_features |
					csumo_features |
					tso_features |
+					vlano_features |
					NBL_FEATURE(NETIF_F_SG) |
-					NBL_FEATURE(NETIF_F_HW_TC);
+					NBL_FEATURE(NETIF_F_HW_TC) |
+					NBL_FEATURE(NETIF_F_RXHASH);
+
	register_result->features |= register_result->hw_features |
				     NBL_FEATURE(NETIF_F_HW_TC) |
				     NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER) |
				     NBL_FEATURE(NETIF_F_HW_VLAN_STAG_FILTER);
+	register_result->vlan_features = register_result->features;
+
	register_result->max_mtu = NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD;
	register_result->vlan_proto = vsi_info->mac_info[func_id].vlan_proto;
@@ -302,6 +311,31 @@ static int nbl_res_register_net(void *priv, u16 func_id,
		goto update_active_vf_fail;
	}
+	if (register_param->is_vdpa) {
+		set_bit(func_id, resource_info->vdpa.vdpa_func_bitmap);
+
+		if (!vf_status[func_id]) {
+			vf_status[func_id] = devm_kzalloc(dev, sizeof(struct nbl_vdpa_status),
+							  GFP_KERNEL);
+			if (!vf_status[func_id]) {
+				ret = -ENOMEM;
+				goto alloc_nbl_vf_stats_fail;
+			}
+		}
+		vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA);
+		phy_ops->get_dstat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id,
+					    &vf_status[func_id]->init_stats.tx_packets,
+					    &vf_status[func_id]->init_stats.tx_bytes);
+		phy_ops->get_ustat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id,
+					    &vf_status[func_id]->init_stats.rx_packets,
+					    &vf_status[func_id]->init_stats.rx_bytes);
+		memcpy(&vf_status[func_id]->prev_stats, &vf_status[func_id]->init_stats,
+
sizeof(vf_status[func_id]->prev_stats)); + vf_status[func_id]->timestamp = jiffies; + } else { + clear_bit(func_id, resource_info->vdpa.vdpa_func_bitmap); + } + if (func_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) return 0; @@ -319,8 +353,9 @@ static int nbl_res_register_net(void *priv, u16 func_id, prepare_vf_chan_fail: save_vf_bar_info_fail: +alloc_nbl_vf_stats_fail: update_active_vf_fail: - return -EIO; + return ret; } static int nbl_res_unregister_net(void *priv, u16 func_id) @@ -621,11 +656,56 @@ static void nbl_res_set_offload_status(void *priv, u16 func_id) rep_status->timestamp = jiffies; } +static void nbl_res_vdpa_itr_update(struct nbl_resource_mgt *res_mgt, + u16 func_id, bool active) +{ + struct nbl_vdpa_info *vdpa_info = &res_mgt->resource_info->vdpa; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_vdpa_status *vdpa_vf_stats = vdpa_info->vf_stats[func_id]; + struct nbl_vf_stats cur_stats = {0}, *prev_stats; + u64 tx_rates = 0, rx_rates = 0, pkt_rates = 0, time_diff; + u16 itr_level = 0; + u16 vsi_id; + + if (!vdpa_vf_stats) + return; + + if (active) { + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + phy_ops->get_dstat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &cur_stats.tx_packets, &cur_stats.tx_bytes); + phy_ops->get_ustat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &cur_stats.rx_packets, &cur_stats.rx_bytes); + + time_diff = jiffies - vdpa_vf_stats->timestamp; + if (time_diff > 0) { + prev_stats = &vdpa_vf_stats->prev_stats; + tx_rates = (cur_stats.tx_packets - prev_stats->tx_packets) / time_diff * HZ; + rx_rates = (cur_stats.rx_packets - prev_stats->rx_packets) / time_diff * HZ; + pkt_rates = max_t(u64, tx_rates, rx_rates); + + itr_level = nbl_res_intr_get_suppress_level(res_mgt, pkt_rates, + vdpa_vf_stats->itr_level); + } else { + itr_level = vdpa_vf_stats->itr_level; + } + + memcpy(&vdpa_vf_stats->prev_stats, &cur_stats, sizeof(cur_stats)); + vdpa_vf_stats->timestamp = jiffies; + } + + if (itr_level != vdpa_vf_stats->itr_level) { + nbl_res_intr_set_intr_suppress_level(res_mgt, func_id, 0, U16_MAX, itr_level); + vdpa_vf_stats->itr_level = itr_level; + } +} + static int nbl_res_check_offload_status(void *priv, bool *is_down) { struct nbl_resource_mgt_leonis *res_mgt_leonis = (struct nbl_resource_mgt_leonis *)priv; struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_resource_info *res_info = res_mgt->resource_info; struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_upcall_port_info *upcall_port_info = &res_mgt_leonis->pmd_status.upcall_port_info; @@ -633,6 +713,8 @@ static int nbl_res_check_offload_status(void *priv, bool *is_down) &res_mgt_leonis->pmd_status.rep_status; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); int i; + u16 func_id; + u32 start, batch_cnt; if (!upcall_port_info->upcall_port_active) return 0; @@ -644,6 +726,8 @@ static int nbl_res_check_offload_status(void *priv, bool *is_down) return 0; } + start = res_info->vdpa.start; + batch_cnt = NBL_VDPA_ITR_BATCH_CNT; if (rep_status->timestamp && time_after(jiffies, rep_status->timestamp + 30 * HZ)) { for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_VSI; i++) clear_bit(i, rep_status->rep_vsi_bitmap); @@ -654,10 +738,32 @@ static int nbl_res_check_offload_status(void *priv, bool *is_down) upcall_port_info->upcall_port_active = false; nbl_err(common, NBL_DEBUG_FLOW, "offload found inactive!"); phy_ops->clear_profile_table_action(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + 
phy_ops->ipro_chksum_err_ctrl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), 0); nbl_res_update_offload_status(res_mgt_leonis); *is_down = true; + + start = 0; + batch_cnt = NBL_MAX_FUNC; + } + + i = 0; + for (; start < NBL_MAX_FUNC;) { + func_id = find_next_bit(res_info->vdpa.vdpa_func_bitmap, NBL_MAX_FUNC, start); + if (func_id >= NBL_MAX_FUNC) { + start = 0; + break; + } + i++; + start = func_id + 1; + + nbl_res_vdpa_itr_update(res_mgt, func_id, + upcall_port_info->upcall_port_active); + if (i >= batch_cnt) + break; } + res_info->vdpa.start = start; + return 0; } @@ -963,7 +1069,8 @@ static int nbl_res_register_upcall_port(void *priv, u16 func_id) &res_mgt_leonis->pmd_status.upcall_port_info; struct nbl_rep_offload_status *rep_status = &res_mgt_leonis->pmd_status.rep_status; - u16 vsi_id = nbl_res_func_id_to_vsi_id(&res_mgt_leonis->res_mgt, func_id, NBL_VSI_DATA); + u16 vsi_id = nbl_res_func_id_to_vsi_id(&res_mgt_leonis->res_mgt, func_id, + NBL_VSI_SERV_PF_DATA_TYPE); int i; rep_status->timestamp = jiffies; @@ -1038,6 +1145,14 @@ static void nbl_res_init_cmdq(void *priv, void *data, u16 func_id) (struct nbl_resource_mgt_leonis *)priv; struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_chan_cmdq_init_info *cmdq_param = + (struct nbl_chan_cmdq_init_info *)data; + u8 bus; + u8 dev; + u8 func; + + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func); + cmdq_param->bdf_num = (u16)PCI_DEVID(bus, PCI_DEVFN(dev, func)); phy_ops->init_cmdq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, func_id); } @@ -1166,6 +1281,7 @@ static int nbl_res_get_upcall_port(void *priv, u16 *bdf) struct nbl_resource_mgt_leonis *res_mgt_leonis = (struct nbl_resource_mgt_leonis *)priv; struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); struct nbl_upcall_port_info *upcall_port_info = &res_mgt_leonis->pmd_status.upcall_port_info; u8 bus, dev, func; @@ -1174,7 +1290,7 @@ static int nbl_res_get_upcall_port(void *priv, u16 *bdf) return U32_MAX; nbl_res_func_id_to_bdf(res_mgt, upcall_port_info->func_id, &bus, &dev, &func); - *bdf = PCI_DEVID(bus, PCI_DEVFN(dev, func)); + *bdf = (u16)PCI_DEVID(common->bus, PCI_DEVFN(dev, func)); return 0; } @@ -1302,6 +1418,7 @@ static int nbl_res_set_tc_flow_info(void *priv) } tc_flow_mgt->pf_set_tc_count++; + phy_ops->ipro_chksum_err_ctrl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), 1); nbl_info(common, NBL_DEBUG_FLOW, "tc flow set pf_set_tc_count++=%d\n", tc_flow_mgt->pf_set_tc_count); @@ -1346,6 +1463,8 @@ static int nbl_res_unset_tc_flow_info(void *priv) nbl_tc_unset_flow_info(common->tc_inst_id); nbl_info(common, NBL_DEBUG_FLOW, "tc flow unset inst_id=%d success.\n", common->tc_inst_id); + + phy_ops->ipro_chksum_err_ctrl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), 0); } return 0; @@ -1443,7 +1562,7 @@ static void nbl_res_flr_clear_net(void *priv, u16 vf_id) u16 func_id = vf_id + NBL_MAX_PF; u16 vsi_id; - vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_DATA); + vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_SERV_VF_DATA_TYPE); nbl_res_unregister_rdma(priv, vsi_id); if (nbl_res_vf_is_active(priv, func_id)) @@ -1455,7 +1574,7 @@ static void nbl_res_flr_clear_rdma(void *priv, u16 vf_id) u16 func_id = vf_id + NBL_MAX_PF; u16 vsi_id; - vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_DATA); + vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_SERV_VF_DATA_TYPE); nbl_res_unregister_rdma(priv, vsi_id); } @@ -1467,6 +1586,88 @@ 
static u16 nbl_res_covert_vfid_to_vsi_id(void *priv, u16 vf_id) return nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); } +static bool nbl_res_check_vf_is_active(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_vf_is_active(res_mgt, func_id); +} + +static int nbl_res_check_vf_is_vdpa(void *priv, u16 func_id, u8 *is_vdpa) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + *is_vdpa = test_bit(func_id, resource_info->vdpa.vdpa_func_bitmap); + return 0; +} + +static int nbl_res_get_vdpa_vf_stats(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_vdpa_status *vdpa_vf_stats = NULL; + struct nbl_vf_stats vdpa_vf_stats_current = {0}, *init_stats; + u16 vsi_id; + + if (NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt) && + NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt)[func_id]) { + vdpa_vf_stats = NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt)[func_id]; + init_stats = &vdpa_vf_stats->init_stats; + } else { + dev_err(dev, "function %d vdpa_vf_stats is NULL\n", func_id); + return -EFAULT; + } + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + phy_ops->get_dstat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &vdpa_vf_stats_current.tx_packets, + &vdpa_vf_stats_current.tx_bytes); + phy_ops->get_ustat_vsi_stat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + &vdpa_vf_stats_current.rx_packets, + &vdpa_vf_stats_current.rx_bytes); + + vf_stats->tx_packets = vdpa_vf_stats_current.tx_packets - init_stats->tx_packets; + vf_stats->tx_bytes = vdpa_vf_stats_current.tx_bytes - init_stats->tx_bytes; + vf_stats->rx_packets = vdpa_vf_stats_current.rx_packets - init_stats->rx_packets; + vf_stats->rx_bytes = vdpa_vf_stats_current.rx_bytes - init_stats->rx_bytes; + + return 0; +} + +static int nbl_res_get_ustore_pkt_drop_stats(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_ustore_stats *ustore_stats = NBL_RES_MGT_TO_USTORE_STATS(res_mgt); + struct nbl_ustore_stats ustore_stats_temp = {0}; + u8 eth_id = 0; + int i = 0; + + for (i = 0; i < eth_info->eth_num; i++) { + eth_id = eth_info->eth_id[i]; + phy_ops->get_ustore_pkt_drop_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + eth_id, &ustore_stats_temp); + ustore_stats[eth_id].rx_drop_packets += ustore_stats_temp.rx_drop_packets; + ustore_stats[eth_id].rx_trun_packets += ustore_stats_temp.rx_trun_packets; + } + + return 0; +} + +static int nbl_res_get_ustore_total_pkt_drop_stats(void *priv, u8 eth_id, + struct nbl_ustore_stats *nbl_ustore_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_ustore_stats *ustore_stats = NBL_RES_MGT_TO_USTORE_STATS(res_mgt); + + nbl_ustore_stats->rx_drop_packets = ustore_stats[eth_id].rx_drop_packets; + nbl_ustore_stats->rx_trun_packets = ustore_stats[eth_id].rx_trun_packets; + return 0; +} + static int nbl_res_get_board_id(void *priv) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; @@ -1579,13 +1780,14 @@ static void nbl_res_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_si struct nbl_resource_info 
*res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + u16 default_queue; if (num_info->net_max_qp_num[func_id] != 0) - *queue_num = num_info->net_max_qp_num[func_id]; + default_queue = num_info->net_max_qp_num[func_id]; else - *queue_num = num_info->pf_def_max_net_qp_num; + default_queue = num_info->pf_def_max_net_qp_num; - *queue_size = NBL_DEFAULT_DESC_NUM; + *queue_num = min_t(u16, default_queue, NBL_VSI_PF_LEGACY_QUEUE_NUM_MAX - default_queue); if (*queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) { nbl_warn(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE, @@ -1623,6 +1825,118 @@ static int nbl_res_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *d return 0; } +static int nbl_res_configure_rdma_bw(void *priv, u8 eth_id, int rdma_bw) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->configure_rdma_bw(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, rdma_bw); + + return 0; +} + +static int nbl_res_set_rate_limit(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_rate_limit(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, type, rate); + + return 0; +} + +static u32 nbl_res_get_perf_dump_length(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_perf_dump_length(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static u32 nbl_res_get_perf_dump_data(void *priv, u8 *buffer, u32 length) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_perf_dump_data(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), buffer, length); +} + +static void nbl_res_register_dev_name(void *priv, u16 vsi_id, char *name) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u32 pf_id; + + pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + WARN_ON(pf_id >= NBL_MAX_PF); + strscpy(resource_info->pf_name_list[pf_id], name, IFNAMSIZ); + nbl_info(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_RESOURCE, + "vsi:%u-pf:%u register a pf_name->%s", vsi_id, pf_id, name); +} + +static void nbl_res_get_dev_name(void *priv, u16 vsi_id, char *name) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + int pf_id, vf_id; + u16 func_id; + int name_len; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id); + WARN_ON(pf_id >= NBL_MAX_PF); + name_len = snprintf(name, IFNAMSIZ, "%sv%d", resource_info->pf_name_list[pf_id], vf_id); + if (name_len >= IFNAMSIZ) + nbl_err(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_RESOURCE, + "vsi:%u-pf%uvf%u get name over length", vsi_id, pf_id, vf_id); + + nbl_debug(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_RESOURCE, + "vsi:%u-pf%uvf%u get a pf_name->%s", vsi_id, pf_id, vf_id, name); +} + +static int nbl_res_get_mirror_table_id(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct 
nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_mirror_table_id(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_id, dir, mirror_en, mt_id); +} + +static int nbl_res_configure_mirror(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id) +{ + u16 data_vsi, user_vsi; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + data_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); + user_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_USER_TYPE); + + phy_ops->configure_mirror(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data_vsi, mirror_en, dir, + mt_id); + phy_ops->configure_mirror(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), user_vsi, mirror_en, dir, + mt_id); + + return 0; +} + +static int nbl_res_clear_mirror_cfg(void *priv, u16 func_id) +{ + u16 data_vsi, user_vsi; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + data_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_DATA_TYPE); + user_vsi = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_PF_USER_TYPE); + + phy_ops->clear_mirror_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data_vsi); + phy_ops->clear_mirror_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), user_vsi); + + return 0; +} + static struct nbl_resource_ops res_ops = { .register_net = nbl_res_register_net, .unregister_net = nbl_res_unregister_net, @@ -1686,6 +2000,11 @@ static struct nbl_resource_ops res_ops = { .flr_clear_net = nbl_res_flr_clear_net, .flr_clear_rdma = nbl_res_flr_clear_rdma, .covert_vfid_to_vsi_id = nbl_res_covert_vfid_to_vsi_id, + .check_vf_is_active = nbl_res_check_vf_is_active, + .check_vf_is_vdpa = nbl_res_check_vf_is_vdpa, + .get_vdpa_vf_stats = nbl_res_get_vdpa_vf_stats, + .get_ustore_pkt_drop_stats = nbl_res_get_ustore_pkt_drop_stats, + .get_ustore_total_pkt_drop_stats = nbl_res_get_ustore_total_pkt_drop_stats, .init_vdpaq = nbl_res_init_vdpaq, .destroy_vdpaq = nbl_res_destroy_vdpaq, @@ -1726,8 +2045,20 @@ static struct nbl_resource_ops res_ops = { .set_hw_status = nbl_res_set_hw_status, .configure_qos = nbl_res_configure_qos, + .configure_rdma_bw = nbl_res_configure_rdma_bw, .set_pfc_buffer_size = nbl_res_set_pfc_buffer_size, .get_pfc_buffer_size = nbl_res_get_pfc_buffer_size, + .set_rate_limit = nbl_res_set_rate_limit, + + .get_perf_dump_length = nbl_res_get_perf_dump_length, + .get_perf_dump_data = nbl_res_get_perf_dump_data, + + .register_dev_name = nbl_res_register_dev_name, + .get_dev_name = nbl_res_get_dev_name, + + .get_mirror_table_id = nbl_res_get_mirror_table_id, + .configure_mirror = nbl_res_configure_mirror, + .clear_mirror_cfg = nbl_res_clear_mirror_cfg, }; static struct nbl_res_product_ops product_ops = { @@ -1946,10 +2277,10 @@ static int nbl_res_ctrl_dev_sriov_info_init(struct nbl_resource_mgt *res_mgt) sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; function = NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id; - sriov_info->bdf = PCI_DEVID(common->bus, + common->hw_bus = (u8)phy_ops->get_real_bus(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + sriov_info->bdf = PCI_DEVID(common->hw_bus, PCI_DEVFN(common->devid, function)); - vf_fid = phy_ops->get_host_pf_fid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), - func_id); + vf_fid = phy_ops->get_host_pf_fid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); vf_startid = vf_fid & 0xFFFF; vf_endid = (vf_fid >> 16) & 0xFFFF; sriov_info->start_vf_func_id = vf_startid + NBL_MAX_PF_LEONIS; @@ 
-1973,6 +2304,20 @@ static void nbl_res_ctrl_dev_sriov_info_remove(struct nbl_resource_mgt *res_mgt) *sriov_info = NULL; } +static void nbl_res_ctrl_dev_vdpa_vf_stats_remove(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_vdpa_status **vf_status = NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + int i = 0; + + for (i = 0; i < NBL_MAX_FUNC; i++) { + if (vf_status[i]) { + devm_kfree(dev, vf_status[i]); + vf_status[i] = NULL; + } + } +} + static int nbl_res_ctrl_dev_vsi_info_init(struct nbl_resource_mgt *res_mgt) { struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); @@ -2066,21 +2411,63 @@ static int nbl_res_ring_num_info_init(struct nbl_resource_mgt *res_mgt) return 0; } +static int nbl_res_ctrl_dev_ustore_stats_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_ustore_stats *ustore_stats; + + ustore_stats = devm_kcalloc(dev, NBL_MAX_ETHERNET, + sizeof(struct nbl_ustore_stats), GFP_KERNEL); + if (!ustore_stats) + return -ENOMEM; + + NBL_RES_MGT_TO_USTORE_STATS(res_mgt) = ustore_stats; + + return 0; +} + +static void nbl_res_ctrl_dev_ustore_stats_remove(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_ustore_stats **ustore_stats = &NBL_RES_MGT_TO_USTORE_STATS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + if (!(*ustore_stats)) + return; + + devm_kfree(dev, *ustore_stats); + *ustore_stats = NULL; +} + static int nbl_res_check_fw_working(struct nbl_resource_mgt *res_mgt) { struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); unsigned long fw_pong_current; unsigned long seconds_current = 0; + unsigned long timeout_us = 500 * USEC_PER_MSEC; + unsigned long sleep_us = USEC_PER_MSEC; + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); + bool sleep_before_read = false; seconds_current = (unsigned long)ktime_get_real_seconds(); phy_ops->set_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current - 1); phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current); - /* Wait for FW to ack the first heartbeat seq */ - return nbl_read_poll_timeout(phy_ops->get_fw_pong, fw_pong_current, - fw_pong_current == seconds_current, - USEC_PER_MSEC, 100 * USEC_PER_MSEC, - false, NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + might_sleep_if(sleep_us != 0); + if (sleep_before_read && sleep_us) + usleep_range((sleep_us >> 2) + 1, sleep_us); + for (;;) { + fw_pong_current = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (fw_pong_current == seconds_current) + break; + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { + fw_pong_current = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + break; + } + if (sleep_us) + usleep_range((sleep_us >> 2) + 1, sleep_us); + } + return (fw_pong_current == seconds_current) ? 
0 : -ETIMEDOUT; } static int nbl_res_init_pf_num(struct nbl_resource_mgt *res_mgt) @@ -2127,6 +2514,8 @@ static void nbl_res_stop(struct nbl_resource_mgt_leonis *res_mgt_leonis) nbl_vsi_mgt_stop(res_mgt); nbl_accel_mgt_stop(res_mgt); nbl_flow_mgt_stop_leonis(res_mgt); + nbl_res_ctrl_dev_ustore_stats_remove(res_mgt); + nbl_res_ctrl_dev_vdpa_vf_stats_remove(res_mgt); nbl_res_ctrl_dev_remove_vsi_info(res_mgt); nbl_res_ctrl_dev_remove_eth_info(res_mgt); nbl_res_ctrl_dev_sriov_info_remove(res_mgt); @@ -2149,41 +2538,6 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, u32 quirks; int ret = 0; - if (caps.has_factory_ctrl) { - ret = nbl_res_check_fw_working(res_mgt); - if (ret) { - nbl_err(common, NBL_DEBUG_RESOURCE, "fw is not working"); - return ret; - } - - ret = nbl_res_init_pf_num(res_mgt); - if (ret) { - nbl_err(common, NBL_DEBUG_RESOURCE, "pf number is illegal"); - return ret; - } - - nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_HB_CAP); - nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_RESET_CAP); - nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_ADMINDQ_CAP); - nbl_res_set_fix_capability(res_mgt, NBL_RESTOOL_CAP); - - ret = nbl_res_ctrl_dev_sriov_info_init(res_mgt); - if (ret) { - nbl_err(common, NBL_DEBUG_RESOURCE, "Failed to init sr_iov info"); - return ret; - } - - ret = nbl_intr_mgt_start(res_mgt); - if (ret) - goto start_fail; - - ret = nbl_adminq_mgt_start(res_mgt); - if (ret) - goto start_fail; - - return 0; - } - if (caps.has_ctrl) { ret = nbl_res_check_fw_working(res_mgt); if (ret) { @@ -2217,6 +2571,10 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, if (ret) goto start_fail; + ret = nbl_res_ctrl_dev_ustore_stats_init(res_mgt); + if (ret) + goto start_fail; + ret = nbl_flow_mgt_start_leonis(res_mgt); if (ret) goto start_fail; @@ -2251,7 +2609,6 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, nbl_res_set_flex_capability(res_mgt, NBL_DUMP_FLOW_CAP); nbl_res_set_flex_capability(res_mgt, NBL_DUMP_FD_CAP); - nbl_res_set_flex_capability(res_mgt, NBL_SECURITY_ACCEL_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_OFFLOAD_NETWORK_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_HB_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_RESET_CAP); @@ -2262,9 +2619,12 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, nbl_res_set_fix_capability(res_mgt, NBL_TASK_RESET_CTRL_CAP); /* leonis af need a pmd_debug for dpdk gdb debug */ nbl_res_set_fix_capability(res_mgt, NBL_PMD_DEBUG); + nbl_res_set_fix_capability(res_mgt, NBL_HIGH_THROUGHPUT_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_HEALTH_REPORT_TEMP_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_HEALTH_REPORT_REBOOT_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_DVN_DESC_REQ_SYSFS_CAP); nbl_res_set_flex_capability(res_mgt, NBL_SECURITY_ACCEL_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_IPSEC_AGE_CAP); - upcall_port_info->upcall_port_active = false; } @@ -2286,6 +2646,9 @@ static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, nbl_res_set_fix_capability(res_mgt, NBL_P4_CAP); nbl_res_set_fix_capability(res_mgt, NBL_TASK_RESET_CAP); nbl_res_set_fix_capability(res_mgt, NBL_QOS_SYSFS_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_MIRROR_SYSFS_CAP); + + nbl_res_set_fix_capability(res_mgt, NBL_XDP_CAP); quirks = nbl_res_get_quirks(res_mgt); if (quirks & BIT(NBL_QUIRKS_NO_TOE)) { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h
index 2d43449119e8..1c8cc4448b57 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 * Copyright (c) 2022 nebula-matrix Limited.
 * Author: Bennie Yan
@@ -11,7 +11,7 @@
 #define NBL_MAX_PF_LEONIS 8

 /* product NO(ASIC SNIC as 3)-V NO.R NO.B NO.SP NO */
-#define NBL_LEONIS_DRIVER_VERSION "3-3.1.312.1"
+#define NBL_LEONIS_DRIVER_VERSION "5-3.1.512.2"

 int nbl_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt);
 void nbl_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt);
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c
index a7eb5dd7bfb2..c2bc208d047a 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c
@@ -8,6 +8,7 @@
 #include "nbl_p4_actions.h"
 #include "nbl_tc_tun_leonis.h"
 #include "nbl_tc_flow_leonis.h"
+#include "nbl_tc_pedit.h"

 #define NBL_ACT_OFT 16
 #define NBL_GET_ACT_INFO(data, idx) (*(u16 *)&(data) + ((idx) << NBL_ACT_OFT))
@@ -199,8 +200,7 @@ static int nbl_flow_ht_assign_proc(struct nbl_resource_mgt *res_mgt,
			    tcam_item->key_mode == NBL_TC_KT_HALF_MODE)) {
			tcam_item->tcam_flag = true;
			nbl_debug(common, NBL_DEBUG_FLOW,
-				  "tc flow tcam:pp%d ht0=%x,cnt=%d,ht1=%x,cnt=%d, "
-				  "put it to tcam.\n",
+				  "tc flow: pp%d ht0=%x,cnt=%d,ht1=%x,cnt=%d, to tcam.\n",
				  mt_input->pp_type, ht0_hash,
				  pp_ht0_node->ref_cnt, ht1_hash,
				  pp_ht1_node->ref_cnt);
@@ -479,6 +479,344 @@ nbl_flow_tunnel_decap_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *ite
 	return 0;
 }

+static u32 nbl_flow_set_pedit_act(struct nbl_resource_mgt *res_mgt,
+				  struct nbl_tc_pedit_entry *in_e,
+				  enum nbl_flow_ped_type pedit_type, u32 act_id)
+{
+	u32 act = 0;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt);
+
+	/* a ref_node need not rewrite the PED table: the first node already wrote it */
+	if (!NBL_TC_PEDIT_GET_NODE_VAL(in_e))
+		phy_ops->write_ped_tbl(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt),
+				       in_e->key, nbl_tc_pedit_get_hw_id(in_e), pedit_type);
+	act = nbl_tc_pedit_get_hw_id(in_e) + (act_id << 16);
+
+	return act;
+}
+
+static int nbl_flow_set_sip_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item,
+				    struct nbl_edit_item *edit_item,
+				    struct nbl_resource_mgt *res_mgt)
+{
+	int ret = 0;
+	const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	u16 act_idx = *item;
+	void *out_e = NULL;
+	struct nbl_tc_pedit_entry in_e;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	enum nbl_flow_ped_type pedit_type;
+
+	memset(&in_e, 0, sizeof(in_e));
+	/* ipv4 addresses are written into the high 32 bits of the ped_tbl entry */
+	in_e.ip[1] = be32_to_cpu(action->tc_pedit_info.val.ip4.saddr);
+	if (action->flag & NBL_FLOW_ACTION_EGRESS)
+		pedit_type = NBL_FLOW_PED_DIP_TYPE;
+	else
+		pedit_type = NBL_FLOW_PED_UIP_TYPE;
+
+	NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1);
+	ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_sip error");
+		return -ENOMEM;
+	}
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):sip:%u, hw-idx:%u",
+		  pedit_type, action->tc_pedit_info.pedit_node.pedits,
+		  action->tc_pedit_info.val.ip4.saddr, nbl_tc_pedit_get_hw_id(&in_e));
+
+	buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_IPV4_SIP);
+	NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node);
+	NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e);
+	return ret;
+}
+
+static int nbl_flow_set_dip_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item,
+				    struct nbl_edit_item *edit_item,
+				    struct nbl_resource_mgt *res_mgt)
+{
+	int ret = 0;
+	const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	u16 act_idx = *item;
+	struct nbl_tc_pedit_entry in_e;
+	void *out_e = NULL;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	enum nbl_flow_ped_type pedit_type;
+
+	memset(&in_e, 0, sizeof(in_e));
+	/* ipv4 addresses are written into the high 32 bits of the ped_tbl entry */
+	in_e.ip[1] = be32_to_cpu(action->tc_pedit_info.val.ip4.daddr);
+	if (action->flag & NBL_FLOW_ACTION_EGRESS)
+		pedit_type = NBL_FLOW_PED_DIP_TYPE;
+	else
+		pedit_type = NBL_FLOW_PED_UIP_TYPE;
+
+	NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1);
+	ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_dip error");
+		return -ENOMEM;
+	}
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):dip:%u, hw-idx:%u",
+		  pedit_type, action->tc_pedit_info.pedit_node.pedits,
+		  action->tc_pedit_info.val.ip4.daddr, nbl_tc_pedit_get_hw_id(&in_e));
+	buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_IPV4_DIP);
+	NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node);
+
+	/* update pedit_type: dst ip entries are stored under the _D_TYPE key */
+	NBL_TC_PEDIT_SET_D_TYPE(pedit_type);
+	NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e);
+	return ret;
+}
+
+static int nbl_flow_set_sip6_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item,
+				     struct nbl_edit_item *edit_item,
+				     struct nbl_resource_mgt *res_mgt)
+{
+	int ret = 0;
+	const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	u16 act_idx = *item;
+	struct nbl_tc_pedit_entry in_e;
+	void *out_e = NULL;
+	struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+	enum nbl_flow_ped_type pedit_type;
+	int idx;
+	char ip6[128];
+	int oft = 0;
+	u32 *cur_ip_s = (u32 *)&in_e.ip6;
+	u32 *ip = &action->tc_pedit_info.val.ip6.saddr.in6_u.u6_addr32[3];
+
+	memset(&in_e, 0, sizeof(in_e));
+	for (idx = 0; idx < 4; ++idx) {
+		*cur_ip_s = be32_to_cpu(*ip);
+		oft += snprintf(&ip6[oft], 128 - oft, "-%x", *cur_ip_s);
+		--ip;
+		++cur_ip_s;
+	}
+
+	if (action->flag & NBL_FLOW_ACTION_EGRESS)
+		pedit_type = NBL_FLOW_PED_DIP_TYPE;
+	else
+		pedit_type = NBL_FLOW_PED_UIP_TYPE;
+
+	NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 4);
+	NBL_TC_PEDIT_SET_NODE_H(&in_e);
+	ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_sip6 error");
+		return -ENOMEM;
+	}
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):sip6:%s, hw-idx:%u",
+		  pedit_type, action->tc_pedit_info.pedit_node.pedits, ip6,
+		  nbl_tc_pedit_get_hw_id(&in_e));
+	buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e,
+					      NBL_TC_PEDIT_GET_IP6_PHY_TYPE(pedit_type),
+					      NBL_ACT_REP_IPV6_SIP);
+ + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_dip6_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + struct nbl_tc_pedit_entry in_e; + void *out_e = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + int idx; + char ip6[128]; + int oft = 0; + u32 *cur_ip_s = (u32 *)&in_e.ip6; + u32 *ip = &action->tc_pedit_info.val.ip6.daddr.in6_u.u6_addr32[3]; + + memset(&in_e, 0, sizeof(in_e)); + for (idx = 0; idx < 4; ++idx) { + *cur_ip_s = be32_to_cpu(*ip); + oft += snprintf(&ip6[oft], 128 - oft, "-%x", *cur_ip_s); + --ip; + ++cur_ip_s; + } + + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DIP_TYPE; + else + pedit_type = NBL_FLOW_PED_UIP_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 4); + NBL_TC_PEDIT_SET_NODE_H(&in_e); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_dip6 error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%u-%d):dip6:%s, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, ip6, + nbl_tc_pedit_get_hw_id(&in_e)); + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, + NBL_TC_PEDIT_GET_IP6_PHY_TYPE(pedit_type), + NBL_ACT_REP_IPV6_DIP); + + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + /* update pedit_type, for dst ip store in _D_TYPE */ + NBL_TC_PEDIT_SET_D_TYPE(pedit_type); + NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e); + return ret; +} + +static int nbl_flow_set_smac_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 act_idx = *item; + struct nbl_tc_pedit_entry in_e; + void *out_e = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + enum nbl_flow_ped_type pedit_type; + int idx; + char mac[128]; + int oft = 0; + u8 *cur_mac_s = (u8 *)&in_e.mac; + + memset(&in_e, 0, sizeof(in_e)); + /* update mac offset, for low 16-bit must be 0 */ + NBL_TC_UPDATE_MAC_OFT(cur_mac_s); + for (idx = 0; idx < ETH_ALEN; ++idx) { + *cur_mac_s = action->tc_pedit_info.val.eth.h_source[ETH_ALEN - 1 - idx]; + oft += snprintf(&mac[oft], 128 - oft, "-%x", *cur_mac_s); + ++cur_mac_s; + } + + if (action->flag & NBL_FLOW_ACTION_EGRESS) + pedit_type = NBL_FLOW_PED_DMAC_TYPE; + else + pedit_type = NBL_FLOW_PED_UMAC_TYPE; + + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 2); + ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_smac error"); + return -ENOMEM; + } + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):smac:%s, hw-idx:%u", + pedit_type, action->tc_pedit_info.pedit_node.pedits, mac, + nbl_tc_pedit_get_hw_id(&in_e)); + buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_SMAC); + NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node); + 
+static int nbl_flow_set_dmac_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item,
+ struct nbl_edit_item *edit_item,
+ struct nbl_resource_mgt *res_mgt)
+{
+ int ret = 0;
+ const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+ u16 act_idx = *item;
+ struct nbl_tc_pedit_entry in_e;
+ void *out_e = NULL;
+ struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+ enum nbl_flow_ped_type pedit_type;
+ int idx;
+ char mac[128];
+ int oft = 0;
+ u8 *cur_mac_s = in_e.mac;
+
+ memset(&in_e, 0, sizeof(in_e));
+ /* update mac offset, for low 16-bit must be 0 */
+ NBL_TC_UPDATE_MAC_OFT(cur_mac_s);
+ for (idx = 0; idx < ETH_ALEN; ++idx) {
+ *cur_mac_s = action->tc_pedit_info.val.eth.h_dest[ETH_ALEN - 1 - idx];
+ oft += snprintf(&mac[oft], 128 - oft, "-%x", *cur_mac_s);
+ ++cur_mac_s;
+ }
+
+ if (action->flag & NBL_FLOW_ACTION_EGRESS)
+ pedit_type = NBL_FLOW_PED_DMAC_TYPE;
+ else
+ pedit_type = NBL_FLOW_PED_UMAC_TYPE;
+
+ NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 2);
+ ret = nbl_tc_pedit_add_node(&tc_flow_mgt->pedit_mgt, &in_e, &out_e, pedit_type);
+ if (ret) {
+ nbl_info(common, NBL_DEBUG_FLOW, "nbl_set_dmac error");
+ return -ENOMEM;
+ }
+
+ nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%d-%u):dmac:%s, hw-idx:%u",
+ pedit_type, action->tc_pedit_info.pedit_node.pedits, mac,
+ nbl_tc_pedit_get_hw_id(&in_e));
+ buf[act_idx] = nbl_flow_set_pedit_act(res_mgt, &in_e, pedit_type, NBL_ACT_REP_DMAC);
+ NBL_TC_PEDIT_SET_NODE_RES_VAL(action->tc_pedit_info.pedit_node);
+
+ /* update pedit_type, for dst mac store in _D_TYPE */
+ NBL_TC_PEDIT_SET_D_TYPE(pedit_type);
+ NBL_TC_PEDIT_SET_NODE_RES_ENTRY(action->tc_pedit_info.pedit_node, pedit_type, out_e);
+ return ret;
+}
+
+static int nbl_flow_set_sp_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item,
+ struct nbl_edit_item *edit_item,
+ struct nbl_resource_mgt *res_mgt)
+{
+ int ret = 0;
+ const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+ u16 port = 0;
+ u16 act_idx = *item;
+ bool is_udp = NBL_TC_PEDIT_GET_NODE_RES_PRO(action->tc_pedit_info.pedit_node);
+
+ if (!is_udp)
+ port = be16_to_cpu(action->tc_pedit_info.val.tcp.source);
+ else
+ port = be16_to_cpu(action->tc_pedit_info.val.udp.source);
+
+ nbl_debug(common, NBL_DEBUG_FLOW, "nbl_pedit_act(%u):sp:%s-%u",
+ action->tc_pedit_info.pedit_node.pedits,
+ is_udp ? "udp" : "tcp", port);
+ NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1);
+
+ buf[act_idx] = port + (NBL_ACT_REP_SPORT << 16);
+ return ret;
+}
+
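Unlike the MAC/IP handlers, the source-port handler above (and the dst-port one that follows) allocates no pedit table entry at all: the 16-bit new port value is folded straight into the 32-bit action word, with the replace opcode in the high half. The driver writes this as port + (NBL_ACT_REP_SPORT << 16); since the two halves cannot overlap, that addition is equivalent to the OR in this standalone sketch (the opcode value is invented for illustration; the real encoding lives in nbl_p4_actions.h):

#include <stdint.h>
#include <assert.h>

#define ACT_REP_SPORT 0x2au	/* hypothetical opcode, for illustration only */

/* Pack a 16-bit replacement value and a 16-bit opcode into one action word. */
static inline uint32_t pack_act_word(uint16_t opcode, uint16_t value)
{
	return ((uint32_t)opcode << 16) | value;
}

int main(void)
{
	uint32_t w = pack_act_word(ACT_REP_SPORT, 4789);

	assert((w >> 16) == ACT_REP_SPORT);	/* opcode in the high half */
	assert((w & 0xffff) == 4789);		/* port in the low half */
	return 0;
}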
"udp" : "tcp", port); + NBL_TC_PEDIT_DEC_NODE_RES_EDITS(action->tc_pedit_info.pedit_node, 1); + + buf[act_idx] = port + (NBL_ACT_REP_DPORT << 16); + return ret; +} + static struct nbl_flow_action_2hw acts_2hw[] = { { NBL_FLOW_ACTION_PORT_ID, nbl_flow_port_id_action_2hw }, { NBL_FLOW_ACTION_DROP, nbl_flow_drop_2hw }, @@ -490,6 +828,14 @@ static struct nbl_flow_action_2hw acts_2hw[] = { { NBL_FLOW_ACTION_POP_INNER_VLAN, nbl_flow_pop_inner_vlan_2hw }, { NBL_FLOW_ACTION_TUNNEL_ENCAP, nbl_flow_tunnel_encap_act_2hw }, { NBL_FLOW_ACTION_TUNNEL_DECAP, nbl_flow_tunnel_decap_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV4_SRC_IP, nbl_flow_set_sip_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV4_DST_IP, nbl_flow_set_dip_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV6_SRC_IP, nbl_flow_set_sip6_act_2hw }, + { NBL_FLOW_ACTION_SET_IPV6_DST_IP, nbl_flow_set_dip6_act_2hw }, + { NBL_FLOW_ACTION_SET_SRC_MAC, nbl_flow_set_smac_act_2hw }, + { NBL_FLOW_ACTION_SET_DST_MAC, nbl_flow_set_dmac_act_2hw }, + { NBL_FLOW_ACTION_SET_SRC_PORT, nbl_flow_set_sp_act_2hw }, + { NBL_FLOW_ACTION_SET_DST_PORT, nbl_flow_set_dp_act_2hw }, }; static int nbl_flow_at_num_proc(struct nbl_resource_mgt *res_mgt, @@ -734,9 +1080,9 @@ static int nbl_flow_del_ht_2hw(struct nbl_tc_ht_item *ht_item, u8 pp_type, ret = nbl_cmdq_flow_ht_clear_2hw(ht_item, pp_type, res_mgt); if (ret) { nbl_err(common, NBL_DEBUG_FLOW, - "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d., ret %d\n", pp_type, ht_item->ht0_hash, - ht_item->ht1_hash, ht_item->tbl_id); + ht_item->ht1_hash, ht_item->tbl_id, ret); return ret; } @@ -746,9 +1092,9 @@ static int nbl_flow_del_ht_2hw(struct nbl_tc_ht_item *ht_item, u8 pp_type, ht_item->tbl_id); if (ret) { nbl_err(common, NBL_DEBUG_FLOW, - "tc flow failed to del ht,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + "tc flow failed to del ht,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d, ret %d.\n", pp_type, ht_item->ht0_hash, - ht_item->ht1_hash, ht_item->tbl_id); + ht_item->ht1_hash, ht_item->tbl_id, ret); return ret; } } else { @@ -770,9 +1116,9 @@ static int nbl_flow_del_ht_2hw(struct nbl_tc_ht_item *ht_item, u8 pp_type, ret = nbl_cmdq_flow_ht_clear_2hw(ht_item, pp_type, res_mgt); if (ret) { nbl_err(common, NBL_DEBUG_FLOW, - "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d, ret %d.\n", pp_type, ht_item->ht0_hash, - ht_item->ht1_hash, ht_item->tbl_id); + ht_item->ht1_hash, ht_item->tbl_id, ret); return ret; } @@ -782,9 +1128,9 @@ static int nbl_flow_del_ht_2hw(struct nbl_tc_ht_item *ht_item, u8 pp_type, ht_item->tbl_id); if (ret) { nbl_err(common, NBL_DEBUG_FLOW, - "tc flow failed to del ht, pp%d ht1_hash=%d, ht0_hash=%d, tbl_id=%d.\n", + "tc flow failed to del ht, pp%d ht1_hash=%d, ht0_hash=%d, tbl_id=%d, ret %d.\n", pp_type, ht_item->ht1_hash, - ht_item->ht0_hash, ht_item->tbl_id); + ht_item->ht0_hash, ht_item->tbl_id, ret); return ret; } } else { @@ -833,8 +1179,6 @@ static int nbl_flow_del_at_2hw(struct nbl_resource_mgt *res_mgt, act_collect->act_key[0].act[5], act_collect->act_key[0].act[6], act_collect->act_key[0].act[7]); - } else { - ret = -1; } } } @@ -855,8 +1199,6 @@ static int nbl_flow_del_at_2hw(struct nbl_resource_mgt *res_mgt, act_collect->act_key[1].act[5], act_collect->act_key[1].act[6], act_collect->act_key[1].act[7]); - } else { - ret = -1; } } } @@ -1231,8 +1573,7 @@ nbl_cmdq_send_flow_ktat(struct nbl_tc_ht_item *ht_item, 
 ktat.info.at_size = 1;
 memcpy(&ktat.info.at_data, &at1.info, sizeof(at1));
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow kt index=0x%x,at_hw_index=0x%x,"
- "at data:0x%x-%x-%x-%x-%x-%x-%x-%x.",
+ "tc flow kt index=0x%x,hw_index=0x%x, data:0x%x-%x-%x-%x-%x-%x-%x-%x.",
 ktat.info.kt_index, at_item->act_collect.act_hw_index,
 at1.info.at1, at1.info.at2, at1.info.at3, at1.info.at4,
 at1.info.at5, at1.info.at6, at1.info.at7, at1.info.at8);
@@ -1515,7 +1856,7 @@ static int nbl_flow_tab_add(struct nbl_flow_tab_filter *node,
 if (ret) {
 spin_unlock(&tc_flow_mgt->flow_lock);
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow failed to alloc id for full table.\n");
+ "tc flow failed to alloc id for full table, ret %d.\n", ret);
 return -ENOSPC;
 }
 } else {
@@ -1526,7 +1867,7 @@ static int nbl_flow_tab_add(struct nbl_flow_tab_filter *node,
 if (ret) {
 spin_unlock(&tc_flow_mgt->flow_lock);
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow failed to alloc id for half table.\n");
+ "tc flow failed to alloc id for half table, ret %d.\n", ret);
 return -ENOSPC;
 }
 }
@@ -1658,7 +1999,7 @@ static int nbl_flow_tab_del(struct nbl_flow_tab_filter *node, struct nbl_resourc
 ret = nbl_flow_del_at_2hw(res_mgt, &node->act_collect, select_input->pp_type);
 if (ret) {
- nbl_err(common, NBL_DEBUG_FLOW, "tc flow failed to del at 2hw\n");
+ nbl_err(common, NBL_DEBUG_FLOW, "tc flow failed to del at 2hw, ret %d\n", ret);
 goto ret_fail;
 }
 
@@ -1684,6 +2025,7 @@ static int nbl_flow_tab_ht_at(struct nbl_flow_tab_filter *node,
 struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
 struct nbl_profile_msg *profile_msg = &tc_flow_mgt->profile_msg[idx_info->profile_id];
+ const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
 
 if (!node || !idx_info)
 return -EINVAL;
@@ -1691,8 +2033,11 @@ static int nbl_flow_tab_ht_at(struct nbl_flow_tab_filter *node,
 mt_input.key_full = profile_msg->key_full;
 ret = nbl_tc_set_pp_related_value(&select_input, &mt_input,
 tc_flow_mgt, idx_info->profile_id);
- if (ret)
+ if (ret) {
+ nbl_err(common, NBL_DEBUG_FLOW, "tc flow set pp failed, profile_id %u.\n",
+ idx_info->profile_id);
 return ret;
+ }
 
 if (opcode == NBL_OPCODE_ADD)
 ret = nbl_flow_tab_add(node, action, res_mgt, idx_info,
 &mt_input, &select_input);
@@ -1709,9 +2054,12 @@ static int nbl_flow_tbl_op(void *ptr, struct nbl_rule_action *action,
 {
 struct nbl_flow_tab_filter *flow_tab_node = NULL;
 int ret = 0;
+ const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
 
- if (opcode == NBL_OPCODE_ADD && !action)
+ if (opcode == NBL_OPCODE_ADD && !action) {
+ nbl_err(common, NBL_DEBUG_FLOW, "tc flow add failed as action is NULL.\n");
 return -EINVAL;
+ }
 
 flow_tab_node = (struct nbl_flow_tab_filter *)ptr;
 ret = nbl_flow_tab_ht_at(flow_tab_node, action, opcode, res_mgt, idx_info);
@@ -1729,7 +2077,7 @@ static int nbl_off_flow_op(void *ptr, struct nbl_rule_action *act,
 
 if (!ptr) {
 nbl_err(common, NBL_DEBUG_FLOW,
- "tc flow offload op failed. op:%u\n", opcode);
+ "tc flow offload op failed, flow node is NULL.
op:%u\n", opcode); return -EINVAL; } diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h index f0d2f7edfd18..2d24d571682a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h @@ -1,8 +1,9 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan */ + #ifndef _NBL_TC_FLOW_FILTER_LEONIS_H_ #define _NBL_TC_FLOW_FILTER_LEONIS_H_ @@ -11,6 +12,9 @@ #define NBL_ACC_HT0 (0) #define NBL_ACC_HT1 (1) +#define NBL_TC_UPDATE_MAC_OFT(p) ((p) += 2) +#define NBL_TC_UPDATE_IP_OFT(p) ((p) += 4) + struct nbl_flow_offload_ops { int (*add) (void *ptr, diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c index 8d9a2715aa5d..ac00aaf2cc89 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c @@ -8,6 +8,7 @@ #include "nbl_p4_actions.h" #include "nbl_fc_leonis.h" #include "nbl_tc_tun_leonis.h" +#include "nbl_tc_pedit.h" #include "nbl_resource_leonis.h" static struct nbl_profile_msg g_prf_msg[NBL_ALL_PROFILE_NUM] = { @@ -2692,101 +2693,85 @@ static void nbl_debug_print_hash_key(struct nbl_common_info *common, if ((prf_msg->key_flag & test_tnl_v4) == test_tnl_v4) { nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow hw calculated tv4 profile: id %d, " - "dipv4 0x%x, optdata 0x%x, optclass 0x%x, dport 0x%x\n", + "v4:id %d, dipv4 0x%x, optdata 0x%x, optclass 0x%x, dport 0x%x\n", p0->info.template, p0->info.dst_ip, p0->info.option_data, p0->info.option_class, p0->info.dst_port); nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow hw original tv4 profile: id %d, " - "dipv4 0x%x, dport 0x%x\n", + "v4:id %d, dipv4 0x%x, dport 0x%x\n", prf_msg->profile_id, input->ip_outer.dst_ip.addr, input->l4_outer.dst_port); } else if ((prf_msg->key_flag & test_tnl_v6) == test_tnl_v6) { nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow hw calculated tv6 profile: id %d, " - "dipv6 0x%lx 0x%lx, optdata 0x%x, optclass 0x%x, dport 0x%x\n", + "v6:id %d, dipv6 0x%lx 0x%lx, optdata 0x%x, optclass 0x%x, dport 0x%x\n", p1->info.template, (unsigned long)p1->info.dst_ipv6_1, (unsigned long)p1->info.dst_ipv6_2, p1->info.option_data, p1->info.option_class, p1->info.dst_port); nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow hw originla tv6 profile: id %d, " - "dipv6 0x%x, dport 0x%x\n", + "v6:id %d, dipv6 0x%x, dport 0x%x\n", prf_msg->profile_id, input->ip_outer.dst_ip.addr, input->l4_outer.dst_port); } else if ((prf_msg->key_flag & test_tnl_l2) == test_tnl_l2) { nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow hw calculated tnl l2 profile: id %d, " - "vni %d, dstmac 0x%lx, etype 0x%04x, cvlan %d, svlan %d\n", + "l2:id %d,vni %d, dstmac 0x%lx, etype 0x%04x, cvlan %d, svlan %d\n", p2->info.template, p2->info.vni, (unsigned long)p2->info.dst_mac, p2->info.ether_type, p2->info.cvlan_id, p2->info.svlan_id); nbl_debug(common, NBL_DEBUG_FLOW, - "tc flow hw original tnl l2 profile: id %d, " - "dstmac 0x%llx, etype 0x%04x, cvlan %d, svlan %d\n", + "l2:id %d, dstmac 0x%llx, etype 0x%04x, cvlan %d, svlan %d\n", prf_msg->profile_id, *(u64 *)input->l2_data.dst_mac, 
 input->l2_data.ether_type, input->cvlan_tag, input->svlan_tag);
 } else if ((prf_msg->key_flag & test_l2_notnl) == test_l2_notnl) {
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw calculated l2 profile: id %d, dstmac 0x%lx, "
- "etype 0x%04x, svlan %d, cvlan %d\n",
+ "l2:id %d, dstmac 0x%lx, etype 0x%04x, svlan %d, cvlan %d\n",
 p3->info.template, (unsigned long)p3->info.dst_mac,
 p3->info.ether_type, p3->info.svlan_id, p3->info.cvlan_id);
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw original l2 profile: id %d, dstmac 0x%llx, "
- "etype 0x%04x, svlan %d, cvlan %d\n",
+ "l2:id %d, dstmac 0x%llx, etype 0x%04x, svlan %d, cvlan %d\n",
 prf_msg->profile_id, *(u64 *)input->l2_data.dst_mac,
 input->l2_data.ether_type, input->svlan_tag,
 input->cvlan_tag);
 } else if ((prf_msg->key_flag & test_l3_v4) == test_l3_v4) {
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw calculated l3 v4: id %d, sip 0x%x, "
- "ttl %d, dscp %d\n",
+ "l3 v4: id %d, dip 0x%x, ttl %d, dscp %d\n",
 p4->info.template, p4->info.dst_ip,
 p4->info.ttl, p4->info.dscp);
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw original l3 v4: id %d, sip 0x%x, "
- "ttl %d, dscp %d\n",
+ "l3 v4: id %d, dip 0x%x, ttl %d, dscp %d\n",
 prf_msg->profile_id, input->ip.dst_ip.addr,
- p4->info.ttl, p4->info.dscp);
+ input->ip.ttl, input->ip.tos);
 } else if ((prf_msg->key_flag & test_l3_v6) == test_l3_v6) {
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw calculated l3 v6: id %d, dip 0x%llx-%llx, "
- "ttl %d, dscp %d\n",
+ "l3 v6: id %d, dip 0x%llx-%llx, ttl %d, dscp %d\n",
 p5->info.template, p5->info.dst_ipv6_1,
 p5->info.dst_ipv6_2, p5->info.hoplimit, p5->info.dscp);
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw original l3 v6: id %d, dip 0x%llx-%llx, "
- "ttl %d, dscp %d\n",
+ "l3 v6: id %d, dip 0x%llx-%llx, ttl %d, dscp %d\n",
 prf_msg->profile_id, *(u64 *)input->ip.dst_ip.v6_addr,
 *((u64 *)input->ip.dst_ip.v6_addr + 1),
 input->ip.ttl, input->ip.tos);
 } else if ((prf_msg->key_flag & test_t5_ipv4) == test_t5_ipv4) {
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw calculated t5 ipv4 profile: id %d, sip 0x%x, "
- "srcport %d, dstport %d, protocol %d\n",
+ "ipv4: id %d, sip 0x%x, srcport %d, dstport %d, protocol %d\n",
 p8->info.template, p8->info.src_ip, p8->info.src_port,
 p8->info.dst_port, p8->info.proto);
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw original data: sip: 0x%x, srcport %d, "
- " dstport %d, protocol %d\n",
+ "sip: 0x%x, srcport %d, dstport %d, protocol %d\n",
 input->ip.src_ip.addr, input->l4.src_port,
 input->l4.dst_port, input->ip.proto);
 } else if ((prf_msg->key_flag & test_t5_ipv6) == test_t5_ipv6) {
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw calculated t5 ipv6 profile: sip 0x%llx-%llx, "
- "srcport %d, dstport %d, protocol %d\n",
+ "ipv6: sip 0x%llx-%llx, srcport %d, dstport %d, protocol %d\n",
 p9->info.src_ipv6_1, p9->info.src_ipv6_2, p9->info.src_port,
 p9->info.dst_port, p9->info.proto);
 nbl_debug(common, NBL_DEBUG_FLOW,
- "tc flow hw original data: sip: 0x%llx-%llx, srcport %d, "
- " dstport %d, protocol %d\n",
+ "sip: 0x%llx-%llx, srcport %d, dstport %d, protocol %d\n",
 *(u64 *)input->ip.src_ip.v6_addr,
 *((u64 *)input->ip.src_ip.v6_addr + 1),
 input->l4.src_port, input->l4.dst_port,
@@ -3201,8 +3186,7 @@ static int nbl_flow_tab_hash_add(struct nbl_resource_mgt *res_mgt,
 filter_data.assoc_tbl_id, 0);
 spin_unlock(&tc_flow_mgt->flow_lock);
 nbl_info(common, NBL_DEBUG_FLOW,
- "tc flow hw failed to insert flow tab filter "
- "to hash table %d.\n", ret);
+ "tc flow hw failed to insert flow tab filter to hash table, ret %d.\n", ret);
 return ret;
 }
 
@@ -3236,7 +3220,8 @@ static int nbl_flow_tab_storage(struct nbl_resource_mgt *res_mgt,
 ret = nbl_flow_tab_hash_add(res_mgt, filter, tc_flow_ptr, (void **)&flow_tab_node,
 prof_off_msg);
 if (ret || !flow_tab_node) {
- nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw flow_tab hash-list storage fail.\n");
+ nbl_info(common, NBL_DEBUG_FLOW, "tc hw hash-list op fail, ret %d, node %p.\n",
+ ret, flow_tab_node);
 return ret;
 }
 if (flow_tab_node->ref_cnt > 1)
@@ -3249,7 +3234,7 @@ static int nbl_flow_tab_storage(struct nbl_resource_mgt *res_mgt,
 idx_info.pt_cmd = prof_off_msg->pt_cmd;
 ret = nbl_add_nic_hw_flow_tab(flow_tab_node, act, res_mgt, &idx_info);
 if (ret) {
- nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw nbl_flow_tab_key_2Nic fail.\n");
+ nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw add flow 2hw fail, ret %d.\n", ret);
 return ret;
 }
 return ret;
@@ -3326,9 +3311,7 @@ static int nbl_flow_tab_storage_entr(struct nbl_resource_mgt *res_mgt,
 asso_graph->profile_id[i]);
 if (ret_2 != 0 && ret_2 != -ENONET) {
 nbl_err(common, NBL_DEBUG_FLOW,
- "tc flow hw del failed "
- "when flow table storage failed. "
- "tnl_flag %d, ret_2 %d.\n",
+ "tc flow hw del failed, tnl_flag %d, ret_2 %d.\n",
 filter->input.tnl_flag, ret_2);
 return ret_2;
 }
@@ -3494,6 +3477,8 @@ static int nbl_flow_tab_filter_init(struct nbl_resource_mgt *res_mgt,
 if (!entries)
 return -EINVAL;
 
+ /* the hash bucket count is stored in a 16-bit field, so clamp entries */
+ entries = entries >= 0xffff ? 0xffff : entries;
 NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(struct nbl_flow_tab_conf),
 sizeof(struct nbl_flow_tab_filter), entries, false);
 tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash =
@@ -3501,6 +3486,9 @@ static int nbl_flow_tab_filter_init(struct nbl_resource_mgt *res_mgt,
 if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash)
 return -EINVAL;
 
+ nbl_info(common, NBL_DEBUG_FLOW, "tc flow init profile:%u with %u entries",
+ profile_id, entries);
+
 return 0;
 }
 
@@ -3796,7 +3784,7 @@ int nbl_pp_at_lookup(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type,
 
 NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true);
 idx = nbl_common_get_index_with_data(at_tbl, act_key->act, &extra_key, NULL, 0,
- (void **)&act_node);
+ (void **)act_node);
 
 return idx;
 }
@@ -4033,6 +4021,60 @@ static int nbl_flow_mcc_init(struct nbl_resource_mgt *res_mgt)
 return 0;
 }
 
+static void nbl_tc_flow_set_pedit_res(struct nbl_tc_pedit_res_info *pedit_res)
+{
+ pedit_res[NBL_FLOW_PED_UMAC_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_MAC;
+ pedit_res[NBL_FLOW_PED_DMAC_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_MAC;
+ pedit_res[NBL_FLOW_PED_UMAC_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_MAC_BASE;
+ pedit_res[NBL_FLOW_PED_DMAC_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_MAC_BASE;
+
+ pedit_res[NBL_FLOW_PED_UIP_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_IP;
+ pedit_res[NBL_FLOW_PED_DIP_TYPE].pedit_num = NBL_FLOW_TC_PEDIT_IP;
+ pedit_res[NBL_FLOW_PED_UIP_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_IP_BASE;
+ pedit_res[NBL_FLOW_PED_DIP_TYPE].pedit_base_id = NBL_FLOW_TC_PEDIT_IP_BASE;
+ /* special handling: leonis ipv6 needs 2 ped-addrs; v4 & v6 can share the same hw resource */
+ pedit_res[NBL_FLOW_PED_UIP_TYPE].pedit_num_h = NBL_FLOW_TC_PEDIT_IP6;
+ pedit_res[NBL_FLOW_PED_DIP_TYPE].pedit_num_h = NBL_FLOW_TC_PEDIT_IP6;
+}
+
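nbl_tc_flow_set_pedit_res() above leaves pedit_num_h zero except for the IP types, because a leonis IPv6 rewrite burns two ped-addresses while sharing the hardware region with IPv4. The consequence for the allocator (nbl_tc_pedit_get_h_idx() later in this patch) is that an IPv6 entry needs the same index free in two pools at once; a minimal sketch of that rule, with illustrative sizes and a byte-per-slot pool instead of a bitmap for clarity:

#include <stdio.h>

#define POOL_SZ 1024	/* illustrative; cf. NBL_FLOW_TC_PEDIT_IP */

static unsigned char pool[POOL_SZ];	/* normal slots */
static unsigned char pool_h[POOL_SZ];	/* "high" slots used by IPv6 */

/* An IPv6 edit occupies the same index in BOTH pools; returns -1 when full. */
static int alloc_ip6_slot(void)
{
	for (int i = 0; i < POOL_SZ; i++)
		if (!pool[i] && !pool_h[i]) {
			pool[i] = pool_h[i] = 1;
			return i;
		}
	return -1;
}

/* A MAC/IPv4 edit needs only the normal pool. */
static int alloc_slot(void)
{
	for (int i = 0; i < POOL_SZ; i++)
		if (!pool[i]) {
			pool[i] = 1;
			return i;
		}
	return -1;
}

int main(void)
{
	int v4 = alloc_slot();
	int v6 = alloc_ip6_slot();

	printf("v4 slot %d, v6 slot %d\n", v4, v6);	/* 0 and 1 */
	return 0;
}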
+static int nbl_tc_flow_init_pedit(struct nbl_resource_mgt *res_mgt)
+{
+ int ret = 0;
+ struct nbl_tc_pedit_mgt *pedit_mgt;
+ struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+ struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+
+ /* set pedit cap */
+ memset(&tc_flow_mgt->pedit_mgt, 0, sizeof(tc_flow_mgt->pedit_mgt));
+ pedit_mgt = &tc_flow_mgt->pedit_mgt;
+ nbl_tc_flow_set_pedit_res(pedit_mgt->pedit_res);
+ mutex_init(&pedit_mgt->pedit_lock);
+ pedit_mgt->common = common;
+
+ /* set pedit hw-resource */
+ ret = nbl_tc_pedit_init(pedit_mgt);
+ if (ret)
+ nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit init failed");
+ else
+ nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit init success");
+
+ return ret;
+}
+
+static void nbl_tc_flow_uninit_pedit(struct nbl_resource_mgt *res_mgt)
+{
+ struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+ struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+ int ret = 0;
+
+ ret = nbl_tc_pedit_uninit(&tc_flow_mgt->pedit_mgt);
+ if (ret)
+ nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit uninit failed");
+ else
+ nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit uninit success");
+}
+
 static struct nbl_flow_info_init flow_info_init_list[] = {
 { nbl_flow_pp1_ht0_tbl_hash_init },
 { nbl_flow_pp1_ht1_tbl_hash_init },
@@ -4041,6 +4083,7 @@ static struct nbl_flow_info_init flow_info_init_list[] = {
 
 { nbl_flow_tcam_init },
 { nbl_flow_mcc_init },
+ { nbl_tc_flow_init_pedit },
 };
 
 static struct nbl_flow_info_uninit flow_info_uninit_list[] = {
@@ -4050,6 +4093,7 @@ static struct nbl_flow_info_uninit flow_info_uninit_list[] = {
 { nbl_flow_pp2_ht1_tbl_hash_uninit },
 
 { nbl_flow_tcam_uninit },
+ { nbl_tc_flow_uninit_pedit },
 };
 
 static int nbl_flow_info_init_list(struct nbl_resource_mgt *res_mgt)
@@ -4284,14 +4328,19 @@ static int nbl_tc_flow_add_tc_flow(void *priv, struct nbl_tc_flow_param *param)
 ret = -ENOMEM;
 goto out;
 }
- memcpy(tc_flow_ptr->encap_key, &param->act.encap_key, sizeof(param->act.encap_key));
 }
 
+ if (NBL_TC_PEDIT_GET_NODE_RES_VAL(param->act.tc_pedit_info.pedit_node))
+ tc_flow_ptr->pedit_node = param->act.tc_pedit_info.pedit_node;
+
 return ret;
 
 out:
 nbl_fc_del_stats_leonis(priv, param->key.cookie);
+ if (NBL_TC_PEDIT_GET_NODE_RES_VAL(param->act.tc_pedit_info.pedit_node))
+ nbl_tc_pedit_del_node(&tc_flow_mgt->pedit_mgt,
+ &param->act.tc_pedit_info.pedit_node);
 stats_out:
 nbl_tc_flow_delete_index(res_mgt, &param->key);
 flow_idx_err:
@@ -4302,12 +4351,21 @@ static int nbl_tc_flow_del_edit_act(struct nbl_resource_mgt *res_mgt,
 struct nbl_tc_flow *tc_flow_node)
 {
 int ret = 0;
+ struct nbl_tc_pedit_node_res *pedit_node = &tc_flow_node->pedit_node;
+ struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt);
+ struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
 
 if (tc_flow_node->act_flags & NBL_FLOW_ACTION_TUNNEL_ENCAP) {
 ret = nbl_tc_tun_encap_del(res_mgt, tc_flow_node->encap_key);
 kfree(tc_flow_node->encap_key);
 }
 
+ if (NBL_TC_PEDIT_GET_NODE_RES_VAL(*pedit_node)) {
+ ret = nbl_tc_pedit_del_node(&tc_flow_mgt->pedit_mgt, pedit_node);
+ if (ret)
+ nbl_err(common, NBL_DEBUG_FLOW, "del tc_pedit node error");
+ }
+
 return ret;
 }
 
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h
index ddbba520572f..7b2de8556a3d 100644
--- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h
@@ -1,8 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0*/
 /*
  * Copyright (c) 2022 nebula-matrix Limited.
* Author: Bennie Yan */ + #ifndef _NBL_TC_FLOW_LEONIS_H_ #define _NBL_TC_FLOW_LEONIS_H_ @@ -51,12 +52,11 @@ #define NBL_FEM_AT_HALF_LEN 16 #define NBL_AT_WIDTH 22 -#define NBL_PP1_AT2_OFFSET (94 * 1024) -#define NBL_PP1_AT_OFFSET (88 * 1024) -#define NBL_PP2_AT2_OFFSET (72 * 1024) +#define NBL_PP1_AT2_OFFSET (92 * 1024) +#define NBL_PP1_AT_OFFSET (80 * 1024) +#define NBL_PP2_AT2_OFFSET (64 * 1024) -#define NBL_PP0_POWER 0 -#define NBL_PP1_POWER 12 +#define NBL_PP1_POWER 13 #define NBL_PP2_POWER 14 #define NBL_FEM_AT_NO_ENTRY (0) @@ -88,7 +88,6 @@ #define NBL_FLOW_TAB_ONE_TIME 1 #define NBL_FLOW_TAB_TWO_TIME 2 -#define NBL_FLOW_TABLE_IPV4_DEFAULT_MASK 0xFFFFFFFF #define NBL_INVALID_U32 0xFFFFFFFF #define NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK 0xFFFF #define NBL_FLOW_TABLE_FULL_MASK_AS_U32 0xFFFFFFFF @@ -98,6 +97,13 @@ #define NBL_GET_ARG_LEN(sz) ((sz) / sizeof(u32)) #define NBL_GET_ARG_COPY_LEN(sz) ((sz) * sizeof(u32)) +#define NBL_FLOW_TC_PEDIT_MAC 1024 +#define NBL_FLOW_TC_PEDIT_IP 1024 +#define NBL_FLOW_TC_PEDIT_IP6 512 + +#define NBL_FLOW_TC_PEDIT_MAC_BASE 0 +#define NBL_FLOW_TC_PEDIT_IP_BASE NBL_FLOW_TC_PEDIT_MAC + /* at node's idx has two continuous idx, and the begin idx need to be even number */ #define NBL_FLOW_AT_IDX_NUM 2 #define NBL_FLOW_AT_IDX_MULTIPLE 2 @@ -112,6 +118,7 @@ struct nbl_tc_flow { void *profile_rule[NBL_ASSOC_PROFILE_STAGE_NUM]; }; struct nbl_encap_key *encap_key; + struct nbl_tc_pedit_node_res pedit_node; }; struct nbl_tcam_item { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c index 9d4416548212..19ab68addd5e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c @@ -1,5 +1,7 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2021-2030 nbl, Inc. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: */ #include "nbl_tc_mcc_leonis.h" @@ -112,7 +114,7 @@ void nbl_tc_mcc_add_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_m else prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id; phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, - prev_mcc_id, mcc_action); + prev_mcc_id, NBL_MCC_ID_INVALID, mcc_action); mcc_add_succ = true; } if (mcc_add_succ) diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h index eaf67e5f453f..2f3a7f89514b 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h @@ -1,6 +1,9 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2021-2030 nbl, Inc. +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: */ + #ifndef _NBL_TC_MCC_LEONIS_H_ #define _NBL_TC_MCC_LEONIS_H_ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c index 149152b98d8c..2b3cc0796c94 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c @@ -1,3 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ #include "nbl_resource.h" #include "nbl_tc_tun_leonis.h" @@ -75,8 +80,8 @@ int nbl_tc_tun_encap_del(void *priv, struct nbl_encap_key *key) if (del_hw_encap_tbl) phy_ops->del_tnl_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), encap_idx); - nbl_debug(common, NBL_DEBUG_FLOW, "nbl tc del encap_idx: %u, encap_node:%p, " - "del_hw:%d", encap_idx, e, del_hw_encap_tbl); + nbl_debug(common, NBL_DEBUG_FLOW, "nbl tc del encap_idx: %u, encap_node:%p, del_hw:%d", + encap_idx, e, del_hw_encap_tbl); return 0; } @@ -84,6 +89,7 @@ int nbl_tc_tun_encap_del(void *priv, struct nbl_encap_key *key) static int nbl_tc_tun_encap_add(void *priv, struct nbl_rule_action *action) { u16 encap_idx; + int encap_cnt; int ret = 0; struct nbl_encap_entry e; struct nbl_encap_entry *encap_node; @@ -128,6 +134,7 @@ static int nbl_tc_tun_encap_add(void *priv, struct nbl_rule_action *action) } tc_flow_mgt->encap_tbl.tab_cnt++; + encap_cnt = tc_flow_mgt->encap_tbl.tab_cnt; mutex_unlock(&tc_flow_mgt->encap_tbl_lock); @@ -135,7 +142,8 @@ static int nbl_tc_tun_encap_add(void *priv, struct nbl_rule_action *action) phy_ops->add_tnl_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), action->encap_buf, action->encap_idx, action->encap_idx_info); - nbl_debug(common, NBL_DEBUG_FLOW, "nbl tc new encap_idx: %u.", encap_idx); + nbl_debug(common, NBL_DEBUG_FLOW, "add encap_idx %u, cnt %d vni %u, size %u, out_dev %s", + encap_idx, encap_cnt, e.vni, e.encap_size, netdev_name(e.out_dev)); err: return ret; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h index 424ed8781820..381b8050329a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + #ifndef __NBL_TC_TUN_LEONIS_H__ #define __NBL_TC_TUN_LEONIS_H__ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c index 52782beef501..bc96a6f722a0 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c @@ -113,6 +113,7 @@ static int nbl_res_intr_configure_msix_map(void *priv, u16 func_id, u16 num_net_ intr_mgt->func_intr_res[func_id].interrupts = interrupts; intr_mgt->func_intr_res[func_id].num_interrupts = requested; + intr_mgt->func_intr_res[func_id].num_net_interrupts = num_net_msix; for (i = 0; i < num_net_msix; i++) { intr_index = find_first_zero_bit(intr_mgt->interrupt_net_bitmap, @@ -156,7 +157,7 @@ static int nbl_res_intr_configure_msix_map(void *priv, u16 func_id, u16 num_net_ /* use ctrl dev bdf */ phy_ops->configure_msix_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, true, - msix_map_table->dma, common->bus, common->devid, + msix_map_table->dma, common->hw_bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common)); return 0; @@ -305,7 +306,7 @@ static int nbl_res_intr_get_abnormal_irq_num(void *priv) return 1; } -static u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level) +u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level) { switch (last_level) { case NBL_INTR_SUPPRESS_LEVEL0: @@ -314,17 +315,24 @@ static u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level else return NBL_INTR_SUPPRESS_LEVEL0; case NBL_INTR_SUPPRESS_LEVEL1: - if (rates > NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD) + if (rates > NBL_INTR_SUPPRESS_LEVEL2_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL2; + else if (rates > NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD) return NBL_INTR_SUPPRESS_LEVEL1; else return NBL_INTR_SUPPRESS_LEVEL0; + case NBL_INTR_SUPPRESS_LEVEL2: + if (rates > NBL_INTR_SUPPRESS_LEVEL2_DOWNGRADE_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL2; + else + return NBL_INTR_SUPPRESS_LEVEL1; default: return NBL_INTR_SUPPRESS_LEVEL0; } } -static void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, - u16 num_net_msix, u16 level) +void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); @@ -343,11 +351,24 @@ static void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 ve rate = NBL_INTR_SUPPRESS_LEVEL1_25G_RATE; } break; + case NBL_INTR_SUPPRESS_LEVEL2: + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + pnum = NBL_INTR_SUPPRESS_LEVEL2_100G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL2_100G_RATE; + } else { + pnum = NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL1_25G_RATE; + } + break; default: pnum = NBL_INTR_SUPPRESS_LEVEL0_PNUM; rate = NBL_INTR_SUPPRESS_LEVEL0_RATE; break; } + + if (num_net_msix == U16_MAX) + num_net_msix = intr_mgt->func_intr_res[func_id].num_net_interrupts; + for (i = 0; i < num_net_msix; i++) { global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id + i]; phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h index daaef6f86ac9..14d540b94106 100644 --- 
a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -12,15 +12,20 @@ #define NBL_MSIX_MAP_TABLE_MAX_ENTRIES (1024) #define NBL_INTR_SUPPRESS_LEVEL1_THRESHOLD (100000) /* 100k pps */ +#define NBL_INTR_SUPPRESS_LEVEL2_THRESHOLD (4000000) /* 4M pps */ #define NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD (60000) /* 60kpps */ +#define NBL_INTR_SUPPRESS_LEVEL2_DOWNGRADE_THRESHOLD (2400000) /* 2.4Mpps */ #define NBL_INTR_SUPPRESS_LEVEL0 (0) #define NBL_INTR_SUPPRESS_LEVEL1 (1) +#define NBL_INTR_SUPPRESS_LEVEL2 (2) #define NBL_INTR_SUPPRESS_LEVEL0_PNUM (0) #define NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM (8) -#define NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM (16) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM (8) +#define NBL_INTR_SUPPRESS_LEVEL2_100G_PNUM (24) #define NBL_INTR_SUPPRESS_LEVEL0_RATE (0) #define NBL_INTR_SUPPRESS_LEVEL1_25G_RATE (1) -#define NBL_INTR_SUPPRESS_LEVEL1_100G_RATE (2) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_RATE (1) +#define NBL_INTR_SUPPRESS_LEVEL2_100G_RATE (3) #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h index ccbc5cf9f1b5..383dbd5dd08f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + #ifndef _NBL_P4_ACTION_H #define _NBL_P4_ACTION_H diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h index 898454b8bba9..054d68856205 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -28,6 +28,11 @@ struct nbl_phy_mgt { enum nbl_hw_status hw_status; }; +struct nbl_phy_ped_tbl { + u64 addr:56; + u64 addr_len:8; +}; + #define NBL_DELAY_MIN_TIME_FOR_REGS 400 /* 200us for palladium,3us for s2c */ #define NBL_DELAY_MAX_TIME_FOR_REGS 500 /* 300us for palladium,5us for s2c */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h index 88e4fff03926..d5b72c6368d9 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c index ddce1eac4acb..37c2a371123a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c @@ -441,3 +441,18 @@ void nbl_res_set_hw_status(void *priv, enum nbl_hw_status hw_status) phy_ops->set_hw_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hw_status); } + +int nbl_res_get_pf_vf_num(void *priv, u16 pf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_sriov_info *sriov_info; + + if (pf_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) + return -1; + + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + pf_id; + if (!sriov_info->num_vfs) + return -1; + + return sriov_info->num_vfs; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h index 12e567c0b031..f867e7a453fc 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -35,6 +35,8 @@ #define NBL_RES_MGT_TO_VSI_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->vsi_info) #define NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eth_bond_info) #define NBL_RES_MGT_TO_PF_NUM(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->max_pf) +#define NBL_RES_MGT_TO_VDPA_VF_STATS(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->vdpa.vf_stats) +#define NBL_RES_MGT_TO_USTORE_STATS(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->ustore_stats) #define NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt) ((res_mgt)->phy_ops_tbl) #define NBL_RES_MGT_TO_PHY_OPS(res_mgt) (NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt)->ops) @@ -69,11 +71,15 @@ #define NBL_DEFAULT_PF_HW_QUEUE_NUM (16) #define NBL_DEFAULT_USER_HW_QUEUE_NUM (16) #define NBL_DEFAULT_VF_HW_QUEUE_NUM (2) +#define NBL_VSI_PF_LEGACY_QUEUE_NUM_MAX (NBL_MAX_TXRX_QUEUE_PER_FUNC - \ + NBL_DEFAULT_REP_HW_QUEUE_NUM) #define NBL_SPECIFIC_VSI_NET_ID_OFFSET (4) #define NBL_MAX_CACHE_SIZE (256) #define NBL_MAX_BATCH_DESC (64) +#define NBL_VDPA_ITR_BATCH_CNT (64) + enum nbl_qid_map_table_type { NBL_MASTER_QID_MAP_TABLE, NBL_SLAVE_QID_MAP_TABLE, @@ -102,6 +108,7 @@ struct nbl_queue_info { u16 rss_ret_base; u16 *txrx_queues; u16 *queues_context; + u32 *uvn_stat_pkt_drop; u16 rss_entry_size; u16 split; u32 curr_qps; @@ -145,6 +152,7 @@ struct nbl_msix_map_table { struct nbl_func_interrupt_resource_mng { u16 num_interrupts; + u16 num_net_interrupts; u16 msix_base; u16 msix_max; u16 *interrupts; @@ -196,6 +204,7 @@ struct nbl_tx_buffer { struct nbl_dma_info { dma_addr_t addr; struct page *page; + u32 size; }; struct nbl_page_cache { @@ -206,12 +215,14 @@ struct nbl_page_cache { struct nbl_rx_buffer { struct nbl_dma_info *di; - u32 offset; + u16 offset; + u16 rx_pad; + u16 size; bool last_in_page; }; struct nbl_res_vector { - struct napi_struct napi; + struct nbl_napi_struct nbl_napi; struct nbl_res_tx_ring *tx_ring; struct nbl_res_rx_ring *rx_ring; struct nbl_res_tx_ring *xdp_ring; @@ -277,6 +288,7 @@ struct nbl_res_rx_ring { u32 buf_len; u16 avail_used_flags; bool used_wrap_counter; + u8 nid; u16 next_to_use; u16 next_to_clean; u16 tail_ptr; @@ -290,12 +302,15 @@ struct nbl_res_rx_ring { struct nbl_common_info *common; void *txrx_mgt; void *xdp_prog; + 
struct xdp_rxq_info xdp_rxq; // dma for desc[] dma_addr_t dma; // size for desc[] unsigned int size; bool valid; u16 notify_qid; + + u16 frags_num_per_page; } ____cacheline_internodealigned_in_smp; struct nbl_txrx_bond_info { @@ -339,13 +354,13 @@ struct nbl_adminq_mgt { struct wait_queue_head wait_queue; struct mutex eth_lock; /* To prevent link_state_changed mismodified. */ + void *cmd_filter; }; /* --------- FLOW ---------- */ -#define NBL_FEM_HT_PP0_LEN (1 * 1024) - -#define NBL_MACVLAN_TABLE_LEN (4096) +#define NBL_FEM_HT_PP0_LEN (2 * 1024) +#define NBL_MACVLAN_TABLE_LEN (4096 * 2) enum nbl_next_stg_id_e { NBL_NEXT_STG_PA = 1, @@ -368,16 +383,19 @@ enum { NBL_FLOW_UP, NBL_FLOW_DOWN, NBL_FLOW_MACVLAN_MAX, - NBL_FLOW_L2_UP = NBL_FLOW_MACVLAN_MAX, - NBL_FLOW_L2_DOWN, - NBL_FLOW_L3_UP, - NBL_FLOW_L3_DOWN, - NBL_FLOW_TYPE_MAX, - NBL_FLOW_TLS_UP = NBL_FLOW_TYPE_MAX, - NBL_FLOW_IPSEC_DOWN, - NBL_FLOW_ACCEL_MAX, - NBL_FLOW_LLDP_LACP_UP, + NBL_FLOW_LLDP_LACP_UP = NBL_FLOW_MACVLAN_MAX, NBL_FLOW_PMD_ND_UPCALL, + NBL_FLOW_L2_UP_MULTI_MCAST, + NBL_FLOW_L3_UP_MULTI_MCAST, + NBL_FLOW_UP_MULTI_MCAST_END, + NBL_FLOW_L2_DOWN_MULTI_MCAST = NBL_FLOW_UP_MULTI_MCAST_END, + NBL_FLOW_L3_DOWN_MULTI_MCAST, + NBL_FLOW_DOWN_MULTI_MCAST_END, + NBL_FLOW_ACCEL_BEGIN = NBL_FLOW_DOWN_MULTI_MCAST_END, + NBL_FLOW_TLS_UP = NBL_FLOW_ACCEL_BEGIN, + NBL_FLOW_IPSEC_DOWN, + NBL_FLOW_ACCEL_END, + NBL_FLOW_TYPE_MAX = NBL_FLOW_ACCEL_END, }; struct nbl_flow_ht_key { @@ -409,19 +427,44 @@ struct nbl_flow_fem_entry { struct nbl_flow_mcc_node { struct list_head node; + u16 data; u16 mcc_id; - u16 mcc_head; + u16 mcc_action; + bool mcc_head; + u8 type; }; -struct nbl_flow_multi_group { - struct list_head mcc_list; +struct nbl_flow_mcc_group { + struct list_head group_node; + /* list_head for mcc_node_list */ + struct list_head mcc_node; struct list_head mcc_head; - struct nbl_flow_fem_entry entry[NBL_FLOW_TYPE_MAX - NBL_FLOW_MACVLAN_MAX]; - u8 ether_id; - u16 mcc_id; + unsigned long *vsi_bitmap; + u32 nbits; + u32 vsi_base; + u32 vsi_num; + u32 ref_cnt; + u16 up_mcc_id; + u16 down_mcc_id; + bool multi; +}; + +struct nbl_flow_switch_res { + void *mac_hash_tbl; + unsigned long *vf_bitmap; + struct list_head allmulti_head; + struct list_head allmulti_list; + struct list_head mcc_group_head; + struct nbl_flow_fem_entry allmulti_up[2]; + struct nbl_flow_fem_entry allmulti_down[2]; + u16 vld; u16 network_status; u16 pfc_mode; u16 bp_mode; + u16 allmulti_first_mcc; + u16 num_vfs; + u16 active_vfs; + u8 ether_id; }; struct nbl_flow_lacp_rule { @@ -459,23 +502,29 @@ struct nbl_flow_nd_upcall_rule { struct list_head node; }; +struct nbl_event_mirror_outputport_data { + u16 func_id; + bool opcode; /* true: add; false: del */ +}; + struct nbl_flow_mgt { unsigned long *flow_id_bitmap; + unsigned long *mcc_id_bitmap; DECLARE_BITMAP(tcam_id, NBL_TCAM_TABLE_LEN); - u32 pp_tcam_count; - u32 unicast_mac_threshold; - u32 accel_flow_count; struct nbl_flow_ht_mng pp0_ht0_mng; struct nbl_flow_ht_mng pp0_ht1_mng; - struct nbl_flow_multi_group multi_flow[NBL_MAX_ETHERNET]; - void *mac_hash_tbl[NBL_MAX_ETHERNET]; + struct nbl_flow_switch_res switch_res[NBL_MAX_ETHERNET]; struct list_head lldp_list; struct list_head lacp_list; struct list_head ul4s_head; struct list_head dprbac_head; - void *mcc_tbl_priv; struct list_head nd_upcall_list; // note: works only for offload network - // not the physical network + u32 pp_tcam_count; + u32 accel_flow_count; + u32 flow_id_cnt; + u16 vsi_max_per_switch; +#define NBL_MIRROR_OUTPUTPORT_MAX_FUNC 8 + u16 
mirror_outputport_func[NBL_MIRROR_OUTPUTPORT_MAX_FUNC]; }; #define NBL_FLOW_INIT_BIT BIT(1) @@ -503,22 +552,22 @@ enum nbl_flow_key_type { }; #define NBL_PP0_KT_NUM (0) -#define NBL_PP1_KT_NUM (12 * 1024) -#define NBL_PP2_KT_NUM (112 * 1024) -#define NBL_PP0_KT_OFFSET (124 * 1024) -#define NBL_PP1_KT_OFFSET (112 * 1024) -#define NBL_FEM_HT_PP0_LEN (1 * 1024) -#define NBL_FEM_HT_PP1_LEN (3 * 1024) +#define NBL_PP1_KT_NUM (24 * 1024) +#define NBL_PP2_KT_NUM (96 * 1024) +#define NBL_PP0_KT_OFFSET (120 * 1024) +#define NBL_PP1_KT_OFFSET (96 * 1024) +#define NBL_FEM_HT_PP0_LEN (2 * 1024) +#define NBL_FEM_HT_PP1_LEN (6 * 1024) #define NBL_FEM_HT_PP2_LEN (16 * 1024) -#define NBL_FEM_HT_PP0_DEPTH (1 * 1024) -#define NBL_FEM_HT_PP1_DEPTH (3 * 1024) -#define NBL_FEM_HT_PP2_DEPTH (0) -#define NBL_FEM_AT_PP1_LEN (6 * 1024) -#define NBL_FEM_AT2_PP1_LEN (2 * 1024) -#define NBL_FEM_AT_PP2_LEN (72 * 1024) +#define NBL_FEM_HT_PP0_DEPTH (2 * 1024) +#define NBL_FEM_HT_PP1_DEPTH (6 * 1024) +#define NBL_FEM_HT_PP2_DEPTH (0) /* 16K, treat as zero */ +#define NBL_FEM_AT_PP1_LEN (12 * 1024) +#define NBL_FEM_AT2_PP1_LEN (4 * 1024) +#define NBL_FEM_AT_PP2_LEN (64 * 1024) #define NBL_FEM_AT2_PP2_LEN (16 * 1024) -#define NBL_TC_MCC_TBL_DEPTH (7168) -#define NBL_TC_ENCAP_TBL_DEPTH (4 * 1024) +#define NBL_TC_MCC_TBL_DEPTH (4096) +#define NBL_TC_ENCAP_TBL_DEPTH (4 * 1024) struct nbl_flow_key_info { bool valid; @@ -733,6 +782,30 @@ struct nbl_tc_mcc_mgt { u16 mcc_offload_cnt; }; +struct nbl_tc_pedit_res_info { +#define NBL_TC_MAX_PED_IDX 2048 + /* common pedit resource */ + DECLARE_BITMAP(pedit_pool, NBL_TC_MAX_PED_IDX); + void *pedit_tbl; + u32 pedit_num:16; + u32 pedit_cnt:16; + + /* special use for leonis-ipv6, ipv6 need 2 addrs */ + DECLARE_BITMAP(pedit_pool_h, NBL_TC_MAX_PED_H_IDX); + void *pedit_tbl_h; + /* normal could store in _h */ + u32 pedit_num_h:16; + u32 pedit_cnt_h:16; + + u32 pedit_base_id; +}; + +struct nbl_tc_pedit_mgt { + struct nbl_tc_pedit_res_info pedit_res[NBL_FLOW_PED_RES_MAX]; + struct nbl_common_info *common; + struct mutex pedit_lock; /* protect the pedit */ +}; + struct nbl_tc_flow_mgt { spinlock_t flow_lock; /* used to lock flow resource */ struct nbl_flow_prf_upcall_info prf_info; @@ -778,6 +851,9 @@ struct nbl_tc_flow_mgt { struct mutex encap_tbl_lock; /* used to lock encap resource */ struct nbl_flow_tab_hash_info encap_tbl; DECLARE_BITMAP(encap_tbl_bmp, NBL_TC_ENCAP_TBL_DEPTH); + + /* pedit info */ + struct nbl_tc_pedit_mgt pedit_mgt; }; /* --------- ACCEL ---------- */ @@ -862,6 +938,7 @@ struct nbl_eth_info { u8 resv[3]; u8 eth_id[NBL_MAX_PF]; u8 logic_eth_id[NBL_MAX_PF]; + u64 link_down_count[NBL_MAX_ETHERNET]; }; enum nbl_vsi_serv_type { @@ -885,6 +962,7 @@ struct nbl_vsi_mac_info { u16 vlan_tci; int rate; u8 mac[ETH_ALEN]; + bool trusted; }; struct nbl_vsi_info { @@ -929,9 +1007,23 @@ struct nbl_rdma_mem_type_info { u32 mem_type; }; +struct nbl_vdpa_status { + struct nbl_vf_stats init_stats; + struct nbl_vf_stats prev_stats; + unsigned long timestamp; + u16 itr_level; +}; + +struct nbl_vdpa_info { + DECLARE_BITMAP(vdpa_func_bitmap, NBL_MAX_FUNC); + struct nbl_vdpa_status *vf_stats[NBL_MAX_FUNC]; + u32 start; +}; + struct nbl_resource_info { /* ctrl-dev owned pfs */ DECLARE_BITMAP(func_bitmap, NBL_MAX_FUNC); + struct nbl_vdpa_info vdpa; struct nbl_sriov_info *sriov_info; struct nbl_eswitch_info *eswitch_info; struct nbl_eth_info *eth_info; @@ -950,8 +1042,13 @@ struct nbl_resource_info { u8 max_pf; u16 nd_upcall_refnt; struct nbl_board_port_info board_info; + /* store all pf names for 
vf/rep device name use */ + char pf_name_list[NBL_MAX_PF][IFNAMSIZ]; u8 link_forced_info[NBL_MAX_FUNC]; + struct nbl_mtu_entry mtu_list[NBL_MAX_MTU]; + + struct nbl_ustore_stats *ustore_stats; }; enum { @@ -1034,6 +1131,7 @@ struct nbl_pmd_status { struct nbl_resource_common_ops { u16 (*vsi_id_to_func_id)(void *res_mgt, u16 vsi_id); int (*vsi_id_to_pf_id)(void *res_mgt, u16 vsi_id); + u16 (*vsi_id_to_vf_id)(void *res_mgt, u16 vsi_id); u16 (*pfvfid_to_func_id)(void *res_mgt, int pfid, int vfid); u16 (*pfvfid_to_vsi_id)(void *res_mgt, int pfid, int vfid, u16 type); u16 (*func_id_to_vsi_id)(void *res_mgt, u16 func_id, u16 type); @@ -1161,5 +1259,9 @@ void nbl_res_pf_dev_vsi_type_to_hw_vsi_type(u16 src_type, enum nbl_vsi_serv_type int nbl_res_get_rep_idx(struct nbl_eswitch_info *eswitch_info, u16 rep_vsi_id); bool nbl_res_vf_is_active(void *priv, u16 func_id); void nbl_res_set_hw_status(void *priv, enum nbl_hw_status hw_status); +int nbl_res_get_pf_vf_num(void *priv, u16 pf_id); +u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level); +void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c new file mode 100644 index 000000000000..27524213ad29 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_tc_pedit.h" +#include "nbl_p4_actions.h" + +static int nbl_tc_pedit_get_h_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, struct nbl_tc_pedit_entry *e) +{ + u32 ped_idx = 0; + int idx = 0; + bool h_idx_vld = false; + int ret = -ENOMEM; + + if (pedit_res->pedit_cnt_h >= pedit_res->pedit_num_h) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit over-hlimit (%u-%u)", + pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + return -ENOBUFS; + } + + ped_idx = find_first_zero_bit(pedit_res->pedit_pool_h, pedit_res->pedit_num_h); + WARN_ON(ped_idx >= pedit_res->pedit_num_h); + for (idx = ped_idx; idx < pedit_res->pedit_num_h; ++idx) { + /* don't overlap the pool */ + if (idx >= pedit_res->pedit_num) + break; + + /* only when idx in pool and h_pool are both available, then idx is valid */ + if (!test_bit(idx, pedit_res->pedit_pool_h) && + !test_bit(idx, pedit_res->pedit_pool)) { + h_idx_vld = true; + break; + } + } + + if (h_idx_vld) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit find a vld idx(%u)-(%u-%u)", + idx, pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + ret = 0; + /* now set bit in both pool and h_pool */ + set_bit(idx, pedit_res->pedit_pool); + set_bit(idx, pedit_res->pedit_pool_h); + + /* h_idx occupy 2 bits actually */ + ++pedit_res->pedit_cnt; + ++pedit_res->pedit_cnt_h; + NBL_TC_PEDIT_SET_NODE_H(e); + NBL_TC_PEDIT_SET_NODE_IDX(e, idx); + } else { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit no valid hidx in hpool-(%u-%u)", + pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + } + + return ret; +} + +static int nbl_tc_pedit_get_normal_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, + struct nbl_tc_pedit_entry *e) +{ + u32 ped_idx = 0; + + ped_idx = find_first_zero_bit(pedit_res->pedit_pool, pedit_res->pedit_num); + /* normal ped_idx used up, try get from pedit_h if we got */ + if (ped_idx >= pedit_res->pedit_num && pedit_res->pedit_num_h) { + nbl_debug(common, 
NBL_DEBUG_FLOW, "tc_pedit try to get idx from h_pool"); + + if (pedit_res->pedit_cnt_h >= pedit_res->pedit_num_h) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit over-hlimit for normal (%u-%u)", + pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + return -ENOBUFS; + } + ped_idx = find_first_zero_bit(pedit_res->pedit_pool_h, pedit_res->pedit_num_h); + WARN_ON(ped_idx >= pedit_res->pedit_num_h); + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit get h-idx(%u) success(%u-%u)", + ped_idx, pedit_res->pedit_num_h, pedit_res->pedit_cnt_h); + NBL_TC_PEDIT_SET_NORMAL_IN_H(e); + ++pedit_res->pedit_cnt_h; + set_bit(ped_idx, pedit_res->pedit_pool_h); + } else if (ped_idx >= pedit_res->pedit_num) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit get no available idx(%u-%u)", + pedit_res->pedit_num, pedit_res->pedit_cnt); + return -ENOMEM; + } + /* get a normal idx */ + nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit get idx(%u) success(%u-%u)", + ped_idx, pedit_res->pedit_num, pedit_res->pedit_cnt); + set_bit(ped_idx, pedit_res->pedit_pool); + + NBL_TC_PEDIT_SET_NODE_IDX(e, ped_idx); + return 0; +} + +static int nbl_tc_pedit_get_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, struct nbl_tc_pedit_entry *e) +{ + int ret = 0; + + if (pedit_res->pedit_cnt >= pedit_res->pedit_num) { + nbl_info(common, NBL_DEBUG_FLOW, "tc_pedit over-limit (%u-%u)", + pedit_res->pedit_num, pedit_res->pedit_cnt); + return -ENOBUFS; + } + + if (NBL_TC_PEDIT_GET_NODE_H(e)) + ret = nbl_tc_pedit_get_h_idx(pedit_res, common, e); + + else + ret = nbl_tc_pedit_get_normal_idx(pedit_res, common, e); + + if (ret) + return ret; + ++pedit_res->pedit_cnt; + NBL_TC_PEDIT_SET_NODE_VAL(e); + NBL_TC_PEDIT_SET_NODE_BASE_ID(e, pedit_res->pedit_base_id); + NBL_TC_PEDIT_INC_NODE_REF(e); + return 0; +} + +static int nbl_tc_pedit_put_idx(struct nbl_tc_pedit_res_info *pedit_res, + struct nbl_common_info *common, struct nbl_tc_pedit_entry *e) +{ + void *pool_addr; + bool idx_h = false; + + if (NBL_TC_PEDIT_GET_NODE_H(e)) { + WARN_ON(NBL_TC_PEDIT_GET_NODE_IDX(e) >= pedit_res->pedit_num_h); + pool_addr = pedit_res->pedit_pool_h; + idx_h = true; + clear_bit(NBL_TC_PEDIT_GET_NODE_IDX(e), pedit_res->pedit_pool); + pedit_res->pedit_cnt_h--; + pedit_res->pedit_cnt--; + } else if (NBL_TC_PEDIT_GET_NORMAL_IN_H(e)) { + WARN_ON(NBL_TC_PEDIT_GET_NODE_IDX(e) >= pedit_res->pedit_num_h); + pool_addr = pedit_res->pedit_pool_h; + idx_h = true; + pedit_res->pedit_cnt_h--; + } else { + WARN_ON(NBL_TC_PEDIT_GET_NODE_IDX(e) >= pedit_res->pedit_num); + pool_addr = pedit_res->pedit_pool; + } + + if (!test_bit(NBL_TC_PEDIT_GET_NODE_IDX(e), pool_addr)) + nbl_err(common, NBL_DEBUG_FLOW, "tc_pedit clear a null bit %u in h(%d)", + NBL_TC_PEDIT_GET_NODE_IDX(e), idx_h ? 
1 : 0);
+
+ pedit_res->pedit_cnt--;
+ nbl_debug(common, NBL_DEBUG_FLOW, "tc_pedit put idx(%u) success normal(%u-%u)-high(%u-%u)",
+ NBL_TC_PEDIT_GET_NODE_IDX(e), pedit_res->pedit_num, pedit_res->pedit_cnt,
+ pedit_res->pedit_num_h, pedit_res->pedit_cnt_h);
+ clear_bit(NBL_TC_PEDIT_GET_NODE_IDX(e), pool_addr);
+ NBL_TC_PEDIT_DEC_NODE_REF(e);
+ NBL_TC_PEDIT_SET_NODE_INVAL(e);
+ return 0;
+}
+
+static enum nbl_flow_ped_type nbl_tc_pedit_get_ped_type(enum nbl_flow_ped_type ped_type)
+{
+ /* default ped_type return directly */
+ if (NBL_TC_PEDIT_IS_DEFAULT_TYPE(ped_type))
+ return ped_type;
+
+ NBL_TC_PEDIT_UNSET_D_TYPE(ped_type);
+ /* we need to get the ped_type as the hw recognizes it */
+ return ped_type;
+}
+
+u16 nbl_tc_pedit_get_hw_id(struct nbl_tc_pedit_entry *ped_node)
+{
+ return (ped_node->hnode.node_idx + ped_node->hnode.node_base);
+}
+
+int nbl_tc_pedit_del_node(struct nbl_tc_pedit_mgt *pedit_mgt,
+ struct nbl_tc_pedit_node_res *pedit_node)
+{
+ struct nbl_common_info *common = pedit_mgt->common;
+ struct nbl_tc_pedit_res_info *pedit_res = pedit_mgt->pedit_res;
+ int idx = 0;
+ struct nbl_tc_pedit_entry *l_e;
+ void *h_e;
+ u32 e_ref = 0;
+ int ret = -EINVAL;
+ void *pedit_tbl;
+ enum nbl_flow_ped_type ped_type;
+
+ if (!NBL_TC_PEDIT_GET_NODE_RES_VAL(*pedit_node))
+ return -EINVAL;
+ mutex_lock(&pedit_mgt->pedit_lock);
+ for (idx = 0; idx < NBL_FLOW_PED_RES_MAX; ++idx) {
+ l_e = NBL_TC_PEDIT_GET_NODE_RES_ENTRY(*pedit_node, idx);
+ if (!l_e)
+ continue;
+
+ nbl_debug(common, NBL_DEBUG_FLOW, "nbl_tc_pedit(%u):del %d-%u-%u-(%u-%u)",
+ NBL_TC_PEDIT_GET_NODE_REF(l_e),
+ idx, NBL_TC_PEDIT_GET_NODE_IDX(l_e),
+ nbl_tc_pedit_get_hw_id(l_e),
+ NBL_TC_PEDIT_GET_NORMAL_IN_H(l_e),
+ NBL_TC_PEDIT_GET_NODE_H(l_e));
+
+ /* get the hw ped_type, since resources are stored hw-style */
+ ped_type = nbl_tc_pedit_get_ped_type(idx);
+ WARN_ON(!NBL_TC_PEDIT_GET_NODE_VAL(l_e));
+ if (NBL_TC_PEDIT_GET_NODE_H(l_e))
+ pedit_tbl = pedit_res[ped_type].pedit_tbl_h;
+ else
+ pedit_tbl = pedit_res[ped_type].pedit_tbl;
+
+ h_e = nbl_common_get_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(l_e));
+ WARN_ON(l_e != h_e);
+ e_ref = NBL_TC_PEDIT_GET_NODE_REF(l_e);
+ if (e_ref > 1) {
+ NBL_TC_PEDIT_DEC_NODE_REF(l_e);
+ } else {
+ NBL_TC_PEDIT_DEC_NODE_REF(l_e);
+ nbl_tc_pedit_put_idx(&pedit_res[ped_type], common, l_e);
+ nbl_common_free_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(l_e));
+ }
+ ret = 0;
+ }
+ mutex_unlock(&pedit_mgt->pedit_lock);
+
+ return ret;
+}
+
+int nbl_tc_pedit_add_node(struct nbl_tc_pedit_mgt *pedit_mgt,
+ struct nbl_tc_pedit_entry *e,
+ void **e_out, enum nbl_flow_ped_type pedit_type)
+{
+ struct nbl_tc_pedit_res_info *pedit_res = &pedit_mgt->pedit_res[pedit_type];
+ struct nbl_common_info *common = pedit_mgt->common;
+ struct nbl_tc_pedit_entry *h_e;
+ void *new_e;
+ int ret = 0;
+ void *pedit_tbl = pedit_res->pedit_tbl;
+
+ if (NBL_TC_PEDIT_GET_NODE_H(e))
+ pedit_tbl = pedit_res->pedit_tbl_h;
+
+ if (!pedit_tbl) {
+ nbl_err(common, NBL_DEBUG_FLOW, "nbl_tc_pedit add failed: not init type %d",
+ pedit_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pedit_mgt->pedit_lock);
+ h_e = nbl_common_get_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(e));
+ if (h_e) {
+ NBL_TC_PEDIT_INC_NODE_REF(h_e);
+ nbl_debug(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type(%d) exist in %u-%u(%u)",
+ pedit_type, NBL_TC_PEDIT_GET_NODE_IDX(h_e),
+ nbl_tc_pedit_get_hw_id(h_e), NBL_TC_PEDIT_GET_NODE_REF(h_e));
+ *e_out = h_e;
+ NBL_TC_PEDIT_COPY_NODE(h_e, e);
+ goto pedit_add_fin;
+ }
+
+ ret = nbl_tc_pedit_get_idx(pedit_res, common, e);
+ if (ret)
+ goto pedit_add_fin;
+
+ ret = nbl_common_alloc_hash_node(pedit_tbl, NBL_TC_PEDIT_GET_KEY(e), e, &new_e);
+ if (ret) {
+ nbl_err(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:type(%d) add hash failed",
+ pedit_type);
+ nbl_tc_pedit_put_idx(pedit_res, common, e);
+ goto pedit_add_fin;
+ }
+
+ *e_out = new_e;
+ NBL_TC_PEDIT_SET_NODE_ENTRY(e, new_e);
+ /* tell caller this is the first node added in hash */
+ NBL_TC_PEDIT_SET_NODE_INVAL(e);
+pedit_add_fin:
+ mutex_unlock(&pedit_mgt->pedit_lock);
+ return ret;
+}
+
+int nbl_tc_pedit_init(struct nbl_tc_pedit_mgt *pedit_mgt)
+{
+ int ret = 0;
+ int idx = 0;
+ struct nbl_common_info *common = pedit_mgt->common;
+ struct nbl_tc_pedit_res_info *pedit_res = pedit_mgt->pedit_res;
+ struct nbl_hash_tbl_key tbl_key = {0};
+
+ for (idx = 0; idx < NBL_FLOW_PED_RES_MAX; ++idx) {
+ if (!pedit_res[idx].pedit_num) {
+ nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:pedit(%d) skip init",
+ idx);
+ continue;
+ }
+ NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), NBL_TC_PEDIT_KEY_LEN,
+ sizeof(struct nbl_tc_pedit_entry),
+ pedit_res[idx].pedit_num, false);
+ pedit_res[idx].pedit_tbl = nbl_common_init_hash_table(&tbl_key);
+ if (!pedit_res[idx].pedit_tbl) {
+ nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:pedit(%d) init failed",
+ idx);
+ return -ENOMEM;
+ }
+
+ /* init pedit_h if needed */
+ if (pedit_res[idx].pedit_num_h) {
+ NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common),
+ NBL_TC_PEDIT_KEY_LEN,
+ sizeof(struct nbl_tc_pedit_entry),
+ pedit_res[idx].pedit_num_h, false);
+ pedit_res[idx].pedit_tbl_h = nbl_common_init_hash_table(&tbl_key);
+ if (!pedit_res[idx].pedit_tbl_h) {
+ nbl_info(common, NBL_DEBUG_FLOW, "nbl_tc_pedit:pedit(%d) h-tbl init failed",
+ idx);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return ret;
+}
+
+int nbl_tc_pedit_uninit(struct nbl_tc_pedit_mgt *pedit_mgt)
+{
+ int idx = 0;
+ struct nbl_tc_pedit_res_info *pedit_res;
+
+ if (!pedit_mgt)
+ return -EINVAL;
+
+ pedit_res = pedit_mgt->pedit_res;
+ for (idx = 0; idx < NBL_FLOW_PED_RES_MAX; ++idx) {
+ nbl_common_remove_hash_table(pedit_res[idx].pedit_tbl, NULL);
+ nbl_common_remove_hash_table(pedit_res[idx].pedit_tbl_h, NULL);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h
new file mode 100644
index 000000000000..245a6ac8dde6
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_tc_pedit.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0*/
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: + */ + +#ifndef __NBL_TC_PEDIT_H__ +#define __NBL_TC_PEDIT_H__ + +#include "nbl_include.h" +#include "nbl_core.h" +#include "nbl_resource.h" + +#define NBL_TC_PEDIT_MAC_LEN 6 +#define NBL_TC_PEDIT_IP6_LEN 16 +#define NBL_TC_PEDIT_KEY_LEN 16 +#define NBL_TC_PEDIT_TAB_LEN 8 + +#define NBL_TC_PEDIT_HW_END_PED_TYPE NBL_FLOW_PED_UMAC_D_TYPE +#define NBL_TC_PEDIT_IS_DEFAULT_TYPE(p_type) ((p_type) < NBL_TC_PEDIT_HW_END_PED_TYPE) +#define NBL_TC_PEDIT_SET_D_TYPE(p_type) ((p_type) += NBL_TC_PEDIT_HW_END_PED_TYPE) +#define NBL_TC_PEDIT_UNSET_D_TYPE(p_type) ((p_type) -= NBL_TC_PEDIT_HW_END_PED_TYPE) + +#define NBL_TC_PEDIT_IP6_PHY_TYPE_GAP (NBL_FLOW_PED_UIP6_TYPE - NBL_FLOW_PED_UIP_TYPE) +#define NBL_TC_PEDIT_GET_IP6_PHY_TYPE(p_type) ((p_type) + NBL_TC_PEDIT_IP6_PHY_TYPE_GAP) + +struct nbl_tc_pedit_node { + u32 ref_cnt:31; + u32 normal_in_h:1; + u32 node_idx:15; + u32 node_base:15; + u32 node_h:1; + u32 node_val:1; + void *entry; + u8 key[]; +}; + +struct nbl_tc_pedit_entry { + struct nbl_tc_pedit_node hnode; + union { + u8 mac[NBL_TC_PEDIT_MAC_LEN]; + u32 ip[2]; + u8 ip6[NBL_TC_PEDIT_IP6_LEN]; + u8 key[NBL_TC_PEDIT_KEY_LEN]; + }; +}; + +#define NBL_TC_PEDIT_SET_NODE_RES_VAL(node) ((node).pedit_val = 1) +#define NBL_TC_PEDIT_SET_NODE_RES_ENTRY(node, idx, e) ((node).pedit_node[idx] = e) + +#define NBL_TC_PEDIT_GET_NODE_RES_VAL(node) ((node).pedit_val) +#define NBL_TC_PEDIT_GET_NODE_RES_ENTRY(node, idx) \ + ((struct nbl_tc_pedit_entry *)(node).pedit_node[idx]) + +#define NBL_TC_PEDIT_GET_KEY(ped_node) ((ped_node)->hnode.key) +#define NBL_TC_PEDIT_GET_NODE_REF(ped_node) ((ped_node)->hnode.ref_cnt) +#define NBL_TC_PEDIT_GET_NODE_H(ped_node) ((ped_node)->hnode.node_h) +#define NBL_TC_PEDIT_GET_NORMAL_IN_H(ped_node) ((ped_node)->hnode.normal_in_h) +#define NBL_TC_PEDIT_GET_NODE_IDX(ped_node) ((ped_node)->hnode.node_idx) +#define NBL_TC_PEDIT_GET_NODE_VAL(ped_node) ((ped_node)->hnode.node_val) + +#define NBL_TC_PEDIT_INC_NODE_REF(ped_node) ((ped_node)->hnode.ref_cnt++) +#define NBL_TC_PEDIT_DEC_NODE_REF(ped_node) ((ped_node)->hnode.ref_cnt--) + +#define NBL_TC_PEDIT_SET_NODE_IDX(ped_node, idx) ((ped_node)->hnode.node_idx = idx) +#define NBL_TC_PEDIT_SET_NODE_BASE_ID(ped_node, idx) ((ped_node)->hnode.node_base = idx) +#define NBL_TC_PEDIT_SET_NODE_VAL(ped_node) ((ped_node)->hnode.node_val = 1) +#define NBL_TC_PEDIT_SET_NODE_INVAL(ped_node) ((ped_node)->hnode.node_val = 0) +#define NBL_TC_PEDIT_SET_NORMAL_IN_H(ped_node) ((ped_node)->hnode.normal_in_h = 1) +#define NBL_TC_PEDIT_SET_NODE_H(ped_node) ((ped_node)->hnode.node_h = 1) +#define NBL_TC_PEDIT_SET_NODE_ENTRY(ped_node, e) ((ped_node)->hnode.entry = e) + +#define NBL_TC_PEDIT_COPY_NODE(src_node, dst_node) ((dst_node)->hnode = (src_node)->hnode) + +u16 nbl_tc_pedit_get_hw_id(struct nbl_tc_pedit_entry *ped_node); +int nbl_tc_pedit_init(struct nbl_tc_pedit_mgt *pedit_mgt); +int nbl_tc_pedit_uninit(struct nbl_tc_pedit_mgt *pedit_mgt); +int nbl_tc_pedit_del_node(struct nbl_tc_pedit_mgt *pedit_mgt, + struct nbl_tc_pedit_node_res *ped_node); +int nbl_tc_pedit_add_node(struct nbl_tc_pedit_mgt *pedit_mgt, + struct nbl_tc_pedit_entry *e, + void **e_out, enum nbl_flow_ped_type pedit_type); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c index 515ceb631b10..b41c56c8747e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c @@ -12,6 +12,8 @@ #include #include +#include + 
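+/* static key guarding xdp_ring->xmit_lock in the XDP transmit paths below; presumably enabled when XDP TX rings may be shared between CPUs */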
DEFINE_STATIC_KEY_FALSE(nbl_xdp_locking_key); static bool nbl_txrx_within_vsi(struct nbl_txrx_vsi_info *vsi_info, u16 ring_index) @@ -20,6 +22,11 @@ static bool nbl_txrx_within_vsi(struct nbl_txrx_vsi_info *vsi_info, u16 ring_ind ring_index < vsi_info->ring_offset + vsi_info->ring_num; } +static struct netdev_queue *txring_txq(const struct nbl_res_tx_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + static struct nbl_res_tx_ring * nbl_alloc_tx_ring(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, u16 ring_index, u16 desc_num) @@ -147,8 +154,10 @@ static int nbl_alloc_rx_rings(struct nbl_resource_mgt *res_mgt, struct net_devic ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2); ring->netdev = netdev; ring->desc_num = desc_num; - /* TODO: maybe TX buffer length should be determined by other factors */ - ring->buf_len = NBL_RX_BUFSZ - NBL_RX_PAD; + /* RX buffer length is determined by the MTU; + * when the netdev is brought up, buf_len is set according to its MTU + */ + ring->buf_len = PAGE_SIZE / 2 - NBL_RX_PAD; ring->used_wrap_counter = 1; ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); @@ -320,12 +329,8 @@ static dma_addr_t nbl_res_txrx_start_tx_ring(void *priv, u8 ring_index) tx_ring->size = ALIGN(tx_ring->desc_num * sizeof(struct nbl_ring_desc), PAGE_SIZE); tx_ring->desc = dmam_alloc_coherent(dma_dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL | __GFP_ZERO); - if (!tx_ring->desc) { - nbl_err(res_mgt->common, NBL_DEBUG_RESOURCE, - "Allocate %u bytes descriptor DMA memory for TX queue %u failed\n", - tx_ring->size, tx_ring->queue_index); + if (!tx_ring->desc) goto alloc_dma_err; - } tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -361,7 +366,8 @@ static inline bool nbl_rx_cache_get(struct nbl_res_rx_ring *rx_ring, struct nbl_ cache->head = (cache->head + 1) & (NBL_MAX_CACHE_SIZE - 1); stats->rx_cache_reuse++; - dma_sync_single_for_device(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_device(rx_ring->dma_dev, dma_info->addr, + dma_info->size, DMA_FROM_DEVICE); return true; } @@ -375,7 +381,7 @@ static inline int nbl_page_alloc_pool(struct nbl_res_rx_ring *rx_ring, if (unlikely(!dma_info->page)) return -ENOMEM; - dma_info->addr = dma_map_page_attrs(rx_ring->dma_dev, dma_info->page, 0, PAGE_SIZE, + dma_info->addr = dma_map_page_attrs(rx_ring->dma_dev, dma_info->page, 0, dma_info->size, DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); if (unlikely(dma_mapping_error(rx_ring->dma_dev, dma_info->addr))) { @@ -392,7 +398,7 @@ static inline int nbl_get_rx_frag(struct nbl_res_rx_ring *rx_ring, struct nbl_rx int err = 0; /* first buffer alloc page */ - if (buffer->offset == NBL_RX_PAD) + if (buffer->offset == buffer->rx_pad) err = nbl_page_alloc_pool(rx_ring, buffer->di); return err; @@ -432,7 +438,7 @@ static inline bool nbl_alloc_rx_bufs(struct nbl_res_rx_ring *rx_ring, u16 count) if (nbl_get_rx_frag(rx_ring, rx_buf)) break; - for (i = 0; i < NBL_RX_PAGE_PER_FRAGS; i++, rx_desc++, rx_buf++) { + for (i = 0; i < rx_ring->frags_num_per_page; i++, rx_desc++, rx_buf++) { rx_desc->addr = cpu_to_le64(rx_buf->di->addr + rx_buf->offset); rx_desc->len = cpu_to_le32(buf_len); rx_desc->id = cpu_to_le16(next_to_use); @@ -445,9 +451,9 @@ static inline bool nbl_alloc_rx_bufs(struct nbl_res_rx_ring *rx_ring, u16 count) NBL_PACKED_DESC_F_WRITE); } - next_to_use += NBL_RX_PAGE_PER_FRAGS; - rx_ring->tail_ptr += NBL_RX_PAGE_PER_FRAGS; - count -= NBL_RX_PAGE_PER_FRAGS; + next_to_use += rx_ring->frags_num_per_page; + rx_ring->tail_ptr += 
rx_ring->frags_num_per_page; + count -= rx_ring->frags_num_per_page; if (next_to_use == rx_ring->desc_num) { next_to_use = 0; rx_desc = NBL_RX_DESC(rx_ring, next_to_use); @@ -506,6 +512,8 @@ static void nbl_unmap_and_free_tx_resource(struct nbl_res_tx_ring *ring, tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; tx_buffer->page = 0; + tx_buffer->bytecount = 0; + tx_buffer->gso_segs = 0; dma_unmap_len_set(tx_buffer, len, 0); } @@ -551,7 +559,7 @@ static void nbl_res_txrx_stop_tx_ring(void *priv, u8 ring_index) /* Flush the napi task to ensure any scheduled napi has finished, so napi will not access the * ring memory (a dangling pointer), because vector->started has been set to false. */ - napi_synchronize(&vector->napi); + napi_synchronize(&vector->nbl_napi.napi); } tx_ring->valid = false; @@ -567,10 +575,18 @@ static void nbl_res_txrx_stop_tx_ring(void *priv, u8 ring_index) tx_ring->dma = (dma_addr_t)NULL; tx_ring->size = 0; + if (nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_DATA], tx_ring->queue_index)) + netdev_tx_reset_queue(txring_txq(tx_ring)); + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Stop tx ring %d", ring_index); } -static inline bool nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) +static inline bool nbl_dev_page_is_reusable(struct page *page, u8 nid) +{ + return likely(page_to_nid(page) == nid && !page_is_pfmemalloc(page)); +} + +static inline int nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) { struct nbl_page_cache *cache = &rx_ring->page_cache; u32 tail_next = (cache->tail + 1) & (NBL_MAX_CACHE_SIZE - 1); @@ -578,34 +594,41 @@ static inline bool nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_ if (tail_next == cache->head) { stats->rx_cache_full++; - return false; + return 0; /* cache full: recycle to the page pool */ } - if (!dev_page_is_reusable(dma_info->page)) { + if (!nbl_dev_page_is_reusable(dma_info->page, rx_ring->nid)) { stats->rx_cache_waive++; - return false; + return 1; /* page not reusable: free it */ } cache->page_cache[cache->tail] = *dma_info; cache->tail = tail_next; - return true; + return 2; /* cached */ } static inline void nbl_page_release_dynamic(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info, bool recycle) { + u32 ret; + if (likely(recycle)) { - if (nbl_rx_cache_put(rx_ring, dma_info)) + ret = nbl_rx_cache_put(rx_ring, dma_info); + if (ret == 2) return; - dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, + if (ret == 1) + goto free_page; + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, dma_info->size, DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); page_pool_recycle_direct(rx_ring->page_pool, dma_info->page); - } else { - dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, - DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); - page_pool_put_page(rx_ring->page_pool, dma_info->page, PAGE_SIZE, true); + + return; } +free_page: + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, dma_info->size, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); + page_pool_put_page(rx_ring->page_pool, dma_info->page, dma_info->size, true); } static inline void nbl_put_rx_frag(struct nbl_res_rx_ring *rx_ring, @@ -660,7 +683,11 @@ static dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); struct nbl_res_vector *vector = NULL; struct page_pool_params pp_params = {0}; + int pkt_len_shift = 0; + int pkt_len = 0, order = 0; + int dma_size = 0, buf_size = 0; int i, j; + u16 rx_pad, tailroom; if (rx_ring->rx_bufs) { nbl_err(common, NBL_DEBUG_RESOURCE, @@ -671,13 +698,47 @@ static 
dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use if (!nbl_txrx_within_vsi(&txrx_mgt->vsi_info[NBL_VSI_XDP], ring_index)) vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); - pp_params.order = 0; + rx_pad = NBL_RX_PAD; + tailroom = 0; + if (rx_ring->xdp_prog) { + rx_pad = XDP_PACKET_HEADROOM; + tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } + if (!!adaptive_rxbuf_len_disable && !rx_ring->xdp_prog) { + buf_size = NBL_RX_BUFSZ; + pkt_len_shift = PAGE_SHIFT - 1; + } else { + pkt_len = rx_pad + ETH_HLEN + (VLAN_HLEN * 2) + rx_ring->netdev->mtu + + tailroom + NBL_BUFFER_HDR_LEN; + pkt_len_shift = ilog2((pkt_len) - 1) + 1; + pkt_len_shift = max(pkt_len_shift, NBL_RXBUF_MIN_ORDER); + buf_size = 1UL << pkt_len_shift; + } + + if (pkt_len_shift >= PAGE_SHIFT) { + order = pkt_len_shift - PAGE_SHIFT; + rx_ring->frags_num_per_page = 1; + } else { + order = 0; + rx_ring->frags_num_per_page = PAGE_SIZE / buf_size; + WARN_ON(rx_ring->frags_num_per_page > NBL_MAX_BATCH_DESC); + } + dma_size = PAGE_SIZE << order; + + rx_ring->buf_len = buf_size - rx_pad - tailroom; + + pp_params.order = order; pp_params.flags = 0; pp_params.pool_size = rx_ring->desc_num; pp_params.nid = dev_to_node(dev); pp_params.dev = dev; pp_params.dma_dir = DMA_FROM_DEVICE; + if (dev_to_node(dev) == NUMA_NO_NODE) + rx_ring->nid = 0; + else + rx_ring->nid = dev_to_node(dev); + rx_ring->page_pool = page_pool_create(&pp_params); if (IS_ERR(rx_ring->page_pool)) { nbl_err(common, NBL_DEBUG_RESOURCE, "Page_pool allocate %u failed\n", @@ -685,7 +746,7 @@ static dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use return (dma_addr_t)NULL; } - rx_ring->di = kvzalloc_node(array_size(rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS, + rx_ring->di = kvzalloc_node(array_size(rx_ring->desc_num / rx_ring->frags_num_per_page, sizeof(struct nbl_dma_info)), GFP_KERNEL, dev_to_node(dev)); if (!rx_ring->di) { @@ -715,15 +776,18 @@ static dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use rx_ring->tail_ptr = 0; j = 0; - for (i = 0; i < rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS; i++) { + for (i = 0; i < rx_ring->desc_num / rx_ring->frags_num_per_page; i++) { struct nbl_dma_info *di = &rx_ring->di[i]; - struct nbl_rx_buffer *buffer; + struct nbl_rx_buffer *buffer = &rx_ring->rx_bufs[j]; int f; - for (f = 0; f < NBL_RX_PAGE_PER_FRAGS; f++, j++) { + di->size = dma_size; + for (f = 0; f < rx_ring->frags_num_per_page; f++, j++) { buffer = &rx_ring->rx_bufs[j]; buffer->di = di; - buffer->offset = NBL_RX_PAD + f * NBL_RX_BUFSZ; + buffer->size = buf_size; + buffer->offset = rx_pad + f * buf_size; + buffer->rx_pad = rx_pad; buffer->last_in_page = false; } @@ -987,7 +1051,7 @@ static inline void nbl_add_rx_frag(struct nbl_rx_buffer *rx_buffer, { page_ref_inc(rx_buffer->di->page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->di->page, - rx_buffer->offset, size, NBL_RX_BUFSZ); + rx_buffer->offset, size, rx_buffer->size); } #ifdef CONFIG_TLS_DEVICE @@ -1133,31 +1197,27 @@ static void nbl_res_txrx_cfg_txrx_vlan(void *priv, u16 vlan_tci, u16 vlan_proto, * The current version supports merging multiple descriptors for one packet. 
*/ static struct sk_buff *nbl_construct_skb(struct nbl_res_rx_ring *rx_ring, struct napi_struct *napi, - struct nbl_rx_buffer *rx_buf, unsigned int size) + struct nbl_rx_buffer *rx_buf, struct xdp_buff *xdp) { struct sk_buff *skb; - char *p, *buf; int tailroom, shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - unsigned int truesize = NBL_RX_BUFSZ; + unsigned int truesize = rx_buf->size; unsigned int headlen; + unsigned int size = xdp->data_end - xdp->data; + u8 metasize = xdp->data - xdp->data_meta; - /* p point dma buff start, buf point whole buffer start*/ - p = page_address(rx_buf->di->page) + rx_buf->offset; - buf = p - NBL_RX_PAD; - - /* p point pkt start */ - p += NBL_BUFFER_HDR_LEN; - tailroom = truesize - size - NBL_RX_PAD; - size -= NBL_BUFFER_HDR_LEN; + tailroom = truesize - size - rx_buf->rx_pad - NBL_BUFFER_HDR_LEN; if (size > NBL_RX_HDR_SIZE && tailroom >= shinfo_size) { - skb = build_skb(buf, truesize); + skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; page_ref_inc(rx_buf->di->page); - skb_reserve(skb, p - buf); - skb_put(skb, size); + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); goto ok; } @@ -1167,8 +1227,9 @@ static struct sk_buff *nbl_construct_skb(struct nbl_res_rx_ring *rx_ring, struct headlen = size; if (headlen > NBL_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, p, NBL_RX_HDR_SIZE); - memcpy(__skb_put(skb, headlen), p, ALIGN(headlen, sizeof(long))); + headlen = eth_get_headlen(skb->dev, xdp->data, NBL_RX_HDR_SIZE); + + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); size -= headlen; if (size) { page_ref_inc(rx_buf->di->page); @@ -1236,21 +1297,289 @@ static inline int nbl_maybe_stop_tx(struct nbl_res_tx_ring *tx_ring, unsigned in return 0; } +static int nbl_res_txrx_xmit_xdp_ring(struct nbl_res_tx_ring *xdp_ring, struct xdp_frame *xdpf) +{ + u16 index = xdp_ring->next_to_use; + u16 avail_used_flags = xdp_ring->avail_used_flags; + unsigned int size; + dma_addr_t dma; + union nbl_tx_extend_head *hdr; + struct device *dma_dev = NBL_RING_TO_DMA_DEV(xdp_ring); + struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(xdp_ring, index); + struct nbl_ring_desc *tx_desc = NBL_TX_DESC(xdp_ring, index); + const struct ethhdr *eth; + + if (xdpf->headroom < sizeof(union nbl_tx_extend_head)) + return -EOVERFLOW; + + if (unlikely(nbl_maybe_stop_tx(xdp_ring, 1))) { + xdp_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + size = xdpf->len; + eth = (struct ethhdr *)xdpf->data; + xdpf->headroom -= sizeof(union nbl_tx_extend_head); + xdpf->data -= sizeof(union nbl_tx_extend_head); + hdr = xdpf->data; + memset(hdr, 0, sizeof(union nbl_tx_extend_head)); + hdr->fwd = NBL_TX_FWD_TYPE_NORMAL; + xdpf->len += sizeof(union nbl_tx_extend_head); + dma = dma_map_single(dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma)) { + xdp_ring->tx_stats.tx_dma_busy++; + return NETDEV_TX_BUSY; + } + + dma_unmap_addr_set(tx_buffer, dma, dma); + dma_unmap_len_set(tx_buffer, len, xdpf->len); + tx_buffer->raw_buff = xdpf->data; + tx_buffer->gso_segs = 1; + tx_buffer->bytecount = size; + tx_desc->addr = cpu_to_le64(dma); + tx_desc->len = xdpf->len; + tx_desc->id = 0; + index++; + if (index == xdp_ring->desc_num) { + index = 0; + xdp_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } + + /* todo:xdp add multicast case */ + xdp_ring->tx_stats.tx_unicast_packets++; + 
tx_buffer->next_to_watch = tx_desc; + + /* write barrier: make descriptor contents visible before publishing the flags */ + wmb(); + + xdp_ring->next_to_use = index; + tx_desc->flags = cpu_to_le16(avail_used_flags); + + return NETDEV_TX_OK; +} + +static int nbl_res_txrx_xmit_xdp_buff(struct nbl_res_rx_ring *rx_ring, struct xdp_buff *xdp_buff) +{ + int ret; + struct nbl_res_tx_ring *xdp_ring; + struct xdp_frame *xdpf; + struct nbl_txrx_mgt *txrx_mgt = rx_ring->txrx_mgt; + + xdpf = xdp_convert_buff_to_frame(xdp_buff); + if (unlikely(!xdpf)) + goto buff_to_frame_failed; + + xdp_ring = nbl_res_txrx_select_xdp_ring(txrx_mgt); + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_lock(&xdp_ring->xmit_lock); + + ret = nbl_res_txrx_xmit_xdp_ring(xdp_ring, xdpf); + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_unlock(&xdp_ring->xmit_lock); + + return ret; +buff_to_frame_failed: + return -1; +} + +static int +nbl_res_txrx_run_xdp(struct nbl_res_rx_ring *rx_ring, struct nbl_rx_buffer *rx_buf, + struct nbl_xdp_output *xdp_output, struct xdp_buff *xdp_buff) +{ + struct nbl_rx_extend_head *hdr; + struct nbl_ring_desc *rx_desc; + const struct ethhdr *eth; + int i; + int err; + enum xdp_action act; + int nbl_act; + u16 num_buffers = 0; + + hdr = xdp_buff->data - NBL_BUFFER_HDR_LEN; + net_prefetch(hdr); + num_buffers = le16_to_cpu(hdr->num_buffers); + + /* receive XDP only supports one descriptor per packet */ + if (num_buffers > 1) + goto drop_big_packet; + + xdp_output->bytes = xdp_buff->data_end - xdp_buff->data; + eth = (struct ethhdr *)(hdr + 1); + if (unlikely(is_multicast_ether_addr(eth->h_dest))) + xdp_output->flags |= NBL_XDP_FLAG_MULTICAST; + + xdp_output->desc_done_num++; + xdp_init_buff(xdp_buff, rx_buf->size, &rx_ring->xdp_rxq); + act = bpf_prog_run_xdp(rx_ring->xdp_prog, xdp_buff); + switch (act) { + case XDP_PASS: + nbl_act = 0; + break; + case XDP_TX: + nbl_act = 1; + page_ref_inc(rx_buf->di->page); + err = nbl_res_txrx_xmit_xdp_buff(rx_ring, xdp_buff); + if (unlikely(err)) { + page_ref_dec(rx_buf->di->page); + goto xdp_aborted; + } + + xdp_output->flags |= NBL_XDP_FLAG_TX; + break; + case XDP_REDIRECT: + nbl_act = 1; + page_ref_inc(rx_buf->di->page); + err = xdp_do_redirect(rx_ring->netdev, xdp_buff, rx_ring->xdp_prog); + if (unlikely(err)) { + page_ref_dec(rx_buf->di->page); + goto xdp_aborted; + } + + xdp_output->flags |= NBL_XDP_FLAG_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, rx_ring->xdp_prog, act); + fallthrough; + case XDP_ABORTED: +xdp_aborted: + trace_xdp_exception(rx_ring->netdev, rx_ring->xdp_prog, act); + fallthrough; + case XDP_DROP: + xdp_output->flags |= NBL_XDP_FLAG_DROP; + nbl_act = 1; + break; + } + + if (nbl_act) + nbl_put_rx_buf(rx_ring, rx_buf); + + return nbl_act; + +drop_big_packet: + nbl_put_rx_buf(rx_ring, rx_buf); + xdp_output->desc_done_num++; + xdp_output->flags |= NBL_XDP_FLAG_OVERSIZE; + for (i = 1; i < num_buffers; i++) { + rx_desc = NBL_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!nbl_ring_desc_used(rx_desc, rx_ring->used_wrap_counter)) + break; + + dma_rmb(); + xdp_output->bytes += le32_to_cpu(rx_desc->len); + xdp_output->desc_done_num++; + rx_buf = nbl_get_rx_buf(rx_ring); + nbl_put_rx_buf(rx_ring, rx_buf); + } + + return 1; +} + static int -nbl_res_txrx_run_xdp(struct nbl_res_rx_ring *rx_ring, struct nbl_ring_desc *rx_desc, - struct nbl_rx_buffer *rx_buf, struct nbl_xdp_output *xdp_output) +nbl_res_txrx_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **frame, u32 flags) +{ + int ret; + int i; + int nxmit = 0; + struct nbl_res_tx_ring *xdp_ring; + struct 
nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + xdp_ring = nbl_res_txrx_select_xdp_ring(txrx_mgt); + if (unlikely(!xdp_ring)) + return -ENXIO; + + if (unlikely(!xdp_ring->valid)) + return -ENETDOWN; + + if (unlikely(!nbl_res_txrx_is_xdp_ring(xdp_ring))) + return -ENXIO; + + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_lock(&xdp_ring->xmit_lock); + + for (i = 0; i < n; i++) { + ret = nbl_res_txrx_xmit_xdp_ring(xdp_ring, frame[i]); + if (ret) + break; + + nxmit++; + } + + if (unlikely(flags & XDP_XMIT_FLUSH && nxmit)) + writel(xdp_ring->notify_qid, xdp_ring->notify_addr); + + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_unlock(&xdp_ring->xmit_lock); + + return nxmit; +} + +static void +nbl_res_txrx_update_xdp_tail_locked(struct nbl_res_rx_ring *rx_ring) { - return NBL_XDP_PASS; + struct nbl_res_tx_ring *xdp_ring; + struct nbl_txrx_mgt *txrx_mgt = rx_ring->txrx_mgt; + + xdp_ring = nbl_res_txrx_select_xdp_ring(txrx_mgt); + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_lock(&xdp_ring->xmit_lock); + + writel(xdp_ring->notify_qid, xdp_ring->notify_addr); + + if (static_branch_unlikely(&nbl_xdp_locking_key)) + spin_unlock(&xdp_ring->xmit_lock); } static int nbl_res_txrx_register_xdp_rxq(void *priv, u8 ring_index) { + int err; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + + err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->queue_index, + vector->nbl_napi.napi.napi_id); + if (err < 0) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Register xdp rxq err\n"); + return -1; + } + + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Register xdp rxq mem model err\n"); + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + return -1; + } + return 0; } static void nbl_res_txrx_unregister_xdp_rxq(void *priv, u8 ring_index) { - /* nothing need to do */ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); +} + +static inline void nbl_res_txrx_build_xdp_buff(struct nbl_rx_buffer *rx_buf, + struct nbl_ring_desc *rx_desc, + struct xdp_buff *xdp) +{ + char *p, *buf; + u32 size; + + p = page_address(rx_buf->di->page) + rx_buf->offset; + buf = p - rx_buf->rx_pad; + size = rx_desc->len - NBL_BUFFER_HDR_LEN; + xdp_prepare_buff(xdp, buf, rx_buf->rx_pad + NBL_BUFFER_HDR_LEN, size, true); } static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, @@ -1258,6 +1587,7 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, int budget) { struct nbl_xdp_output xdp_output; + struct xdp_buff xdp; struct nbl_ring_desc *rx_desc; struct nbl_rx_buffer *rx_buf; struct nbl_rx_extend_head *hdr; @@ -1267,11 +1597,11 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, unsigned int xdp_tx_pkts = 0; unsigned int xdp_redirect_pkts = 0; unsigned int xdp_oversize = 0; + unsigned int xdp_drop = 0; unsigned int size; int nbl_act; u32 rx_multicast_packets = 0; u32 rx_unicast_packets = 0; - int xdp_act_final = 0; u16 
desc_count = 0; u16 num_buffers = 0; u16 cleaned_count = nbl_unused_rx_desc_count(rx_ring); @@ -1290,30 +1620,25 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, size = le32_to_cpu(rx_desc->len); rx_buf = nbl_get_rx_buf(rx_ring); + nbl_res_txrx_build_xdp_buff(rx_buf, rx_desc, &xdp); + if (READ_ONCE(rx_ring->xdp_prog)) { memset(&xdp_output, 0, sizeof(xdp_output)); - nbl_act = nbl_res_txrx_run_xdp(rx_ring, rx_desc, rx_buf, &xdp_output); + nbl_act = nbl_res_txrx_run_xdp(rx_ring, rx_buf, &xdp_output, &xdp); if (nbl_act) { cleaned_count += xdp_output.desc_done_num; - if (unlikely(xdp_output.multicast)) + if (unlikely(xdp_output.flags & NBL_XDP_FLAG_MULTICAST)) rx_multicast_packets++; else rx_unicast_packets++; - if (xdp_output.xdp_tx_act) { - xdp_tx_pkts++; - xdp_act_final |= NBL_XDP_TX; - } else if (xdp_output.xdp_redirect_act) { - xdp_redirect_pkts++; - xdp_act_final |= NBL_XDP_REDIRECT; - } - - if (xdp_output.xdp_oversize) - xdp_oversize++; + xdp_tx_pkts += !!(xdp_output.flags & NBL_XDP_FLAG_TX); + xdp_redirect_pkts += !!(xdp_output.flags & NBL_XDP_FLAG_REDIRECT); + xdp_drop += !!(xdp_output.flags & NBL_XDP_FLAG_DROP); + xdp_oversize += !!(xdp_output.flags & NBL_XDP_FLAG_OVERSIZE); total_rx_pkts++; total_rx_bytes += xdp_output.bytes; - continue; } } @@ -1325,7 +1650,7 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, } else { hdr = page_address(rx_buf->di->page) + rx_buf->offset; net_prefetch(hdr); - skb = nbl_construct_skb(rx_ring, napi, rx_buf, size); + skb = nbl_construct_skb(rx_ring, napi, rx_buf, &xdp); if (unlikely(!skb)) { rx_ring->rx_stats.rx_alloc_buf_err_cnt++; break; @@ -1358,6 +1683,7 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, continue; } + total_rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, rx_ring->netdev); if (unlikely(skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST)) @@ -1365,7 +1691,6 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, else rx_unicast_packets++; - total_rx_bytes += skb->len; if (sport_type) nbl_rep_update_rx_stats(rx_ring->netdev, skb, sport_id); @@ -1376,6 +1701,11 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, total_rx_pkts++; } + if (xdp_redirect_pkts) + xdp_do_flush(); + + if (xdp_tx_pkts) + nbl_res_txrx_update_xdp_tail_locked(rx_ring); if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))) failure = nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))); @@ -1384,6 +1714,10 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, rx_ring->stats.bytes += total_rx_bytes; rx_ring->rx_stats.rx_multicast_packets += rx_multicast_packets; rx_ring->rx_stats.rx_unicast_packets += rx_unicast_packets; + rx_ring->rx_stats.xdp_tx_packets += xdp_tx_pkts; + rx_ring->rx_stats.xdp_redirect_packets += xdp_redirect_pkts; + rx_ring->rx_stats.xdp_oversize_packets += xdp_oversize; + rx_ring->rx_stats.xdp_drop_packets += xdp_drop; u64_stats_update_end(&rx_ring->syncp); return failure ? 
budget : total_rx_pkts; @@ -1391,7 +1725,8 @@ static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, static int nbl_res_napi_poll(struct napi_struct *napi, int budget) { - struct nbl_res_vector *vector = container_of(napi, struct nbl_res_vector, napi); + struct nbl_napi_struct *nbl_napi = container_of(napi, struct nbl_napi_struct, napi); + struct nbl_res_vector *vector = container_of(nbl_napi, struct nbl_res_vector, nbl_napi); struct nbl_res_tx_ring *tx_ring; struct nbl_res_tx_ring *xdp_ring; struct nbl_res_rx_ring *rx_ring; @@ -1743,17 +2078,6 @@ static inline void nbl_tx_fill_tx_extend_header_leonis(union nbl_tx_extend_head pkthdr->l4_csum_en = param->l4_csum_en; } -static inline void nbl_tx_fill_tx_extend_header_virtio(union nbl_tx_extend_head *pkthdr, - struct nbl_tx_hdr_param *param) -{ - pkthdr->bootis.tso = 0; - pkthdr->bootis.dport_info = 0; - pkthdr->bootis.dport_id = 0; - pkthdr->bootis.dport = 0; - /* 0x0: drop, 0x1: normal fwd, 0x2: rsv, 0x3: cpu set dport */ - pkthdr->bootis.fwd = NBL_TX_FWD_TYPE_NORMAL; -} - #ifdef CONFIG_TLS_DEVICE static bool nbl_ktls_send_init_packet(struct nbl_resource_mgt *res_mgt, struct nbl_res_tx_ring *tx_ring, @@ -2086,7 +2410,6 @@ static bool nbl_ktls_send_resync_mul(struct nbl_resource_mgt *res_mgt, tx_buffer->dma = firstdma; tx_buffer->len = total_len; } - /* wmb for head desc */ wmb(); @@ -2300,6 +2623,7 @@ static int nbl_tx_map(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, u16 avail_used_flags = tx_ring->avail_used_flags; u32 pkthdr_len; bool can_push; + bool doorbell = true; first_desc = NBL_TX_DESC(tx_ring, desc_index); first = NBL_TX_BUF(tx_ring, desc_index); @@ -2333,12 +2657,6 @@ static int nbl_tx_map(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, case NBL_LEONIS_TYPE: nbl_tx_fill_tx_extend_header_leonis(pkthdr, hdr_param); break; - case NBL_BOOTIS_TYPE: - nbl_tx_fill_tx_extend_header_bootis(pkthdr, hdr_param); - break; - case NBL_VIRTIO_TYPE: - nbl_tx_fill_tx_extend_header_virtio(pkthdr, hdr_param); - break; default: netdev_err(tx_ring->netdev, "fill tx extend header failed, product type: %d, eth: %u.\n", tx_ring->product_type, hdr_param->dport_id); @@ -2389,24 +2707,24 @@ static int nbl_tx_map(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, tx_desc = NBL_TX_DESC(tx_ring, (desc_index == 0 ? 
tx_ring->desc_num : desc_index) - 1); tx_desc->flags &= cpu_to_le16(~NBL_PACKED_DESC_F_NEXT); - first->next_to_watch = tx_desc; first_desc->len += (hdr_param->total_hlen << NBL_TX_TOTAL_HEADERLEN_SHIFT); first_desc->id = cpu_to_le16(skb_shinfo(skb)->gso_size); + tx_ring->next_to_use = desc_index; + nbl_maybe_stop_tx(tx_ring, DESC_NEEDED); /* write barrier before exposing the first descriptor's flags */ wmb(); + first->next_to_watch = tx_desc; /* set the first descriptor's flags last */ if (first_desc == tx_desc) first_desc->flags = cpu_to_le16(avail_used_flags); else first_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT); - tx_ring->next_to_use = desc_index; - - nbl_maybe_stop_tx(tx_ring, DESC_NEEDED); /* kick the doorbell directly (passthrough) for performance */ - writel(tx_ring->notify_qid, tx_ring->notify_addr); + if (doorbell) + writel(tx_ring->notify_qid, tx_ring->notify_addr); // nbl_trace(tx_map_ok, tx_ring, skb, head, first_desc, pkthdr); @@ -2454,8 +2772,7 @@ static netdev_tx_t nbl_res_txrx_rep_xmit(struct sk_buff *skb, WARN_ON(count > MAX_DESC_NUM_PER_PKT); if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { if (net_ratelimit()) - dev_dbg(NBL_RING_TO_DEV(tx_ring), "There is no enough " - "descriptor to transmit packet in queue %u\n", + dev_dbg(NBL_RING_TO_DEV(tx_ring), "no desc to tx pkt in queue %u\n", tx_ring->queue_index); tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; @@ -2492,8 +2809,7 @@ static netdev_tx_t nbl_res_txrx_self_test_start_xmit(struct sk_buff *skb, struct WARN_ON(count > MAX_DESC_NUM_PER_PKT); if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { if (net_ratelimit()) - dev_dbg(NBL_RING_TO_DEV(tx_ring), "There is no enough " - "descriptor to transmit packet in queue %u\n", + dev_dbg(NBL_RING_TO_DEV(tx_ring), "no desc to tx pkt in queue %u\n", tx_ring->queue_index); tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; @@ -2523,6 +2839,8 @@ static netdev_tx_t nbl_res_txrx_start_xmit(struct sk_buff *skb, .l4_len = 20 >> 2, .mss = 256, }; + u16 vlan_tci; + u16 vlan_proto; struct sk_buff *skb2 = NULL; unsigned int count; int ret = 0; @@ -2534,16 +2852,24 @@ static netdev_tx_t nbl_res_txrx_start_xmit(struct sk_buff *skb, WARN_ON(count > MAX_DESC_NUM_PER_PKT); if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { if (net_ratelimit()) - dev_dbg(NBL_RING_TO_DEV(tx_ring), "There is no enough " - "descriptor to transmit packet in queue %u\n", + dev_dbg(NBL_RING_TO_DEV(tx_ring), "no desc to tx pkt in queue %u\n", tx_ring->queue_index); tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } - if (tx_ring->vlan_proto) { - skb = vlan_insert_tag_set_proto(skb, htons(tx_ring->vlan_proto), - tx_ring->vlan_tci); + if (tx_ring->vlan_proto || skb_vlan_tag_present(skb)) { + if (tx_ring->vlan_proto) { + vlan_proto = htons(tx_ring->vlan_proto); + vlan_tci = tx_ring->vlan_tci; + } + + if (skb_vlan_tag_present(skb)) { + vlan_proto = skb->vlan_proto; + vlan_tci = skb_vlan_tag_get(skb); + } + + skb = vlan_insert_tag_set_proto(skb, vlan_proto, vlan_tci); if (!skb) return NETDEV_TX_OK; } @@ -2617,7 +2943,7 @@ static int nbl_res_txring_is_invalid(struct nbl_resource_mgt *res_mgt, { struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); struct nbl_res_tx_ring *tx_ring; - u8 ring_num = txrx_mgt->tx_ring_num; + u16 ring_num = txrx_mgt->tx_ring_num; if (index >= ring_num) { seq_printf(m, "Invalid tx index %d, max ring num is %d\n", index, ring_num); @@ -2638,7 +2964,7 @@ static int nbl_res_rxring_is_invalid(struct nbl_resource_mgt *res_mgt, { struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); struct nbl_res_rx_ring *rx_ring; - u8 
ring_num = txrx_mgt->rx_ring_num; + u16 ring_num = txrx_mgt->rx_ring_num; if (index >= ring_num) { seq_printf(m, "Invalid rx index %d, max ring num is %d\n", index, ring_num); @@ -2776,7 +3102,7 @@ static int nbl_res_txrx_dump_ring_stats(void *priv, struct seq_file *m, bool is_ return nbl_res_rx_dump_ring_stats(res_mgt, m, index); } -static struct napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) +static struct nbl_napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); @@ -2787,7 +3113,7 @@ static struct napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) return NULL; } - return &txrx_mgt->vectors[index]->napi; + return &txrx_mgt->vectors[index]->nbl_napi; } static void nbl_res_txrx_set_vector_info(void *priv, u8 *irq_enable_base, @@ -2813,6 +3139,7 @@ static void nbl_res_get_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) pt_ops->rep_xmit = nbl_res_txrx_rep_xmit; pt_ops->self_test_xmit = nbl_res_txrx_self_test_start_xmit; pt_ops->napi_poll = nbl_res_napi_poll; + pt_ops->xdp_xmit = nbl_res_txrx_xdp_xmit; } static u32 nbl_res_txrx_get_tx_headroom(void *priv) @@ -2846,10 +3173,23 @@ static void nbl_res_txrx_get_queue_stats(void *priv, u8 queue_id, } while (u64_stats_fetch_retry(syncp, start)); } +static bool nbl_res_is_ctrlq(struct nbl_txrx_mgt *txrx_mgt, u16 qid) +{ + u16 ring_num = txrx_mgt->vsi_info[NBL_VSI_CTRL].ring_num; + u16 ring_offset = txrx_mgt->vsi_info[NBL_VSI_CTRL].ring_offset; + + if (qid >= ring_offset && qid < ring_offset + ring_num) + return true; + + return false; +} + static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) { struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring; + struct nbl_res_tx_ring *tx_ring; int i; u64 bytes = 0, packets = 0; u64 tso_packets = 0, tso_bytes = 0; @@ -2875,32 +3215,42 @@ static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) u64 rx_cache_busy = 0; u64 rx_cache_waive = 0; u64 tx_skb_free = 0; + u64 xdp_tx_packets = 0; + u64 xdp_redirect_packets = 0; + u64 xdp_oversize_packets = 0; + u64 xdp_drop_packets = 0; unsigned int start; rcu_read_lock(); for (i = 0; i < txrx_mgt->rx_ring_num; i++) { - struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); + if (nbl_res_is_ctrlq(txrx_mgt, i)) + continue; + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); do { - start = u64_stats_fetch_begin(&ring->syncp); - bytes += ring->stats.bytes; - packets += ring->stats.packets; - rx_csum_packets += ring->rx_stats.rx_csum_packets; - rx_csum_errors += ring->rx_stats.rx_csum_errors; - rx_multicast_packets += ring->rx_stats.rx_multicast_packets; - rx_unicast_packets += ring->rx_stats.rx_unicast_packets; - rx_desc_addr_err_cnt += ring->rx_stats.rx_desc_addr_err_cnt; - rx_alloc_buf_err_cnt += ring->rx_stats.rx_alloc_buf_err_cnt; - rx_cache_reuse += ring->rx_stats.rx_cache_reuse; - rx_cache_full += ring->rx_stats.rx_cache_full; - rx_cache_empty += ring->rx_stats.rx_cache_empty; - rx_cache_busy += ring->rx_stats.rx_cache_busy; - rx_cache_waive += ring->rx_stats.rx_cache_waive; + start = u64_stats_fetch_begin(&rx_ring->syncp); + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + rx_csum_packets += rx_ring->rx_stats.rx_csum_packets; + rx_csum_errors += rx_ring->rx_stats.rx_csum_errors; + 
rx_multicast_packets += rx_ring->rx_stats.rx_multicast_packets; + rx_unicast_packets += rx_ring->rx_stats.rx_unicast_packets; + rx_desc_addr_err_cnt += rx_ring->rx_stats.rx_desc_addr_err_cnt; + rx_alloc_buf_err_cnt += rx_ring->rx_stats.rx_alloc_buf_err_cnt; + rx_cache_reuse += rx_ring->rx_stats.rx_cache_reuse; + rx_cache_full += rx_ring->rx_stats.rx_cache_full; + rx_cache_empty += rx_ring->rx_stats.rx_cache_empty; + rx_cache_busy += rx_ring->rx_stats.rx_cache_busy; + rx_cache_waive += rx_ring->rx_stats.rx_cache_waive; + xdp_tx_packets += rx_ring->rx_stats.xdp_tx_packets; + xdp_redirect_packets += rx_ring->rx_stats.xdp_redirect_packets; + xdp_oversize_packets += rx_ring->rx_stats.xdp_oversize_packets; + xdp_drop_packets += rx_ring->rx_stats.xdp_drop_packets; #ifdef CONFIG_TLS_DEVICE - tls_decrypted_packets += ring->rx_stats.tls_decrypted_packets; - tls_resync_req_num += ring->rx_stats.tls_resync_req_num; + tls_decrypted_packets += rx_ring->rx_stats.tls_decrypted_packets; + tls_resync_req_num += rx_ring->rx_stats.tls_resync_req_num; #endif - } while (u64_stats_fetch_retry(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); } net_stats->rx_packets = packets; @@ -2910,6 +3260,10 @@ static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) net_stats->rx_csum_errors = rx_csum_errors; net_stats->rx_multicast_packets = rx_multicast_packets; net_stats->rx_unicast_packets = rx_unicast_packets; + net_stats->xdp_tx_packets = xdp_tx_packets; + net_stats->xdp_redirect_packets = xdp_redirect_packets; + net_stats->xdp_oversize_packets = xdp_oversize_packets; + net_stats->xdp_drop_packets = xdp_drop_packets; #ifdef CONFIG_TLS_DEVICE net_stats->tls_decrypted_packets = tls_decrypted_packets; net_stats->tls_resync_req_num = tls_resync_req_num; @@ -2919,28 +3273,30 @@ static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) packets = 0; for (i = 0; i < txrx_mgt->tx_ring_num; i++) { - struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); + if (nbl_res_is_ctrlq(txrx_mgt, i)) + continue; + tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); do { - start = u64_stats_fetch_begin(&ring->syncp); - bytes += ring->stats.bytes; - packets += ring->stats.packets; - tso_packets += ring->tx_stats.tso_packets; - tso_bytes += ring->tx_stats.tso_bytes; - tx_csum_packets += ring->tx_stats.tx_csum_packets; - tx_busy += ring->tx_stats.tx_busy; - tx_dma_busy += ring->tx_stats.tx_dma_busy; - tx_multicast_packets += ring->tx_stats.tx_multicast_packets; - tx_unicast_packets += ring->tx_stats.tx_unicast_packets; - tx_skb_free += ring->tx_stats.tx_skb_free; - tx_desc_addr_err_cnt += ring->tx_stats.tx_desc_addr_err_cnt; - tx_desc_len_err_cnt += ring->tx_stats.tx_desc_len_err_cnt; + start = u64_stats_fetch_begin(&tx_ring->syncp); + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + tso_packets += tx_ring->tx_stats.tso_packets; + tso_bytes += tx_ring->tx_stats.tso_bytes; + tx_csum_packets += tx_ring->tx_stats.tx_csum_packets; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_dma_busy += tx_ring->tx_stats.tx_dma_busy; + tx_multicast_packets += tx_ring->tx_stats.tx_multicast_packets; + tx_unicast_packets += tx_ring->tx_stats.tx_unicast_packets; + tx_skb_free += tx_ring->tx_stats.tx_skb_free; + tx_desc_addr_err_cnt += tx_ring->tx_stats.tx_desc_addr_err_cnt; + tx_desc_len_err_cnt += tx_ring->tx_stats.tx_desc_len_err_cnt; #ifdef CONFIG_TLS_DEVICE - tls_encrypted_packets += ring->tx_stats.tls_encrypted_packets; - tls_encrypted_bytes += 
ring->tx_stats.tls_encrypted_bytes; - tls_ooo_packets += ring->tx_stats.tls_ooo_packets; + tls_encrypted_packets += tx_ring->tx_stats.tls_encrypted_packets; + tls_encrypted_bytes += tx_ring->tx_stats.tls_encrypted_bytes; + tls_ooo_packets += tx_ring->tx_stats.tls_ooo_packets; #endif - } while (u64_stats_fetch_retry(&ring->syncp, start)); + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); } rcu_read_unlock(); @@ -3131,7 +3487,7 @@ nbl_res_queue_stop_abnormal_sw_queue(void *priv, u16 local_queue_id, int type) if (vector) { vector->started = false; - napi_synchronize(&vector->napi); + napi_synchronize(&vector->nbl_napi.napi); netif_stop_subqueue(tx_ring->netdev, local_queue_id); } @@ -3202,8 +3558,11 @@ static int nbl_res_txrx_restart_abnormal_ring(void *priv, int ring_index, int ty break; } - if (vector) + if (vector) { + if (vector->net_msix_mask_en) + writel(vector->irq_data, vector->irq_enable_base); vector->started = true; + } return ret; } @@ -3233,6 +3592,19 @@ static void nbl_res_txrx_set_xdp_prog(void *priv, void *prog) } } +static int nbl_res_get_max_mtu(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring; + + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, 0); + + if (!!(txrx_mgt->xdp_ring_num) && rx_ring->xdp_prog) + return rx_ring->buf_len - NBL_BUFFER_HDR_LEN - ETH_HLEN - (2 * VLAN_HLEN); + return NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD; +} + /* NBL_TXRX_SET_OPS(ops_name, func) * * Use X Macros to reduce setup and remove codes. @@ -3270,6 +3642,7 @@ do { \ NBL_TXRX_SET_OPS(set_rings_xdp_prog, nbl_res_txrx_set_xdp_prog); \ NBL_TXRX_SET_OPS(register_xdp_rxq, nbl_res_txrx_register_xdp_rxq); \ NBL_TXRX_SET_OPS(unregister_xdp_rxq, nbl_res_txrx_unregister_xdp_rxq); \ + NBL_TXRX_SET_OPS(get_max_mtu, nbl_res_get_max_mtu); \ } while (0) /* Structure starts here, adding an op should not modify anything below */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h index 22a186f30ef0..296115ad027f 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -16,12 +16,11 @@ #define NBL_MIN_DESC_NUM 128 #define NBL_MAX_DESC_NUM 32768 -#define NBL_PACKED_DESC_F_NEXT 1 -#define NBL_PACKED_DESC_F_WRITE 2 - #define DEFAULT_MAX_PF_QUEUE_PAIRS_NUM 16 #define DEFAULT_MAX_VF_QUEUE_PAIRS_NUM 2 +#define NBL_PACKED_DESC_F_NEXT 1 +#define NBL_PACKED_DESC_F_WRITE 2 #define NBL_PACKED_DESC_F_AVAIL 7 #define NBL_PACKED_DESC_F_USED 15 @@ -30,28 +29,21 @@ #define NBL_TX_BUF(tx_ring, i) (&(((tx_ring)->tx_bufs)[i])) #define NBL_RX_BUF(rx_ring, i) (&(((rx_ring)->rx_bufs)[i])) -#define DESC_NEEDED (MAX_SKB_FRAGS + 4) - -#define NBL_TX_POLL_WEIGHT 256 - #define NBL_RX_BUF_256 256 #define NBL_RX_HDR_SIZE NBL_RX_BUF_256 -#define NBL_RX_BUF_WRITE 16 -#define NBL_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD - NBL_BUFFER_HDR_LEN) -#define NBL_XDP_RX_HARD_BUFF (NBL_RX_PAD + NBL_BUFFER_HDR_LEN) +#define NBL_BUFFER_HDR_LEN (sizeof(struct nbl_rx_extend_head)) +#define NBL_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) +#define NBL_RX_BUFSZ (2048) +#define NBL_RXBUF_MIN_ORDER (10) +#define NBL_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define NBL_TX_TOTAL_HEADERLEN_SHIFT 24 +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define NBL_TX_POLL_WEIGHT 256 #define NBL_TXD_DATALEN_BITS 16 #define NBL_TXD_DATALEN_MAX BIT(NBL_TXD_DATALEN_BITS) - #define MAX_DESC_NUM_PER_PKT (32) -#define NBL_RX_BUFSZ (2048) -#define NBL_RX_BUFSZ_ORDER (11) - -#define NBL_BUFFER_HDR_LEN (sizeof(struct nbl_rx_extend_head)) - -#define NBL_ETH_FRAME_MIN_SIZE 60 - #define NBL_TX_TSO_MSS_MIN (256) #define NBL_TX_TSO_MSS_MAX (16383) #define NBL_TX_TSO_L2L3L4_HDR_LEN_MIN (42) @@ -60,11 +52,6 @@ #define IP_VERSION_V4 (4) #define NBL_TX_FLAGS_TSO BIT(0) -#define NBL_TX_TOTAL_HEADERLEN_SHIFT 24 - -#define NBL_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) -#define NBL_RX_PAGE_PER_FRAGS (PAGE_SIZE >> NBL_RX_BUFSZ_ORDER) - #define NBL_KTLS_INIT_PAD_LEN 28 #define NBL_KTLS_SYNC_PKT_LEN 30 #define NBL_KTLS_PER_CELL_LEN 4096 @@ -276,26 +263,18 @@ struct nbl_tx_resync_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; -#define NBL_XDP_PASS 0 -#define NBL_XDP_CONSUMED BIT(0) -#define NBL_XDP_TX BIT(1) -#define NBL_XDP_REDIRECT BIT(2) -#define NBL_XDP_ABORTED BIT(3) -#define NBL_XDP_DROP BIT(4) +#define NBL_XDP_FLAG_TX BIT(0) +#define NBL_XDP_FLAG_REDIRECT BIT(1) +#define NBL_XDP_FLAG_DROP BIT(2) +#define NBL_XDP_FLAG_OVERSIZE BIT(3) +#define NBL_XDP_FLAG_MULTICAST BIT(4) struct nbl_xdp_output { - u16 desc_done_num; - bool xdp_tx_act; - bool xdp_redirect_act; - bool xdp_drop; - bool multicast; - bool xdp_oversize; - u8 resv; u64 bytes; + u16 desc_done_num; + u16 flags; }; - DECLARE_STATIC_KEY_FALSE(nbl_xdp_locking_key); - static inline u16 nbl_unused_rx_desc_count(struct nbl_res_rx_ring *ring) { u16 ntc = ring->next_to_clean; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c index a1d3adf2f2fa..dd46605f426d 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c @@ -121,6 +121,9 @@ static void nbl_res_register_func_mac(void *priv, u8 *mac, u16 func_id) struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + if (func_id >= NBL_MAX_FUNC) + return; + ether_addr_copy(vsi_info->mac_info[func_id].mac, mac); } @@ -130,6 +133,9 @@ static int nbl_res_register_func_link_forced(void *priv, u16 func_id, u8 link_fo struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt 
*)priv; struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + resource_info->link_forced_info[func_id] = link_forced; *should_notify = test_bit(func_id, resource_info->func_bitmap); @@ -142,9 +148,28 @@ static int nbl_res_get_link_forced(void *priv, u16 vsi_id) struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + return resource_info->link_forced_info[func_id]; } +static int nbl_res_register_func_trust(void *priv, u16 func_id, + bool trusted, bool *should_notify) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + + vsi_info->mac_info[func_id].trusted = trusted; + *should_notify = test_bit(func_id, resource_info->func_bitmap); + + return 0; +} + static int nbl_res_register_func_vlan(void *priv, u16 func_id, u16 vlan_tci, u16 vlan_proto, bool *should_notify) { @@ -152,6 +177,9 @@ static int nbl_res_register_func_vlan(void *priv, u16 func_id, struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + vsi_info->mac_info[func_id].vlan_proto = vlan_proto; vsi_info->mac_info[func_id].vlan_tci = vlan_tci; *should_notify = test_bit(func_id, resource_info->func_bitmap); @@ -164,6 +192,9 @@ static int nbl_res_register_rate(void *priv, u16 func_id, int rate) struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + if (func_id >= NBL_MAX_FUNC) + return -EINVAL; + vsi_info->mac_info[func_id].rate = rate; return 0; @@ -187,7 +218,8 @@ do { \ NBL_VSI_SET_OPS(register_func_link_forced, nbl_res_register_func_link_forced); \ NBL_VSI_SET_OPS(register_func_vlan, nbl_res_register_func_vlan); \ NBL_VSI_SET_OPS(get_link_forced, nbl_res_get_link_forced); \ - NBL_VSI_SET_OPS(register_func_rate, nbl_res_register_rate); \ + NBL_VSI_SET_OPS(register_func_rate, nbl_res_register_rate); \ + NBL_VSI_SET_OPS(register_func_trust, nbl_res_register_func_trust); \ } while (0) /* Structure starts here, adding an op should not modify anything below */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h index 50be586cd06c..429aca5389d1 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h index 7b19d0a47182..6bde9392fc55 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -261,6 +261,41 @@ enum nbl_chan_msg_type { NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, NBL_CHAN_MSG_GET_VF_STATS, + NBL_CHAN_MSG_REGISTER_FUNC_TRUST, + NBL_CHAN_MSG_NOTIFY_TRUST, + NBL_CHAN_CHECK_VF_IS_ACTIVE, + NBL_CHAN_MSG_GET_ETH_ABNORMAL_STATS, + NBL_CHAN_MSG_GET_ETH_CTRL_STATS, + NBL_CHAN_MSG_GET_PAUSE_STATS, + NBL_CHAN_MSG_GET_ETH_MAC_STATS, + NBL_CHAN_MSG_GET_FEC_STATS, + NBL_CHAN_MSG_CFG_MULTI_MCAST_RULE, + NBL_CHAN_MSG_GET_LINK_DOWN_COUNT, + NBL_CHAN_MSG_GET_LINK_STATUS_OPCODE, + NBL_CHAN_MSG_GET_RMON_STATS, + NBL_CHAN_MSG_REGISTER_PF_NAME, + NBL_CHAN_MSG_GET_PF_NAME, + NBL_CHAN_MSG_CONFIGURE_RDMA_BW, + NBL_CHAN_MSG_SET_RATE_LIMIT, + NBL_CHAN_MSG_SET_TC_WGT, + NBL_CHAN_MSG_REMOVE_QUEUE, + NBL_CHAN_MSG_GET_MIRROR_TABLE_ID, + NBL_CHAN_MSG_CONFIGURE_MIRROR, + NBL_CHAN_MSG_CONFIGURE_MIRROR_TABLE, + NBL_CHAN_MSG_CLEAR_MIRROR_CFG, + NBL_CHAN_MSG_MIRROR_OUTPUTPORT_NOTIFY, + NBL_CHAN_MSG_CHECK_FLOWTABLE_SPEC, + NBL_CHAN_CHECK_VF_IS_VDPA, + NBL_CHAN_MSG_GET_VDPA_VF_STATS, + NBL_CHAN_MSG_SET_RX_RATE, + NBL_CHAN_GET_UVN_PKT_DROP_STATS, + NBL_CHAN_GET_USTORE_PKT_DROP_STATS, + NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, + NBL_CHAN_MSG_SET_WOL, + + NBL_CHAN_MSG_MTU_SET = 501, + NBL_CHAN_MSG_SET_RXFH_INDIR = 506, + NBL_CHAN_MSG_SET_RXFH_RSS_ALG_SEL = 508, /* mailbox msg end */ NBL_CHAN_MSG_MAILBOX_MAX, @@ -280,6 +315,8 @@ enum nbl_chan_msg_type { NBL_CHAN_MSG_ADMINQ_FLASH_ACTIVATE = 0x8204, NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE = 0x8205, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ = 0x8206, + NBL_CHAN_MSG_ADMINQ_REGISTER_WRITE = 0x8207, + NBL_CHAN_MSG_ADMINQ_REGISTER_READ = 0x8208, NBL_CHAN_MSG_ADMINQ_GET_NVM_BANK_INDEX = 0x820B, NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK = 0x820C, NBL_CHAN_MSG_ADMINQ_FLASH_LOCK = 0x820D, @@ -288,8 +325,8 @@ enum nbl_chan_msg_type { NBL_CHAN_MSG_ADMINQ_PORT_NOTIFY = 0x8301, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM = 0x8302, NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS = 0x8303, + NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS = 0x8305, /* TODO: new kernel and ethtool support show fec stats */ - NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS = 0x408, NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_WRITE = 0x8F01, NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_READ = 0x8F02, @@ -322,6 +359,11 @@ struct nbl_chan_param_del_macvlan { u16 vsi; }; +struct nbl_chan_param_cfg_multi_mcast { + u16 vsi; + u16 enable; +}; + struct nbl_chan_param_register_net_info { u16 pf_bdf; u64 vf_bar_start; @@ -330,6 +372,7 @@ struct nbl_chan_param_register_net_info { u16 offset; u16 stride; u64 pf_bar_start; + u16 is_vdpa; }; struct nbl_chan_param_alloc_txrx_queues { @@ -357,6 +400,7 @@ struct nbl_chan_param_cfg_dsch { struct nbl_chan_param_setup_cqs { u16 vsi_id; u16 real_qps; + bool rss_indir_set; }; struct nbl_chan_param_set_promisc_mode { @@ -430,6 +474,11 @@ struct nbl_chan_param_get_rxfh_indir { u32 rxfh_indir_size; }; +struct nbl_chan_param_set_rxfh_rss_alg_sel { + u16 vsi_id; + u8 rss_alg_sel; +}; + struct nbl_chan_result_get_real_bdf { u8 bus; u8 dev; @@ -482,6 +531,15 @@ struct nbl_chan_resource_read_param { u32 len; }; +struct nbl_chan_adminq_reg_read_param { + u32 reg; +}; + +struct nbl_chan_adminq_reg_write_param { + u32 reg; + u32 value; +}; + struct nbl_chan_param_flash_write { u32 bank_id; u32 offset; @@ -522,7 +580,8 @@ struct nbl_chan_param_module_eeprom_info { u8 page; u8 bank; u32 write:1; - u32 rsvd:31; + u32 version:2; + u32 rsvd:29; u16 offset; u16 length; #define NBL_MODULE_EEPRO_WRITE_MAX_LEN (4) @@ -534,6 +593,13 @@ struct nbl_chan_param_eth_rep_notify_link_state { u8 link_state; }; +struct 
nbl_chan_param_set_rxfh_indir { + u16 vsi_id; + u32 indir_size; +#define NBL_RXFH_INDIR_MAX_SIZE (512) + u32 indir[NBL_RXFH_INDIR_MAX_SIZE]; +}; + struct nbl_chan_cfg_ktls_keymat { u32 index; u8 mode; @@ -783,6 +849,11 @@ struct nbl_chan_param_register_func_mac { u8 mac[ETH_ALEN]; }; +struct nbl_chan_param_register_trust { + u16 func_id; + bool trusted; +}; + struct nbl_chan_param_register_vlan { u16 func_id; u16 vlan_tci; @@ -794,6 +865,12 @@ struct nbl_chan_param_set_tx_rate { int tx_rate; }; +struct nbl_chan_param_set_txrx_rate { + u16 func_id; + int txrx_rate; + int burst; +}; + struct nbl_chan_param_register_func_link_forced { u16 func_id; u8 link_forced; @@ -805,6 +882,16 @@ struct nbl_chan_param_notify_link_state { u32 link_speed; }; +struct nbl_chan_param_set_mtu { + u16 vsi_id; + u16 mtu; +}; + +struct nbl_chan_param_get_uvn_pkt_drop_stats { + u16 vsi_id; + u16 num_queues; +}; + struct nbl_register_net_param { u16 pf_bdf; u64 vf_bar_start; @@ -813,6 +900,7 @@ struct nbl_register_net_param { u16 offset; u16 stride; u64 pf_bar_start; + u16 is_vdpa; }; struct nbl_register_net_result { @@ -831,6 +919,10 @@ struct nbl_register_net_result { u16 vlan_proto; u16 vlan_tci; u32 rate; + bool trusted; + + u64 vlan_features; + u64 hw_enc_features; }; #define NBL_CHAN_FDIR_FLOW_RULE_SIZE 1024 @@ -950,6 +1042,39 @@ struct nbl_queue_err_stats { u32 uvn_stat_pkt_drop; }; +struct nbl_eth_mac_stats { + u64 frames_txd_ok; + u64 frames_rxd_ok; + u64 octets_txd_ok; + u64 octets_rxd_ok; + u64 multicast_frames_txd_ok; + u64 broadcast_frames_txd_ok; + u64 multicast_frames_rxd_ok; + u64 broadcast_frames_rxd_ok; +}; + +enum rmon_range { + ETHER_STATS_PKTS_64_OCTETS, + ETHER_STATS_PKTS_65_TO_127_OCTETS, + ETHER_STATS_PKTS_128_TO_255_OCTETS, + ETHER_STATS_PKTS_256_TO_511_OCTETS, + ETHER_STATS_PKTS_512_TO_1023_OCTETS, + ETHER_STATS_PKTS_1024_TO_1518_OCTETS, + ETHER_STATS_PKTS_1519_TO_2047_OCTETS, + ETHER_STATS_PKTS_2048_TO_MAX_OCTETS, + ETHER_STATS_PKTS_MAX, +}; + +struct nbl_rmon_stats { + u64 undersize_frames_rxd_goodfcs; + u64 oversize_frames_rxd_goodfcs; + u64 undersize_frames_rxd_badfcs; + u64 oversize_frames_rxd_badfcs; + + u64 rmon_rx_range[ETHER_STATS_PKTS_MAX]; + u64 rmon_tx_range[ETHER_STATS_PKTS_MAX]; +}; + struct nbl_rdma_register_param { bool has_rdma; u32 mem_type; @@ -989,13 +1114,29 @@ struct nbl_port_notify { u64 lp_advertising; /* enum nbl_port_cap */ }; -#define NBL_EMP_ALERT_DATA_MAX_SIZE 64 +#define NBL_EMP_LOG_MAX_SIZE (256) +struct nbl_emp_alert_log_event { + u64 uptime; + u8 level; + u8 data[256]; +}; + +#define NBL_EMP_ALERT_DATA_MAX_SIZE (4032) struct nbl_chan_param_emp_alert_event { u16 type; u16 len; u8 data[NBL_EMP_ALERT_DATA_MAX_SIZE]; }; +struct nbl_fec_stats { + u32 corrected_blocks; + u32 uncorrectable_blocks; + u32 corrected_bits; + u32 corrected_lane[4]; + u32 uncorrectable_lane[4]; + u32 corrected_bits_lane[4]; +}; + struct nbl_port_state { u64 port_caps; u64 port_advertising; @@ -1011,6 +1152,17 @@ struct nbl_port_state { u8 module_repluged; }; +struct nbl_eth_ctrl_stats { + u64 macctrl_frames_txd_ok; + u64 macctrl_frames_rxd; + u64 unsupported_opcodes_rx; +}; + +struct nbl_pause_stats { + u64 rx_pause_frames; + u64 tx_pause_frames; +}; + struct nbl_port_advertising { u8 eth_id; u64 speed_advert; @@ -1019,12 +1171,6 @@ struct nbl_port_advertising { u8 autoneg; }; -struct nbl_port_key { - u32 id; /* port id */ - u32 subop; /* 1: read, 2: write */ - u64 data[]; /* [47:0]: data, [55:48]: rsvd, [63:56]: key */ -}; - struct nbl_eth_link_info { u8 link_status; u32 link_speed; @@ 
-1094,6 +1240,11 @@ struct nbl_chan_param_notify_fw_reset_info { u16 data[]; }; +struct nbl_chan_param_configure_rdma_bw { + u8 eth_id; + int rdma_bw; +}; + struct nbl_chan_param_configure_qos { u8 eth_id; u8 trust; @@ -1118,6 +1269,52 @@ struct nbl_chan_param_get_pfc_buffer_size_resp { int xon; }; +struct nbl_chan_param_set_rate_limit { + enum nbl_traffic_type type; + u32 rate; +}; + +struct nbl_chan_param_pf_name { + u16 vsi_id; + char dev_name[IFNAMSIZ]; +}; + +struct nbl_chan_param_set_tc_wgt { + u16 vsi_id; + u8 num_tc; + u8 weight[NBL_MAX_TC_NUM]; +}; + +struct nbl_chan_param_get_mirror_table_id { + u16 vsi_id; + int dir; + bool mirror_en; + u8 mt_id; +}; + +struct nbl_chan_param_mirror { + int dir; + bool mirror_en; + u8 mt_id; +}; + +struct nbl_chan_param_mirror_table { + bool mirror_en; + u8 mt_id; + u16 func_id; +}; + +struct nbl_chan_param_check_flow_spec { + u16 vlan_list_cnt; + u16 unicast_mac_cnt; + u16 multi_mac_cnt; +}; + +struct nbl_chan_param_set_wol { + u8 eth_id; + bool enable; +}; + struct nbl_chan_send_info { void *arg; size_t arg_len; @@ -1155,6 +1352,7 @@ struct nbl_channel_ops { int (*send_msg)(void *priv, struct nbl_chan_send_info *chan_send); int (*send_ack)(void *priv, struct nbl_chan_ack_info *chan_ack); int (*register_msg)(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv); + void (*unregister_msg)(void *priv, u16 msg_type); int (*cfg_chan_qinfo_map_table)(void *priv, u8 chan_type); bool (*check_queue_exist)(void *priv, u8 chan_type); int (*setup_queue)(void *priv, u8 chan_type); @@ -1182,10 +1380,6 @@ struct nbl_channel_ops_tbl { int nbl_chan_init_common(void *p, struct nbl_init_param *param); void nbl_chan_remove_common(void *p); -int nbl_chan_init_bootis(void *p, struct nbl_init_param *param); -void nbl_chan_remove_bootis(void *p); -int nbl_chan_init_virtio(void *p, struct nbl_init_param *param); -void nbl_chan_remove_virtio(void *p); enum nbl_cmd_opcode_list { NBL_CMD_OP_WRITE, diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h index fb91ebfc6c40..22504168d5b4 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -201,20 +201,27 @@ do { \ #define NBL_COMMON_TO_ETH_MODE(common) ((common)->eth_mode) #define NBL_COMMON_TO_DEBUG_LVL(common) ((common)->debug_lvl) #define NBL_COMMON_TO_VF_CAP(common) ((common)->is_vf) +#define NBL_COMMON_TO_OCP_CAP(common) ((common)->is_ocp) #define NBL_COMMON_TO_PCI_USING_DAC(common) ((common)->pci_using_dac) #define NBL_COMMON_TO_MGT_PF(common) ((common)->mgt_pf) #define NBL_COMMON_TO_PCI_FUNC_ID(common) ((common)->function) #define NBL_COMMON_TO_BOARD_ID(common) ((common)->board_id) #define NBL_COMMON_TO_LOGIC_ETH_ID(common) ((common)->logic_eth_id) +#define NBL_COMMON_TO_ETH_MAX_SPEED(common) ((common)->eth_max_speed) #define NBL_ONE_ETHERNET_PORT (1) #define NBL_TWO_ETHERNET_PORT (2) #define NBL_FOUR_ETHERNET_PORT (4) +#define NBL_DEFAULT_VSI_ID_GAP (1024) #define NBL_TWO_ETHERNET_VSI_ID_GAP (512) #define NBL_FOUR_ETHERNET_VSI_ID_GAP (256) -#define NBL_VSI_ID_GAP(mode) ((mode) == NBL_FOUR_ETHERNET_PORT ? 
\ - NBL_FOUR_ETHERNET_VSI_ID_GAP : \ - NBL_TWO_ETHERNET_VSI_ID_GAP) + +#define NBL_VSI_ID_GAP(m) \ +({ \ + typeof(m) _m = (m); \ + _m == NBL_FOUR_ETHERNET_PORT ? NBL_FOUR_ETHERNET_VSI_ID_GAP : \ + (_m == NBL_TWO_ETHERNET_PORT ? NBL_TWO_ETHERNET_VSI_ID_GAP : NBL_DEFAULT_VSI_ID_GAP); \ +}) #define NBL_BOOTIS_ECPU_ETH0_FUNCTION (2) #define NBL_BOOTIS_ECPU_ETH1_FUNCTION (3) @@ -243,6 +250,7 @@ struct nbl_common_info { struct pci_dev *pdev; struct device *dev; struct device *dma_dev; + struct devlink_port *devlink_port; u32 debug_lvl; u32 msg_enable; u16 vsi_id; @@ -254,23 +262,29 @@ struct nbl_common_info { u8 function; u8 devid; u8 bus; + /* only valid for ctrldev */ + u8 hw_bus; u16 mgt_pf; u8 board_id; bool pci_using_dac; u8 tc_inst_id; /* for tc flow and cmdq */ + u8 is_ocp; enum nbl_product_type product_type; + + u32 eth_max_speed; + bool wol_ena; }; -struct nbl_netdev_rep_attr { +struct nbl_netdev_name_attr { struct attribute attr; ssize_t (*show)(struct device *dev, - struct nbl_netdev_rep_attr *attr, char *buf); + struct nbl_netdev_name_attr *attr, char *buf); ssize_t (*store)(struct device *dev, - struct nbl_netdev_rep_attr *attr, const char *buf, size_t len); - int rep_id; + struct nbl_netdev_name_attr *attr, const char *buf, size_t len); + char net_dev_name[IFNAMSIZ]; }; struct nbl_index_tbl_key { @@ -495,7 +509,6 @@ int nbl_dma_iommu_change_translate(struct nbl_common_info *common); void nbl_dma_iommu_exit_translate(struct nbl_common_info *common); bool nbl_dma_iommu_status(struct pci_dev *pdev); bool nbl_dma_remap_status(struct pci_dev *pdev, u64 *dma_limit); -void nbl_net_addr_rep_attr(struct nbl_netdev_rep_attr *rep_attr, int rep_id); u32 nbl_common_pf_id_subtraction_mgtpf_id(struct nbl_common_info *common, u32 pf_id); void *nbl_common_init_index_table(struct nbl_index_tbl_key *key); void nbl_common_remove_index_table(void *priv, struct nbl_index_tbl_del_key *key); @@ -513,11 +526,14 @@ enum nbl_event_type { NBL_EVENT_RDMA_BOND_UPDATE = 0, NBL_EVENT_OFFLOAD_STATUS_CHANGED, NBL_EVENT_LINK_STATE_UPDATE, - NBL_EVENT_DEV_MODE_SWITCH, NBL_EVENT_ACL_STATE_UPDATE, NBL_EVENT_NETDEV_STATE_CHANGE, NBL_EVENT_RESET_EVENT, NBL_EVENT_QUEUE_ALLOC, + NBL_EVENT_CHANGE_MTU, + NBL_EVENT_MIRROR_OUTPUTPORT, + NBL_EVENT_MIRROR_OUTPUTPORT_DEVLAYER, /* for dev layer */ + NBL_EVENT_MIRROR_SELECTPORT, NBL_EVENT_MAX, }; @@ -532,12 +548,14 @@ enum nbl_rdma_subevent_type { NBL_SUBEVENT_CREATE_BOND_ADEV, NBL_SUBEVENT_RELEASE_BOND_ADEV, NBL_SUBEVENT_UPDATE_BOND_MEMBER, + NBL_SUBEVENT_UPDATE_MTU, NBL_SUBEVENT_MAX, }; -struct nbl_event_rdma_bond_update { +struct nbl_event_param { enum nbl_rdma_subevent_type subevent; struct nbl_lag_member_list_param param; + int mtu; }; struct nbl_event_offload_status_data { @@ -548,13 +566,6 @@ struct nbl_event_offload_status_data { enum nbl_dev_mode_switch_op { NBL_DEV_KERNEL_TO_USER, NBL_DEV_USER_TO_KERNEL, - NBL_DEV_SET_USER_PROMISC_MODE, -}; - -struct nbl_event_dev_mode_switch_data { - int op; - int ret; - bool promosic; }; struct nbl_event_acl_state_update_data { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h index 5f7af46fe299..3ddd77f8458e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -34,6 +34,8 @@ int nbl_dev_destroy_rep(void *p); int nbl_dev_setup_vf_config(void *p, int num_vfs); void nbl_dev_remove_vf_config(void *p); +void nbl_dev_register_dev_name(void *p); +void nbl_dev_get_dev_name(void *p, char *dev_name); int nbl_dev_resume(void *p); int nbl_dev_suspend(void *p); #endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h index 587ecea46a4c..0433b38952cc 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -51,7 +51,7 @@ struct nbl_dispatch_ops { void (*unregister_xdp_rxq)(void *priv, u8 ring_index); int (*dump_ring)(void *priv, struct seq_file *m, bool is_tx, int index); int (*dump_ring_stats)(void *priv, struct seq_file *m, bool is_tx, int index); - struct napi_struct *(*get_vector_napi)(void *priv, u16 index); + struct nbl_napi_struct *(*get_vector_napi)(void *priv, u16 index); void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, u16 index, bool mask_en); int (*register_net)(void *priv, struct nbl_register_net_param *register_param, @@ -61,6 +61,7 @@ struct nbl_dispatch_ops { int (*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num); void (*free_txrx_queues)(void *priv, u16 vsi_id); int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + int (*remove_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); void (*remove_all_queues)(void *priv, u16 vsi_id); int (*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id, u16 queue_offset, u16 queue_num); @@ -69,7 +70,7 @@ struct nbl_dispatch_ops { int (*setup_rss)(void *priv, u16 vsi_id); void (*remove_rss)(void *priv, u16 vsi_id); int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); - int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set); void (*remove_cqs)(void *priv, u16 vsi_id); int (*cfg_qdisc_mqprio)(void *priv, struct nbl_tc_qidsc_param *param); void (*clear_queues)(void *priv, u16 vsi_id); @@ -91,6 +92,7 @@ struct nbl_dispatch_ops { void (*del_lldp_flow)(void *priv, u16 vsi); int (*add_multi_rule)(void *priv, u16 vsi); void (*del_multi_rule)(void *priv, u16 vsi); + int (*cfg_multi_mcast)(void *priv, u16 vsi, u16 enable); int (*setup_multi_group)(void *priv); void (*remove_multi_group)(void *priv); void (*clear_accel_flow)(void *priv, u16 vsi_id); @@ -100,6 +102,8 @@ struct nbl_dispatch_ops { u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id); int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + int (*set_mtu)(void *priv, u16 vsi_id, u16 mtu); + int (*get_max_mtu)(void *priv); u32 (*get_tx_headroom)(void *priv); void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); @@ -129,10 +133,20 @@ struct nbl_dispatch_ops { struct nbl_queue_stats *queue_stats, bool is_tx); int (*get_queue_err_stats)(void *priv, u8 queue_id, struct nbl_queue_err_stats *queue_err_stats, bool is_tx); + int (*get_eth_mac_stats)(void *priv, u32 eth_id, + struct nbl_eth_mac_stats *eth_mac_stats, u32 data_len); + int 
(*get_rmon_stats)(void *priv, u32 eth_id, + struct nbl_rmon_stats *rmon_stats, u32 data_len); void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + int (*get_eth_ctrl_stats)(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats, u32 data_len); + int (*get_pause_stats)(void *priv, u32 eth_id, + struct nbl_pause_stats *pause_stats, u32 data_len); void (*get_private_stat_len)(void *priv, u32 *len); void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data, u32 data_len); void (*fill_private_stat_strings)(void *priv, u8 *strings); + int (*get_eth_abnormal_stats)(void *priv, u8 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats); u16 (*get_max_desc_num)(void *priv); u16 (*get_min_desc_num)(void *priv); u16 (*get_tx_desc_num)(void *priv, u32 ring_index); @@ -146,9 +160,11 @@ struct nbl_dispatch_ops { u16 num_net_msix, u16 level); void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir, u32 indir_size); + int (*set_rxfh_indir)(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size); void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); void (*get_rxfh_rss_key)(void *priv, u8 *rss_key, u32 rss_key_size); - void (*get_rxfh_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + void (*get_rxfh_rss_alg_sel)(void *priv, u16 vsi_id, u8 *alg_sel); + int (*set_rxfh_rss_alg_sel)(void *priv, u16 vsi_id, u8 alg_sel); int (*get_port_attributes)(void *priv); int (*enable_port)(void *priv, bool enable); void (*init_port)(void *priv); @@ -156,15 +172,20 @@ struct nbl_dispatch_ops { int (*get_eth_bond_info)(void *priv, struct nbl_bond_param *param); void (*recv_port_notify)(void *priv); int (*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*get_fec_stats)(void *priv, u8 eth_id, struct nbl_fec_stats *fec_stats); int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*get_link_down_count)(void *priv, u8 eth_id, u64 *link_down_count); + int (*get_link_status_opcode)(void *priv, u8 eth_id, u32 *link_status_opcode); int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); int (*nway_reset)(void *priv, u8 eth_id); + int (*set_wol)(void *priv, u8 eth_id, bool enable); void (*adapt_desc_gother)(void *priv); + void (*set_desc_high_throughput)(void *priv); void (*flr_clear_net)(void *priv, u16 vfid); void (*flr_clear_queues)(void *priv, u16 vfid); void (*flr_clear_accel_flow)(void *priv, u16 vfid); @@ -196,7 +217,6 @@ struct nbl_dispatch_ops { int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); - int (*cfg_lag_mcc)(void *priv, u16 eth_id, u16 lag_id, bool enable); int (*cfg_duppkt_info)(void *priv, struct nbl_lag_member_list_param *param); int (*cfg_duppkt_mcc)(void *priv, struct nbl_lag_member_list_param *param); int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); @@ -258,6 +278,7 @@ struct nbl_dispatch_ops 
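The get_rmon_stats op above returns the bucketed frame-size counters that ethtool exposes through its standard RMON statistics group. As a rough sketch of how the nbl_rmon_stats buckets defined earlier in this patch could be surfaced (the nbl_serv_get_rmon_stats() and nbl_query_rmon_stats() names are illustrative assumptions, not the driver's actual code):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Illustrative range table: one entry per enum rmon_range bucket from this
 * patch, terminated by an all-zero entry as the ethtool core expects.
 */
static const struct ethtool_rmon_hist_range nbl_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048, 65535 },
	{},
};

/* Hypothetical callback: copies struct nbl_rmon_stats (defined above) into
 * the kernel's struct ethtool_rmon_stats; nbl_query_rmon_stats() is an
 * assumed helper standing in for the dispatch-layer get_rmon_stats op.
 */
static void nbl_serv_get_rmon_stats(struct net_device *netdev,
				    struct ethtool_rmon_stats *rmon,
				    const struct ethtool_rmon_hist_range **ranges)
{
	struct nbl_rmon_stats nstats = {};
	int i;

	if (nbl_query_rmon_stats(netdev, &nstats))
		return;

	/* ETHER_STATS_PKTS_MAX (8) fits within ETHTOOL_RMON_HIST_MAX (10) */
	for (i = 0; i < ETHER_STATS_PKTS_MAX; i++) {
		rmon->hist[i] = nstats.rmon_rx_range[i];
		rmon->hist_tx[i] = nstats.rmon_tx_range[i];
	}
	rmon->undersize_pkts = nstats.undersize_frames_rxd_goodfcs;
	rmon->oversize_pkts = nstats.oversize_frames_rxd_goodfcs;
	*ranges = nbl_rmon_ranges;
}

Returning the range table through the double pointer is what lets ethtool print per-bucket counters without the driver hard-coding the bucket boundaries in userspace-visible form.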
{ u32 (*get_uipsec_lft_info)(void *priv); void (*handle_uipsec_soft_expire)(void *priv, u32 index); void (*handle_uipsec_hard_expire)(void *priv, u32 index); + void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); void (*dummy_func)(void *priv); @@ -300,19 +321,40 @@ struct nbl_dispatch_ops { int (*stop_abnormal_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, int type); u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); u16 (*get_vf_vsi_id)(void *priv, u16 vsi_id, int vf_id); + bool (*check_vf_is_active)(void *priv, u16 func_id); + int (*check_vf_is_vdpa)(void *priv, u16 func_id, u8 *is_vdpa); + int (*get_vdpa_vf_stats)(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats); + int (*get_uvn_pkt_drop_stats)(void *priv, u16 vsi_id, + u16 num_queues, u32 *uvn_stat_pkt_drop); + int (*get_ustore_pkt_drop_stats)(void *priv); + int (*get_ustore_total_pkt_drop_stats)(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats); int (*set_pmd_debug)(void *priv, bool pmd_debug); void (*register_func_mac)(void *priv, u8 *mac, u16 func_id); + int (*register_func_trust)(void *priv, u16 func_id, bool trusted, + bool *should_notify); int (*register_func_vlan)(void *priv, u16 func_id, u16 vlan_tci, u16 vlan_proto, bool *should_notify); int (*register_func_rate)(void *priv, u16 func_id, int rate); int (*register_func_link_forced)(void *priv, u16 func_id, u8 link_forced, bool *should_notify); int (*get_link_forced)(void *priv, u16 vsi_id); - int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate); + int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate, int burst); + int (*set_rx_rate)(void *priv, u16 func_id, int rx_rate, int burst); void (*get_driver_version)(void *priv, char *ver, int len); + void (*register_dev_name)(void *priv, u16 vsi_id, char *name); + void (*get_dev_name)(void *priv, u16 vsi_id, char *name); + + int (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id); + int (*configure_mirror)(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, u16 func_id, u8 mt_id); + int (*clear_mirror_cfg)(void *priv, u16 func_id); + int (*get_fd_flow)(void *priv, u16 vsi_id, u32 location, enum nbl_chan_fdir_rule_type rule_type, struct nbl_chan_param_fdir_replace *cmd); @@ -334,8 +376,18 @@ struct nbl_dispatch_ops { void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + int (*configure_rdma_bw)(void *priv, u8 eth_id, int rdma_bw); int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + int (*set_rate_limit)(void *priv, enum nbl_traffic_type type, u32 rate); + int (*set_tc_wgt)(void *priv, u16 vsi_id, u8 *weight, u8 num_tc); + + u32 (*get_perf_dump_length)(void *priv); + u32 (*get_perf_dump_data)(void *priv, u8 *buffer, u32 size); + void (*cfg_mirror_outputport_event)(void *priv, bool enable); + int (*check_flow_table_spec)(void *priv, u16 vlan_cnt, u16 unicast_cnt, u16 multicast_cnt); + u32 (*get_dvn_desc_req)(void *priv); + void (*set_dvn_desc_req)(void *priv, u32 desc_req); }; struct nbl_dispatch_ops_tbl { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h index 8440d82f92db..98191e2394c5 
100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -38,19 +38,24 @@ struct nbl_phy_ops { int (*cfg_q2tc_tcid)(void *priv, u16 queue_id, u16 tcid); int (*set_tc_wgt)(void *priv, u16 func_id, u8 *weight, u16 num_tc); int (*set_tc_spwrr)(void *priv, u16 func_id, u8 spwrr); - int (*set_shaping)(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active); + int (*set_shaping)(void *priv, u16 func_id, u64 total_tx_rate, u64 burst, + u8 vld, bool active); void (*active_shaping)(void *priv, u16 func_id); void (*deactive_shaping)(void *priv, u16 func_id); + int (*set_ucar)(void *priv, u16 func_id, u64 total_tx_rate, u64 burst, + u8 vld); int (*cfg_dsch_net_to_group)(void *priv, u16 func_id, u16 group_id, u16 vld); int (*cfg_dsch_group_to_port)(void *priv, u16 group_id, u16 dport, u16 vld); int (*init_epro_rss_key)(void *priv); void (*read_rss_key)(void *priv, u8 *rss_key); void (*read_rss_indir)(void *priv, u16 vsi_id, u32 *rss_indir, u16 rss_ret_base, u16 rss_entry_size); - void (*get_rss_alg_sel)(void *priv, u8 eth_id, u8 *rss_alg_sel); + void (*get_rss_alg_sel)(void *priv, u16 vsi_id, u8 *alg_sel); + int (*set_rss_alg_sel)(void *priv, u16 vsi_id, u8 alg_sel); int (*init_epro_vpt_tbl)(void *priv, u16 vsi_id); int (*set_epro_rss_default)(void *priv, u16 vsi_id); - int (*cfg_epro_rss_ret)(void *priv, u32 index, u8 size_type, u32 q_num, u16 *queue_list); + int (*cfg_epro_rss_ret)(void *priv, u32 index, u8 size_type, u32 q_num, + u16 *queue_list, const u32 *indir); int (*set_epro_rss_pt)(void *priv, u16 vsi_id, u16 rss_ret_base, u16 rss_entry_size); int (*clear_epro_rss_pt)(void *priv, u16 vsi_id); int (*disable_dvn)(void *priv, u16 queue_id); @@ -79,6 +84,8 @@ struct nbl_phy_ops { void (*get_coalesce)(void *priv, u16 interrupt_id, u16 *pnum, u16 *rate); void (*set_coalesce)(void *priv, u16 interrupt_id, u16 pnum, u16 rate); + void (*write_ped_tbl)(void *priv, u8 *data, u16 idx, enum nbl_flow_ped_type ped_type); + void (*update_mailbox_queue_tail_ptr)(void *priv, u16 tail_ptr, u8 txrx); void (*config_mailbox_rxq)(void *priv, dma_addr_t dma_addr, int size_bwid); void (*config_mailbox_txq)(void *priv, dma_addr_t dma_addr, int size_bwid); @@ -87,7 +94,10 @@ struct nbl_phy_ops { u16 (*get_mailbox_rx_tail_ptr)(void *priv); bool (*check_mailbox_dma_err)(void *priv, bool tx); u32 (*get_host_pf_mask)(void *priv); - u32 (*get_host_pf_fid)(void *priv, u8 func_id); + u32 (*get_host_pf_fid)(void *priv, u16 func_id); + u32 (*get_real_bus)(void *priv); + u64 (*get_pf_bar_addr)(void *priv, u16 func_id); + u64 (*get_vf_bar_addr)(void *priv, u16 func_id); void (*cfg_mailbox_qinfo)(void *priv, u16 func_id, u16 bus, u16 devid, u16 function); void (*enable_mailbox_irq)(void *priv, u16 func_id, bool enable_msix, u16 global_vector_id); void (*enable_abnormal_irq)(void *priv, bool enable_msix, u16 global_vector_id); @@ -108,6 +118,7 @@ struct nbl_phy_ops { int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); int (*set_spoof_check_enable)(void *priv, u16 vsi_id, u8 enable); + int (*set_vsi_mtu)(void *priv, u16 vsi_id, u16 mtu_sel); u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); int (*enable_lag_protocol)(void *priv, u16 eth_id, void *data); @@ -117,7 +128,6 @@ struct nbl_phy_ops { int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); int 
(*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); - int (*cfg_lag_mcc)(void *priv, u16 mcc_id, u16 action); bool (*get_lag_fwd)(void *priv, u16 eth_id); int (*cfg_bond_shaping)(void *priv, u8 eth_id, u8 speed, bool enable); @@ -135,6 +145,8 @@ struct nbl_phy_ops { int (*set_fd_action_ram)(void *priv, u32 action, u16 ram_index, u32 depth_index); void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); enum nbl_hw_status (*get_hw_status)(void *priv); + int (*set_mtu)(void *priv, u16 mtu_index, u16 mtu); + u16 (*get_mtu_index)(void *priv, u16 vsi_id); /* For leonis */ int (*set_ht)(void *priv, u16 hash, u16 hash_other, u8 ht_table, @@ -143,8 +155,9 @@ struct nbl_phy_ops { int (*search_key)(void *priv, u8 *key, u8 key_type); int (*add_tcam)(void *priv, u32 index, u8 *key, u32 *action, u8 key_type, u8 pp_type); void (*del_tcam)(void *priv, u32 index, u8 key_type, u8 pp_type); - int (*add_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action); + int (*add_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id, u16 action); void (*del_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id); + void (*update_mcc_next_node)(void *priv, u16 mcc_id, u16 next_mcc_id); int (*add_tnl_encap)(void *priv, const u8 encap_buf[], u16 encap_idx, union nbl_flow_encap_offset_tbl_u encap_idx_info); void (*del_tnl_encap)(void *priv, u16 encap_idx); @@ -215,21 +228,17 @@ struct nbl_phy_ops { void (*set_ped_tab_vsi_type)(void *priv, u32 port_id, u16 eth_proto); void (*load_p4)(void *priv, u32 addr, u32 size, u8 *data); void (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + void (*configure_rdma_bw)(void *priv, u8 eth_id, int rdma_bw); void (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + void (*set_rate_limit)(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate); + int (*get_dstat_vsi_stat)(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte); + int (*get_ustat_vsi_stat)(void *priv, u16 vsi_id, u64 *fwd_pkt, u64 *fwd_byte); + int (*get_uvn_pkt_drop_stats)(void *priv, u16 global_queue_id, u32 *uvn_stat_pkt_drop); + int (*get_ustore_pkt_drop_stats)(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats); - /* For bootis */ - int (*add_mv_tbl)(void *priv, u16 vsi, const void *key, const void *act, u16 result_idx); - int (*del_mv_tbl)(void *priv, const void *key); - int (*cfg_rss_alg)(void *priv, u16 vsi, const void *param); - void (*cfg_padpt_txrx_enable)(void *priv, bool tx_enable, bool rx_enable); - int (*init_port)(void *priv); - int (*init_fec)(void *priv); int (*setup_loopback)(void *priv, u32 eth_id, u32 enable); - bool (*sfp_is_present)(void *priv, u32 eth_id); - int (*read_i2c)(void *priv, u32 eth_id, u16 slave_addr, - u8 channel, u8 read_byte, u8 addr, u32 *rdata); - int (*get_eth_mac_address)(void *priv, u32 eth_id, u8 *mac_addr); int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); /* for board cfg */ @@ -255,13 +264,25 @@ struct nbl_phy_ops { void (*get_eth_ip_reg)(void *priv, u32 eth_id, u64 addr_off, u32 *data); int (*set_eth_fec_mode)(void *priv, u32 eth_id, enum nbl_port_mode mode); void (*clear_profile_table_action)(void *priv); - - /* For virtio */ + void (*ipro_chksum_err_ctrl)(void *priv, u8 status); void (*get_common_cfg)(void *priv, u32 offset, void *buf, u32 len); void 
(*set_common_cfg)(void *priv, u32 offset, void *buf, u32 len); void (*get_device_cfg)(void *priv, u32 offset, void *buf, u32 len); void (*set_device_cfg)(void *priv, u32 offset, void *buf, u32 len); bool (*get_rdma_capability)(void *priv); + + u32 (*get_perf_dump_length)(void *priv); + u32 (*get_perf_dump_data)(void *priv, u8 *buffer, u32 size); + + int (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, + bool mirror_en, u8 *mt_id); + int (*configure_mirror)(void *priv, u16 vsi_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, + u16 mirror_vsi_id, u16 mirror_queue_id, u8 mt_id); + int (*clear_mirror_cfg)(void *priv, u16 vsi_id); + u32 (*get_dvn_desc_req)(void *priv); + void (*set_dvn_desc_req)(void *priv, u32 desc_req); }; struct nbl_phy_ops_tbl { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h index 5fa6a50c13bb..201c49826952 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -52,7 +52,7 @@ struct nbl_resource_ops { void (*set_rings_xdp_prog)(void *priv, void *prog); int (*register_xdp_rxq)(void *priv, u8 ring_index); void (*unregister_xdp_rxq)(void *priv, u8 ring_index); - struct napi_struct *(*get_vector_napi)(void *priv, u16 index); + struct nbl_napi_struct *(*get_vector_napi)(void *priv, u16 index); void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, u16 index, bool mask_en); void (*register_vsi_ring)(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num); @@ -69,9 +69,10 @@ struct nbl_resource_ops { int (*setup_rss)(void *priv, u16 vsi_id); void (*remove_rss)(void *priv, u16 vsi_id); int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + int (*remove_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); void (*remove_all_queues)(void *priv, u16 vsi_id); int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); - int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set); void (*remove_cqs)(void *priv, u16 vsi_id); int (*cfg_qdisc_mqprio)(void *priv, struct nbl_tc_qidsc_param *param); void (*clear_queues)(void *priv, u16 vsi_id); @@ -93,6 +94,8 @@ struct nbl_resource_ops { void (*del_lldp_flow)(void *priv, u16 vsi); int (*add_multi_rule)(void *priv, u16 vsi); void (*del_multi_rule)(void *priv, u16 vsi); + int (*add_multi_mcast)(void *priv, u16 vsi); + void (*del_multi_mcast)(void *priv, u16 vsi); int (*setup_multi_group)(void *priv); void (*remove_multi_group)(void *priv); void (*clear_accel_flow)(void *priv, u16 vsi_id); @@ -106,6 +109,8 @@ struct nbl_resource_ops { void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + int (*set_mtu)(void *priv, u16 vsi_id, u16 mtu); + int (*get_max_mtu)(void *priv); void (*set_eswitch_mode)(void *priv, u16 switch_mode); u16 (*get_eswitch_mode)(void *priv); @@ -130,9 +135,16 @@ struct nbl_resource_ops { int (*get_queue_err_stats)(void *priv, u16 func_id, u8 queue_id, struct nbl_queue_err_stats 
*queue_err_stats, bool is_tx); void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + int (*get_eth_ctrl_stats)(void *priv, u32 eth_id, + struct nbl_eth_ctrl_stats *eth_ctrl_stats); void (*get_private_stat_len)(void *priv, u32 *len); - void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data); + void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data, u32 data_len); + int (*get_pause_stats)(void *priv, u32 eth_id, struct nbl_pause_stats *pause_stats); + int (*get_eth_mac_stats)(void *priv, u32 eth_id, struct nbl_eth_mac_stats *eth_mac_stats); + int (*get_rmon_stats)(void *priv, u32 eth_id, struct nbl_rmon_stats *rmon_stats); void (*fill_private_stat_strings)(void *priv, u8 *strings); + int (*get_eth_abnormal_stats)(void *priv, u32 eth_id, + struct nbl_eth_abnormal_stats *eth_abnormal_stats); u16 (*get_max_desc_num)(void); u16 (*get_min_desc_num)(void); u16 (*get_tx_desc_num)(void *priv, u32 ring_index); @@ -148,9 +160,11 @@ struct nbl_resource_ops { u16 num_net_msix, u16 level); void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir); + int (*set_rxfh_indir)(void *priv, u16 vsi_id, const u32 *indir, u32 indir_size); void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); void (*get_rxfh_rss_key)(void *priv, u8 *rss_key); - void (*get_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + void (*get_rss_alg_sel)(void *priv, u16 vsi_id, u8 *alg_sel); + int (*set_rss_alg_sel)(void *priv, u16 vsi_id, u8 alg_sel); int (*get_firmware_version)(void *priv, char *firmware_verion); int (*get_driver_info)(void *priv, struct nbl_driver_info *driver_info); int (*nway_reset)(void *priv, u8 eth_id); @@ -176,7 +190,6 @@ struct nbl_resource_ops { int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); int (*cfg_duppkt_info)(void *priv, struct nbl_lag_member_list_param *param); int (*cfg_duppkt_mcc)(void *priv, struct nbl_lag_member_list_param *param); - int (*cfg_lag_mcc)(void *priv, u16 eth_id, u16 lag_id, bool enable); int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable); @@ -193,14 +206,19 @@ struct nbl_resource_ops { void (*cfg_eth_bond_event)(void *priv, bool enable); void (*recv_port_notify)(void *priv, void *data); int (*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*get_fec_stats)(void *priv, u32 eth_id, struct nbl_fec_stats *fec_stats); int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*get_link_down_count)(void *priv, u8 eth_id, u64 *link_down_count); + int (*get_link_status_opcode)(void *priv, u8 eth_id, u32 *link_status_opcode); int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + int (*set_wol)(void *priv, u8 eth_id, bool enable); void (*adapt_desc_gother)(void *priv); + void (*set_desc_high_throughput)(void *priv); void (*flr_clear_net)(void *priv, u16 vfid); void (*flr_clear_queues)(void *priv, u16 vfid); void (*flr_clear_accel_flow)(void *priv, u16 
vfid); @@ -213,6 +231,14 @@ struct nbl_resource_ops { int (*set_bridge_mode)(void *priv, u16 func_id, u16 bmode); u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); u16 (*get_vf_vsi_id)(void *priv, u16 vsi_id, int vf_id); + bool (*check_vf_is_active)(void *priv, u16 func_id); + int (*check_vf_is_vdpa)(void *priv, u16 func_id, u8 *is_vdpa); + int (*get_vdpa_vf_stats)(void *priv, u16 func_id, struct nbl_vf_stats *vf_stats); + int (*get_uvn_pkt_drop_stats)(void *priv, u16 vsi_id, + u16 num_queues, u32 *uvn_stat_pkt_drop); + int (*get_ustore_pkt_drop_stats)(void *priv); + int (*get_ustore_total_pkt_drop_stats)(void *priv, u8 eth_id, + struct nbl_ustore_stats *ustore_stats); bool (*check_fw_heartbeat)(void *priv); bool (*check_fw_reset)(void *priv); @@ -274,11 +300,14 @@ struct nbl_resource_ops { void (*register_func_mac)(void *priv, u8 *mac, u16 func_id); int (*register_func_link_forced)(void *priv, u16 func_id, u8 link_forced, bool *should_notify); + int (*register_func_trust)(void *priv, u16 func_id, + bool trust, bool *should_notify); int (*register_func_vlan)(void *priv, u16 func_id, u16 vlan_tci, u16 vlan_proto, bool *should_notify); int (*register_func_rate)(void *priv, u16 func_id, int rate); int (*get_link_forced)(void *priv, u16 vsi_id); - int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate); + int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate, int burst); + int (*set_rx_rate)(void *priv, u16 func_id, int rx_rate, int burst); void (*get_driver_version)(void *priv, char *ver, int len); @@ -330,10 +359,7 @@ struct nbl_resource_ops { void (*get_flow_acl_switch)(void *priv, u8 *acl_enable); void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); - /* For virtio */ - void (*configure_virtio_dev_msix)(void *priv, u16 vector); void (*configure_rdma_msix_off)(void *priv, u16 vector); - void (*configure_virtio_dev_ready)(void *priv); int (*switchdev_init_cmdq)(void *priv); int (*switchdev_deinit_cmdq)(void *priv, u8 index); @@ -366,9 +392,29 @@ struct nbl_resource_ops { void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + int (*configure_rdma_bw)(void *priv, u8 eth_id, int rdma_bw); int (*set_eth_pfc)(void *priv, u8 eth_id, u8 *pfc); int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + int (*set_rate_limit)(void *priv, u16 func_id, enum nbl_traffic_type type, u32 rate); + int (*set_tc_wgt)(void *priv, u16 vsi_id, u8 *weight, u8 num_tc); + + u32 (*get_perf_dump_length)(void *priv); + u32 (*get_perf_dump_data)(void *priv, u8 *buffer, u32 size); + + void (*register_dev_name)(void *priv, u16 vsi_id, char *name); + void (*get_dev_name)(void *priv, u16 vsi_id, char *name); + + int (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id); + int (*configure_mirror)(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, u16 func_id, u8 mt_id); + int (*clear_mirror_cfg)(void *priv, u16 func_id); + void (*cfg_mirror_outputport_event)(void *priv, bool enable); + int (*check_flow_table_spec)(void *priv, u16 vlan_cnt, u16 unicast_cnt, u16 multicast_cnt); + u32 (*get_dvn_desc_req)(void *priv); + void (*set_dvn_desc_req)(void *priv, u32 desc_req); }; struct nbl_resource_ops_tbl { diff --git 
a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h index c7ba4b56d7e8..234f29387dc5 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan @@ -9,23 +9,14 @@ #include "nbl_include.h" -#define NBL_SERV_OPS_TBL_TO_OPS(serv_ops_tbl) ((serv_ops_tbl)->ops) -#define NBL_SERV_OPS_TBL_TO_PRIV(serv_ops_tbl) ((serv_ops_tbl)->priv) - -struct nbl_service_traffic_switch { - u16 normal_vsi; - u16 sync_other_vsi; - u16 async_other_vsi; - bool promisc; - bool has_lacp; - bool has_lldp; -}; +#define NBL_SERV_OPS_TBL_TO_OPS(serv_ops_tbl) ((serv_ops_tbl)->ops) +#define NBL_SERV_OPS_TBL_TO_PRIV(serv_ops_tbl) ((serv_ops_tbl)->priv) struct nbl_service_ops { - int (*init_chip_factory)(void *priv); - int (*destroy_chip_factory)(void *p); + int (*clear_mirrior_table)(void *p); int (*init_chip)(void *p); int (*destroy_chip)(void *p); + int (*init_p4)(void *priv); int (*configure_msix_map)(void *p, u16 num_net_msix, u16 num_others_msix, bool net_msix_mask_en); int (*destroy_msix_map)(void *priv); @@ -47,13 +38,13 @@ struct nbl_service_ops { int (*vsi_open)(void *priv, struct net_device *netdev, u16 vsi_index, u16 real_qps, bool use_napi); int (*vsi_stop)(void *priv, u16 vsi_index); - int (*switch_traffic_default_dest)(void *priv, struct nbl_service_traffic_switch *info); + int (*switch_traffic_default_dest)(void *priv, int op); int (*config_fd_flow_state)(void *priv, enum nbl_chan_fdir_rule_type type, u32 state); int (*netdev_open)(struct net_device *netdev); int (*netdev_stop)(struct net_device *netdev); - netdev_tx_t (*start_xmit)(struct sk_buff *skb, struct net_device *netdev); int (*change_mtu)(struct net_device *netdev, int new_mtu); + int (*change_rep_mtu)(struct net_device *netdev, int new_mtu); void (*get_stats64)(struct net_device *netdev, struct rtnl_link_stats64 *stats); void (*set_rx_mode)(struct net_device *dev); void (*change_rx_flags)(struct net_device *dev, int flag); @@ -64,23 +55,29 @@ struct nbl_service_ops { netdev_features_t (*features_check)(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); int (*setup_tc)(struct net_device *dev, enum tc_setup_type type, void *type_data); + int (*get_phys_port_name)(struct net_device *dev, char *name, size_t len); + int (*get_port_parent_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); int (*set_vf_spoofchk)(struct net_device *netdev, int vf_id, bool ena); - void (*tx_timeout)(struct net_device *netdev, u32 txqueue); - int (*bridge_setlink)(struct net_device *netdev, struct nlmsghdr *nlh, - u16 flags, struct netlink_ext_ack *extack); - - int (*bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask, int nlflags); int (*set_vf_link_state)(struct net_device *dev, int vf_id, int link_state); int (*set_vf_mac)(struct net_device *netdev, int vf_id, u8 *mac); int (*set_vf_rate)(struct net_device *netdev, int vf_id, int min_rate, int max_rate); int (*set_vf_vlan)(struct net_device *dev, int vf_id, u16 vlan, u8 pri, __be16 proto); int (*get_vf_config)(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi); + int (*get_vf_stats)(struct net_device *dev, int vf_id, struct ifla_vf_stats *vf_stats); + void (*tx_timeout)(struct net_device 
*netdev, u32 txqueue); + + int (*bridge_setlink)(struct net_device *netdev, struct nlmsghdr *nlh, + u16 flags, struct netlink_ext_ack *extack); + int (*bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags); u16 (*select_queue)(struct net_device *netdev, struct sk_buff *skb, struct net_device *sb_dev); - int (*get_phys_port_name)(struct net_device *dev, char *name, size_t len); - int (*get_port_parent_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); - + void (*get_eth_ctrl_stats)(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *eth_ctrl_stats); + void (*get_eth_mac_stats)(struct net_device *netdev, + struct ethtool_eth_mac_stats *eth_mac_stats); + void (*get_fec_stats)(struct net_device *netdev, struct ethtool_fec_stats *fec_stats); + int (*set_vf_trust)(struct net_device *netdev, int vf_id, bool trusted); int (*register_net)(void *priv, struct nbl_register_net_param *register_param, struct nbl_register_net_result *register_result); int (*unregister_net)(void *priv); @@ -92,6 +89,7 @@ struct nbl_service_ops { void (*remove_q2vsi)(void *priv, u16 vsi_id); int (*setup_rss)(void *priv, u16 vsi_id); void (*remove_rss)(void *priv, u16 vsi_id); + int (*setup_rss_indir)(void *priv, u16 vsi_id); int (*check_offload_status)(void *priv); u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 senser_id); int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_hwmon_type type); @@ -102,8 +100,12 @@ struct nbl_service_ops { int (*enable_napis)(void *priv, u16 vsi_index); void (*disable_napis)(void *priv, u16 vsi_index); void (*set_mask_en)(void *priv, bool enable); - int (*start_net_flow)(void *priv, struct net_device *dev, u16 vsi_id, u16 vid); + int (*start_net_flow)(void *priv, struct net_device *dev, u16 vsi_id, u16 vid, + bool trusted); void (*stop_net_flow)(void *priv, u16 vsi_id); + void (*clear_flow)(void *priv, u16 vsi_id); + int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + int (*cfg_multi_mcast)(void *priv, u16 vsi, u16 enable); int (*set_lldp_flow)(void *priv, u16 vsi_id); void (*remove_lldp_flow)(void *priv, u16 vsi_id); int (*start_mgt_flow)(void *priv); @@ -120,6 +122,9 @@ struct nbl_service_ops { int (*setup_net_resource_mgt)(void *priv, struct net_device *dev, u16 vlan_proto, u16 vlan_tci, u32 rate); void (*remove_net_resource_mgt)(void *priv); + int (*init_hw_stats)(void *priv); + int (*remove_hw_stats)(void *priv); + int (*get_rx_dropped)(void *priv, u64 *rx_dropped); int (*enable_lag_protocol)(void *priv, u16 eth_id, bool lag_en); int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, enum netdev_lag_hash hash_type); @@ -132,6 +137,7 @@ struct nbl_service_ops { bool open, bool is_force); int (*get_board_id)(void *priv); void (*cfg_eth_bond_event)(void *priv, bool enable); + void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); /* rep associated */ int (*rep_netdev_open)(struct net_device *netdev); @@ -188,6 +194,9 @@ struct nbl_service_ops { void (*get_channels)(struct net_device *netdev, struct ethtool_channels *channels); int (*set_channels)(struct net_device *netdev, struct ethtool_channels *channels); u32 (*get_link)(struct net_device *netdev); + int (*get_link_ext_state)(struct net_device *netdev, + struct ethtool_link_ext_state_info *link_ext_state_info); + void (*get_link_ext_stats)(struct net_device *netdev, struct ethtool_link_ext_stats *stats); int (*get_ksettings)(struct net_device *netdev, struct ethtool_link_ksettings *cmd); 
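The get_eth_mac_stats hook above plugs into ethtool's standardized IEEE 802.3 MAC statistics group. A minimal sketch of the translation from the driver-private struct nbl_eth_mac_stats (defined earlier in this patch) into the kernel's struct ethtool_eth_mac_stats, assuming a hypothetical nbl_query_eth_mac_stats() helper in place of the real dispatch call:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void nbl_serv_get_eth_mac_stats(struct net_device *netdev,
				       struct ethtool_eth_mac_stats *mac_stats)
{
	struct nbl_eth_mac_stats nstats = {};

	/* assumed helper; the real driver would go through the service/
	 * dispatch ops added in this patch
	 */
	if (nbl_query_eth_mac_stats(netdev, &nstats))
		return;

	/* ethtool_eth_mac_stats uses the IEEE 802.3 aFrames* names */
	mac_stats->FramesTransmittedOK = nstats.frames_txd_ok;
	mac_stats->FramesReceivedOK = nstats.frames_rxd_ok;
	mac_stats->OctetsTransmittedOK = nstats.octets_txd_ok;
	mac_stats->OctetsReceivedOK = nstats.octets_rxd_ok;
	mac_stats->MulticastFramesXmittedOK = nstats.multicast_frames_txd_ok;
	mac_stats->BroadcastFramesXmittedOK = nstats.broadcast_frames_txd_ok;
	mac_stats->MulticastFramesReceivedOK = nstats.multicast_frames_rxd_ok;
	mac_stats->BroadcastFramesReceivedOK = nstats.broadcast_frames_rxd_ok;
}

Counters the hardware does not report are simply left at the uninitialized sentinel so ethtool marks them as not supported.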
int (*set_ksettings)(struct net_device *netdev, const struct ethtool_link_ksettings *cmd); void (*get_ringparam)(struct net_device *netdev, struct ethtool_ringparam *ringparam, @@ -197,6 +206,10 @@ struct nbl_service_ops { struct kernel_ethtool_ringparam *k_ringparam, struct netlink_ext_ack *extack); + int (*flash_device)(struct net_device *netdev, struct ethtool_flash *flash); + int (*get_dump_flag)(struct net_device *netdev, struct ethtool_dump *dump); + int (*get_dump_data)(struct net_device *netdev, struct ethtool_dump *dump, void *buffer); + int (*set_dump)(struct net_device *netdev, struct ethtool_dump *dump); int (*get_coalesce)(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_ec, struct netlink_ext_ack *extack); @@ -209,6 +222,7 @@ struct nbl_service_ops { u32 (*get_rxfh_indir_size)(struct net_device *netdev); u32 (*get_rxfh_key_size)(struct net_device *netdev); int (*get_rxfh)(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); + int (*set_rxfh)(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc); u32 (*get_msglevel)(struct net_device *netdev); void (*set_msglevel)(struct net_device *netdev, u32 msglevel); int (*get_regs_len)(struct net_device *netdev); @@ -221,6 +235,9 @@ struct nbl_service_ops { void (*self_test)(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data); u32 (*get_priv_flags)(struct net_device *netdev); int (*set_priv_flags)(struct net_device *netdev, u32 priv_flags); + void (*get_pause_stats)(struct net_device *netdev, struct ethtool_pause_stats *pause_stats); + void (*get_rmon_stats)(struct net_device *netdev, struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **range); int (*set_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); void (*get_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); int (*set_fec_param)(struct net_device *netdev, struct ethtool_fecparam *fec); @@ -232,6 +249,8 @@ struct nbl_service_ops { int (*get_rep_sset_count)(struct net_device *netdev, int sset); void (*get_rep_ethtool_stats)(struct net_device *netdev, struct ethtool_stats *stats, u64 *data); + void (*get_wol)(struct net_device *netdev, struct ethtool_wolinfo *wol); + int (*set_wol)(struct net_device *netdev, struct ethtool_wolinfo *wol); u16 (*get_rdma_cap_num)(void *priv); void (*setup_rdma_id)(void *priv); @@ -258,6 +277,7 @@ struct nbl_service_ops { int (*update_devlink_flash)(struct devlink *devlink, struct devlink_flash_update_params *params, struct netlink_ext_ack *extack); + u32 (*get_adminq_tx_buf_size)(void *priv); int (*emp_console_write)(void *priv, char *buf, size_t count); bool (*check_fw_heartbeat)(void *priv); @@ -292,6 +312,16 @@ struct nbl_service_ops { u16 (*get_vf_base_vsi_id)(void *priv, u16 func_id); int (*setup_vf_config)(void *priv, int num_vfs, bool is_flush); void (*remove_vf_config)(void *priv); + void (*register_dev_name)(void *priv, u16 vsi_id, char *name); + void (*get_dev_name)(void *priv, u16 vsi_id, char *name); + + void (*get_mirror_table_id)(void *priv, u16 vsi_id, int dir, bool mirror_en, + u8 *mt_id); + int (*configure_mirror)(void *priv, u16 func_id, bool mirror_en, int dir, + u8 mt_id); + int (*configure_mirror_table)(void *priv, bool mirror_en, u16 func_id, u8 mt_id); + int (*clear_mirror_cfg)(void *priv, u16 func_id); + int (*setup_vf_resource)(void *priv, int num_vfs); void (*remove_vf_resource)(void *priv); void (*cfg_fd_update_event)(void *priv, bool enable); @@ -300,9 +330,38 
@@ struct nbl_service_ops { int (*set_xdp)(struct net_device *netdev, struct netdev_bpf *xdp); void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); - int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); - int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + void (*get_rdma_bw)(void *priv, int *rdma_bw); + void (*get_rdma_rate)(void *priv, int *rdma_rate); + void (*get_net_rate)(void *priv, int *net_rate); + int (*configure_rdma_bw)(void *priv, u8 eth_id, int rdma_bw); + int (*configure_pfc)(void *priv, u8 eth_id, u8 *pfc); + int (*configure_trust)(void *priv, u8 eth_id, u8 trust); + int (*configure_dscp2prio)(void *priv, u8 eth_id, const char *buf, size_t count); int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + int (*set_rate_limit)(void *priv, enum nbl_traffic_type type, u32 rate); + ssize_t (*trust_mode_show)(void *priv, u8 eth_id, char *buf); + ssize_t (*pfc_show)(void *priv, u8 eth_id, char *buf); + ssize_t (*dscp2prio_show)(void *priv, u8 eth_id, char *buf); + ssize_t (*pfc_buffer_size_show)(void *priv, u8 eth_id, char *buf); + + /* dcb nl ops */ + int (*ieee_setets)(struct net_device *netdev, struct ieee_ets *ets); + int (*ieee_getets)(struct net_device *netdev, struct ieee_ets *ets); + int (*ieee_setpfc)(struct net_device *netdev, struct ieee_pfc *pfc); + int (*ieee_getpfc)(struct net_device *netdev, struct ieee_pfc *pfc); + int (*ieee_setapp)(struct net_device *netdev, struct dcb_app *app); + int (*ieee_delapp)(struct net_device *netdev, struct dcb_app *app); + void (*dcbnl_getpfccfg)(struct net_device *netdev, int prio, u8 *setting); + void (*dcbnl_setpfccfg)(struct net_device *netdev, int prio, u8 set); + int (*dcbnl_getnumtcs)(struct net_device *netdev, int tcid, u8 *num); + u8 (*ieee_getdcbx)(struct net_device *netdev); + u8 (*ieee_setdcbx)(struct net_device *netdev, u8 mode); + u8 (*dcbnl_getstate)(struct net_device *netdev); + u8 (*dcbnl_setstate)(struct net_device *netdev, u8 state); + u8 (*dcbnl_getpfcstate)(struct net_device *netdev); + u8 (*dcbnl_getcap)(struct net_device *netdev, int capid, u8 *cap); + u16 (*get_vf_function_id)(void *priv, int vf_id); + void (*cfg_mirror_outputport_event)(void *priv, bool enable); }; struct nbl_service_ops_tbl { diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h index b81fa26728bf..ba5ccb34a15a 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0*/ /* * Copyright (c) 2022 nebula-matrix Limited. 
* Author: Bennie Yan @@ -27,6 +27,7 @@ #include #include #include +#include #include #ifdef CONFIG_TLS_DEVICE #include @@ -34,7 +35,6 @@ #include #include #include - #include #include #include @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -51,6 +50,8 @@ #include #include #include +#include +#include /* ------ Basic definitions ------- */ #define NBL_DRIVER_NAME "nbl_core" @@ -64,7 +65,7 @@ */ #define NBL_DRIVER_VERSION "1-1.1.100.0" -#define NBL_DRIVER_DEV_MAX 8 +#define NBL_DRIVER_DEV_MAX 24 #define NBL_PAIR_ID_GET_TX(id) ((id) * 2 + 1) #define NBL_PAIR_ID_GET_RX(id) ((id) * 2) @@ -79,6 +80,7 @@ #define NBL_RATE_MBPS_100G (100000) #define NBL_RATE_MBPS_25G (25000) +#define NBL_RATE_MBPS_10G (10000) #define NBL_NEXT_ID(id, max) ({ typeof(id) _id = (id); ((_id) == (max) ? 0 : (_id) + 1); }) #define NBL_IPV6_U32LEN 4 @@ -94,103 +96,112 @@ #define NBL_IP_VERSION_V4 4 #define NBL_IP_VERSION_V6 6 #define NBL_MAX_FUNC (520) +#define NBL_MAX_MTU 15 + +#define NBL_FLOW_TABLE_IPV4_DEFAULT_MASK 0xFFFFFFFF +#define NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK 0xFFFF +#define NBL_TC_MAX_PED_H_IDX 512 + +#define NBL_TC_PEDIT_SET_NODE_RES_PRO(node) ((node).pedit_proto = 1) +#define NBL_TC_PEDIT_GET_NODE_RES_PRO(node) ((node).pedit_proto) + +#define NBL_TC_PEDIT_INC_NODE_RES_EDITS(node) ((node).pedits++) +#define NBL_TC_PEDIT_DEC_NODE_RES_EDITS(node, dec) ((node).pedits -= dec) /* key element: key flag bitmap */ -#define NBL_FLOW_KEY_TABLE_IDX_FLAG BIT_ULL(0) -#define NBL_FLOW_KEY_INPORT8_FLAG BIT_ULL(1) -#define NBL_FLOW_KEY_INPORT4_FLAG BIT_ULL(39) -#define NBL_FLOW_KEY_INPORT2_FLAG BIT_ULL(40) // error -#define NBL_FLOW_KEY_INPORT2L_FLAG BIT_ULL(41) // error -#define NBL_FLOW_KEY_T_DIPV4_FLAG BIT_ULL(2) -#define NBL_FLOW_KEY_T_DIPV6_FLAG BIT_ULL(3) -#define NBL_FLOW_KEY_T_OPT_DATA_FLAG BIT_ULL(4) -#define NBL_FLOW_KEY_T_VNI_FLAG BIT_ULL(5) -#define NBL_FLOW_KEY_T_DSTMAC_FLAG BIT_ULL(6) // error -#define NBL_FLOW_KEY_T_SRCMAC_FLAG BIT_ULL(7) // error -#define NBL_FLOW_KEY_T_SVLAN_FLAG BIT_ULL(8) // error -#define NBL_FLOW_KEY_T_CVLAN_FLAG BIT_ULL(9) // error -#define NBL_FLOW_KEY_T_ETHERTYPE_FLAG BIT_ULL(10) // error -#define NBL_FLOW_KEY_T_SRCPORT_FLAG BIT_ULL(11) -#define NBL_FLOW_KEY_T_DSTPORT_FLAG BIT_ULL(12) -#define NBL_FLOW_KEY_T_NPROTO_FLAG BIT_ULL(13) // delete -#define NBL_FLOW_KEY_T_OPT_CLASS_FLAG BIT_ULL(14) -#define NBL_FLOW_KEY_T_PROTOCOL_FLAG BIT_ULL(15) -#define NBL_FLOW_KEY_T_TCPSTAT_FLAG BIT_ULL(16) // delete -#define NBL_FLOW_KEY_T_TOS_FLAG BIT_ULL(17) -#define NBL_FLOW_KEY_T_TTL_FLAG BIT_ULL(18) -#define NBL_FLOW_KEY_SIPV4_FLAG BIT_ULL(19) -#define NBL_FLOW_KEY_SIPV6_FLAG BIT_ULL(20) -#define NBL_FLOW_KEY_DIPV4_FLAG BIT_ULL(21) -#define NBL_FLOW_KEY_DIPV6_FLAG BIT_ULL(22) -#define NBL_FLOW_KEY_DSTMAC_FLAG BIT_ULL(23) -#define NBL_FLOW_KEY_SRCMAC_FLAG BIT_ULL(24) -#define NBL_FLOW_KEY_SVLAN_FLAG BIT_ULL(25) -#define NBL_FLOW_KEY_CVLAN_FLAG BIT_ULL(26) -#define NBL_FLOW_KEY_ETHERTYPE_FLAG BIT_ULL(27) -#define NBL_FLOW_KEY_SRCPORT_FLAG BIT_ULL(28) -#define NBL_FLOW_KEY_ICMP_TYPE_FLAG BIT_ULL(28) -#define NBL_FLOW_KEY_DSTPORT_FLAG BIT_ULL(29) -#define NBL_FLOW_KEY_ICMP_CODE_FLAG BIT_ULL(29) -#define NBL_FLOW_KEY_ARP_OP_FLAG BIT_ULL(30) // error -#define NBL_FLOW_KEY_ICMPV6_TYPE_FLAG BIT_ULL(31) // error -#define NBL_FLOW_KEY_PROTOCOL_FLAG BIT_ULL(32) -#define NBL_FLOW_KEY_TCPSTAT_FLAG BIT_ULL(33) -#define NBL_FLOW_KEY_TOS_FLAG BIT_ULL(34) -#define NBL_FLOW_KEY_DSCP_FLAG BIT_ULL(34) -#define NBL_FLOW_KEY_TTL_FLAG BIT_ULL(35) -#define 
NBL_FLOW_KEY_HOPLIMIT_FLAG BIT_ULL(35) -#define NBL_FLOW_KEY_RDMA_ACK_SEQ_FLAG BIT_ULL(36) // error -#define NBL_FLOW_KEY_RDMA_QPN_FLAG BIT_ULL(37) // error -#define NBL_FLOW_KEY_RDMA_OP_FLAG BIT_ULL(38) // error -#define NBL_FLOW_KEY_EXEHASH_FLAG BIT_ULL(43) -#define NBL_FLOW_KEY_DPHASH_FLAG BIT_ULL(44) -#define NBL_FLOW_KEY_RECIRC_FLAG BIT_ULL(63) +#define NBL_FLOW_KEY_TABLE_IDX_FLAG (BIT_ULL(0)) +#define NBL_FLOW_KEY_INPORT8_FLAG (BIT_ULL(1)) +#define NBL_FLOW_KEY_INPORT4_FLAG (BIT_ULL(39)) +#define NBL_FLOW_KEY_INPORT2_FLAG (BIT_ULL(40)) // error +#define NBL_FLOW_KEY_INPORT2L_FLAG (BIT_ULL(41)) // error +#define NBL_FLOW_KEY_T_DIPV4_FLAG (BIT_ULL(2)) +#define NBL_FLOW_KEY_T_DIPV6_FLAG (BIT_ULL(3)) +#define NBL_FLOW_KEY_T_OPT_DATA_FLAG (BIT_ULL(4)) +#define NBL_FLOW_KEY_T_VNI_FLAG (BIT_ULL(5)) +#define NBL_FLOW_KEY_T_DSTMAC_FLAG (BIT_ULL(6)) // error +#define NBL_FLOW_KEY_T_SRCMAC_FLAG (BIT_ULL(7)) // error +#define NBL_FLOW_KEY_T_SVLAN_FLAG (BIT_ULL(8)) // error +#define NBL_FLOW_KEY_T_CVLAN_FLAG (BIT_ULL(9)) // error +#define NBL_FLOW_KEY_T_ETHERTYPE_FLAG (BIT_ULL(10)) // error +#define NBL_FLOW_KEY_T_SRCPORT_FLAG (BIT_ULL(11)) +#define NBL_FLOW_KEY_T_DSTPORT_FLAG (BIT_ULL(12)) +#define NBL_FLOW_KEY_T_NPROTO_FLAG (BIT_ULL(13)) // delete +#define NBL_FLOW_KEY_T_OPT_CLASS_FLAG (BIT_ULL(14)) +#define NBL_FLOW_KEY_T_PROTOCOL_FLAG (BIT_ULL(15)) +#define NBL_FLOW_KEY_T_TCPSTAT_FLAG (BIT_ULL(16)) // delete +#define NBL_FLOW_KEY_T_TOS_FLAG (BIT_ULL(17)) +#define NBL_FLOW_KEY_T_TTL_FLAG (BIT_ULL(18)) +#define NBL_FLOW_KEY_SIPV4_FLAG (BIT_ULL(19)) +#define NBL_FLOW_KEY_SIPV6_FLAG (BIT_ULL(20)) +#define NBL_FLOW_KEY_DIPV4_FLAG (BIT_ULL(21)) +#define NBL_FLOW_KEY_DIPV6_FLAG (BIT_ULL(22)) +#define NBL_FLOW_KEY_DSTMAC_FLAG (BIT_ULL(23)) +#define NBL_FLOW_KEY_SRCMAC_FLAG (BIT_ULL(24)) +#define NBL_FLOW_KEY_SVLAN_FLAG (BIT_ULL(25)) +#define NBL_FLOW_KEY_CVLAN_FLAG (BIT_ULL(26)) +#define NBL_FLOW_KEY_ETHERTYPE_FLAG (BIT_ULL(27)) +#define NBL_FLOW_KEY_SRCPORT_FLAG (BIT_ULL(28)) +#define NBL_FLOW_KEY_ICMP_TYPE_FLAG (BIT_ULL(28)) +#define NBL_FLOW_KEY_DSTPORT_FLAG (BIT_ULL(29)) +#define NBL_FLOW_KEY_ICMP_CODE_FLAG (BIT_ULL(29)) +#define NBL_FLOW_KEY_ARP_OP_FLAG (BIT_ULL(30)) // error +#define NBL_FLOW_KEY_ICMPV6_TYPE_FLAG (BIT_ULL(31)) // error +#define NBL_FLOW_KEY_PROTOCOL_FLAG (BIT_ULL(32)) +#define NBL_FLOW_KEY_TCPSTAT_FLAG (BIT_ULL(33)) +#define NBL_FLOW_KEY_TOS_FLAG (BIT_ULL(34)) +#define NBL_FLOW_KEY_DSCP_FLAG (BIT_ULL(34)) +#define NBL_FLOW_KEY_TTL_FLAG (BIT_ULL(35)) +#define NBL_FLOW_KEY_HOPLIMIT_FLAG (BIT_ULL(35)) +#define NBL_FLOW_KEY_RDMA_ACK_SEQ_FLAG (BIT_ULL(36)) // error +#define NBL_FLOW_KEY_RDMA_QPN_FLAG (BIT_ULL(37)) // error +#define NBL_FLOW_KEY_RDMA_OP_FLAG (BIT_ULL(38)) // error +#define NBL_FLOW_KEY_EXEHASH_FLAG (BIT_ULL(43)) +#define NBL_FLOW_KEY_DPHASH_FLAG (BIT_ULL(44)) +#define NBL_FLOW_KEY_RECIRC_FLAG (BIT_ULL(63)) /* action flag */ -#define NBL_FLOW_ACTION_METADATA_FLAG BIT_ULL(1) -#define NBL_FLOW_ACTION_DROP BIT_ULL(2) -#define NBL_FLOW_ACTION_REDIRECT BIT_ULL(3) -#define NBL_FLOW_ACTION_MIRRED BIT_ULL(4) -#define NBL_FLOW_ACTION_TUNNEL_ENCAP BIT_ULL(5) -#define NBL_FLOW_ACTION_TUNNEL_DECAP BIT_ULL(6) -#define NBL_FLOW_ACTION_COUNTER BIT_ULL(7) -#define NBL_FLOW_ACTION_SET_IPV4_SRC_IP BIT_ULL(8) -#define NBL_FLOW_ACTION_SET_IPV4_DST_IP BIT_ULL(9) -#define NBL_FLOW_ACTION_SET_IPV6_SRC_IP BIT_ULL(10) -#define NBL_FLOW_ACTION_SET_IPV6_DST_IP BIT_ULL(11) -#define NBL_FLOW_ACTION_SET_SRC_MAC BIT_ULL(12) -#define NBL_FLOW_ACTION_SET_DST_MAC BIT_ULL(13) -#define 
NBL_FLOW_ACTION_SET_SRC_PORT BIT_ULL(14) -#define NBL_FLOW_ACTION_SET_DST_PORT BIT_ULL(15) -#define NBL_FLOW_ACTION_SET_TTL BIT_ULL(16) -#define NBL_FLOW_ACTION_SET_IPV4_DSCP BIT_ULL(17) -#define NBL_FLOW_ACTION_SET_IPV6_DSCP BIT_ULL(18) -#define NBL_FLOW_ACTION_RSS BIT_ULL(19) -#define NBL_FLOW_ACTION_QUEUE BIT_ULL(20) -#define NBL_FLOW_ACTION_MARK BIT_ULL(21) -#define NBL_FLOW_ACTION_PUSH_INNER_VLAN BIT_ULL(22) -#define NBL_FLOW_ACTION_PUSH_OUTER_VLAN BIT_ULL(23) -#define NBL_FLOW_ACTION_POP_INNER_VLAN BIT_ULL(24) -#define NBL_FLOW_ACTION_POP_OUTER_VLAN BIT_ULL(25) -#define NBL_FLOW_ACTION_REPLACE_INNER_VLAN BIT_ULL(26) -#define NBL_FLOW_ACTION_REPLACE_SINGLE_INNER_VLAN BIT_ULL(27) -#define NBL_FLOW_ACTION_REPLACE_OUTER_VLAN BIT_ULL(28) -#define NBL_FLOW_ACTION_PHY_PORT BIT_ULL(29) -#define NBL_FLOW_ACTION_PORT_ID BIT_ULL(30) -#define NBL_FLOW_ACTION_INGRESS BIT_ULL(31) -#define NBL_FLOW_ACTION_EGRESS BIT_ULL(32) -#define NBL_FLOW_ACTION_IPV4 BIT_ULL(33) -#define NBL_FLOW_ACTION_IPV6 BIT_ULL(34) -#define NBL_FLOW_ACTION_CAR BIT_ULL(35) -#define NBL_FLOW_ACTION_MCC BIT_ULL(36) -#define NBL_FLOW_ACTION_MIRRED_ENCAP BIT_ULL(37) -#define NBL_FLOW_ACTION_META_RECIRC BIT_ULL(38) -#define NBL_FLOW_ACTION_STAT BIT_ULL(39) -#define NBL_ACTION_FLAG_OFFSET_MAX BIT_ULL(40) - +#define NBL_FLOW_ACTION_METADATA_FLAG (BIT_ULL(1)) +#define NBL_FLOW_ACTION_DROP (BIT_ULL(2)) +#define NBL_FLOW_ACTION_REDIRECT (BIT_ULL(3)) +#define NBL_FLOW_ACTION_MIRRED (BIT_ULL(4)) +#define NBL_FLOW_ACTION_TUNNEL_ENCAP (BIT_ULL(5)) +#define NBL_FLOW_ACTION_TUNNEL_DECAP (BIT_ULL(6)) +#define NBL_FLOW_ACTION_COUNTER (BIT_ULL(7)) +#define NBL_FLOW_ACTION_SET_IPV4_SRC_IP (BIT_ULL(8)) +#define NBL_FLOW_ACTION_SET_IPV4_DST_IP (BIT_ULL(9)) +#define NBL_FLOW_ACTION_SET_IPV6_SRC_IP (BIT_ULL(10)) +#define NBL_FLOW_ACTION_SET_IPV6_DST_IP (BIT_ULL(11)) +#define NBL_FLOW_ACTION_SET_SRC_MAC (BIT_ULL(12)) +#define NBL_FLOW_ACTION_SET_DST_MAC (BIT_ULL(13)) +#define NBL_FLOW_ACTION_SET_SRC_PORT (BIT_ULL(14)) +#define NBL_FLOW_ACTION_SET_DST_PORT (BIT_ULL(15)) +#define NBL_FLOW_ACTION_SET_TTL (BIT_ULL(16)) +#define NBL_FLOW_ACTION_SET_IPV4_DSCP (BIT_ULL(17)) +#define NBL_FLOW_ACTION_SET_IPV6_DSCP (BIT_ULL(18)) +#define NBL_FLOW_ACTION_RSS (BIT_ULL(19)) +#define NBL_FLOW_ACTION_QUEUE (BIT_ULL(20)) +#define NBL_FLOW_ACTION_MARK (BIT_ULL(21)) +#define NBL_FLOW_ACTION_PUSH_INNER_VLAN (BIT_ULL(22)) +#define NBL_FLOW_ACTION_PUSH_OUTER_VLAN (BIT_ULL(23)) +#define NBL_FLOW_ACTION_POP_INNER_VLAN (BIT_ULL(24)) +#define NBL_FLOW_ACTION_POP_OUTER_VLAN (BIT_ULL(25)) +#define NBL_FLOW_ACTION_REPLACE_INNER_VLAN (BIT_ULL(26)) +#define NBL_FLOW_ACTION_REPLACE_SINGLE_INNER_VLAN (BIT_ULL(27)) +#define NBL_FLOW_ACTION_REPLACE_OUTER_VLAN (BIT_ULL(28)) +#define NBL_FLOW_ACTION_PHY_PORT (BIT_ULL(29)) +#define NBL_FLOW_ACTION_PORT_ID (BIT_ULL(30)) +#define NBL_FLOW_ACTION_INGRESS (BIT_ULL(31)) +#define NBL_FLOW_ACTION_EGRESS (BIT_ULL(32)) +#define NBL_FLOW_ACTION_IPV4 (BIT_ULL(33)) +#define NBL_FLOW_ACTION_IPV6 (BIT_ULL(34)) +#define NBL_FLOW_ACTION_CAR (BIT_ULL(35)) +#define NBL_FLOW_ACTION_MCC (BIT_ULL(36)) +#define NBL_FLOW_ACTION_MIRRED_ENCAP (BIT_ULL(37)) +#define NBL_FLOW_ACTION_META_RECIRC (BIT_ULL(38)) +#define NBL_FLOW_ACTION_STAT (BIT_ULL(39)) +#define NBL_ACTION_FLAG_OFFSET_MAX (BIT_ULL(40)) extern struct list_head lag_resource_head; extern struct mutex nbl_lag_mutex; - #define SET_DEV_MIN_MTU(netdev, mtu) ((netdev)->min_mtu = (mtu)) #define SET_DEV_MAX_MTU(netdev, mtu) ((netdev)->max_mtu = (mtu)) @@ -211,8 +222,6 @@ do { \ enum nbl_product_type { 
NBL_LEONIS_TYPE, - NBL_BOOTIS_TYPE, - NBL_VIRTIO_TYPE, NBL_PRODUCT_MAX, }; @@ -230,7 +239,6 @@ enum nbl_fix_cap_type { NBL_TASK_CLEAN_ADMINDQ_CAP, NBL_TASK_CLEAN_MAILBOX_CAP, NBL_TASK_IPSEC_AGE_CAP, - NBL_VIRTIO_CAP, NBL_ETH_SUPPORT_NRZ_RS_FEC_544, NBL_RESTOOL_CAP, NBL_HWMON_TEMP_CAP, @@ -245,15 +253,14 @@ enum nbl_fix_cap_type { NBL_TASK_RESET_CAP, NBL_TASK_RESET_CTRL_CAP, NBL_QOS_SYSFS_CAP, + NBL_MIRROR_SYSFS_CAP, + NBL_HIGH_THROUGHPUT_CAP, + NBL_TASK_HEALTH_REPORT_TEMP_CAP, + NBL_TASK_HEALTH_REPORT_REBOOT_CAP, + NBL_DVN_DESC_REQ_SYSFS_CAP, NBL_FIX_CAP_NBITS }; -enum nbl_bootis_port_id { - NBL_PORT_ETH0 = 0, - NBL_PORT_ETH1, - NBL_PORT_MAX, -}; - enum nbl_sfp_module_state { NBL_SFP_MODULE_OFF, NBL_SFP_MODULE_ON, @@ -304,7 +311,8 @@ struct nbl_func_caps { u32 support_lag:1; u32 has_grc:1; u32 has_factory_ctrl:1; - u32 rsv:24; + u32 is_ocp:1; + u32 rsv:23; }; struct nbl_init_param { @@ -465,6 +473,10 @@ struct nbl_rx_queue_stats { u64 tls_decrypted_packets; u64 tls_resync_req_num; #endif + u64 xdp_tx_packets; + u64 xdp_redirect_packets; + u64 xdp_oversize_packets; + u64 xdp_drop_packets; }; struct nbl_stats { @@ -478,6 +490,10 @@ struct nbl_stats { u64 tx_dma_busy; u64 tx_multicast_packets; u64 tx_unicast_packets; + u64 xdp_tx_packets; + u64 xdp_redirect_packets; + u64 xdp_oversize_packets; + u64 xdp_drop_packets; #ifdef CONFIG_TLS_DEVICE u64 tls_encrypted_packets; u64 tls_encrypted_bytes; @@ -508,6 +524,44 @@ struct nbl_priv_stats { u64 total_uvn_stat_pkt_drop; }; +struct nbl_vf_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u64 broadcast; + u64 multicast; + u64 rx_dropped; + u64 tx_dropped; +}; + +struct nbl_ustore_stats { + u64 rx_drop_packets; + u64 rx_trun_packets; +}; + +struct nbl_hw_stats { + u64 *total_uvn_stat_pkt_drop; + struct nbl_ustore_stats start_ustore_stats; +}; + +struct nbl_eth_abnormal_stats { + /* detailed rx_errors: */ + u64 rx_length_errors; + u64 rx_over_errors; + u64 rx_crc_errors; + u64 rx_frame_errors; + u64 rx_fifo_errors; + u64 rx_missed_errors; + + /* detailed tx_errors */ + u64 tx_aborted_errors; + u64 tx_carrier_errors; + u64 tx_fifo_errors; + u64 tx_heartbeat_errors; + u64 tx_window_errors; +}; + struct nbl_notify_param { u16 notify_qid; u16 tail_ptr; @@ -652,31 +706,6 @@ struct nbl_ctrl_irq_num { int abnormal_irq_num; }; -#define NBL_PORT_KEY_ILLEGAL 0x0 -#define NBL_PORT_KEY_CAPABILITIES 0x1 -#define NBL_PORT_KEY_ENABLE 0x2 /* BIT(0): NBL_PORT_FLAG_ENABLE_NOTIFY */ -#define NBL_PORT_KEY_DISABLE 0x3 -#define NBL_PORT_KEY_ADVERT 0x4 -#define NBL_PORT_KEY_LOOPBACK 0x5 /* 0: disable eth loopback, 1: enable eth loopback */ -#define NBL_PORT_KEY_MODULE_SWITCH 0x6 /* 0: sfp off, 1: sfp on */ -#define NBL_PORT_KEY_MAC_ADDRESS 0x7 -#define NBL_PORT_KRY_LED_BLINK 0x8 -#define NBL_PORT_KEY_RESTORE_DEFAULTE_CFG 11 -#define NBL_PORT_KEY_SET_PFC_CFG 12 - -enum { - NBL_PORT_SUBOP_READ = 1, - NBL_PORT_SUBOP_WRITE = 2, -}; - -#define NBL_PORT_FLAG_ENABLE_NOTIFY BIT(0) -#define NBL_PORT_ENABLE_LOOPBACK 1 -#define NBL_PORT_DISABLE_LOOPBCK 0 -#define NBL_PORT_SFP_ON 1 -#define NBL_PORT_SFP_OFF 0 -#define NBL_PORT_KEY_KEY_SHIFT 56 -#define NBL_PORT_KEY_DATA_MASK 0xFFFFFFFFFFFF - enum nbl_flow_ctrl { NBL_PORT_TX_PAUSE = 0x1, NBL_PORT_RX_PAUSE = 0x2, @@ -806,6 +835,24 @@ enum nbl_fw_port_speed { NBL_FW_PORT_SPEED_100G, }; +static inline u32 nbl_port_speed_to_speed(enum nbl_fw_port_speed port_speed) +{ + switch (port_speed) { + case NBL_FW_PORT_SPEED_10G: + return SPEED_10000; + case NBL_FW_PORT_SPEED_25G: + return SPEED_25000; + case 
NBL_FW_PORT_SPEED_50G: + return SPEED_50000; + case NBL_FW_PORT_SPEED_100G: + return SPEED_100000; + default: + return SPEED_25000; + } + + return SPEED_25000; +} + #define PASSTHROUGH_FW_CMD_DATA_LEN (3072) struct nbl_passthrough_fw_cmd_param { u16 opcode; @@ -902,9 +949,6 @@ static inline int nbl_##_struct##_size_is_not_equal_to_define(void) \ return check[0]; \ } -#define nbl_list_entry_is_head(pos, head, member) \ - (&pos->member == (head)) - /** * list_is_first -- tests whether @ list is the first entry in list @head * @list: the entry to test @@ -936,50 +980,6 @@ static inline int nbl_list_empty(const struct list_head *head) return READ_ONCE(head->next) == head; } -/** - * nbl_read_poll_timeout - Periodically poll an address until a condition is - * met or a timeout occurs - * @op: accessor function (takes @args as its arguments) - * @val: Variable to read the value into - * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). - * @timeout_us: Timeout in us, 0 means never timeout - * @sleep_before_read: if it is true, sleep @sleep_us before read. - * @args: arguments for @op poll - * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. Must not - * be called from atomic context if sleep_us or timeout_us are used. - * - * When available, you'll probably want to use one of the specialized - * macros defined below rather than this macro directly. - */ -#define nbl_read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ - sleep_before_read, args...) \ -({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __sleep_us = (sleep_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - might_sleep_if((__sleep_us) != 0); \ - if (sleep_before_read && __sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - for (;;) { \ - (val) = op(args); \ - if (cond) \ - break; \ - if (__timeout_us && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - (val) = op(args); \ - break; \ - } \ - if (__sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - } \ - (cond) ? 0 : -ETIMEDOUT; \ -}) - #define NBL_OPS_CALL(func, para) \ ({ typeof(func) _func = (func); \ (!_func) ? 
0 : _func para; }); @@ -992,8 +992,8 @@ enum { }; struct nbl_tc_port { - u32 id; - u8 type; + u32 id:24; + u32 type:8; }; enum nbl_cmd_status { @@ -1051,8 +1051,8 @@ struct nbl_tc_fdir_tnl { }; struct nbl_port_mcc { - u16 dport_id; - u8 port_type; + u16 dport_id:12; + u16 port_type:4; }; #define NBL_VLAN_TYPE_ETH_BASE 1027 @@ -1109,35 +1109,78 @@ union nbl_flow_encap_offset_tbl_u { u32 data[NBL_FLOW_ENCAP_OFFSET_TBL_WIDTH]; } __packed; +struct nbl_tc_pedit_headers { + struct ethhdr eth; + struct iphdr ip4; + struct ipv6hdr ip6; + struct tcphdr tcp; + struct udphdr udp; +}; + +enum nbl_flow_ped_type { + /* pedit type: defaults to the src direction for ip & mac types */ + NBL_FLOW_PED_UMAC_TYPE = 0, + NBL_FLOW_PED_DMAC_TYPE, + NBL_FLOW_PED_UIP_TYPE, + NBL_FLOW_PED_DIP_TYPE, + + /* pedit for mac & ip has both src and dst; _D_TYPE denotes the dst direction */ + NBL_FLOW_PED_UMAC_D_TYPE, + NBL_FLOW_PED_DMAC_D_TYPE, + NBL_FLOW_PED_UIP_D_TYPE, + NBL_FLOW_PED_DIP_D_TYPE, + + NBL_FLOW_PED_RES_MAX, + /* the following types do not need to store resources */ + NBL_FLOW_PED_UIP6_TYPE, + NBL_FLOW_PED_DIP6_TYPE, + NBL_FLOW_PED_RECORD_MAX, +}; + +struct nbl_tc_pedit_node_res { + void *pedit_node[NBL_FLOW_PED_RES_MAX]; + u32 pedits:30; + u32 pedit_val:1; + /* 0 tcp, 1 udp */ + u32 pedit_proto:1; +}; + +struct nbl_tc_pedit_info { + struct nbl_tc_pedit_headers val; + struct nbl_tc_pedit_headers mask; + struct nbl_tc_pedit_node_res pedit_node; +}; + struct nbl_rule_action { u64 flag; /* action flag, e.g. set ipv4 src/redirect */ - u32 drop_flag; /* drop or forward */ - u32 counter_id; - u32 port_id; - u8 port_type; - u8 action_cnt; /* different action type total cnt */ + u32 drop_flag:1; /* drop or forward */ + u32 counter_id:31; - u8 next_stg_sel; + u32 port_id:15; + u32 port_type:8; + u32 action_cnt:5; /* different action type total cnt */ + u32 next_stg_sel:4; + + u32 vni; + u16 encap_size; + u16 encap_idx:15; + u16 encap_parse_ok:1; + + u32 encap_out_dev_ifindex:14; + u32 encap_in_hw:1; + u32 dscp:8; + u32 lag_id:4; + u32 mcc_cnt:5; - u8 dscp; /* set dscp */ - /* set ops */ - struct nbl_fdir_l4 l4_outer; - struct nbl_fdir_l2 l2_data_outer; - struct nbl_fdir_l3 ip_outer; - u8 lag_id; struct nbl_port_mcc port_mcc[NBL_TC_MCC_MEMBER_MAX]; - u16 mcc_cnt; struct nbl_vlan vlan; struct ip_tunnel_info *tunnel; struct nbl_encap_key encap_key; union nbl_flow_encap_offset_tbl_u encap_idx_info; - u32 vni; u8 encap_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN]; - u16 encap_size; - u16 encap_idx; - bool encap_parse_ok; struct net_device *in_port; struct net_device *tc_tun_encap_out_dev; + struct nbl_tc_pedit_info tc_pedit_info; }; struct nbl_fdir_fltr { @@ -1189,7 +1232,6 @@ struct nbl_flow_pattern_conf { u8 flow_send; u8 graph_idx; u16 pp_flag; - u64 input_set; u64 key_flag; }; @@ -1350,6 +1392,7 @@ enum nbl_performance_mode { }; extern int performance_mode; +extern int adaptive_rxbuf_len_disable; struct nbl_vsi_param { u16 vsi_id; @@ -1370,9 +1413,28 @@ enum nbl_trust_mode { NBL_TRUST_MODE_DSCP }; -#define NBL_MAX_PFC_PRIORITIES 8 -#define NBL_DSCP_MAX 64 +#define NBL_VSI_MAX_ID 1024 + +struct nbl_mtu_entry { + u32 ref_count; + u16 mtu_value; +}; + +#define NBL_MAX_PFC_PRIORITIES (8) +#define NBL_DSCP_MAX (64) +#define NBL_TC_MAX_BW (100) +#define NBL_MAX_TC_NUM (8) +#define NBL_MAX_BW (100) + +enum nbl_traffic_type { + NBL_TRAFFIC_RDMA_TYPE, + NBL_TRAFFIC_NET_TYPE, +}; +struct nbl_napi_struct { + struct napi_struct napi; + atomic_t is_irq; +}; extern int loongarch_low_version; #define NBL_LOONGSON64_VF_MAX_QUEUE_NUM 2 #define NBL_LOONGSON64_MAX_QUEUE_NUM 8 diff 
--git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h index 0c9a4ec16023..f910f8d7a933 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2022 nebula-matrix Limited. * Author: Bennie Yan diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c index 20a719e44e02..90db549fd54e 100644 --- a/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c @@ -71,6 +71,7 @@ struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *p NBL_COMMON_TO_DMA_DEV(common) = &pdev->dev; NBL_COMMON_TO_DEBUG_LVL(common) |= NBL_DEBUG_ALL; NBL_COMMON_TO_VF_CAP(common) = param->caps.is_vf; + NBL_COMMON_TO_OCP_CAP(common) = param->caps.is_ocp; NBL_COMMON_TO_PCI_USING_DAC(common) = param->pci_using_dac; NBL_COMMON_TO_PCI_FUNC_ID(common) = PCI_FUNC(pdev->devfn); common->devid = PCI_SLOT(pdev->devfn); @@ -129,6 +130,7 @@ struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *p void nbl_core_remove(struct nbl_adapter *adapter) { struct device *dev; + struct nbl_product_base_ops *product_base_ops; if (!adapter) @@ -217,6 +219,7 @@ int nbl_st_init(struct nbl_software_tool_table *st_table) st_table->devno = devid; st_table->cls = class_create("nblst_cls"); + if (!IS_ERR(st_table->cls)) st_table->cls->devnode = nblst_cdevnode; if (IS_ERR(st_table->cls)) { unregister_chrdev(st_table->major, "nblst"); @@ -266,14 +269,11 @@ static void nbl_get_func_param(struct pci_dev *pdev, kernel_ulong_t driver_data, param->caps.has_grc = NBL_CAP_IS_GRC(driver_data); param->caps.is_blk = NBL_CAP_IS_BLK(driver_data); param->caps.is_nic = NBL_CAP_IS_NIC(driver_data); + param->caps.is_ocp = NBL_CAP_IS_OCP(driver_data); param->caps.has_factory_ctrl = NBL_CAP_IS_FACTORY_CTRL(driver_data); if (NBL_CAP_IS_LEONIS(driver_data)) param->product_type = NBL_LEONIS_TYPE; - if (NBL_CAP_IS_BOOTIS(driver_data)) - param->product_type = NBL_BOOTIS_TYPE; - if (NBL_CAP_IS_VIRTIO(driver_data)) - param->product_type = NBL_VIRTIO_TYPE; /** * Leonis only PF0 has ctrl capability, but PF0's pcie device_id is the same as the other PFs. 
@@ -323,11 +323,6 @@ static int nbl_probe(struct pci_dev *pdev, const struct pci_device_id __always_u pci_save_state(pdev); - if (param.caps.is_blk) { - dev_info(dev, "nbl_virtio_blk probe OK\n"); - return NBL_OK; - } - adapter = nbl_core_init(pdev, &param); if (!adapter) { dev_err(dev, "Nbl adapter init fail\n"); @@ -363,6 +358,7 @@ static void nbl_remove(struct pci_dev *pdev) nbl_core_stop(adapter); nbl_core_remove(adapter); + pci_clear_master(pdev); pci_disable_device(pdev); @@ -372,10 +368,17 @@ static void nbl_shutdown(struct pci_dev *pdev) { struct nbl_adapter *adapter = pci_get_drvdata(pdev); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + bool wol_ena = common->wol_ena; if (!NBL_COMMON_TO_VF_CAP(NBL_ADAPTER_TO_COMMON(adapter))) nbl_remove(pdev); + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wol_ena); + pci_set_power_state(pdev, PCI_D3hot); + } + dev_info(&pdev->dev, "nbl shutdown OK\n"); } @@ -399,6 +402,8 @@ static __maybe_unused int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs) return 0; } + /* register pf_name to AF first, because vf_name depends on pf_name */ + nbl_dev_register_dev_name(adapter); err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_err(&pdev->dev, "nbl enable sriov failed %d!\n", err); @@ -427,12 +432,8 @@ static __maybe_unused int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs) /** * Leonis DeviceID - * 0x3400-0x3402 reserve for internal test * 0x3403-0x340d for snic v3r1 product **/ -#define NBL_DEVICE_ID_LEONIS_FACTORY (0x3400) -#define NBL_DEVICE_ID_LEONIS_PF (0x3401) -#define NBL_DEVICE_ID_LEONIS_VF (0x3402) #define NBL_DEVICE_ID_M18110 (0x3403) #define NBL_DEVICE_ID_M18110_LX (0x3404) #define NBL_DEVICE_ID_M18110_BASE_T (0x3405) @@ -451,23 +452,7 @@ static __maybe_unused int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs) #define NBL_DEVICE_ID_M18120_LX_BASE_T_OCP (0x3412) #define NBL_DEVICE_ID_M18100_VF (0x3413) -#define NBL_BOOTIS_DEVICE_ID_1226 (0x1226) -#define NBL_BOOTIS_DEVICE_ID_1227 (0x1227) -#define NBL_DF200_VDPA_NET_ID (0x1041) -#define NBL_DF200_VDPA_BLK_ID (0x1042) -#define NBL_DF200_RDMA_NET_ID (0x1043) - static const struct pci_device_id nbl_id_table[] = { - { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_LEONIS_FACTORY), .driver_data = - NBL_CAP_SET_BIT(NBL_CAP_HAS_FACTORY_CTRL_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) }, - { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_LEONIS_PF), .driver_data = - NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, - { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_LEONIS_VF), .driver_data = - NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_VF_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) }, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | @@ -487,19 +472,19 @@ static const struct pci_device_id nbl_id_table[] = { { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + 
NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT) }, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)}, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_BASE_T_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)}, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_BASE_T_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)}, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | @@ -519,19 +504,19 @@ static const struct pci_device_id nbl_id_table[] = { { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)}, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)}, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_BASE_T_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)}, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_BASE_T_OCP), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | - NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_OCP_BIT)}, { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18100_VF), .driver_data = NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_VF_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) }, @@ -610,6 +595,7 @@ static void __exit nbl_module_exit(void) nbl_st_remove(nbl_get_st_table()); nbl_common_destroy_wq(); + nbl_dev_user_module_destroy(); nbl_debugfs_remove(); @@ -628,11 +614,3 @@ MODULE_LICENSE("GPL"); #define NBL_FW_TUNNEL_TOE_P4 NBL_FW_SNIC_PATH MODULE_FIRMWARE(NBL_FW_SNIC_PATH "nbl_single_tunnel_toe_enhance.elf"); -MODULE_FIRMWARE(NBL_FW_SNIC_PATH "nbl_dual_tunnel_toe_enhance.elf"); 
-MODULE_FIRMWARE(NBL_FW_SNIC_PATH "nbl_quad_tunnel_toe_enhance.elf"); -MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_single_port_p4_hg"); -MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_dual_port_p4_hg"); -MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_quad_port_p4_hg"); -MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_single_port_p4_lg"); -MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_dual_port_p4_lg"); -MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_quad_port_p4_lg"); -- Gitee
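
A note on the NBL_FLOW_KEY_*_FLAG rework in the nbl_include.h hunks above: the defines are consumed as a u64 bitmask (the key_flag member of struct nbl_flow_pattern_conf), and some bits are deliberately shared by mutually exclusive matches, e.g. DSTPORT and ICMP_CODE both sit on BIT_ULL(29). The standalone userspace sketch below only illustrates that combining pattern; the three flags are copied from the patch, but the IPv4/TCP dst-port example itself is hypothetical, not driver code.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))
/* values copied from the patch above */
#define NBL_FLOW_KEY_DIPV4_FLAG    (BIT_ULL(21))
#define NBL_FLOW_KEY_DSTPORT_FLAG  (BIT_ULL(29))
#define NBL_FLOW_KEY_PROTOCOL_FLAG (BIT_ULL(32))

int main(void)
{
	/* hypothetical IPv4/TCP dst-port match: OR the relevant key bits */
	uint64_t key_flag = NBL_FLOW_KEY_DIPV4_FLAG |
			    NBL_FLOW_KEY_PROTOCOL_FLAG |
			    NBL_FLOW_KEY_DSTPORT_FLAG;

	if (key_flag & NBL_FLOW_KEY_DIPV4_FLAG)
		printf("key_flag=%#llx matches outer IPv4 dst\n",
		       (unsigned long long)key_flag);
	return 0;
}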
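The nbl_port_speed_to_speed() helper added in the header changes above maps the firmware's enum nbl_fw_port_speed onto plain ethtool Mb/s values, falling back to 25G for unknown encodings. Here is a standalone copy for illustration, compilable in userspace: the SPEED_* constants are restated with their <linux/ethtool.h> values, and the enum member order is assumed for the sketch.

#include <stdio.h>

#define SPEED_10000  10000  /* as in <linux/ethtool.h> */
#define SPEED_25000  25000
#define SPEED_50000  50000
#define SPEED_100000 100000

enum nbl_fw_port_speed {
	NBL_FW_PORT_SPEED_10G,
	NBL_FW_PORT_SPEED_25G,
	NBL_FW_PORT_SPEED_50G,
	NBL_FW_PORT_SPEED_100G,
};

static unsigned int nbl_port_speed_to_speed(enum nbl_fw_port_speed s)
{
	switch (s) {
	case NBL_FW_PORT_SPEED_10G:  return SPEED_10000;
	case NBL_FW_PORT_SPEED_25G:  return SPEED_25000;
	case NBL_FW_PORT_SPEED_50G:  return SPEED_50000;
	case NBL_FW_PORT_SPEED_100G: return SPEED_100000;
	default:                     return SPEED_25000; /* unknown: assume 25G */
	}
}

int main(void)
{
	printf("%u\n", nbl_port_speed_to_speed(NBL_FW_PORT_SPEED_100G)); /* 100000 */
	return 0;
}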
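Similarly, the NBL_OPS_CALL() wrapper retained in the header above evaluates to 0 when an ops pointer is unset and dispatches to it otherwise. It relies on GNU statement expressions and typeof, so it needs gcc or clang. A minimal sketch follows; the get_mtu stand-in is hypothetical, not a real driver op.

#include <stdio.h>

/* copied from the patch above; GNU C only */
#define NBL_OPS_CALL(func, para) \
	({ typeof(func) _func = (func); \
	   (!_func) ? 0 : _func para; })

static int get_mtu(int vsi_id)
{
	return 1500 + vsi_id; /* stand-in for a real resource op */
}

int main(void)
{
	int (*op)(int) = get_mtu;

	printf("%d\n", NBL_OPS_CALL(op, (1))); /* 1501 */
	op = NULL;
	printf("%d\n", NBL_OPS_CALL(op, (1))); /* 0: unset op short-circuits */
	return 0;
}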
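Finally, several structures in the patch (nbl_tc_port, nbl_port_mcc, nbl_rule_action) convert separate u32/u16/u8 members into bitfields packed into a single word. The sketch below shows the effect on one of them; the member widths match the patch, the sizeof results hold on common ABIs.

#include <stdint.h>
#include <stdio.h>

/* before the patch: separate members, padded out */
struct nbl_tc_port_old { uint32_t id; uint8_t type; };
/* after the patch: both fields share one u32 */
struct nbl_tc_port_new { uint32_t id:24; uint32_t type:8; };

int main(void)
{
	printf("old=%zu new=%zu\n", sizeof(struct nbl_tc_port_old),
	       sizeof(struct nbl_tc_port_new)); /* typically old=8 new=4 */
	return 0;
}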