From bad535d287c9c1056d99de3666be7da84de4a8fc Mon Sep 17 00:00:00 2001 From: Bennie Yan Date: Tue, 24 Sep 2024 16:58:52 +0800 Subject: [PATCH] Net:nbl_core: Add nbl_core-driver for nebula-matrix S1055AS series smart NIC. nbl_core-dirver inclusion category: feature bugzilla: https://gitee.com/openeuler/release-management/issues/IAN4BO -------------------------------- The S1055AS series smart NIC is a series of network interface card for the Data Center Area. Signed-off-by: Bennie Yan --- .../ethernet/nebula-matrix/m18120.rst | 70 + arch/arm64/configs/openeuler_defconfig | 2 + arch/x86/configs/openeuler_defconfig | 2 + drivers/net/ethernet/Kconfig | 2 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/nebula-matrix/Kconfig | 37 + drivers/net/ethernet/nebula-matrix/Makefile | 6 + .../net/ethernet/nebula-matrix/nbl/Makefile | 40 + .../nbl/nbl_channel/nbl_channel.c | 1497 +++++ .../nbl/nbl_channel/nbl_channel.h | 183 + .../nebula-matrix/nbl/nbl_common/nbl_common.c | 954 +++ .../nebula-matrix/nbl/nbl_common/nbl_common.h | 69 + .../nebula-matrix/nbl/nbl_common/nbl_event.c | 106 + .../nebula-matrix/nbl/nbl_common/nbl_event.h | 29 + .../net/ethernet/nebula-matrix/nbl/nbl_core.h | 192 + .../nebula-matrix/nbl/nbl_core/nbl_debugfs.c | 298 + .../nebula-matrix/nbl/nbl_core/nbl_debugfs.h | 35 + .../nebula-matrix/nbl/nbl_core/nbl_dev.c | 3047 ++++++++++ .../nebula-matrix/nbl/nbl_core/nbl_dev.h | 217 + .../nebula-matrix/nbl/nbl_core/nbl_dev_user.c | 1377 +++++ .../nebula-matrix/nbl/nbl_core/nbl_dev_user.h | 70 + .../nebula-matrix/nbl/nbl_core/nbl_dispatch.c | 5262 +++++++++++++++++ .../nebula-matrix/nbl/nbl_core/nbl_dispatch.h | 89 + .../nebula-matrix/nbl/nbl_core/nbl_ethtool.c | 2028 +++++++ .../nebula-matrix/nbl/nbl_core/nbl_ethtool.h | 42 + .../nebula-matrix/nbl/nbl_core/nbl_hwmon.c | 144 + .../nebula-matrix/nbl/nbl_core/nbl_hwmon.h | 19 + .../nebula-matrix/nbl/nbl_core/nbl_service.c | 3108 ++++++++++ .../nebula-matrix/nbl/nbl_core/nbl_service.h | 230 + .../nebula-matrix/nbl/nbl_hw/nbl_adminq.c | 2418 ++++++++ .../nebula-matrix/nbl/nbl_hw/nbl_adminq.h | 220 + .../nebula-matrix/nbl/nbl_hw/nbl_hw.h | 348 ++ .../nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c | 1467 +++++ .../nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h | 258 + .../nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c | 2965 ++++++++++ .../nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h | 2182 +++++++ .../nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c | 1212 ++++ .../nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h | 23 + .../nbl_hw_leonis/nbl_resource_leonis.c | 1007 ++++ .../nbl_hw_leonis/nbl_resource_leonis.h | 32 + .../nebula-matrix/nbl/nbl_hw/nbl_interrupt.c | 485 ++ .../nebula-matrix/nbl/nbl_hw/nbl_interrupt.h | 26 + .../nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h | 59 + .../nebula-matrix/nbl/nbl_hw/nbl_phy.h | 143 + .../nebula-matrix/nbl/nbl_hw/nbl_queue.c | 56 + .../nebula-matrix/nbl/nbl_hw/nbl_queue.h | 16 + .../nebula-matrix/nbl/nbl_hw/nbl_resource.c | 382 ++ .../nebula-matrix/nbl/nbl_hw/nbl_resource.h | 769 +++ .../nebula-matrix/nbl/nbl_hw/nbl_txrx.c | 2243 +++++++ .../nebula-matrix/nbl/nbl_hw/nbl_txrx.h | 232 + .../nebula-matrix/nbl/nbl_hw/nbl_vsi.c | 186 + .../nebula-matrix/nbl/nbl_hw/nbl_vsi.h | 12 + .../nbl/nbl_include/nbl_def_channel.h | 616 ++ .../nbl/nbl_include/nbl_def_common.h | 474 ++ .../nbl/nbl_include/nbl_def_dev.h | 33 + .../nbl/nbl_include/nbl_def_dispatch.h | 207 + .../nbl/nbl_include/nbl_def_phy.h | 151 + .../nbl/nbl_include/nbl_def_resource.h | 209 + .../nbl/nbl_include/nbl_def_service.h | 195 + .../nbl/nbl_include/nbl_include.h | 794 +++ 
.../nbl/nbl_include/nbl_product_base.h | 21 + .../net/ethernet/nebula-matrix/nbl/nbl_main.c | 504 ++ 62 files changed, 39101 insertions(+) create mode 100644 Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst create mode 100644 drivers/net/ethernet/nebula-matrix/Kconfig create mode 100644 drivers/net/ethernet/nebula-matrix/Makefile create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/Makefile create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.c create mode 100644 
drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h
create mode 100644 drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c
diff --git a/Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst b/Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst
new file mode 100644
index 000000000000..c2dd701ccd07
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst
@@ -0,0 +1,70 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================================
+Linux Base Driver for Nebula-matrix M18120-NIC family
+============================================================
+
+Overview:
+=========
+M18120-NIC is a series of network interface cards for the data center area.
+
+The driver supports link speeds of 100GbE, 25GbE and 10GbE.
+
+M18120-NIC devices support SR-IOV. This driver is used for both the Physical
+Function (PF) and the Virtual Functions (VFs).
+
+M18120-NIC devices support an MSI-X interrupt vector for each Tx/Rx queue, as
+well as interrupt moderation.
+
+M18120-NIC devices also support various offload features such as checksum
+offload and Receive-Side Scaling (RSS).
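+
+For example, on a PF that supports SR-IOV, Virtual Functions are typically
+created through the standard sysfs interface (the interface name enp130s0f0
+and the VF count below are examples only):
+
+    echo 4 > /sys/class/net/enp130s0f0/device/sriov_numvfs
+
+Writing 0 to the same file removes the VFs again.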
+ + +Supported PCI vendor ID/device IDs: +=================================== + +1f0f:3403 - M18110 Family PF +1f0f:3404 - M18110 Lx Family PF +1f0f:3405 - M18110 Family BASE-T PF +1f0f:3406 - M18110 Lx Family BASE-T PF +1f0f:3407 - M18110 Family OCP PF +1f0f:3408 - M18110 Lx Family OCP PF +1f0f:3409 - M18110 Family BASE-T OCP PF +1f0f:340a - M18110 Lx Family BASE-T OCP PF +1f0f:340b - M18120 Family PF +1f0f:340c - M18120 Lx Family PF +1f0f:340d - M18120 Family BASE-T PF +1f0f:340e - M18120 Lx Family BASE-T PF +1f0f:340f - M18120 Family OCP PF +1f0f:3410 - M18120 Lx Family OCP PF +1f0f:3411 - M18120 Family BASE-T OCP PF +1f0f:3412 - M18120 Lx Family BASE-T OCP PF +1f0f:3413 - M18100 Family Virtual Function + +ethtool tool support +==================== + +Obtain basic information of the network card: + ethtool -i enp130s0f0 + +Get network card ring parameters: + ethtool -g enp130s0f0 + +Set the ring parameter: + ethtool -G enp130s0f0 rx 1024 tx 1024 + +View statistics: + ethtool -S enp130s0f0 + +Viewing Optical Module Information: + ethtool -m enp130s0f0 + +Support +======= + +For more information about M18110-NIC, please visit the following URL: +https://www.nebula-matrix.com/ + +If an issue is identified with the released source code on the supported kernel +with a supported adapter, email the specific information related to the issue to +open@nebula-matrix.com. diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 45d9443367e1..3550028ad7d4 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -3103,6 +3103,8 @@ CONFIG_SWPHY=y CONFIG_PHYLIB_LEDS=y CONFIG_FIXED_PHY=y CONFIG_SFP=m +CONFIG_NET_VENDOR_NEBULA_MATRIX=y +CONFIG_NBL_CORE=m # # MII PHY device drivers diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index c3b68f17d746..5c54e0fed823 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -3091,6 +3091,8 @@ CONFIG_SWPHY=y CONFIG_LED_TRIGGER_PHY=y CONFIG_FIXED_PHY=y CONFIG_SFP=m +CONFIG_NET_VENDOR_NEBULA_MATRIX=y +CONFIG_NBL_CORE=m # # MII PHY device drivers diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6c378a1d4514..cce9d9c6cc10 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -197,4 +197,6 @@ source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" source "drivers/net/ethernet/bzwx/Kconfig" +source "drivers/net/ethernet/nebula-matrix/Kconfig" + endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 83d59e7b9f28..9ffae03b0afc 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -108,3 +108,4 @@ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/ +obj-$(CONFIG_NET_VENDOR_NEBULA_MATRIX) += nebula-matrix/ diff --git a/drivers/net/ethernet/nebula-matrix/Kconfig b/drivers/net/ethernet/nebula-matrix/Kconfig new file mode 100644 index 000000000000..e92a66125629 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/Kconfig @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Nebula-matrix network device configuration +# + +config NET_VENDOR_NEBULA_MATRIX + bool "Nebula-matrix devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Nebual-matrix cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_NEBULA_MATRIX + +config NBL_CORE + tristate "Nebula-matrix Ethernet Controller m18110 Family support" + depends on PCI && VFIO + depends on ARM64 || X86_64 + default m + help + This driver supports Nebula-matrix Ethernet Controller m18110 Family of + devices. For more information about this product, go to the product + description with smart NIC: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called nbl_core. + +endif # NET_VENDOR_NEBULA_MATRIX diff --git a/drivers/net/ethernet/nebula-matrix/Makefile b/drivers/net/ethernet/nebula-matrix/Makefile new file mode 100644 index 000000000000..dc6bf7dcd6bf --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Nebula-matrix network device drivers. +# + +obj-$(CONFIG_NBL_CORE) += nbl/ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/Makefile b/drivers/net/ethernet/nebula-matrix/nbl/Makefile new file mode 100644 index 000000000000..bbca03bcea4a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2021 Nebula Matrix Limited. +# Author: Walle Geng + +obj-$(CONFIG_NBL_CORE) := nbl_core.o + +nbl_core-objs += nbl_common/nbl_common.o \ + nbl_common/nbl_event.o \ + nbl_channel/nbl_channel.o \ + nbl_hw/nbl_hw_leonis/nbl_phy_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_flow_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_queue_leonis.o \ + nbl_hw/nbl_hw_leonis/nbl_resource_leonis.o \ + nbl_hw/nbl_resource.o \ + nbl_hw/nbl_interrupt.o \ + nbl_hw/nbl_txrx.o \ + nbl_hw/nbl_queue.o \ + nbl_hw/nbl_vsi.o \ + nbl_hw/nbl_adminq.o \ + nbl_core/nbl_dispatch.o \ + nbl_core/nbl_debugfs.o \ + nbl_core/nbl_ethtool.o \ + nbl_core/nbl_service.o \ + nbl_core/nbl_dev.o \ + nbl_core/nbl_hwmon.o \ + nbl_core/nbl_dev_user.o \ + nbl_main.o + +# Do not modify include path, unless you are adding a new file which needs some headers in its +# direct upper directory (see the exception part in below). +# +# The structure requires that codes can only access the header files in nbl_include, or the .h that +# has the same name as the .c file. The only exception is that the product-specific files can access +# the same headers as the common part, e.g. nbl_phy_leonis.c can access nbl_phy.h. +# Make sure to put all the things you need to expose to others in nbl_def_xxx.h, and make everything +# in your own .h private. +# +# Try not to break these rules, sincerely. +ccflags-y := -I $(src) -I $(src)/nbl_include -I $(src)/nbl_export -I $(src)/nbl_hw + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c new file mode 100644 index 000000000000..b00959e24178 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c @@ -0,0 +1,1497 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_channel.h" + +static int nbl_chan_add_msg_handler(struct nbl_channel_mgt *chan_mgt, u16 msg_type, + nbl_chan_resp func, void *priv) +{ + struct nbl_chan_msg_node_data handler = {0}; + int ret; + + handler.func = func; + handler.priv = priv; + + ret = nbl_common_alloc_hash_node(chan_mgt->handle_hash_tbl, &msg_type, &handler); + + return ret; +} + +static int nbl_chan_init_msg_handler(struct nbl_channel_mgt *chan_mgt, u8 user_notify) +{ + struct nbl_hash_tbl_key tbl_key; + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int ret = 0; + + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_chan_notify_userdev *notify; + + if (user_notify) { + notify = devm_kzalloc(dev, sizeof(struct nbl_chan_notify_userdev), GFP_KERNEL); + if (!notify) + return -ENOMEM; + + mutex_init(¬ify->lock); + chan_mgt->notify = notify; + } + + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(u16), + sizeof(struct nbl_chan_msg_node_data), + NBL_CHAN_HANDLER_TBL_BUCKET_SIZE, false); + + chan_mgt->handle_hash_tbl = nbl_common_init_hash_table(&tbl_key); + if (!chan_mgt->handle_hash_tbl) { + ret = -ENOMEM; + goto alloc_hashtbl_failed; + } + + return 0; + +alloc_hashtbl_failed: + if (user_notify) { + chan_mgt->notify = NULL; + devm_kfree(dev, notify); + } + + return ret; +} + +static void nbl_chan_remove_msg_handler(struct nbl_channel_mgt *chan_mgt) +{ + struct nbl_hash_tbl_del_key del_key = {0}; + + nbl_common_remove_hash_table(chan_mgt->handle_hash_tbl, &del_key); + + chan_mgt->handle_hash_tbl = NULL; + + if (chan_mgt->notify) { + devm_kfree(NBL_COMMON_TO_DEV(chan_mgt->common), chan_mgt->notify); + chan_mgt->notify = NULL; + } +} + +static bool nbl_chan_is_admiq(struct nbl_chan_info *chan_info) +{ + return chan_info->chan_type == NBL_CHAN_TYPE_ADMINQ; +} + +static void nbl_chan_init_queue_param(struct nbl_chan_info *chan_info, + u16 num_txq_entries, u16 num_rxq_entries, + u16 txq_buf_size, u16 rxq_buf_size) +{ + spin_lock_init(&chan_info->txq_lock); + chan_info->num_txq_entries = num_txq_entries; + chan_info->num_rxq_entries = num_rxq_entries; + chan_info->txq_buf_size = txq_buf_size; + chan_info->rxq_buf_size = rxq_buf_size; +} + +static int nbl_chan_init_tx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct nbl_chan_ring *txq = &chan_info->txq; + size_t size = chan_info->num_txq_entries * sizeof(struct nbl_chan_tx_desc); + + txq->desc = dmam_alloc_coherent(dma_dev, size, &txq->dma, GFP_KERNEL | __GFP_ZERO); + if (!txq->desc) { + dev_err(dev, "Allocate DMA for chan tx descriptor ring failed\n"); + return -ENOMEM; + } + + chan_info->wait = devm_kcalloc(dev, chan_info->num_txq_entries, + sizeof(struct nbl_chan_waitqueue_head), GFP_KERNEL); + if (!chan_info->wait) + goto req_wait_queue_failed; + + txq->buf = devm_kcalloc(dev, chan_info->num_txq_entries, + sizeof(struct nbl_chan_buf), GFP_KERNEL); + if (!txq->buf) + goto req_num_txq_entries; + + return 0; + +req_num_txq_entries: + devm_kfree(dev, chan_info->wait); +req_wait_queue_failed: + dmam_free_coherent(dma_dev, size, txq->desc, txq->dma); + + txq->desc = NULL; + txq->dma = 0; + chan_info->wait = NULL; + + return -ENOMEM; +} + +static int nbl_chan_init_rx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct 
nbl_chan_ring *rxq = &chan_info->rxq; + size_t size = chan_info->num_rxq_entries * sizeof(struct nbl_chan_rx_desc); + + rxq->desc = dmam_alloc_coherent(dma_dev, size, &rxq->dma, GFP_KERNEL | __GFP_ZERO); + if (!rxq->desc) { + dev_err(dev, "Allocate DMA for chan rx descriptor ring failed\n"); + return -ENOMEM; + } + + rxq->buf = devm_kcalloc(dev, chan_info->num_rxq_entries, + sizeof(struct nbl_chan_buf), GFP_KERNEL); + if (!rxq->buf) { + dmam_free_coherent(dma_dev, size, rxq->desc, rxq->dma); + rxq->desc = NULL; + rxq->dma = 0; + return -ENOMEM; + } + + return 0; +} + +static void nbl_chan_remove_tx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct nbl_chan_ring *txq = &chan_info->txq; + size_t size = chan_info->num_txq_entries * sizeof(struct nbl_chan_tx_desc); + + devm_kfree(dev, txq->buf); + txq->buf = NULL; + + devm_kfree(dev, chan_info->wait); + chan_info->wait = NULL; + + dmam_free_coherent(dma_dev, size, txq->desc, txq->dma); + txq->desc = NULL; + txq->dma = 0; +} + +static void nbl_chan_remove_rx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct nbl_chan_ring *rxq = &chan_info->rxq; + size_t size = chan_info->num_rxq_entries * sizeof(struct nbl_chan_rx_desc); + + devm_kfree(dev, rxq->buf); + rxq->buf = NULL; + + dmam_free_coherent(dma_dev, size, rxq->desc, rxq->dma); + rxq->desc = NULL; + rxq->dma = 0; +} + +static int nbl_chan_init_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + int err; + + err = nbl_chan_init_tx_queue(common, chan_info); + if (err) + return err; + + err = nbl_chan_init_rx_queue(common, chan_info); + if (err) + goto setup_rx_queue_err; + + return 0; + +setup_rx_queue_err: + nbl_chan_remove_tx_queue(common, chan_info); + return err; +} + +static void nbl_chan_config_queue(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info, bool tx) +{ + struct nbl_phy_ops *phy_ops; + struct nbl_chan_ring *ring; + dma_addr_t dma_addr; + int size_bwid = ilog2(chan_info->num_rxq_entries); + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (tx) + ring = &chan_info->txq; + else + ring = &chan_info->rxq; + + dma_addr = ring->dma; + + if (nbl_chan_is_admiq(chan_info)) { + if (tx) + phy_ops->config_adminq_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + else + phy_ops->config_adminq_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + } else { + if (tx) + phy_ops->config_mailbox_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + else + phy_ops->config_mailbox_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + } +} + +static int nbl_chan_alloc_all_tx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_buf *buf; + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u16 i; + + for (i = 0; i < chan_info->num_txq_entries; i++) { + buf = &txq->buf[i]; + buf->va = dmam_alloc_coherent(dma_dev, chan_info->txq_buf_size, + &buf->pa, GFP_KERNEL | __GFP_ZERO); + if (!buf->va) { + dev_err(dev, "Allocate buffer for chan tx queue failed\n"); + goto err; + } + } + + txq->next_to_clean = 0; + txq->next_to_use = 0; + txq->tail_ptr = 0; + + return 0; +err: 
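+	/* Unwind partial allocation: free only the tx buffers allocated so far */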
+ while (i--) { + buf = &txq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->txq_buf_size, buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } + + return -ENOMEM; +} + +static int nbl_chan_cfg_mailbox_qinfo_map_table(struct nbl_channel_mgt *chan_mgt) +{ + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + u16 func_id; + u32 pf_mask; + + pf_mask = phy_ops->get_host_pf_mask(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + for (func_id = 0; func_id < NBL_MAX_PF; func_id++) { + if (!(pf_mask & (1 << func_id))) + phy_ops->cfg_mailbox_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), func_id, + common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id); + } + + return 0; +} + +static int nbl_chan_cfg_adminq_qinfo_map_table(struct nbl_channel_mgt *chan_mgt) +{ + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + phy_ops->cfg_adminq_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common)); + + return 0; +} + +static int nbl_chan_cfg_qinfo_map_table(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + int err; + + if (!nbl_chan_is_admiq(chan_info)) + err = nbl_chan_cfg_mailbox_qinfo_map_table(chan_mgt); + else + err = nbl_chan_cfg_adminq_qinfo_map_table(chan_mgt); + + return err; +} + +static void nbl_chan_free_all_tx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_buf *buf; + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u16 i; + + for (i = 0; i < chan_info->num_txq_entries; i++) { + buf = &txq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->txq_buf_size, + buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } +} + +#define NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, tail_ptr, qid) \ +do { \ + typeof(phy_ops) _phy_ops = (phy_ops); \ + typeof(chan_mgt) _chan_mgt = (chan_mgt); \ + typeof(tail_ptr) _tail_ptr = (tail_ptr); \ + typeof(qid) _qid = (qid); \ + if (nbl_chan_is_admiq(chan_info)) \ + (_phy_ops)->update_adminq_queue_tail_ptr(NBL_CHAN_MGT_TO_PHY_PRIV(_chan_mgt), \ + _tail_ptr, _qid); \ + else \ + (_phy_ops)->update_mailbox_queue_tail_ptr(NBL_CHAN_MGT_TO_PHY_PRIV(_chan_mgt), \ + _tail_ptr, _qid); \ +} while (0) + +static int nbl_chan_alloc_all_rx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_phy_ops *phy_ops; + struct nbl_chan_ring *rxq = &chan_info->rxq; + struct nbl_chan_buf *buf; + struct nbl_chan_rx_desc *desc; + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u32 retry_times = 0; + u16 i; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + for (i = 0; i < chan_info->num_rxq_entries; i++) { + buf = &rxq->buf[i]; + buf->va = dmam_alloc_coherent(dma_dev, chan_info->rxq_buf_size, + &buf->pa, GFP_KERNEL | __GFP_ZERO); + if (!buf->va) { + dev_err(dev, "Allocate buffer for chan rx queue failed\n"); + goto err; + } + } + + desc = rxq->desc; + for (i = 0; i < chan_info->num_rxq_entries - 1; i++) { + buf = &rxq->buf[i]; + desc[i].flags = NBL_CHAN_RX_DESC_AVAIL; + desc[i].buf_addr = buf->pa; + desc[i].buf_len = chan_info->rxq_buf_size; + } + + rxq->next_to_clean = 0; + rxq->next_to_use = 
chan_info->num_rxq_entries - 1; + rxq->tail_ptr = chan_info->num_rxq_entries - 1; + + /* mb for doorbell */ + mb(); + + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, rxq->tail_ptr, NBL_MB_RX_QID); + + for (retry_times = 0; retry_times < 3; retry_times++) { + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, + rxq->tail_ptr, NBL_MB_RX_QID); + usleep_range(NBL_CHAN_TX_WAIT_US * 50, NBL_CHAN_TX_WAIT_US * 60); + } + + return 0; +err: + while (i--) { + buf = &rxq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->rxq_buf_size, + buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } + + return -ENOMEM; +} + +static void nbl_chan_free_all_rx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *rxq = &chan_info->rxq; + struct nbl_chan_buf *buf; + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u16 i; + + for (i = 0; i < chan_info->num_rxq_entries; i++) { + buf = &rxq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->rxq_buf_size, + buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } +} + +static int nbl_chan_alloc_all_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + int err; + + err = nbl_chan_alloc_all_tx_bufs(chan_mgt, chan_info); + if (err) + return err; + + err = nbl_chan_alloc_all_rx_bufs(chan_mgt, chan_info); + if (err) + goto alloc_rx_bufs_err; + + return 0; + +alloc_rx_bufs_err: + nbl_chan_free_all_tx_bufs(chan_mgt, chan_info); + return err; +} + +static void nbl_chan_stop_queue(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (nbl_chan_is_admiq(chan_info)) { + phy_ops->stop_adminq_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + phy_ops->stop_adminq_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + } else { + phy_ops->stop_mailbox_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + phy_ops->stop_mailbox_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + } +} + +static void nbl_chan_free_all_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + nbl_chan_free_all_tx_bufs(chan_mgt, chan_info); + nbl_chan_free_all_rx_bufs(chan_mgt, chan_info); +} + +static void nbl_chan_remove_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + nbl_chan_remove_tx_queue(common, chan_info); + nbl_chan_remove_rx_queue(common, chan_info); +} + +static int nbl_chan_teardown_queue(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_common_info *common = chan_mgt->common; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + nbl_chan_stop_queue(chan_mgt, chan_info); + + nbl_chan_free_all_bufs(chan_mgt, chan_info); + + nbl_chan_remove_queue(common, chan_info); + + return 0; +} + +static int nbl_chan_setup_queue(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int err; + + nbl_chan_init_queue_param(chan_info, NBL_CHAN_QUEUE_LEN, NBL_CHAN_QUEUE_LEN, + NBL_CHAN_BUF_LEN, NBL_CHAN_BUF_LEN); + + err = nbl_chan_init_queue(common, chan_info); + if (err) + return err; + + nbl_chan_config_queue(chan_mgt, chan_info, true); /* tx */ + nbl_chan_config_queue(chan_mgt, chan_info, false); /* rx */ + + err = nbl_chan_alloc_all_bufs(chan_mgt, chan_info); + if (err) + goto chan_q_setup_fail; + + 
return 0; + +chan_q_setup_fail: + nbl_chan_teardown_queue(chan_mgt, chan_type); + return err; +} + +static void nbl_chan_shutdown_queue(struct nbl_channel_mgt *chan_mgt, u8 chan_type, bool tx) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (tx) { + if (nbl_chan_is_admiq(chan_info)) + phy_ops->stop_adminq_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + else + phy_ops->stop_mailbox_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + + nbl_chan_free_all_tx_bufs(chan_mgt, chan_info); + nbl_chan_remove_tx_queue(common, chan_info); + } else { + if (nbl_chan_is_admiq(chan_info)) + phy_ops->stop_adminq_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + else + phy_ops->stop_mailbox_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + + nbl_chan_free_all_rx_bufs(chan_mgt, chan_info); + nbl_chan_remove_rx_queue(common, chan_info); + } +} + +static int nbl_chan_start_txq(struct nbl_channel_mgt *chan_mgt, u8 chan_type) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int ret; + + ret = nbl_chan_init_tx_queue(common, chan_info); + if (ret) + return ret; + + nbl_chan_config_queue(chan_mgt, chan_info, true); /* tx */ + + ret = nbl_chan_alloc_all_tx_bufs(chan_mgt, chan_info); + if (ret) + goto alloc_buf_failed; + + return 0; + +alloc_buf_failed: + nbl_chan_shutdown_queue(chan_mgt, chan_type, true); + return ret; +} + +static int nbl_chan_start_rxq(struct nbl_channel_mgt *chan_mgt, u8 chan_type) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int ret; + + ret = nbl_chan_init_rx_queue(common, chan_info); + if (ret) + return ret; + + nbl_chan_config_queue(chan_mgt, chan_info, false); /* rx */ + + ret = nbl_chan_alloc_all_rx_bufs(chan_mgt, chan_info); + if (ret) + goto alloc_buf_failed; + + return 0; + +alloc_buf_failed: + nbl_chan_shutdown_queue(chan_mgt, chan_type, false); + return ret; +} + +static int nbl_chan_reset_queue(struct nbl_channel_mgt *chan_mgt, u8 chan_type, bool tx) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + int ret = 0; + + /* If someone else is doing resetting, don't bother */ + if (test_bit(NBL_CHAN_RESETTING, chan_info->state)) + return 0; + + /* Make sure rx won't enter if we are resetting */ + set_bit(NBL_CHAN_RESETTING, chan_info->state); + if (chan_info->clean_task) + nbl_common_flush_task(chan_info->clean_task); + + /* Make sure tx won't enter if we are resetting */ + spin_lock(&chan_info->txq_lock); + + /* If we are in a race, and someone else has finished it, just return */ + if (!test_bit(NBL_CHAN_RESETTING, chan_info->state)) { + spin_unlock(&chan_info->txq_lock); + return 0; + } + + nbl_chan_shutdown_queue(chan_mgt, chan_type, tx); + + if (tx) + ret = nbl_chan_start_txq(chan_mgt, chan_type); + else + ret = nbl_chan_start_rxq(chan_mgt, chan_type); + + /* Make sure we clear this bit inside lock, so that we don't reset it twice if race */ + clear_bit(NBL_CHAN_RESETTING, chan_info->state); + spin_unlock(&chan_info->txq_lock); + + return ret; +} + +static bool nbl_chan_check_dma_err(struct nbl_channel_mgt *chan_mgt, u8 chan_type, bool tx) +{ + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (chan_type == NBL_CHAN_TYPE_MAILBOX) + return 
phy_ops->check_mailbox_dma_err(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), tx); + else + return phy_ops->check_adminq_dma_err(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), tx); +} + +static u16 nbl_chan_update_txqueue(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info, u16 dstid, + enum nbl_chan_msg_type msg_type, + void *arg, size_t arg_len) +{ + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + struct nbl_chan_ring *txq; + struct nbl_chan_tx_desc *tx_desc; + struct nbl_chan_buf *tx_buf; + u16 next_to_use; + + txq = &chan_info->txq; + next_to_use = txq->next_to_use; + tx_buf = NBL_CHAN_TX_RING_TO_BUF(txq, next_to_use); + tx_desc = NBL_CHAN_TX_RING_TO_DESC(txq, next_to_use); + + tx_desc->dstid = dstid; + tx_desc->msg_type = msg_type; + tx_desc->msgid = next_to_use; + if (arg_len > NBL_CHAN_BUF_LEN - sizeof(*tx_desc)) { + dev_err(dev, "%s, arg_len:%ld, too long!", __func__, arg_len); + return -1; + } + + if (arg_len > NBL_CHAN_TX_DESC_EMBEDDED_DATA_LEN) { + memcpy(tx_buf->va, arg, arg_len); + tx_desc->buf_addr = tx_buf->pa; + tx_desc->buf_len = arg_len; + tx_desc->data_len = 0; + } else { + memcpy(tx_desc->data, arg, arg_len); + tx_desc->buf_len = 0; + tx_desc->data_len = arg_len; + } + tx_desc->flags = NBL_CHAN_TX_DESC_AVAIL; + + /* wmb */ + wmb(); + txq->next_to_use++; + if (txq->next_to_use == chan_info->num_txq_entries) + txq->next_to_use = 0; + txq->tail_ptr++; + + return next_to_use; +} + +static int nbl_chan_kick_tx_ring(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_phy_ops *phy_ops; + struct nbl_common_info *common = chan_mgt->common; + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_chan_ring *txq; + struct nbl_chan_tx_desc *tx_desc; + int i; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + txq = &chan_info->txq; + + /* mb for doorbell */ + mb(); + + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, txq->tail_ptr, NBL_MB_TX_QID); + + tx_desc = NBL_CHAN_TX_RING_TO_DESC(txq, txq->next_to_clean); + + i = 0; + while (!(tx_desc->flags & NBL_CHAN_TX_DESC_USED)) { + udelay(NBL_CHAN_TX_WAIT_US); + i++; + + if (!(i % NBL_CHAN_TX_REKICK_WAIT_TIMES)) { + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, txq->tail_ptr, + NBL_MB_TX_QID); + } + + if (i == NBL_CHAN_TX_WAIT_TIMES) { + dev_err(dev, "bus:%u, dev:%u, func:%u, chan send message type: %d timeout\n", + common->bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common), + tx_desc->msg_type); + return -1; + } + } + + txq->next_to_clean = txq->next_to_use; + return 0; +} + +static void nbl_chan_recv_ack_msg(void *priv, u16 srcid, u16 msgid, + void *data, u32 data_len) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NULL; + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_chan_waitqueue_head *wait_head; + u32 *payload = (u32 *)data; + u32 ack_msgid; + u32 ack_msgtype; + + if (srcid == NBL_CHAN_ADMINQ_FUNCTION_ID) + chan_info = NBL_CHAN_MGT_TO_ADMINQ(chan_mgt); + else + chan_info = NBL_CHAN_MGT_TO_MAILBOX(chan_mgt); + + ack_msgtype = *payload; + ack_msgid = *(payload + 1); + wait_head = &chan_info->wait[ack_msgid]; + wait_head->ack_err = *(payload + 2); + + if (ack_msgtype != wait_head->msg_type) + nbl_debug(common, NBL_DEBUG_MBX, + "ack_msgtype %d donot match msg_type %d\n", + ack_msgtype, wait_head->msg_type); + if (wait_head->status != NBL_MBX_STATUS_WAITING) { + nbl_warn(common, NBL_DEBUG_MBX, "Skip ack with status %d", 
wait_head->status); + return; + } + + if (wait_head->ack_err >= 0 && (data_len > 3 * sizeof(u32))) { + if (data_len - 3 * sizeof(u32) != wait_head->ack_data_len) { + dev_err(dev, "%x:%x.%x payload_len donot match ack_data_len!, srcid:%u,\n" + "msgtype:%u, msgid:%u, data_len:%u, ack_data_len:%u\n", + common->bus, common->devid, NBL_COMMON_TO_PCI_FUNC_ID(common), + srcid, ack_msgtype, ack_msgid, data_len, wait_head->ack_data_len); + goto wakeup; + } + memcpy((char *)wait_head->ack_data, payload + 3, data_len - 3 * sizeof(int)); + } + +wakeup: + /* wmb */ + wmb(); + wait_head->acked = 1; + if (wait_head->need_waked) + wake_up(&wait_head->wait_queue); +} + +static inline u16 nbl_unused_msg_ring_count(u32 head, u32 tail) +{ + return ((tail > head) ? 0 : NBL_USER_DEV_SHMMSGBUF_SIZE) + tail - head - 1; +} + +static int nbl_chan_msg_forward_userdev(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_tx_desc *tx_desc) +{ + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + void *shm_msg_ring = chan_mgt->notify->shm_msg_ring; + char *data = (char *)shm_msg_ring + 8; + u32 *head = (u32 *)shm_msg_ring, tmp; + u32 tail = *(head + 1); + u32 total_len = sizeof(struct nbl_chan_tx_desc) + sizeof(u32), copy_len; + + if (!tx_desc->data_len) + total_len += ALIGN(tx_desc->buf_len, 4); + + tmp = *head; + if (total_len > nbl_unused_msg_ring_count(tmp, tail)) { + dev_err(dev, "user msg ring not enough for msg\n"); + return -E2BIG; + } + + /* save total_len */ + *(u32 *)(data + tmp) = total_len; + tmp += sizeof(u32); + total_len -= sizeof(u32); + if (tmp >= NBL_USER_DEV_SHMMSGBUF_SIZE) + tmp -= NBL_USER_DEV_SHMMSGBUF_SIZE; + + copy_len = NBL_USER_DEV_SHMMSGBUF_SIZE - tmp; + copy_len = min(copy_len, total_len); + memcpy(data + tmp, tx_desc, copy_len); + if (total_len > copy_len) + memcpy(data, (char *)tx_desc + copy_len, total_len - copy_len); + + tmp += total_len; + if (tmp >= NBL_USER_DEV_SHMMSGBUF_SIZE) + tmp -= NBL_USER_DEV_SHMMSGBUF_SIZE; + + /* make sure to update head after content */ + smp_wmb(); + *head = tmp; + eventfd_signal(chan_mgt->notify->eventfd, 1); + + return 0; +} + +static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32 data_len) +{ + struct nbl_chan_tx_desc *tx_desc; + struct nbl_chan_msg_node_data *msg_handler; + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + u16 msg_type, payload_len, srcid, msgid, warn = 1; + void *payload; + + tx_desc = data; + msg_type = tx_desc->msg_type; + dev_dbg(dev, "%s recv msg_type: %d\n", __func__, tx_desc->msg_type); + + srcid = tx_desc->srcid; + msgid = tx_desc->msgid; + if (msg_type >= NBL_CHAN_MSG_MAX) { + dev_err(dev, "Invalid chan message type %u\n", msg_type); + return; + } + + if (tx_desc->data_len) { + payload = (void *)tx_desc->data; + payload_len = tx_desc->data_len; + } else { + payload = (void *)(tx_desc + 1); + payload_len = tx_desc->buf_len; + } + + msg_handler = nbl_common_get_hash_node(chan_mgt->handle_hash_tbl, &msg_type); + if (msg_handler) { + warn = 0; + msg_handler->func(msg_handler->priv, srcid, msgid, payload, payload_len); + } + + if (chan_mgt->notify) { + mutex_lock(&chan_mgt->notify->lock); + if (chan_mgt->notify->eventfd && test_bit(msg_type, chan_mgt->notify->msgtype) && + chan_mgt->notify->shm_msg_ring) { + warn = 0; + nbl_chan_msg_forward_userdev(chan_mgt, tx_desc); + } + mutex_unlock(&chan_mgt->notify->lock); + } + + if (warn) + dev_warn(dev, "Recv channel msg_type: %d, but msg_handler is null!\n", + tx_desc->msg_type); +} + +static void nbl_chan_advance_rx_ring(struct nbl_channel_mgt 
*chan_mgt, + struct nbl_chan_info *chan_info, + struct nbl_chan_ring *rxq) +{ + struct nbl_phy_ops *phy_ops; + struct nbl_chan_rx_desc *rx_desc; + struct nbl_chan_buf *rx_buf; + u16 next_to_use; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + next_to_use = rxq->next_to_use; + rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, next_to_use); + rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, next_to_use); + + rx_desc->flags = NBL_CHAN_RX_DESC_AVAIL; + rx_desc->buf_addr = rx_buf->pa; + rx_desc->buf_len = chan_info->rxq_buf_size; + + /* wmb */ + wmb(); + rxq->next_to_use++; + if (rxq->next_to_use == chan_info->num_rxq_entries) + rxq->next_to_use = 0; + rxq->tail_ptr++; + + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, rxq->tail_ptr, NBL_MB_RX_QID); +} + +static void nbl_chan_clean_queue(struct nbl_channel_mgt *chan_mgt, struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *rxq = &chan_info->rxq; + struct nbl_chan_rx_desc *rx_desc; + struct nbl_chan_buf *rx_buf; + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + u16 next_to_clean; + + next_to_clean = rxq->next_to_clean; + rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, next_to_clean); + rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, next_to_clean); + while (rx_desc->flags & NBL_CHAN_RX_DESC_USED) { + if (!(rx_desc->flags & NBL_CHAN_RX_DESC_WRITE)) + nbl_debug(common, NBL_DEBUG_MBX, + "mailbox rx flag 0x%x has no NBL_CHAN_RX_DESC_WRITE\n", + rx_desc->flags); + + dma_rmb(); + nbl_chan_recv_msg(chan_mgt, rx_buf->va, rx_desc->buf_len); + + nbl_chan_advance_rx_ring(chan_mgt, chan_info, rxq); + + next_to_clean++; + if (next_to_clean == chan_info->num_rxq_entries) + next_to_clean = 0; + rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, next_to_clean); + rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, next_to_clean); + } + rxq->next_to_clean = next_to_clean; +} + +void nbl_chan_clean_queue_subtask(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + if (!test_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state) || + test_bit(NBL_CHAN_RESETTING, chan_info->state)) + return; + + nbl_chan_clean_queue(chan_mgt, chan_info); +} + +static int nbl_chan_send_msg(void *priv, struct nbl_chan_send_info *chan_send) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_chan_info *chan_info = NULL; + struct nbl_chan_waitqueue_head *wait_head; + u16 msgid; + int i = NBL_CHAN_TX_WAIT_ACK_TIMES, ret; + int resend_times = 0; + + if (chan_send->dstid == NBL_CHAN_ADMINQ_FUNCTION_ID) + chan_info = NBL_CHAN_MGT_TO_ADMINQ(chan_mgt); + else + chan_info = NBL_CHAN_MGT_TO_MAILBOX(chan_mgt); + +resend: + spin_lock(&chan_info->txq_lock); + msgid = nbl_chan_update_txqueue(chan_mgt, chan_info, chan_send->dstid, + chan_send->msg_type, + chan_send->arg, chan_send->arg_len); + + if (msgid == 0xFFFF) { + spin_unlock(&chan_info->txq_lock); + dev_err(dev, "chan tx queue full, send msgtype:%u to dstid:%u failed\n", + chan_send->msg_type, chan_send->dstid); + return -1; + } + + if (!chan_send->ack) { + ret = nbl_chan_kick_tx_ring(chan_mgt, chan_info); + spin_unlock(&chan_info->txq_lock); + if (ret) + goto check_tx_dma_err; + else + return ret; + } + + wait_head = &chan_info->wait[msgid]; + init_waitqueue_head(&wait_head->wait_queue); + wait_head->ack_data = chan_send->resp; + wait_head->ack_data_len = chan_send->resp_len; + 
wait_head->acked = 0; + wait_head->msg_type = chan_send->msg_type; + wait_head->need_waked = 1; + wait_head->status = NBL_MBX_STATUS_WAITING; + ret = nbl_chan_kick_tx_ring(chan_mgt, chan_info); + spin_unlock(&chan_info->txq_lock); + if (ret) + goto check_tx_dma_err; + + if (test_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state)) { + ret = wait_event_timeout(wait_head->wait_queue, wait_head->acked, + NBL_CHAN_ACK_WAIT_TIME); + if (!ret) { + dev_err(dev, "wait bus:%u, dev:%u, func:%u, chan send message type: %d\n" + "msg id: %u wait ack timeout\n", common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common), chan_send->msg_type, msgid); + wait_head->status = NBL_MBX_STATUS_TIMEOUT; + goto check_rx_dma_err; + } + + /* rmb for ack */ + rmb(); + return wait_head->ack_err; + } + + /*polling wait mailbox ack*/ + while (i--) { + nbl_chan_clean_queue(chan_mgt, chan_info); + + if (wait_head->acked) + return wait_head->ack_err; + usleep_range(NBL_CHAN_TX_WAIT_ACK_US_MIN, NBL_CHAN_TX_WAIT_ACK_US_MAX); + } + + wait_head->status = NBL_MBX_STATUS_TIMEOUT; + dev_err(dev, "polling bus:%u, dev:%u, func:%u, chan send message type: %d msg id: %u\n" + "wait ack timeout\n", common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common), chan_send->msg_type, msgid); + +check_rx_dma_err: + if (nbl_chan_check_dma_err(chan_mgt, chan_info->chan_type, false)) { + dev_err(dev, "nbl channel rx dma error\n"); + nbl_chan_reset_queue(chan_mgt, chan_info->chan_type, false); + chan_info->rxq_reset_times++; + } + +check_tx_dma_err: + if (nbl_chan_check_dma_err(chan_mgt, chan_info->chan_type, true)) { + dev_err(dev, "nbl channel tx dma error\n"); + nbl_chan_reset_queue(chan_mgt, chan_info->chan_type, true); + chan_info->txq_reset_times++; + } + + resend_times++; + if (resend_times > NBL_CHAN_RESEND_MAX_TIMES) { + dev_err(dev, "nbl channel resend_times %d\n", resend_times); + return -1; + } + + i = NBL_CHAN_TX_WAIT_ACK_TIMES; + goto resend; +} + +static int nbl_chan_send_ack(void *priv, struct nbl_chan_ack_info *chan_ack) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_send_info chan_send; + u32 *tmp; + u32 len = 3 * sizeof(u32) + chan_ack->data_len; + + tmp = kzalloc(len, GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + + tmp[0] = chan_ack->msg_type; + tmp[1] = chan_ack->msgid; + tmp[2] = (u32)chan_ack->err; + if (chan_ack->data && chan_ack->data_len) + memcpy(&tmp[3], chan_ack->data, chan_ack->data_len); + + NBL_CHAN_SEND(chan_send, chan_ack->dstid, NBL_CHAN_MSG_ACK, tmp, len, NULL, 0, 0); + nbl_chan_send_msg(chan_mgt, &chan_send); + kfree(tmp); + + return 0; +} + +static int nbl_chan_register_msg(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + int ret; + + ret = nbl_chan_add_msg_handler(chan_mgt, msg_type, func, callback_priv); + + return ret; +} + +static bool nbl_chan_check_queue_exist(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt; + struct nbl_chan_info *chan_info; + + if (!priv) + return false; + + chan_mgt = (struct nbl_channel_mgt *)priv; + chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + return chan_info ? 
true : false; +} + +static int nbl_chan_set_queue_interrupt_state(void *priv, u8 chan_type, bool ready) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + if (ready) + set_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state); + else + clear_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state); + + return 0; +} + +static int nbl_chan_dump_txq(void *priv, struct seq_file *m, u8 type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = type == NBL_CHAN_TYPE_MAILBOX ? + NBL_CHAN_MGT_TO_MAILBOX(chan_mgt) : + NBL_CHAN_MGT_TO_ADMINQ(chan_mgt); + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_waitqueue_head *wait; + struct nbl_chan_tx_desc *desc; + int i; + + seq_printf(m, "txq size:%u, next_to_use:%u, tail_ptr:%u, next_to_clean:%u\n", + chan_info->num_txq_entries, txq->next_to_use, txq->tail_ptr, txq->next_to_clean); + seq_printf(m, "reset times %d\n", chan_info->txq_reset_times); + + for (i = 0; i < chan_info->num_txq_entries; i++) { + desc = NBL_CHAN_TX_RING_TO_DESC(txq, i); + wait = &chan_info->wait[i]; + seq_printf(m, "%u: flags 0x%x, srcid %u, dstid %u, data_len %u,\n" + "buf_len %u, msg_type %u, msgid %u, ", i, + desc->flags, desc->srcid, desc->dstid, + desc->data_len, desc->buf_len, desc->msg_type, desc->msgid); + seq_printf(m, "acked %u, ack_err %u, ack_data_len %u,\n" + "need_waked %u, msg_type %u\n", wait->acked, wait->ack_err, + wait->ack_data_len, wait->need_waked, wait->msg_type); + } + + return 0; +} + +static int nbl_chan_dump_rxq(void *priv, struct seq_file *m, u8 type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = type == NBL_CHAN_TYPE_MAILBOX ? 
+ NBL_CHAN_MGT_TO_MAILBOX(chan_mgt) : + NBL_CHAN_MGT_TO_ADMINQ(chan_mgt); + struct nbl_chan_ring *rxq = &chan_info->rxq; + struct nbl_chan_rx_desc *rx_desc; + struct nbl_chan_tx_desc *tx_desc; + struct nbl_chan_buf *rx_buf; + int i; + + seq_printf(m, "rxq size:%u, next_to_use:%u, tail_ptr:%u, next_to_clean:%u\n", + chan_info->num_rxq_entries, rxq->next_to_use, rxq->tail_ptr, rxq->next_to_clean); + seq_printf(m, "reset times %d\n", chan_info->rxq_reset_times); + for (i = 0; i < chan_info->num_rxq_entries; i++) { + rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, i); + rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, i); + tx_desc = (struct nbl_chan_tx_desc *)rx_buf->va; + seq_printf(m, "%u: rx_desc flags 0x%x, buf_len 0x%x, buf_id 0x%x, buffer_addr 0x%llx,\n" + "tx_dedc srcid %u, dstid %u, data_len %u, buf_len %u, msg_type %u, msgid %u\n", + i, rx_desc->flags, rx_desc->buf_len, rx_desc->buf_id, rx_desc->buf_addr, + tx_desc->srcid, tx_desc->dstid, tx_desc->data_len, tx_desc->buf_len, + tx_desc->msg_type, tx_desc->msgid); + } + + return 0; +} + +static u32 nbl_chan_get_adminq_tx_buf_size(void *priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *adminq = NBL_CHAN_MGT_TO_ADMINQ(chan_mgt); + + return adminq->txq_buf_size; +} + +static int nbl_chan_set_listener_info(void *priv, void *shm_ring, struct eventfd_ctx *eventfd) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + + mutex_lock(&chan_mgt->notify->lock); + + chan_mgt->notify->shm_msg_ring = shm_ring; + if (chan_mgt->notify->eventfd) + eventfd_ctx_put(chan_mgt->notify->eventfd); + chan_mgt->notify->eventfd = eventfd; + + mutex_unlock(&chan_mgt->notify->lock); + + return 0; +} + +static int nbl_chan_set_listener_msgtype(void *priv, int msgtype) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + + if (msgtype >= NBL_CHAN_MSG_MAILBOX_MAX) + return -EINVAL; + + mutex_lock(&chan_mgt->notify->lock); + set_bit(msgtype, chan_mgt->notify->msgtype); + mutex_unlock(&chan_mgt->notify->lock); + + return 0; +} + +static void nbl_chan_clear_listener_info(void *priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + + mutex_lock(&chan_mgt->notify->lock); + if (chan_mgt->notify->eventfd) + eventfd_ctx_put(chan_mgt->notify->eventfd); + chan_mgt->notify->eventfd = NULL; + + bitmap_zero(chan_mgt->notify->msgtype, NBL_CHAN_MSG_MAILBOX_MAX); + if (chan_mgt->notify->shm_msg_ring) + memset(chan_mgt->notify->shm_msg_ring, 0, NBL_USER_DEV_SHMMSGRING_SIZE); + mutex_unlock(&chan_mgt->notify->lock); +} + +static void nbl_chan_keepalive_resp(void *priv, u16 srcid, u16 msgid, void *data, u32 data_len) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_ack_info chan_ack; + + NBL_CHAN_ACK(chan_ack, srcid, NBL_CHAN_MSG_KEEP_ALIVE, msgid, 0, NULL, 0); + + nbl_chan_send_ack(chan_mgt, &chan_ack); +} + +static void nbl_chan_keepalive(struct delayed_work *work) +{ + struct nbl_chan_keepalive_info *keepalive = + container_of(work, struct nbl_chan_keepalive_info, keepalive_task); + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)keepalive->chan_mgt; + struct nbl_chan_send_info chan_send; + u32 delay_time; + + NBL_CHAN_SEND(chan_send, keepalive->keepalive_dest, NBL_CHAN_MSG_KEEP_ALIVE, + NULL, 0, NULL, 0, 1); + + if (nbl_chan_send_msg(chan_mgt, &chan_send)) { + if (keepalive->fail_cnt < NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH) + keepalive->fail_cnt++; + + if (keepalive->fail_cnt >= NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH && + 
keepalive->timeout < NBL_CHAN_KEEPALIVE_MAX_TIMEOUT) { + get_random_bytes(&delay_time, sizeof(delay_time)); + keepalive->timeout += delay_time % NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP; + + keepalive->fail_cnt = 0; + } + } else { + if (keepalive->success_cnt < NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH) + keepalive->success_cnt++; + + if (keepalive->success_cnt >= NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH && + keepalive->timeout > NBL_CHAN_KEEPALIVE_DEFAULT_TIMEOUT * 2) { + get_random_bytes(&delay_time, sizeof(delay_time)); + keepalive->timeout -= delay_time % NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP; + + keepalive->success_cnt = 0; + } + } + + nbl_common_queue_delayed_work_keepalive(work, jiffies_to_msecs(keepalive->timeout)); +} + +static int nbl_chan_setup_keepalive(void *priv, u16 dest_id, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_chan_keepalive_info *keepalive = &chan_info->keepalive; + u32 delay_time; + + get_random_bytes(&delay_time, sizeof(delay_time)); + delay_time = delay_time % NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP; + + keepalive->timeout = NBL_CHAN_KEEPALIVE_DEFAULT_TIMEOUT + delay_time; + keepalive->chan_mgt = chan_mgt; + keepalive->keepalive_dest = dest_id; + keepalive->success_cnt = 0; + keepalive->fail_cnt = 0; + + nbl_chan_add_msg_handler(chan_mgt, NBL_CHAN_MSG_KEEP_ALIVE, + nbl_chan_keepalive_resp, chan_mgt); + + nbl_common_alloc_delayed_task(&keepalive->keepalive_task, nbl_chan_keepalive); + + nbl_common_queue_delayed_work_keepalive(&keepalive->keepalive_task, + jiffies_to_msecs(keepalive->timeout)); + + return 0; +} + +static void nbl_chan_remove_keepalive(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + nbl_common_release_delayed_task(&chan_info->keepalive.keepalive_task); +} + +static void nbl_chan_register_chan_task(void *priv, u8 chan_type, struct work_struct *task) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + chan_info->clean_task = task; +} + +static struct nbl_channel_ops chan_ops = { + .send_msg = nbl_chan_send_msg, + .send_ack = nbl_chan_send_ack, + .register_msg = nbl_chan_register_msg, + .cfg_chan_qinfo_map_table = nbl_chan_cfg_qinfo_map_table, + .check_queue_exist = nbl_chan_check_queue_exist, + .setup_queue = nbl_chan_setup_queue, + .teardown_queue = nbl_chan_teardown_queue, + .set_queue_interrupt_state = nbl_chan_set_queue_interrupt_state, + .clean_queue_subtask = nbl_chan_clean_queue_subtask, + + /* for mailbox register msg for userdev */ + .set_listener_info = nbl_chan_set_listener_info, + .set_listener_msgtype = nbl_chan_set_listener_msgtype, + .clear_listener_info = nbl_chan_clear_listener_info, + .dump_txq = nbl_chan_dump_txq, + .dump_rxq = nbl_chan_dump_rxq, + .get_adminq_tx_buf_size = nbl_chan_get_adminq_tx_buf_size, + + .setup_keepalive = nbl_chan_setup_keepalive, + .remove_keepalive = nbl_chan_remove_keepalive, + .register_chan_task = nbl_chan_register_chan_task, +}; + +static int nbl_chan_setup_chan_mgt(struct nbl_adapter *adapter, + struct nbl_init_param *param, + struct nbl_channel_mgt_leonis **chan_mgt_leonis) +{ + struct device *dev; + struct nbl_common_info *common; + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_chan_info *mailbox; + struct nbl_chan_info 
*adminq = NULL; + int ret; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + phy_ops_tbl = NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + + *chan_mgt_leonis = devm_kzalloc(dev, sizeof(struct nbl_channel_mgt_leonis), GFP_KERNEL); + if (!*chan_mgt_leonis) + goto alloc_channel_mgt_leonis_fail; + + NBL_CHAN_MGT_TO_COMMON(&(*chan_mgt_leonis)->chan_mgt) = common; + (*chan_mgt_leonis)->chan_mgt.phy_ops_tbl = phy_ops_tbl; + + mailbox = devm_kzalloc(dev, sizeof(struct nbl_chan_info), GFP_KERNEL); + if (!mailbox) + goto alloc_mailbox_fail; + mailbox->chan_type = NBL_CHAN_TYPE_MAILBOX; + NBL_CHAN_MGT_TO_MAILBOX(&(*chan_mgt_leonis)->chan_mgt) = mailbox; + + if (param->caps.has_ctrl || param->caps.has_factory_ctrl) { + adminq = devm_kzalloc(dev, sizeof(struct nbl_chan_info), GFP_KERNEL); + if (!adminq) + goto alloc_adminq_fail; + adminq->chan_type = NBL_CHAN_TYPE_ADMINQ; + NBL_CHAN_MGT_TO_ADMINQ(&(*chan_mgt_leonis)->chan_mgt) = adminq; + } + + ret = nbl_chan_init_msg_handler(&(*chan_mgt_leonis)->chan_mgt, param->caps.has_user); + if (ret) + goto init_chan_msg_handle; + + return 0; + +init_chan_msg_handle: + if (adminq) + devm_kfree(dev, adminq); +alloc_adminq_fail: + devm_kfree(dev, mailbox); +alloc_mailbox_fail: + devm_kfree(dev, *chan_mgt_leonis); + *chan_mgt_leonis = NULL; +alloc_channel_mgt_leonis_fail: + return -ENOMEM; +} + +static void nbl_chan_remove_chan_mgt(struct nbl_common_info *common, + struct nbl_channel_mgt_leonis **chan_mgt_leonis) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + + nbl_chan_remove_msg_handler(&(*chan_mgt_leonis)->chan_mgt); + if (NBL_CHAN_MGT_TO_ADMINQ(&(*chan_mgt_leonis)->chan_mgt)) + devm_kfree(dev, NBL_CHAN_MGT_TO_ADMINQ(&(*chan_mgt_leonis)->chan_mgt)); + devm_kfree(dev, NBL_CHAN_MGT_TO_MAILBOX(&(*chan_mgt_leonis)->chan_mgt)); + + devm_kfree(dev, *chan_mgt_leonis); + *chan_mgt_leonis = NULL; +} + +static void nbl_chan_remove_ops(struct device *dev, struct nbl_channel_ops_tbl **chan_ops_tbl) +{ + if (!dev || !chan_ops_tbl) + return; + + devm_kfree(dev, *chan_ops_tbl); + *chan_ops_tbl = NULL; +} + +static int nbl_chan_setup_ops(struct device *dev, struct nbl_channel_ops_tbl **chan_ops_tbl, + struct nbl_channel_mgt_leonis *chan_mgt) +{ + int ret; + *chan_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_channel_ops_tbl), GFP_KERNEL); + if (!*chan_ops_tbl) + return -ENOMEM; + + NBL_CHAN_OPS_TBL_TO_OPS(*chan_ops_tbl) = &chan_ops; + NBL_CHAN_OPS_TBL_TO_PRIV(*chan_ops_tbl) = chan_mgt; + + if (!chan_mgt) + return 0; + + ret = nbl_chan_add_msg_handler(&chan_mgt->chan_mgt, NBL_CHAN_MSG_ACK, + nbl_chan_recv_ack_msg, chan_mgt); + if (ret) + goto err; + + return 0; + +err: + devm_kfree(dev, *chan_ops_tbl); + *chan_ops_tbl = NULL; + + return -1; +} + +int nbl_chan_init_common(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_channel_mgt_leonis **chan_mgt_leonis; + struct nbl_channel_ops_tbl **chan_ops_tbl; + int ret = 0; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + chan_mgt_leonis = (struct nbl_channel_mgt_leonis **)&NBL_ADAPTER_TO_CHAN_MGT(adapter); + chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + ret = nbl_chan_setup_chan_mgt(adapter, param, chan_mgt_leonis); + if (ret) + goto setup_mgt_fail; + + ret = nbl_chan_setup_ops(dev, chan_ops_tbl, *chan_mgt_leonis); + if (ret) + goto setup_ops_fail; + + return 0; + +setup_ops_fail: + nbl_chan_remove_chan_mgt(common, chan_mgt_leonis); 
+setup_mgt_fail: + return ret; +} + +void nbl_chan_remove_common(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_channel_mgt_leonis **chan_mgt_leonis; + struct nbl_channel_ops_tbl **chan_ops_tbl; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + chan_mgt_leonis = (struct nbl_channel_mgt_leonis **)&NBL_ADAPTER_TO_CHAN_MGT(adapter); + chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + nbl_chan_remove_chan_mgt(common, chan_mgt_leonis); + nbl_chan_remove_ops(dev, chan_ops_tbl); +} + diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h new file mode 100644 index 000000000000..237f99229836 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_CHANNEL_H_ +#define _NBL_CHANNEL_H_ + +#include "nbl_core.h" + +#define NBL_CHAN_MGT_TO_COMMON(chan_mgt) ((chan_mgt)->common) +#define NBL_CHAN_MGT_TO_DEV(chan_mgt) NBL_COMMON_TO_DEV(NBL_CHAN_MGT_TO_COMMON(chan_mgt)) +#define NBL_CHAN_MGT_TO_PHY_OPS_TBL(chan_mgt) ((chan_mgt)->phy_ops_tbl) +#define NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt) (NBL_CHAN_MGT_TO_PHY_OPS_TBL(chan_mgt)->ops) +#define NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt) (NBL_CHAN_MGT_TO_PHY_OPS_TBL(chan_mgt)->priv) +#define NBL_CHAN_MGT_TO_MAILBOX(chan_mgt) ((chan_mgt)->chan_info[NBL_CHAN_TYPE_MAILBOX]) +#define NBL_CHAN_MGT_TO_ADMINQ(chan_mgt) ((chan_mgt)->chan_info[NBL_CHAN_TYPE_ADMINQ]) +#define NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type) ((chan_mgt)->chan_info[chan_type]) + +#define NBL_CHAN_TX_RING_TO_DESC(tx_ring, i) \ + (&(((struct nbl_chan_tx_desc *)((tx_ring)->desc))[i])) +#define NBL_CHAN_RX_RING_TO_DESC(rx_ring, i) \ + (&(((struct nbl_chan_rx_desc *)((rx_ring)->desc))[i])) +#define NBL_CHAN_TX_RING_TO_BUF(tx_ring, i) (&(((tx_ring)->buf)[i])) +#define NBL_CHAN_RX_RING_TO_BUF(rx_ring, i) (&(((rx_ring)->buf)[i])) + +#define NBL_CHAN_TX_WAIT_US 100 +#define NBL_CHAN_TX_REKICK_WAIT_TIMES 2000 +#define NBL_CHAN_TX_WAIT_TIMES 10000 + +#define NBL_CHAN_TX_WAIT_ACK_US_MIN 100 +#define NBL_CHAN_TX_WAIT_ACK_US_MAX 120 +#define NBL_CHAN_TX_WAIT_ACK_TIMES 50000 + +#define NBL_CHAN_QUEUE_LEN 256 +#define NBL_CHAN_BUF_LEN 4096 + +#define NBL_CHAN_TX_DESC_EMBEDDED_DATA_LEN 16 +#define NBL_CHAN_RESEND_MAX_TIMES (5) + +#define NBL_CHAN_TX_DESC_AVAIL BIT(0) +#define NBL_CHAN_TX_DESC_USED BIT(1) +#define NBL_CHAN_RX_DESC_WRITE BIT(1) +#define NBL_CHAN_RX_DESC_AVAIL BIT(3) +#define NBL_CHAN_RX_DESC_USED BIT(4) + +#define NBL_CHAN_ACK_WAIT_TIME (5 * HZ) + +/* adminq */ +#define NBL_ADMINQ_QUEUE_LEN 256 +#define NBL_ADMINQ_BUF_LEN 4096 + +#define NBL_CHAN_HANDLER_TBL_BUCKET_SIZE 512 + +enum { + NBL_MB_RX_QID = 0, + NBL_MB_TX_QID = 1, +}; + +enum { + NBL_MBX_STATUS_WAITING = 0, + NBL_MBX_STATUS_TIMEOUT = -1, +}; + +struct nbl_chan_buf { + void *va; + dma_addr_t pa; + size_t size; +}; + +struct nbl_chan_tx_desc { + u16 flags; + u16 srcid; + u16 dstid; + u16 data_len; + u16 buf_len; + u64 buf_addr; + u16 msg_type; + u8 data[16]; + u16 msgid; + u8 rsv[26]; +} __packed; + +struct nbl_chan_rx_desc { + u16 flags; + u32 buf_len; + u16 buf_id; + u64 buf_addr; +} __packed; + +struct nbl_chan_ring { + void *desc; + struct nbl_chan_buf *buf; + + u16 next_to_use; + u16 tail_ptr; + u16 next_to_clean; + + dma_addr_t dma; +}; + +struct 
nbl_chan_waitqueue_head { + struct wait_queue_head wait_queue; + char *ack_data; + int acked; + int ack_err; + u16 ack_data_len; + u16 need_waked; + u16 msg_type; + u8 status; +}; + +struct nbl_chan_notify_userdev { + DECLARE_BITMAP(msgtype, NBL_CHAN_MSG_MAILBOX_MAX); + struct mutex lock; /* used to protect eventfd and shm_msg_ring */ + struct eventfd_ctx *eventfd; + void *shm_msg_ring; +}; + +#define NBL_CHAN_KEEPALIVE_DEFAULT_TIMEOUT (10 * HZ) +#define NBL_CHAN_KEEPALIVE_MAX_TIMEOUT (1024 * HZ) +#define NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP (10 * HZ) +#define NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH (3) + +struct nbl_chan_keepalive_info { + struct delayed_work keepalive_task; + void *chan_mgt; + u32 timeout; + u16 keepalive_dest; + u8 success_cnt; + u8 fail_cnt; +}; + +struct nbl_chan_info { + struct nbl_chan_ring txq; + struct nbl_chan_ring rxq; + struct nbl_chan_waitqueue_head *wait; + /* spinlock_t */ + spinlock_t txq_lock; + + struct work_struct *clean_task; + struct nbl_chan_keepalive_info keepalive; + + u16 num_txq_entries; + u16 num_rxq_entries; + u16 txq_buf_size; + u16 rxq_buf_size; + + u16 txq_reset_times; + u16 rxq_reset_times; + + DECLARE_BITMAP(state, NBL_CHAN_STATE_NBITS); + + u8 chan_type; +}; + +struct nbl_chan_msg_node_data { + nbl_chan_resp func; + void *priv; +}; + +struct nbl_channel_mgt { + struct nbl_common_info *common; + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_chan_info *chan_info[NBL_CHAN_TYPE_MAX]; + struct nbl_chan_notify_userdev *notify; + void *handle_hash_tbl; +}; + +/* Mgt structure for each product. + * Every indivisual mgt must have the common mgt as its first member, and contains its unique + * data structure in the reset of it. + */ +struct nbl_channel_mgt_leonis { + struct nbl_channel_mgt chan_mgt; +}; + +struct nbl_channel_mgt_bootis { + struct nbl_channel_mgt chan_mgt; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c new file mode 100644 index 000000000000..b6aef330987a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c @@ -0,0 +1,954 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_common.h" + +struct nbl_common_wq_mgt { + struct workqueue_struct *ctrl_dev_wq1; + struct workqueue_struct *ctrl_dev_wq2; + struct workqueue_struct *net_dev_wq; + struct workqueue_struct *keepalive_wq; + struct workqueue_struct *rdma_wq; +}; + +void nbl_convert_mac(u8 *mac, u8 *reverse_mac) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + reverse_mac[i] = mac[ETH_ALEN - 1 - i]; +} + +static struct nbl_common_wq_mgt *wq_mgt; + +void nbl_common_queue_work(struct work_struct *task, bool ctrl_task, bool singlethread) +{ + if (ctrl_task && singlethread) + queue_work(wq_mgt->ctrl_dev_wq1, task); + else if (ctrl_task && !singlethread) + queue_work(wq_mgt->ctrl_dev_wq2, task); + else if (!ctrl_task) + queue_work(wq_mgt->net_dev_wq, task); +} + +void nbl_common_queue_work_rdma(struct work_struct *task) +{ + queue_work(wq_mgt->rdma_wq, task); +} + +void nbl_common_queue_delayed_work(struct delayed_work *task, u32 msec, + bool ctrl_task, bool singlethread) +{ + if (ctrl_task && singlethread) + queue_delayed_work(wq_mgt->ctrl_dev_wq1, task, msecs_to_jiffies(msec)); + else if (ctrl_task && !singlethread) + queue_delayed_work(wq_mgt->ctrl_dev_wq2, task, msecs_to_jiffies(msec)); + else if (!ctrl_task) + queue_delayed_work(wq_mgt->net_dev_wq, task, msecs_to_jiffies(msec)); +} + +void nbl_common_queue_delayed_work_keepalive(struct delayed_work *task, u32 msec) +{ + queue_delayed_work(wq_mgt->keepalive_wq, task, msecs_to_jiffies(msec)); +} + +void nbl_common_release_task(struct work_struct *task) +{ + cancel_work_sync(task); +} + +void nbl_common_alloc_task(struct work_struct *task, void *func) +{ + INIT_WORK(task, func); +} + +void nbl_common_release_delayed_task(struct delayed_work *task) +{ + cancel_delayed_work_sync(task); +} + +void nbl_common_alloc_delayed_task(struct delayed_work *task, void *func) +{ + INIT_DELAYED_WORK(task, func); +} + +void nbl_common_flush_task(struct work_struct *task) +{ + flush_work(task); +} + +void nbl_common_destroy_wq(void) +{ + destroy_workqueue(wq_mgt->rdma_wq); + destroy_workqueue(wq_mgt->keepalive_wq); + destroy_workqueue(wq_mgt->net_dev_wq); + destroy_workqueue(wq_mgt->ctrl_dev_wq2); + destroy_workqueue(wq_mgt->ctrl_dev_wq1); + kfree(wq_mgt); +} + +int nbl_common_create_wq(void) +{ + wq_mgt = kzalloc(sizeof(*wq_mgt), GFP_KERNEL); + if (!wq_mgt) + return -ENOMEM; + + wq_mgt->ctrl_dev_wq1 = create_singlethread_workqueue("nbl_ctrldev_wq1"); + if (!wq_mgt->ctrl_dev_wq1) { + pr_err("Failed to create workqueue nbl_ctrldev_wq1\n"); + goto alloc_ctrl_dev_wq1_failed; + } + + wq_mgt->ctrl_dev_wq2 = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND, + 0, "nbl_ctrldev_wq2"); + if (!wq_mgt->ctrl_dev_wq2) { + pr_err("Failed to create workqueue nbl_ctrldev_wq2\n"); + goto alloc_ctrl_dev_wq2_failed; + } + + wq_mgt->net_dev_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND, + 0, "nbl_net_dev_wq1"); + if (!wq_mgt->net_dev_wq) { + pr_err("Failed to create workqueue nbl_net_dev_wq1\n"); + goto alloc_net_dev_wq_failed; + } + + wq_mgt->rdma_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nbl_rdma_wq1"); + if (!wq_mgt->rdma_wq) { + pr_err("Failed to create workqueue nbl_rdma_wq1\n"); + goto alloc_rdma_wq_failed; + } + + wq_mgt->keepalive_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND, + 0, "nbl_keepalive_wq1"); + if (!wq_mgt->keepalive_wq) { + pr_err("Failed to create workqueue nbl_keepalive_wq1\n"); + goto alloc_keepalive_wq_failed; + } + + return 0; + +alloc_keepalive_wq_failed: + destroy_workqueue(wq_mgt->keepalive_wq); +alloc_rdma_wq_failed: + 
destroy_workqueue(wq_mgt->net_dev_wq); +alloc_net_dev_wq_failed: + destroy_workqueue(wq_mgt->ctrl_dev_wq2); +alloc_ctrl_dev_wq2_failed: + destroy_workqueue(wq_mgt->ctrl_dev_wq1); +alloc_ctrl_dev_wq1_failed: + kfree(wq_mgt); + return -ENOMEM; +} + +u32 nbl_common_pf_id_subtraction_mgtpf_id(struct nbl_common_info *common, u32 pf_id) +{ + u32 diff = U32_MAX; + + if (pf_id >= NBL_COMMON_TO_MGT_PF(common)) + diff = pf_id - NBL_COMMON_TO_MGT_PF(common); + + return diff; +} + +/** + * alloc a index resource poll, the index_size max is 64 * 1024 + * the poll support start_index not zero; + * the poll support multi thread + */ +void *nbl_common_init_index_table(struct nbl_index_tbl_key *key) +{ + struct nbl_index_mgt *index_mgt; + int bucket_size; + int i; + + if (key->index_size > NBL_INDEX_SIZE_MAX) + return NULL; + + index_mgt = devm_kzalloc(key->dev, sizeof(struct nbl_index_mgt), GFP_KERNEL); + if (!index_mgt) + return NULL; + + index_mgt->bitmap = devm_kcalloc(key->dev, BITS_TO_LONGS(key->index_size), + sizeof(long), GFP_KERNEL); + if (!index_mgt->bitmap) + goto alloc_bitmap_failed; + + bucket_size = key->index_size / NBL_INDEX_HASH_DIVISOR; + index_mgt->key_hash = devm_kcalloc(key->dev, bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!index_mgt->key_hash) + goto alloc_key_hash_failed; + + for (i = 0; i < bucket_size; i++) + INIT_HLIST_HEAD(index_mgt->key_hash + i); + + memcpy(&index_mgt->tbl_key, key, sizeof(struct nbl_index_tbl_key)); + index_mgt->free_index_num = key->index_size; + index_mgt->bucket_size = bucket_size; + mutex_init(&index_mgt->lock); + + return index_mgt; + +alloc_key_hash_failed: + devm_kfree(key->dev, index_mgt->bitmap); +alloc_bitmap_failed: + devm_kfree(key->dev, index_mgt); + + return NULL; +} + +void nbl_common_remove_index_table(void *priv) +{ + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct device *dev; + struct nbl_index_entry_key_node *key_node; + struct hlist_node *list_node; + int i; + + if (!index_mgt) + return; + + mutex_lock(&index_mgt->lock); + dev = index_mgt->tbl_key.dev; + devm_kfree(dev, index_mgt->bitmap); + for (i = 0; i < index_mgt->bucket_size; i++) { + hlist_for_each_entry_safe(key_node, list_node, index_mgt->key_hash + i, node) { + hlist_del(&key_node->node); + devm_kfree(dev, key_node); + } + } + + devm_kfree(dev, index_mgt->key_hash); + mutex_unlock(&index_mgt->lock); + devm_kfree(dev, index_mgt); +} + +static u32 nbl_common_calculate_hash_key(void *key, u32 key_size, u32 bucket_size) +{ + u32 i; + u32 value = 0; + u32 hash_value; + + /* if bucket size little than 1, the hash value always 0 */ + if (bucket_size == NBL_HASH_TBL_LIST_BUCKET_SIZE) + return 0; + + for (i = 0; i < key_size; i++) + value += *((u8 *)key + i); + + hash_value = __hash_32(value); + + return hash_value % bucket_size; +} + +static int nbl_common_alloc_index(struct nbl_index_mgt *index_mgt, void *key, u32 key_size) +{ + struct nbl_index_entry_key_node *key_node; + u32 key_node_size; + u32 index = U32_MAX; + u32 hash_value; + u32 base_index; + + if (!index_mgt->free_index_num) + return index; + + base_index = find_first_zero_bit(index_mgt->bitmap, index_mgt->tbl_key.index_size); + if (base_index >= index_mgt->tbl_key.index_size) + return index; + + key_node_size = sizeof(struct nbl_index_entry_key_node) + key_size; + key_node = devm_kzalloc(index_mgt->tbl_key.dev, key_node_size, GFP_KERNEL); + if (!key_node) + return index; + + set_bit(base_index, index_mgt->bitmap); + index_mgt->free_index_num--; + index = base_index + 
index_mgt->tbl_key.start_index; + hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); + key_node->index = index; + memcpy(key_node->data, key, key_size); + hlist_add_head(&key_node->node, index_mgt->key_hash + hash_value); + + return index; +} + +/** + * if the key has alloced a available index, return the index; + * else alloc a new index, store the key, and return the index. + */ +int nbl_common_get_index(void *priv, void *key, u32 key_size) +{ + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct nbl_index_entry_key_node *key_node; + u32 index = U32_MAX; + u32 hash_value; + + if (key_size != index_mgt->tbl_key.key_size) + return index; + + hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); + mutex_lock(&index_mgt->lock); + hlist_for_each_entry(key_node, index_mgt->key_hash + hash_value, node) + if (!memcmp(key_node->data, key, key_size)) { + index = key_node->index; + mutex_unlock(&index_mgt->lock); + return index; + } + + index = nbl_common_alloc_index(index_mgt, key, key_size); + mutex_unlock(&index_mgt->lock); + + return index; +} + +void nbl_common_free_index(void *priv, void *key, u32 key_size) +{ + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct nbl_index_entry_key_node *key_node; + u32 hash_value; + u32 free_index; + + if (key_size != index_mgt->tbl_key.key_size) + return; + + hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); + mutex_lock(&index_mgt->lock); + hlist_for_each_entry(key_node, index_mgt->key_hash + hash_value, node) + if (!memcmp(key_node->data, key, key_size)) { + free_index = key_node->index - index_mgt->tbl_key.start_index; + clear_bit(free_index, index_mgt->bitmap); + hlist_del(&key_node->node); + devm_kfree(index_mgt->tbl_key.dev, key_node); + index_mgt->free_index_num++; + mutex_unlock(&index_mgt->lock); + return; + } + + mutex_unlock(&index_mgt->lock); +} + +/** + * alloc a hash table + * the table support multi thread + */ +void *nbl_common_init_hash_table(struct nbl_hash_tbl_key *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt; + int bucket_size; + int i; + + tbl_mgt = devm_kzalloc(key->dev, sizeof(struct nbl_hash_tbl_mgt), GFP_KERNEL); + if (!tbl_mgt) + return NULL; + + bucket_size = key->bucket_size; + tbl_mgt->hash = devm_kcalloc(key->dev, bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->hash) + goto alloc_hash_failed; + + for (i = 0; i < bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->hash + i); + + memcpy(&tbl_mgt->tbl_key, key, sizeof(struct nbl_hash_tbl_key)); + + if (key->lock_need) + mutex_init(&tbl_mgt->lock); + + return tbl_mgt; + +alloc_hash_failed: + devm_kfree(key->dev, tbl_mgt); + + return NULL; +} + +/** + * alloc a hash node, and add to hlist_head + */ +int nbl_common_alloc_hash_node(void *priv, void *key, void *data) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + u32 hash_value; + u32 node_size; + u16 key_size; + u16 data_size; + + node_size = sizeof(struct nbl_hash_entry_node); + hash_node = devm_kzalloc(tbl_mgt->tbl_key.dev, sizeof(struct nbl_hash_entry_node), + GFP_KERNEL); + if (!hash_node) + return -1; + + key_size = tbl_mgt->tbl_key.key_size; + hash_node->key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!hash_node->key) + goto alloc_key_failed; + + data_size = tbl_mgt->tbl_key.data_size; + hash_node->data = devm_kzalloc(tbl_mgt->tbl_key.dev, data_size, GFP_KERNEL); + if (!hash_node->data) + 
goto alloc_data_failed; + + memcpy(hash_node->key, key, key_size); + memcpy(hash_node->data, data, data_size); + + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_add_head(&hash_node->node, tbl_mgt->hash + hash_value); + tbl_mgt->node_num++; + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + return 0; + +alloc_data_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->key); +alloc_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + + return -1; +} + +/** + * get a hash node, return the data if node exist + */ +void *nbl_common_get_hash_node(void *priv, void *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_head *head; + void *data = NULL; + u32 hash_value; + u16 key_size; + + key_size = tbl_mgt->tbl_key.key_size; + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + head = tbl_mgt->hash + hash_value; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_for_each_entry(hash_node, head, node) + if (!memcmp(hash_node->key, key, key_size)) { + data = hash_node->data; + break; + } + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + return data; +} + +static void nbl_common_remove_hash_node(struct nbl_hash_tbl_mgt *tbl_mgt, + struct nbl_hash_entry_node *hash_node) +{ + hlist_del(&hash_node->node); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->key); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->data); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + tbl_mgt->node_num--; +} + +/** + * free a hash node + */ +void nbl_common_free_hash_node(void *priv, void *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_head *head; + u32 hash_value; + u16 key_size; + + key_size = tbl_mgt->tbl_key.key_size; + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + head = tbl_mgt->hash + hash_value; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_for_each_entry(hash_node, head, node) + if (!memcmp(hash_node->key, key, key_size)) + break; + + if (hash_node) + nbl_common_remove_hash_node(tbl_mgt, hash_node); + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); +} + +/* 0: the node accord with the match condition */ +static int nbl_common_match_and_done_hash_node(struct nbl_hash_tbl_mgt *tbl_mgt, + struct nbl_hash_tbl_scan_key *key, + struct nbl_hash_entry_node *hash_node) +{ + int ret = 0; + + if (key->match_func) { + ret = key->match_func(key->match_condition, hash_node->key, hash_node->data); + if (ret) + return ret; + } + + if (key->action_func) + key->action_func(key->action_priv, hash_node->key, hash_node->data); + + if (key->op_type == NBL_HASH_TBL_OP_DELETE) + nbl_common_remove_hash_node(tbl_mgt, hash_node); + + return 0; +} + +void nbl_common_scan_hash_node(void *priv, struct nbl_hash_tbl_scan_key *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_node *safe_node; + struct hlist_head *head; + u32 i; + int match_ret; + int node_num = 0; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) { + head = tbl_mgt->hash + i; + hlist_for_each_entry_safe(hash_node, safe_node, head, node) { + match_ret = 
nbl_common_match_and_done_hash_node(tbl_mgt, key, hash_node); + if (!match_ret) + node_num++; + } + } + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); +} + +u16 nbl_common_get_hash_node_num(void *priv) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + + return tbl_mgt->node_num; +} + +void nbl_common_remove_hash_table(void *priv, struct nbl_hash_tbl_del_key *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_node *safe_node; + struct hlist_head *head; + struct device *dev; + u32 i; + + if (!priv) + return; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) { + head = tbl_mgt->hash + i; + hlist_for_each_entry_safe(hash_node, safe_node, head, node) { + if (key->action_func) + key->action_func(key->action_priv, hash_node->key, hash_node->data); + nbl_common_remove_hash_node(tbl_mgt, hash_node); + } + } + + devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->hash); + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + dev = tbl_mgt->tbl_key.dev; + devm_kfree(dev, tbl_mgt); +} + +/** + * alloc a hash x and y axis table + * it support x/y axis store if necessary, so it can scan by x/y axis; + * the table support multi thread + */ +void *nbl_common_init_hash_xy_table(struct nbl_hash_xy_tbl_key *key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt; + int i; + + tbl_mgt = devm_kzalloc(key->dev, sizeof(struct nbl_hash_xy_tbl_mgt), GFP_KERNEL); + if (!tbl_mgt) + return NULL; + + tbl_mgt->hash = devm_kcalloc(key->dev, key->bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->hash) + goto alloc_hash_failed; + + tbl_mgt->x_axis_hash = devm_kcalloc(key->dev, key->x_axis_bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->x_axis_hash) + goto alloc_x_axis_hash_failed; + + tbl_mgt->y_axis_hash = devm_kcalloc(key->dev, key->y_axis_bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->y_axis_hash) + goto alloc_y_axis_hash_failed; + + for (i = 0; i < key->bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->hash + i); + + for (i = 0; i < key->x_axis_bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->x_axis_hash + i); + + for (i = 0; i < key->y_axis_bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->y_axis_hash + i); + + memcpy(&tbl_mgt->tbl_key, key, sizeof(struct nbl_hash_xy_tbl_key)); + + if (key->lock_need) + mutex_init(&tbl_mgt->lock); + + return tbl_mgt; + +alloc_y_axis_hash_failed: + devm_kfree(key->dev, tbl_mgt->x_axis_hash); +alloc_x_axis_hash_failed: + devm_kfree(key->dev, tbl_mgt->hash); +alloc_hash_failed: + devm_kfree(key->dev, tbl_mgt); + + return NULL; +} + +/** + * alloc a hash x and y node, and add to hlist_head + */ +int nbl_common_alloc_hash_xy_node(void *priv, void *x_key, void *y_key, void *data) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + void *key; + u32 hash_value; + u32 x_hash_value; + u32 y_hash_value; + u32 node_size; + u16 key_size; + u16 x_key_size; + u16 y_key_size; + u16 data_size; + + node_size = sizeof(struct nbl_hash_entry_xy_node); + hash_node = devm_kzalloc(tbl_mgt->tbl_key.dev, sizeof(struct nbl_hash_entry_xy_node), + GFP_KERNEL); + if (!hash_node) + return -1; + + x_key_size = tbl_mgt->tbl_key.x_axis_key_size; + hash_node->x_axis_key = devm_kzalloc(tbl_mgt->tbl_key.dev, x_key_size, GFP_KERNEL); + if (!hash_node->x_axis_key) + goto alloc_x_key_failed; + + y_key_size = 
tbl_mgt->tbl_key.y_axis_key_size; + hash_node->y_axis_key = devm_kzalloc(tbl_mgt->tbl_key.dev, y_key_size, GFP_KERNEL); + if (!hash_node->y_axis_key) + goto alloc_y_key_failed; + + key_size = x_key_size + y_key_size; + key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!key) + goto alloc_key_failed; + + data_size = tbl_mgt->tbl_key.data_size; + hash_node->data = devm_kzalloc(tbl_mgt->tbl_key.dev, data_size, GFP_KERNEL); + if (!hash_node->data) + goto alloc_data_failed; + + memcpy(key, x_key, x_key_size); + memcpy(key + x_key_size, y_key, y_key_size); + memcpy(hash_node->x_axis_key, x_key, x_key_size); + memcpy(hash_node->y_axis_key, y_key, y_key_size); + memcpy(hash_node->data, data, data_size); + + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + x_hash_value = nbl_common_calculate_hash_key(x_key, x_key_size, + tbl_mgt->tbl_key.x_axis_bucket_size); + y_hash_value = nbl_common_calculate_hash_key(y_key, y_key_size, + tbl_mgt->tbl_key.y_axis_bucket_size); + + devm_kfree(tbl_mgt->tbl_key.dev, key); + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_add_head(&hash_node->node, tbl_mgt->hash + hash_value); + hlist_add_head(&hash_node->x_axis_node, tbl_mgt->x_axis_hash + x_hash_value); + hlist_add_head(&hash_node->y_axis_node, tbl_mgt->y_axis_hash + y_hash_value); + + tbl_mgt->node_num++; + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + return 0; + +alloc_data_failed: + devm_kfree(tbl_mgt->tbl_key.dev, key); +alloc_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->y_axis_key); +alloc_y_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->x_axis_key); +alloc_x_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + + return -1; +} + +/** + * get a hash node, return the data if node exist + */ +void *nbl_common_get_hash_xy_node(void *priv, void *x_key, void *y_key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + struct hlist_head *head; + void *data = NULL; + void *key; + u32 hash_value; + u16 key_size; + u16 x_key_size; + u16 y_key_size; + + x_key_size = tbl_mgt->tbl_key.x_axis_key_size; + y_key_size = tbl_mgt->tbl_key.y_axis_key_size; + key_size = x_key_size + y_key_size; + key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!key) + return NULL; + + memcpy(key, x_key, x_key_size); + memcpy(key + x_key_size, y_key, y_key_size); + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + head = tbl_mgt->hash + hash_value; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_for_each_entry(hash_node, head, node) + if (!memcmp(hash_node->x_axis_key, x_key, x_key_size) && + !memcmp(hash_node->y_axis_key, y_key, y_key_size)) { + data = hash_node->data; + break; + } + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + devm_kfree(tbl_mgt->tbl_key.dev, key); + + return data; +} + +static void nbl_common_remove_hash_xy_node(struct nbl_hash_xy_tbl_mgt *tbl_mgt, + struct nbl_hash_entry_xy_node *hash_node) +{ + hlist_del(&hash_node->node); + hlist_del(&hash_node->x_axis_node); + hlist_del(&hash_node->y_axis_node); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->x_axis_key); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->y_axis_key); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->data); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + tbl_mgt->node_num--; +} + +/** + * free a hash node + */ +void 
nbl_common_free_hash_xy_node(void *priv, void *x_key, void *y_key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + struct hlist_head *head; + void *key; + u32 hash_value; + u16 key_size; + u16 x_key_size; + u16 y_key_size; + + x_key_size = tbl_mgt->tbl_key.x_axis_key_size; + y_key_size = tbl_mgt->tbl_key.y_axis_key_size; + key_size = x_key_size + y_key_size; + key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!key) + return; + + memcpy(key, x_key, x_key_size); + memcpy(key + x_key_size, y_key, y_key_size); + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + head = tbl_mgt->hash + hash_value; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_for_each_entry(hash_node, head, node) + if (!memcmp(hash_node->x_axis_key, x_key, x_key_size) && + !memcmp(hash_node->y_axis_key, y_key, y_key_size)) { + break; + } + + if (hash_node) + nbl_common_remove_hash_xy_node(tbl_mgt, hash_node); + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + devm_kfree(tbl_mgt->tbl_key.dev, key); +} + +/* 0: the node accord with the match condition */ +static int nbl_common_match_and_done_hash_xy_node(struct nbl_hash_xy_tbl_mgt *tbl_mgt, + struct nbl_hash_xy_tbl_scan_key *key, + struct nbl_hash_entry_xy_node *hash_node) +{ + int ret = 0; + + if (key->match_func) { + ret = key->match_func(key->match_condition, hash_node->x_axis_key, + hash_node->y_axis_key, hash_node->data); + if (ret) + return ret; + } + + if (key->action_func) + key->action_func(key->action_priv, hash_node->x_axis_key, hash_node->y_axis_key, + hash_node->data); + + if (key->op_type == NBL_HASH_TBL_OP_DELETE) + nbl_common_remove_hash_xy_node(tbl_mgt, hash_node); + + return 0; +} + +/** + * scan by x_axis or y_aixs or none, and return the match node number + */ +u16 nbl_common_scan_hash_xy_node(void *priv, struct nbl_hash_xy_tbl_scan_key *key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + struct hlist_node *safe_node; + struct hlist_head *head; + int match_ret; + u32 i; + u32 hash_value; + u16 x_axis_key_size; + u16 y_axis_key_size; + u16 node_num = 0; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + if (key->scan_type == NBL_HASH_TBL_X_AXIS_SCAN) { + x_axis_key_size = tbl_mgt->tbl_key.x_axis_key_size; + hash_value = nbl_common_calculate_hash_key(key->x_key, x_axis_key_size, + tbl_mgt->tbl_key.x_axis_bucket_size); + head = tbl_mgt->x_axis_hash + hash_value; + hlist_for_each_entry_safe(hash_node, safe_node, head, x_axis_node) { + if (!memcmp(hash_node->x_axis_key, key->x_key, x_axis_key_size)) { + match_ret = nbl_common_match_and_done_hash_xy_node(tbl_mgt, key, + hash_node); + if (!match_ret) { + node_num++; + if (key->only_query_exist) + break; + } + } + } + } else if (key->scan_type == NBL_HASH_TBL_Y_AXIS_SCAN) { + y_axis_key_size = tbl_mgt->tbl_key.y_axis_key_size; + hash_value = nbl_common_calculate_hash_key(key->y_key, y_axis_key_size, + tbl_mgt->tbl_key.y_axis_bucket_size); + head = tbl_mgt->y_axis_hash + hash_value; + hlist_for_each_entry_safe(hash_node, safe_node, head, y_axis_node) { + if (!memcmp(hash_node->y_axis_key, key->y_key, y_axis_key_size)) { + match_ret = nbl_common_match_and_done_hash_xy_node(tbl_mgt, key, + hash_node); + if (!match_ret) { + node_num++; + if (key->only_query_exist) + break; + } + } + } + } else { + for (i = 0; i < tbl_mgt->tbl_key.bucket_size; 
i++) { + head = tbl_mgt->hash + i; + hlist_for_each_entry_safe(hash_node, safe_node, head, node) { + match_ret = nbl_common_match_and_done_hash_xy_node(tbl_mgt, key, + hash_node); + if (!match_ret) + node_num++; + } + } + } + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + return node_num; +} + +u16 nbl_common_get_hash_xy_node_num(void *priv) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + + return tbl_mgt->node_num; +} + +void nbl_common_remove_hash_xy_table(void *priv, struct nbl_hash_xy_tbl_del_key *key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + struct hlist_node *safe_node; + struct hlist_head *head; + struct device *dev; + u32 i; + + if (!priv) + return; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) { + head = tbl_mgt->hash + i; + hlist_for_each_entry_safe(hash_node, safe_node, head, node) { + if (key->action_func) + key->action_func(key->action_priv, hash_node->x_axis_key, + hash_node->y_axis_key, hash_node->data); + nbl_common_remove_hash_xy_node(tbl_mgt, hash_node); + } + } + + devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->hash); + devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->x_axis_hash); + devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->y_axis_hash); + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + dev = tbl_mgt->tbl_key.dev; + devm_kfree(dev, tbl_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h new file mode 100644 index 000000000000..d86e1b9fb0f2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_COMMON_H_ +#define _NBL_COMMON_H_ + +#include "nbl_core.h" + +/** + * the key_hash size is index_size/NBL_INDEX_HASH_DIVISOR. 
eg index_size is 1024, + * the key_hash size is 1024/16 = 64 + */ +#define NBL_INDEX_HASH_DIVISOR 16 + +/* list only need one bucket size */ +#define NBL_HASH_TBL_LIST_BUCKET_SIZE 1 + +struct nbl_index_mgt { + struct nbl_index_tbl_key tbl_key; + unsigned long *bitmap; + struct hlist_head *key_hash; + u32 free_index_num; + u32 bucket_size; + struct mutex lock; /* support multi thread */ +}; + +struct nbl_index_entry_key_node { + struct hlist_node node; + u32 index; /* the index for key has alloc from index table */ + u8 data[]; +}; + +struct nbl_hash_tbl_mgt { + struct nbl_hash_tbl_key tbl_key; + struct hlist_head *hash; + struct mutex lock; /* support multi thread */ + u16 node_num; +}; + +struct nbl_hash_xy_tbl_mgt { + struct nbl_hash_xy_tbl_key tbl_key; + struct hlist_head *hash; + struct hlist_head *x_axis_hash; + struct hlist_head *y_axis_hash; + struct mutex lock; /* support multi thread */ + u16 node_num; +}; + +/* it used for y_axis no necessay */ +struct nbl_hash_entry_node { + struct hlist_node node; + void *key; + void *data; +}; + +/* it used for y_axis no necessay */ +struct nbl_hash_entry_xy_node { + struct hlist_node node; + struct hlist_node x_axis_node; + struct hlist_node y_axis_node; + void *x_axis_key; + void *y_axis_key; + void *data; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.c new file mode 100644 index 000000000000..240022df2f4a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_event.h" + +static struct nbl_event_mgt *event_mgt; + +void nbl_event_notify(enum nbl_event_type type, void *event_data, u16 src_vsi_id, u16 board_id) +{ + struct nbl_event_notifier_list *notifier_list = &event_mgt->notifier_list[type]; + struct nbl_event_notifier *notifier = NULL; + + mutex_lock(¬ifier_list->notifier_lock); + + list_for_each_entry(notifier, ¬ifier_list->list, node) { + if (src_vsi_id != notifier->src_vsi_id || board_id != notifier->board_id) + continue; + + mutex_lock(¬ifier->callback_lock); + notifier->callback.callback(type, event_data, notifier->callback.callback_data); + mutex_unlock(¬ifier->callback_lock); + } + + mutex_unlock(¬ifier_list->notifier_lock); +} + +int nbl_event_register(enum nbl_event_type type, struct nbl_event_callback *callback, + u16 src_vsi_id, u16 board_id) +{ + struct nbl_event_notifier_list *notifier_list = &event_mgt->notifier_list[type]; + struct nbl_event_notifier *notifier = NULL; + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) + return -ENOMEM; + + notifier->src_vsi_id = src_vsi_id; + notifier->board_id = board_id; + notifier->callback.callback = callback->callback; + notifier->callback.callback_data = callback->callback_data; + + mutex_init(¬ifier->callback_lock); + + mutex_lock(¬ifier_list->notifier_lock); + list_add_tail(¬ifier->node, ¬ifier_list->list); + mutex_unlock(¬ifier_list->notifier_lock); + + return 0; +} + +void nbl_event_unregister(enum nbl_event_type type, struct nbl_event_callback *callback, + u16 src_vsi_id, u16 board_id) +{ + struct nbl_event_notifier_list *notifier_list = &event_mgt->notifier_list[type]; + struct nbl_event_notifier *notifier = NULL; + + mutex_lock(¬ifier_list->notifier_lock); + + list_for_each_entry(notifier, ¬ifier_list->list, node) { + if (notifier->callback.callback == callback->callback && + 
notifier->callback.callback_data == callback->callback_data && + notifier->src_vsi_id == src_vsi_id && notifier->board_id == board_id) { + list_del(¬ifier->node); + kfree(notifier); + break; + } + } + + mutex_unlock(¬ifier_list->notifier_lock); +} + +int nbl_event_init(void) +{ + int i = 0; + + event_mgt = kzalloc(sizeof(*event_mgt), GFP_KERNEL); + if (!event_mgt) + return -ENOMEM; + + for (i = 0; i < NBL_EVENT_MAX; i++) { + INIT_LIST_HEAD(&event_mgt->notifier_list[i].list); + mutex_init(&event_mgt->notifier_list[i].notifier_lock); + } + + return 0; +} + +void nbl_event_remove(void) +{ + struct nbl_event_notifier *notifier = NULL, *notifier_safe = NULL; + int i = 0; + + for (i = 0; i < NBL_EVENT_MAX; i++) { + list_for_each_entry_safe(notifier, notifier_safe, + &event_mgt->notifier_list[i].list, node) { + list_del(¬ifier->node); + kfree(notifier); + } + } + + kfree(event_mgt); + event_mgt = NULL; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h new file mode 100644 index 000000000000..66d9b6f45936 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_EVENT_H_ +#define _NBL_EVENT_H_ + +#include "nbl_core.h" + +struct nbl_event_notifier { + struct list_head node; + struct mutex callback_lock; /* Protect callback */ + struct nbl_event_callback callback; + u16 src_vsi_id; + u16 board_id; +}; + +struct nbl_event_notifier_list { + struct list_head list; + struct mutex notifier_lock; /* Protect list structure */ +}; + +struct nbl_event_mgt { + struct nbl_event_notifier_list notifier_list[NBL_EVENT_MAX]; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h new file mode 100644 index 000000000000..190d417726e6 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_CORE_H_ +#define _NBL_CORE_H_ + +#include "nbl_product_base.h" +#include "nbl_def_common.h" +#include "nbl_def_phy.h" +#include "nbl_def_resource.h" +#include "nbl_def_dispatch.h" +#include "nbl_def_service.h" +#include "nbl_def_dev.h" +#include "nbl_def_channel.h" + +#define NBL_ADAPTER_TO_PDEV(adapter) ((adapter)->pdev) +#define NBL_ADAPTER_TO_DEV(adapter) (&((adapter)->pdev->dev)) +#define NBL_ADAPTER_TO_COMMON(adapter) (&((adapter)->common)) +#define NBL_ADAPTER_TO_RPDUCT_BASE_OPS(adapter) ((adapter)->product_base_ops) + +#define NBL_ADAPTER_TO_PHY_MGT(adapter) ((adapter)->core.phy_mgt) +#define NBL_ADAPTER_TO_RES_MGT(adapter) ((adapter)->core.res_mgt) +#define NBL_ADAPTER_TO_DISP_MGT(adapter) ((adapter)->core.disp_mgt) +#define NBL_ADAPTER_TO_SERV_MGT(adapter) ((adapter)->core.serv_mgt) +#define NBL_ADAPTER_TO_DEV_MGT(adapter) ((adapter)->core.dev_mgt) +#define NBL_ADAPTER_TO_CHAN_MGT(adapter) ((adapter)->core.chan_mgt) +#define NBL_ADAPTER_TO_DEBUGFS_MGT(adapter) ((adapter)->core.debugfs_mgt) + +#define NBL_ADAPTER_TO_PHY_OPS_TBL(adapter) ((adapter)->intf.phy_ops_tbl) +#define NBL_ADAPTER_TO_RES_OPS_TBL(adapter) ((adapter)->intf.resource_ops_tbl) +#define NBL_ADAPTER_TO_DISP_OPS_TBL(adapter) ((adapter)->intf.dispatch_ops_tbl) +#define NBL_ADAPTER_TO_SERV_OPS_TBL(adapter) ((adapter)->intf.service_ops_tbl) +#define NBL_ADAPTER_TO_DEV_OPS_TBL(adapter) ((adapter)->intf.dev_ops_tbl) +#define NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter) ((adapter)->intf.channel_ops_tbl) + +#define NBL_ADAPTER_TO_RES_PT_OPS(adapter) (&(NBL_ADAPTER_TO_SERV_OPS_TBL(adapter)->pt_ops)) + +#define NBL_NETDEV_PRIV_TO_ADAPTER(priv) ((priv)->adapter) + +#define NBL_NETDEV_TO_ADAPTER(netdev) \ + (NBL_NETDEV_PRIV_TO_ADAPTER((struct nbl_netdev_priv *)netdev_priv(netdev))) + +#define NBL_NETDEV_TO_SERV_MGT(netdev) \ + (NBL_ADAPTER_TO_SERV_MGT(NBL_NETDEV_PRIV_TO_ADAPTER(\ + (struct nbl_netdev_priv *)netdev_priv(netdev)))) + +#define NBL_NETDEV_TO_DEV_MGT(netdev) \ + (NBL_ADAPTER_TO_DEV_MGT(NBL_NETDEV_TO_ADAPTER(netdev))) + +#define NBL_NETDEV_TO_COMMON(netdev) \ + (NBL_ADAPTER_TO_COMMON(NBL_NETDEV_PRIV_TO_ADAPTER(\ + (struct nbl_netdev_priv *)netdev_priv(netdev)))) + +#define NBL_CAP_SET_BIT(loc) (1 << (loc)) +#define NBL_CAP_TEST_BIT(val, loc) (((val) >> (loc)) & 0x1) + +#define NBL_CAP_IS_CTRL(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_CTRL_BIT) +#define NBL_CAP_IS_NET(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_NET_BIT) +#define NBL_CAP_IS_VF(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_VF_BIT) +#define NBL_CAP_SUPPORT_LAG(val) NBL_CAP_TEST_BIT(val, NBL_CAP_SUPPORT_LAG_BIT) +#define NBL_CAP_IS_NIC(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_NIC_BIT) +#define NBL_CAP_IS_USER(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_USER_BIT) +#define NBL_CAP_IS_GRC(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_GRC_BIT) +#define NBL_CAP_IS_BLK(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_BLK_BIT) +#define NBL_CAP_IS_DPU_HOST(val) ({ typeof(val) _val = (val); \ + !NBL_CAP_TEST_BIT(_val, NBL_CAP_IS_NIC_BIT) && \ + NBL_CAP_TEST_BIT(_val, NBL_CAP_DPU_IS_HOST_BIT); }) +#define NBL_CAP_IS_DPU_ECPU(val) ({ typeof(val) _val = (val); \ + !NBL_CAP_TEST_BIT(_val, NBL_CAP_IS_NIC_BIT) && \ + !NBL_CAP_TEST_BIT(_val, NBL_CAP_DPU_IS_HOST_BIT); }) +#define NBL_CAP_IS_LEONIS(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_LEONIS_BIT) +#define NBL_CAP_IS_BOOTIS(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_BOOTIS_BIT) +#define NBL_CAP_IS_VIRTIO(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_VIRTIO_BIT) +#define NBL_CAP_IS_FACTORY_CTRL(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_FACTORY_CTRL_BIT) + 
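[Editor's illustrative sketch — not part of the submitted patch.] The NBL_CAP_* helpers above fold the per-function capabilities into a single u32: NBL_CAP_SET_BIT() composes the word bit by bit, NBL_CAP_TEST_BIT() queries one bit, and the DPU checks combine two bits (a "DPU host" is a function that is not a NIC but sits on the host side of a DPU). A minimal sketch of how such a word might be built and queried is shown below; the probe_caps() helper and its arguments are hypothetical and only demonstrate how the macros defined in this header compose.

	/*
	 * Illustrative only: build a capability word with the macros above
	 * and test it. probe_caps() is a hypothetical helper, not part of
	 * this patch.
	 */
	static u32 probe_caps(bool has_ctrl, bool is_vf)
	{
		/* Start from a plain Leonis NIC function */
		u32 caps = NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
			   NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT);

		if (has_ctrl)
			caps |= NBL_CAP_SET_BIT(NBL_CAP_HAS_CTRL_BIT);
		if (is_vf)
			caps |= NBL_CAP_SET_BIT(NBL_CAP_IS_VF_BIT);

		return caps;
	}

	/*
	 * With such a word, NBL_CAP_IS_CTRL(caps) selects the ctrl-dev path,
	 * NBL_CAP_IS_VF(caps) skips PF-only setup, and NBL_CAP_IS_DPU_HOST(caps)
	 * evaluates false here because NBL_CAP_IS_NIC_BIT is set.
	 */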
+enum { + NBL_CAP_HAS_CTRL_BIT = 0, + NBL_CAP_HAS_NET_BIT, + NBL_CAP_IS_VF_BIT, + NBL_CAP_SUPPORT_LAG_BIT, + NBL_CAP_IS_NIC_BIT, + NBL_CAP_DPU_IS_HOST_BIT, + NBL_CAP_IS_LEONIS_BIT, + NBL_CAP_IS_BOOTIS_BIT, + NBL_CAP_IS_VIRTIO_BIT, + NBL_CAP_IS_BLK_BIT, + NBL_CAP_HAS_USER_BIT, + NBL_CAP_HAS_GRC_BIT, + NBL_CAP_HAS_FACTORY_CTRL_BIT, +}; + +enum nbl_adapter_state { + NBL_DOWN, + NBL_RESETTING, + NBL_RESET_REQUESTED, + NBL_INITING, + NBL_INIT_FAILED, + NBL_RUNNING, + NBL_TESTING, + NBL_USER, + NBL_STATE_NBITS +}; + +enum { + NBL_ESWITCH_NONE, + NBL_ESWITCH_LEGACY, + NBL_ESWITCH_OFFLOADS +}; + +struct nbl_interface { + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_resource_ops_tbl *resource_ops_tbl; + struct nbl_dispatch_ops_tbl *dispatch_ops_tbl; + struct nbl_service_ops_tbl *service_ops_tbl; + struct nbl_dev_ops_tbl *dev_ops_tbl; + struct nbl_utils_ops_tbl *utils_ops_tbl; + struct nbl_channel_ops_tbl *channel_ops_tbl; +}; + +struct nbl_core { + void *phy_mgt; + void *res_mgt; + void *disp_mgt; + void *serv_mgt; + void *dev_mgt; + void *chan_mgt; + void *debugfs_mgt; +}; + +struct nbl_adapter { + struct pci_dev *pdev; + struct nbl_core core; + struct nbl_interface intf; + struct nbl_common_info common; + struct nbl_product_base_ops *product_base_ops; + struct nbl_init_param init_param; + DECLARE_BITMAP(state, NBL_STATE_NBITS); +}; + +struct nbl_netdev_priv { + struct nbl_adapter *adapter; + struct net_device *netdev; + u16 tx_queue_num; + u16 rx_queue_num; + u16 queue_size; + /* default traffic destination in kernel/dpdk/coexist scene */ + u16 default_vsi_index; + u16 default_vsi_id; + s64 last_st_time; +}; + +struct nbl_indr_dev_priv { + struct net_device *indr_dev; + struct nbl_netdev_priv *dev_priv; + struct list_head list; + int binder_type; +}; + +struct nbl_devlink_priv { + void *priv; + void *dev_mgt; +}; + +struct nbl_software_tool_id_entry { + struct list_head node; + u16 bus; + u16 id; + u8 refcount; +}; + +#define NBL_ST_MAX_DEVICE_NUM 64 +struct nbl_software_tool_table { + DECLARE_BITMAP(devid, NBL_ST_MAX_DEVICE_NUM); + int major; + dev_t devno; + struct class *cls; +}; + +struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *param); +void nbl_core_remove(struct nbl_adapter *adapter); +int nbl_core_start(struct nbl_adapter *adapter, struct nbl_init_param *param); +void nbl_core_stop(struct nbl_adapter *adapter); + +int nbl_st_init(struct nbl_software_tool_table *st_table); +void nbl_st_remove(struct nbl_software_tool_table *st_table); +struct nbl_software_tool_table *nbl_get_st_table(void); +struct dentry *nbl_get_debugfs_root(void); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c new file mode 100644 index 000000000000..c8f20af405bb --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_debugfs.h" + +#define SINGLE_FOPS_RO(_fops_, _open_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = seq_release, \ + } + +#define SINGLE_FOPS_WO(_fops_, _open_, _write_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .write = _write_, \ + .llseek = seq_lseek, \ + .release = seq_release, \ + } + +#define COMPLETE_FOPS_RW(_fops_, _open_, _write_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .write = _write_, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = seq_release, \ + } + +static int nbl_flow_info_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + + disp_ops->dump_flow(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m); + + return 0; +} + +static int nbl_mbx_txq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_txq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_MAILBOX); + + return 0; +} + +static int nbl_mbx_rxq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_rxq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_MAILBOX); + + return 0; +} + +static int nbl_adminq_txq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_txq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_ADMINQ); + + return 0; +} + +static int nbl_adminq_rxq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_rxq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_ADMINQ); + + return 0; +} + +static int nbl_debugfs_flow_info_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_flow_info_dump, inode->i_private); +} + +static int nbl_debugfs_mbx_txq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_mbx_txq_dma_dump, inode->i_private); +} + +static int nbl_debugfs_mbx_rxq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_mbx_rxq_dma_dump, inode->i_private); +} + +static int nbl_debugfs_adminq_txq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_adminq_txq_dma_dump, inode->i_private); +} + +static int nbl_debugfs_adminq_rxq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_adminq_rxq_dma_dump, inode->i_private); +} + +SINGLE_FOPS_RO(flow_info_fops, nbl_debugfs_flow_info_dump); +SINGLE_FOPS_RO(mbx_txq_fops, nbl_debugfs_mbx_txq_dma_dump); +SINGLE_FOPS_RO(mbx_rxq_fops, nbl_debugfs_mbx_rxq_dma_dump); +SINGLE_FOPS_RO(adminq_txq_fops, nbl_debugfs_adminq_txq_dma_dump); +SINGLE_FOPS_RO(adminq_rxq_fops, nbl_debugfs_adminq_rxq_dma_dump); + +static int nbl_ring_index_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct 
nbl_debugfs_mgt *)m->private; + + seq_printf(m, "Index = %d", debugfs_mgt->ring_index); + + return 0; +} + +static int nbl_ring_index_open(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_ring_index_dump, inode->i_private); +} + +static ssize_t nbl_ring_index_write(struct file *file, const char __user *buf, + size_t count, loff_t *offp) +{ + struct nbl_debugfs_mgt *debugfs_mgt = file_inode(file)->i_private; + char buffer[4] = {0}; + size_t size = min(count, sizeof(buffer)); + + if (copy_from_user(buffer, buf, size)) + return -EFAULT; + if (kstrtou16(buffer, 10, &debugfs_mgt->ring_index)) + return -EFAULT; + + return size; +} + +SINGLE_FOPS_WO(ring_index_fops, nbl_ring_index_open, nbl_ring_index_write); + +static int nbl_ring_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + bool is_tx = debugfs_mgt->ring_index % 2; + u16 ring_index = debugfs_mgt->ring_index / 2; + + seq_printf(m, "Dump %s_ring_%d :\n", is_tx ? "tx" : "rx", ring_index); + disp_ops->dump_ring(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m, is_tx, ring_index); + + return 0; +} + +static int nbl_debugfs_ring_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_ring_dump, inode->i_private); +} + +SINGLE_FOPS_RO(ring_fops, nbl_debugfs_ring_dump); + +static void nbl_serv_debugfs_setup_netops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + debugfs_create_file("txrx_ring_index", 0644, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &ring_index_fops); + debugfs_create_file("txrx_ring", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &ring_fops); +} + +static int nbl_ring_stats_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + struct nbl_queue_err_stats queue_err_stats = {0}; + bool is_tx = debugfs_mgt->ring_index % 2; + u16 ring_index = debugfs_mgt->ring_index / 2; + int ret; + + seq_printf(m, "Dump %s_ring_%d_stats\n", is_tx ? 
"tx" : "rx", ring_index); + disp_ops->dump_ring_stats(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m, is_tx, ring_index); + if (is_tx) { + ret = disp_ops->get_queue_err_stats(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + ring_index, + &queue_err_stats, true); + if (!ret) + seq_printf(m, "dvn_pkt_drop_cnt: %d\n", queue_err_stats.dvn_pkt_drop_cnt); + } else { + ret = disp_ops->get_queue_err_stats(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + ring_index, + &queue_err_stats, false); + if (!ret) + seq_printf(m, "uvn_pkt_drop_cnt: %d\n", queue_err_stats.uvn_stat_pkt_drop); + } + + return 0; +} + +static int nbl_debugfs_ring_stats_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_ring_stats_dump, inode->i_private); +} + +SINGLE_FOPS_RO(ring_stats_fops, nbl_debugfs_ring_stats_dump); + +static void nbl_serv_debugfs_setup_pfops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + debugfs_create_file("txrx_ring_stats", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &ring_stats_fops); +} + +static void nbl_serv_debugfs_setup_ctrlops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + + if (chan_ops->check_queue_exist(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), + NBL_CHAN_TYPE_ADMINQ)) { + debugfs_create_file("adminq_txq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &adminq_txq_fops); + debugfs_create_file("adminq_rxq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &adminq_rxq_fops); + } + + if (disp_ops->get_product_fix_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + NBL_DUMP_FLOW_CAP)) + debugfs_create_file("flow_info", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &flow_info_fops); +} + +static void nbl_serv_debugfs_setup_commonops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + if (!chan_ops->check_queue_exist(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + debugfs_create_file("mbx_txq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &mbx_txq_fops); + debugfs_create_file("mbx_rxq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &mbx_rxq_fops); +} + +void nbl_debugfs_func_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_debugfs_mgt **debugfs_mgt = + (struct nbl_debugfs_mgt **)&NBL_ADAPTER_TO_DEBUGFS_MGT(adapter); + struct nbl_common_info *common; + struct device *dev; + const char *name; + + common = NBL_ADAPTER_TO_COMMON(adapter); + dev = NBL_ADAPTER_TO_DEV(adapter); + + *debugfs_mgt = devm_kzalloc(dev, sizeof(struct nbl_debugfs_mgt), GFP_KERNEL); + if (!*debugfs_mgt) + return; + + NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + NBL_DEBUGFS_MGT_TO_COMMON(*debugfs_mgt) = common; + + name = pci_name(NBL_COMMON_TO_PDEV(common)); + (*debugfs_mgt)->nbl_debugfs_root = debugfs_create_dir(name, nbl_get_debugfs_root()); + if (!(*debugfs_mgt)->nbl_debugfs_root) { + nbl_err(common, NBL_DEBUG_DEBUGFS, "nbl init debugfs failed\n"); + return; + } + + nbl_serv_debugfs_setup_commonops(*debugfs_mgt); + + if (param->caps.has_ctrl) + nbl_serv_debugfs_setup_ctrlops(*debugfs_mgt); + + if (param->caps.has_net) { + nbl_serv_debugfs_setup_netops(*debugfs_mgt); + if (!param->caps.is_vf) + 
nbl_serv_debugfs_setup_pfops(*debugfs_mgt); + } +} + +void nbl_debugfs_func_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_debugfs_mgt **debugfs_mgt = + (struct nbl_debugfs_mgt **)&NBL_ADAPTER_TO_DEBUGFS_MGT(adapter); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + + debugfs_remove_recursive((*debugfs_mgt)->nbl_debugfs_root); + (*debugfs_mgt)->nbl_debugfs_root = NULL; + + devm_kfree(dev, *debugfs_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h new file mode 100644 index 000000000000..855765792087 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEBUGFS_H_ +#define _NBL_DEBUGFS_H_ + +#include "nbl_core.h" + +#define NBL_DEBUGFS_MGT_TO_COMMON(debugfs_mgt) ((debugfs_mgt)->common) +#define NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->disp_ops_tbl) +#define NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt)->ops) +#define NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt)->priv) +#define NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->chan_ops_tbl) +#define NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(debugfs_mgt)->ops) +#define NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(debugfs_mgt)->priv) + +struct nbl_debugfs_mgt { + struct dentry *nbl_debugfs_root; + struct nbl_dispatch_ops_tbl *disp_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_common_info *common; + /* Ring fops related info */ + u16 ring_index; + u16 ring_num; + bool pmd_debug; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c new file mode 100644 index 000000000000..4020f3102ab2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c @@ -0,0 +1,3047 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_dev.h" + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "netif debug level (0=none,...,16=all), adapter debug_mask (<-1)"); + +static struct nbl_dev_board_id_table board_id_table; + +struct nbl_dev_ops dev_ops; + +static const struct net_device_ops netdev_ops_leonis_pf; +static const struct ethtool_ops ethtool_ops_leonis_pf; + +static int nbl_dev_clean_mailbox_schedule(struct nbl_dev_mgt *dev_mgt); +static void nbl_dev_clean_adminq_schedule(struct nbl_task_info *task_info); + +/* ---------- Basic functions ---------- */ +static int nbl_dev_get_port_attributes(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_port_attributes(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static int nbl_dev_enable_port(struct nbl_dev_mgt *dev_mgt, bool enable) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->enable_port(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), enable); +} + +static int nbl_dev_alloc_board_id(struct nbl_dev_board_id_table *index_table, u16 bus) +{ + int i = 0; + + for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) { + if (index_table->entry[i].bus == bus) { + index_table->entry[i].refcount++; + return i; + } + } + + for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) { + if (!index_table->entry[i].valid) { + index_table->entry[i].bus = bus; + index_table->entry[i].refcount++; + index_table->entry[i].valid = true; + return i; + } + } + + return -ENOSPC; +} + +static void nbl_dev_free_board_id(struct nbl_dev_board_id_table *index_table, u16 bus) +{ + int i = 0; + + for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) { + if (index_table->entry[i].bus == bus && index_table->entry[i].valid) { + index_table->entry[i].refcount--; + break; + } + } + + if (i != NBL_DEV_BOARD_ID_MAX && !index_table->entry[i].refcount) + memset(&index_table->entry[i], 0, sizeof(index_table->entry[i])); +} + +static void nbl_dev_set_netdev_priv(struct net_device *netdev, struct nbl_dev_vsi *vsi) +{ + struct nbl_netdev_priv *net_priv = netdev_priv(netdev); + + net_priv->tx_queue_num = vsi->queue_num; + net_priv->rx_queue_num = vsi->queue_num; + net_priv->queue_size = vsi->queue_size; + net_priv->netdev = netdev; + net_priv->default_vsi_index = vsi->index; + net_priv->default_vsi_id = vsi->vsi_id; +} + +/* ---------- Interrupt config ---------- */ +static irqreturn_t nbl_dev_clean_mailbox(int __always_unused irq, void *data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)data; + + nbl_dev_clean_mailbox_schedule(dev_mgt); + + return IRQ_HANDLED; +} + +static irqreturn_t nbl_dev_clean_adminq(int __always_unused irq, void *data) +{ + struct nbl_task_info *task_info = (struct nbl_task_info *)data; + + nbl_dev_clean_adminq_schedule(task_info); + + return IRQ_HANDLED; +} + +static void nbl_dev_handle_abnormal_event(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + clean_abnormal_irq_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->process_abnormal_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static void nbl_dev_clean_abnormal_status(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + nbl_common_queue_work(&task_info->clean_abnormal_irq_task, true, false); +} + +static irqreturn_t 
nbl_dev_clean_abnormal_event(int __always_unused irq, void *data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)data; + + nbl_dev_clean_abnormal_status(dev_mgt); + + return IRQ_HANDLED; +} + +static void nbl_dev_register_common_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_irq_num irq_num = {0}; + + serv_ops->get_common_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &irq_num); + msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num = irq_num.mbx_irq_num; +} + +static void nbl_dev_register_net_irq(struct nbl_dev_mgt *dev_mgt, u16 queue_num) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + + msix_info->serv_info[NBL_MSIX_NET_TYPE].num = queue_num; + msix_info->serv_info[NBL_MSIX_NET_TYPE].hw_self_mask_en = 1; +} + +static void nbl_dev_register_ctrl_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_ctrl_irq_num irq_num = {0}; + + serv_ops->get_ctrl_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &irq_num); + + msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num = irq_num.abnormal_irq_num; + msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num = irq_num.adminq_irq_num; +} + +static int nbl_dev_request_net_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_msix_info_param param = {0}; + int msix_num = msix_info->serv_info[NBL_MSIX_NET_TYPE].num; + int ret = 0; + + param.msix_entries = kcalloc(msix_num, sizeof(*param.msix_entries), GFP_KERNEL); + if (!param.msix_entries) + return -ENOMEM; + + param.msix_num = msix_num; + memcpy(param.msix_entries, msix_info->msix_entries + + msix_info->serv_info[NBL_MSIX_NET_TYPE].base_vector_id, + sizeof(param.msix_entries[0]) * msix_num); + + ret = serv_ops->request_net_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), ¶m); + + kfree(param.msix_entries); + return ret; +} + +static void nbl_dev_free_net_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_msix_info_param param = {0}; + int msix_num = msix_info->serv_info[NBL_MSIX_NET_TYPE].num; + + param.msix_entries = kcalloc(msix_num, sizeof(*param.msix_entries), GFP_KERNEL); + if (!param.msix_entries) + return; + + param.msix_num = msix_num; + memcpy(param.msix_entries, msix_info->msix_entries + + msix_info->serv_info[NBL_MSIX_NET_TYPE].base_vector_id, + sizeof(param.msix_entries[0]) * msix_num); + + serv_ops->free_net_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), ¶m); + + kfree(param.msix_entries); +} + +static int nbl_dev_request_mailbox_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = 
NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + int err; + + if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + snprintf(dev_common->mailbox_name, sizeof(dev_common->mailbox_name) - 1, "%s-%s", + dev_name(dev), "mailbox"); + err = devm_request_irq(dev, irq_num, nbl_dev_clean_mailbox, + 0, dev_common->mailbox_name, dev_mgt); + if (err) { + dev_err(dev, "Request mailbox irq handler failed err: %d\n", err); + return err; + } + + return 0; +} + +static void nbl_dev_free_mailbox_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + + if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num) + return; + + local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + devm_free_irq(dev, irq_num, dev_mgt); +} + +static int nbl_dev_enable_mailbox_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + + if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; + chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX, true); + + return serv_ops->enable_mailbox_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, true); +} + +static int nbl_dev_disable_mailbox_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + + if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num) + return 0; + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_flush_task(&dev_common->clean_mbx_task); + + local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id; + chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX, false); + + return serv_ops->enable_mailbox_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, false); +} + +static int nbl_dev_request_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_task_info *task_info) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + int err; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + err = devm_request_irq(dev, irq_num, nbl_dev_clean_adminq, + 0, "adminq_irq", task_info); + if (err) { + 
dev_err(dev, "Request adminq irq handler failed err: %d\n", err); + return err; + } + + return 0; +} + +static void nbl_dev_free_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_task_info *task_info) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + devm_free_irq(dev, irq_num, task_info); +} + +static int nbl_dev_enable_adminq_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_ADMINQ, + true); + + return serv_ops->enable_adminq_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, true); +} + +static int nbl_dev_disable_adminq_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + chan_ops->set_queue_interrupt_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_ADMINQ, + false); + + return serv_ops->enable_adminq_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, false); +} + +static int nbl_dev_request_abnormal_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + int err; + + if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + err = devm_request_irq(dev, irq_num, nbl_dev_clean_abnormal_event, + 0, "abnormal_irq", dev_mgt); + if (err) { + dev_err(dev, "Request abnormal_irq irq handler failed err: %d\n", err); + return err; + } + + return 0; +} + +void nbl_dev_free_abnormal_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + + if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num) + return; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + devm_free_irq(dev, irq_num, dev_mgt); +} + +static int 
nbl_dev_enable_abnormal_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + int err = 0; + + if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id; + err = serv_ops->enable_abnormal_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, true); + + return err; +} + +static int nbl_dev_disable_abnormal_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + int err = 0; + + if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id; + err = serv_ops->enable_abnormal_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, false); + + return err; +} + +static int nbl_dev_configure_msix_map(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + int err = 0; + int i; + u16 msix_not_net_num = 0; + + for (i = NBL_MSIX_NET_TYPE; i < NBL_MSIX_TYPE_MAX; i++) + msix_info->serv_info[i].base_vector_id = msix_info->serv_info[i - 1].base_vector_id + + msix_info->serv_info[i - 1].num; + + for (i = NBL_MSIX_MAILBOX_TYPE; i < NBL_MSIX_TYPE_MAX; i++) { + if (i == NBL_MSIX_NET_TYPE) + continue; + + msix_not_net_num += msix_info->serv_info[i].num; + } + + err = serv_ops->configure_msix_map(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + msix_info->serv_info[NBL_MSIX_NET_TYPE].num, + msix_not_net_num, + msix_info->serv_info[NBL_MSIX_NET_TYPE].hw_self_mask_en); + + return err; +} + +static int nbl_dev_destroy_msix_map(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int err = 0; + + err = serv_ops->destroy_msix_map(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + return err; +} + +static int nbl_dev_alloc_msix_entries(struct nbl_dev_mgt *dev_mgt, u16 num_entries) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + u16 i; + + msix_info->msix_entries = devm_kcalloc(NBL_DEV_MGT_TO_DEV(dev_mgt), num_entries, + sizeof(*msix_info->msix_entries), + GFP_KERNEL); + if (!msix_info->msix_entries) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) + msix_info->msix_entries[i].entry = + serv_ops->get_msix_entry_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), i); + + dev_info(NBL_DEV_MGT_TO_DEV(dev_mgt), "alloc msix entry: %u-%u.\n", + msix_info->msix_entries[0].entry, msix_info->msix_entries[num_entries - 1].entry); + + return 0; +} + +static void nbl_dev_free_msix_entries(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), msix_info->msix_entries); + msix_info->msix_entries = NULL; +} + +static int 
nbl_dev_alloc_msix_intr(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int needed = 0; + int err; + int i; + + for (i = 0; i < NBL_MSIX_TYPE_MAX; i++) + needed += msix_info->serv_info[i].num; + + err = nbl_dev_alloc_msix_entries(dev_mgt, (u16)needed); + if (err) { + pr_err("Allocate msix entries failed\n"); + return err; + } + + err = pci_enable_msix_range(NBL_COMMON_TO_PDEV(common), msix_info->msix_entries, + needed, needed); + if (err < 0) { + pr_err("pci_enable_msix_range failed, err = %d.\n", err); + goto enable_msix_failed; + } + + return needed; + +enable_msix_failed: + nbl_dev_free_msix_entries(dev_mgt); + return err; +} + +static void nbl_dev_free_msix_intr(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + pci_disable_msix(NBL_COMMON_TO_PDEV(common)); + nbl_dev_free_msix_entries(dev_mgt); +} + +static int nbl_dev_init_interrupt_scheme(struct nbl_dev_mgt *dev_mgt) +{ + int err = 0; + + err = nbl_dev_alloc_msix_intr(dev_mgt); + if (err < 0) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "Failed to enable MSI-X vectors\n"); + return err; + } + + return 0; +} + +static void nbl_dev_clear_interrupt_scheme(struct nbl_dev_mgt *dev_mgt) +{ + nbl_dev_free_msix_intr(dev_mgt); +} + +/* ---------- Channel config ---------- */ +static int nbl_dev_setup_chan_qinfo(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + int ret = 0; + + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + return 0; + + ret = chan_ops->cfg_chan_qinfo_map_table(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + chan_type); + if (ret) + dev_err(dev, "setup chan:%d, qinfo map table failed\n", chan_type); + + return ret; +} + +static int nbl_dev_setup_chan_queue(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + int ret = 0; + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + ret = chan_ops->setup_queue(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type); + + return ret; +} + +static int nbl_dev_remove_chan_queue(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + int ret = 0; + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + ret = chan_ops->teardown_queue(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type); + + return ret; +} + +static int nbl_dev_setup_chan_keepalive(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u16 dest_func_id = NBL_COMMON_TO_MGT_PF(common); + + if (chan_type != NBL_CHAN_TYPE_MAILBOX) + return -EOPNOTSUPP; + + dest_func_id = serv_ops->get_function_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common)); + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + return chan_ops->setup_keepalive(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + dest_func_id, chan_type); + + return -ENOENT; +} + +static void nbl_dev_remove_chan_keepalive(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct 
nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + chan_ops->remove_keepalive(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type); +} + +static bool nbl_dev_should_chan_keepalive(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + bool ret = true; + + ret &= serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_KEEP_ALIVE); + + return ret; +} + +static void nbl_dev_register_chan_task(struct nbl_dev_mgt *dev_mgt, + u8 chan_type, struct work_struct *task) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + chan_ops->register_chan_task(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type, task); +} + +/* ---------- Tasks config ---------- */ +static void nbl_dev_clean_mailbox_task(struct work_struct *work) +{ + struct nbl_dev_common *common_dev = container_of(work, struct nbl_dev_common, + clean_mbx_task); + struct nbl_dev_mgt *dev_mgt = common_dev->dev_mgt; + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + chan_ops->clean_queue_subtask(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_MAILBOX); +} + +static int nbl_dev_clean_mailbox_schedule(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + bool is_ctrl = !!(NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)); + + nbl_common_queue_work(&common_dev->clean_mbx_task, is_ctrl, true); + + return 0; +} + +static void nbl_dev_clean_adminq_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + clean_adminq_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + chan_ops->clean_queue_subtask(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_ADMINQ); +} + +static void nbl_dev_clean_adminq_schedule(struct nbl_task_info *task_info) +{ + nbl_common_queue_work(&task_info->clean_adminq_task, true, false); +} + +static void nbl_dev_fw_heartbeat_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + fw_hb_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + if (task_info->fw_resetting) + return; + + if (!serv_ops->check_fw_heartbeat(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) { + dev_notice(NBL_COMMON_TO_DEV(common), "FW reset detected"); + task_info->fw_resetting = true; + + nbl_common_queue_delayed_work(&task_info->fw_reset_task, MSEC_PER_SEC, true, false); + } +} + +static void nbl_dev_fw_reset_task(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct nbl_task_info *task_info = container_of(delayed_work, struct nbl_task_info, + fw_reset_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + if (serv_ops->check_fw_reset(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) { + dev_notice(NBL_COMMON_TO_DEV(common), "FW recovered"); + + nbl_dev_disable_adminq_irq(dev_mgt); + nbl_dev_free_adminq_irq(dev_mgt, task_info); + + nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); + nbl_dev_setup_chan_qinfo(dev_mgt, NBL_CHAN_TYPE_ADMINQ); + 
nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); + nbl_dev_request_adminq_irq(dev_mgt, task_info); + nbl_dev_enable_adminq_irq(dev_mgt); + + if (NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) { + nbl_dev_get_port_attributes(dev_mgt); + nbl_dev_enable_port(dev_mgt, true); + } + task_info->fw_resetting = false; + return; + } + + nbl_common_queue_delayed_work(delayed_work, MSEC_PER_SEC, true, false); +} + +static void nbl_dev_adapt_desc_gother_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + adapt_desc_gother_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->adapt_desc_gother(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static void nbl_dev_recovery_abnormal_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + recovery_abnormal_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->recovery_abnormal(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static void nbl_dev_ctrl_task_schedule(struct nbl_task_info *task_info) +{ + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_HB_CAP)) + nbl_common_queue_work(&task_info->fw_hb_task, true, false); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_ADAPT_DESC_GOTHER)) + nbl_common_queue_work(&task_info->adapt_desc_gother_task, true, false); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_RECOVERY_ABNORMAL_STATUS)) + nbl_common_queue_work(&task_info->recovery_abnormal_task, true, false); +} + +static void nbl_dev_ctrl_task_timer(struct timer_list *t) +{ + struct nbl_task_info *task_info = from_timer(task_info, t, serv_timer); + + mod_timer(&task_info->serv_timer, round_jiffies(task_info->serv_timer_period + jiffies)); + nbl_dev_ctrl_task_schedule(task_info); +} + +static void nbl_dev_ctrl_task_start(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + if (!task_info->timer_setup) + return; + + mod_timer(&task_info->serv_timer, round_jiffies(jiffies + task_info->serv_timer_period)); +} + +static void nbl_dev_ctrl_task_stop(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + if (!task_info->timer_setup) + return; + + del_timer_sync(&task_info->serv_timer); +} + +static void nbl_dev_chan_notify_flr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + u16 vfid; + + vfid = *(u16 *)data; + serv_ops->process_flr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vfid); +} + +static void nbl_dev_ctrl_register_flr_chan_msg(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_PROCESS_FLR_CAP)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + 
NBL_CHAN_MSG_ADMINQ_FLR_NOTIFY, + nbl_dev_chan_notify_flr_resp, dev_mgt); +} + +static int nbl_dev_setup_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + task_info->dev_mgt = dev_mgt; + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_HB_CAP)) { + nbl_common_alloc_task(&task_info->fw_hb_task, nbl_dev_fw_heartbeat_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_RESET_CAP)) { + nbl_common_alloc_delayed_task(&task_info->fw_reset_task, nbl_dev_fw_reset_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_ADMINDQ_CAP)) { + nbl_common_alloc_task(&task_info->clean_adminq_task, nbl_dev_clean_adminq_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_ADAPT_DESC_GOTHER)) { + nbl_common_alloc_task(&task_info->adapt_desc_gother_task, + nbl_dev_adapt_desc_gother_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_RECOVERY_ABNORMAL_STATUS)) { + nbl_common_alloc_task(&task_info->recovery_abnormal_task, + nbl_dev_recovery_abnormal_task); + task_info->timer_setup = true; + } + + nbl_common_alloc_task(&task_info->clean_abnormal_irq_task, + nbl_dev_handle_abnormal_event); + + if (task_info->timer_setup) { + timer_setup(&task_info->serv_timer, nbl_dev_ctrl_task_timer, 0); + task_info->serv_timer_period = HZ; + } + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_ADMINQ, &task_info->clean_adminq_task); + + return 0; +} + +static void nbl_dev_remove_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_ADMINQ, NULL); + + nbl_common_release_task(&task_info->clean_abnormal_irq_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_RESET_CAP)) + nbl_common_release_delayed_task(&task_info->fw_reset_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_HB_CAP)) + nbl_common_release_task(&task_info->fw_hb_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_ADMINDQ_CAP)) + nbl_common_release_task(&task_info->clean_adminq_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_ADAPT_DESC_GOTHER)) + nbl_common_release_task(&task_info->adapt_desc_gother_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_RECOVERY_ABNORMAL_STATUS)) + nbl_common_release_task(&task_info->recovery_abnormal_task); +} + +static int nbl_dev_setup_customized_p4(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_P4_CAP)) + return 0; + + return serv_ops->init_p4(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static int nbl_dev_update_ring_num(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = 
NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->update_ring_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +/* ---------- Dev init process ---------- */ +static int nbl_dev_setup_common_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_common *common_dev; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int board_id; + + common_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter), + sizeof(struct nbl_dev_common), GFP_KERNEL); + if (!common_dev) + return -ENOMEM; + common_dev->dev_mgt = dev_mgt; + + if (nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_MAILBOX)) + goto setup_chan_fail; + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_alloc_task(&common_dev->clean_mbx_task, nbl_dev_clean_mailbox_task); + + if (param->caps.is_nic) { + board_id = serv_ops->get_board_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (board_id < 0) + goto get_board_id_fail; + NBL_COMMON_TO_BOARD_ID(common) = board_id; + } + + NBL_COMMON_TO_VSI_ID(common) = serv_ops->get_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), 0, + NBL_VSI_DATA); + + serv_ops->get_eth_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_VSI_ID(common), + &NBL_COMMON_TO_ETH_MODE(common), &NBL_COMMON_TO_ETH_ID(common)); + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_MAILBOX, &common_dev->clean_mbx_task); + + NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = common_dev; + + nbl_dev_register_common_irq(dev_mgt); + + return 0; + +get_board_id_fail: + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_release_task(&common_dev->clean_mbx_task); +setup_chan_fail: + devm_kfree(NBL_ADAPTER_TO_DEV(adapter), common_dev); + return -EFAULT; +} + +static void nbl_dev_remove_common_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + + if (!common_dev) + return; + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_MAILBOX, NULL); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_release_task(&common_dev->clean_mbx_task); + + nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + devm_kfree(NBL_ADAPTER_TO_DEV(adapter), common_dev); + NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = NULL; +} + +static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int i, ret = 0; + + if (param->caps.is_nic) + NBL_COMMON_TO_BOARD_ID(common) = + nbl_dev_alloc_board_id(&board_id_table, common->bus); + + ctrl_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_ctrl), GFP_KERNEL); + if (!ctrl_dev) { + ret = -ENOMEM; + goto alloc_fail; + } + NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev)->adapter = adapter; + NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = ctrl_dev; + + nbl_dev_register_ctrl_irq(dev_mgt); + + ret = serv_ops->init_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) { + 
dev_err(dev, "ctrl dev chip_init failed\n"); + goto chip_init_fail; + } + + ret = serv_ops->start_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) { + dev_err(dev, "ctrl dev start_mgt_flow failed\n"); + goto mgt_flow_fail; + } + + for (i = 0; i < NBL_CHAN_TYPE_MAX; i++) { + ret = nbl_dev_setup_chan_qinfo(dev_mgt, i); + if (ret) { + dev_err(dev, "ctrl dev setup chan qinfo failed\n"); + goto setup_chan_q_fail; + } + } + + ret = nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); + if (ret) { + dev_err(dev, "ctrl dev setup chan queue failed\n"); + goto setup_chan_q_fail; + } + + ret = nbl_dev_setup_ctrl_dev_task(dev_mgt); + if (ret) { + dev_err(dev, "ctrl dev task failed\n"); + goto setup_ctrl_dev_task_fail; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP)) { + ret = serv_ops->setup_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table()); + if (ret) { + dev_err(dev, "ctrl dev st failed\n"); + goto setup_ctrl_dev_st_fail; + } + } + + ret = nbl_dev_setup_customized_p4(dev_mgt); + if (ret) + goto customize_p4_fail; + + nbl_dev_update_ring_num(dev_mgt); + + return 0; + +customize_p4_fail: + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP)) + serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table()); +setup_ctrl_dev_st_fail: + nbl_dev_remove_ctrl_dev_task(dev_mgt); +setup_ctrl_dev_task_fail: + nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); +setup_chan_q_fail: + serv_ops->stop_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +mgt_flow_fail: + serv_ops->destroy_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +chip_init_fail: + devm_kfree(dev, ctrl_dev); + NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = NULL; +alloc_fail: + nbl_dev_free_board_id(&board_id_table, common->bus); + return ret; +} + +static void nbl_dev_remove_ctrl_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_ctrl **ctrl_dev = &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + if (!*ctrl_dev) + return; + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP)) + serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table()); + + nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ); + nbl_dev_remove_ctrl_dev_task(dev_mgt); + + serv_ops->stop_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->destroy_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + devm_kfree(NBL_ADAPTER_TO_DEV(adapter), *ctrl_dev); + *ctrl_dev = NULL; + + /* If it is not nic, this free function will do nothing, so no need check */ + nbl_dev_free_board_id(&board_id_table, common->bus); +} + +static int nbl_dev_netdev_open(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->netdev_open(netdev); +} + +static int nbl_dev_netdev_stop(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->netdev_stop(netdev); +} + +static netdev_tx_t nbl_dev_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct nbl_adapter 
*adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_resource_pt_ops *pt_ops = NBL_DEV_MGT_TO_RES_PT_OPS(dev_mgt); + + return pt_ops->start_xmit(skb, netdev); +} + +static void nbl_dev_netdev_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_stats64(netdev, stats); +} + +static void nbl_dev_netdev_set_rx_mode(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->set_rx_mode(netdev); +} + +static void nbl_dev_netdev_change_rx_flags(struct net_device *netdev, int flag) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->change_rx_flags(netdev, flag); +} + +static int nbl_dev_netdev_set_mac(struct net_device *netdev, void *p) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_mac(netdev, p); +} + +static int nbl_dev_netdev_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rx_add_vid(netdev, proto, vid); +} + +static int nbl_dev_netdev_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rx_kill_vid(netdev, proto, vid); +} + +static netdev_features_t +nbl_dev_netdev_features_check(struct sk_buff *skb, struct net_device *netdev, + netdev_features_t features) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->features_check(skb, netdev, features); +} + +static void nbl_dev_netdev_tx_timeout(struct net_device *netdev, u32 txqueue) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->tx_timeout(netdev, txqueue); +} + +static int nbl_dev_netdev_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->change_mtu(netdev, new_mtu); +} + +static int nbl_dev_ndo_get_phys_port_name(struct net_device *netdev, char *name, size_t len) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops 
*serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_phys_port_name(netdev, name, len); +} + +static int +nbl_dev_ndo_get_port_parent_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_port_parent_id(netdev, ppid); +} + +static const struct net_device_ops netdev_ops_leonis_pf = { + .ndo_open = nbl_dev_netdev_open, + .ndo_stop = nbl_dev_netdev_stop, + .ndo_start_xmit = nbl_dev_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = nbl_dev_netdev_get_stats64, + .ndo_set_rx_mode = nbl_dev_netdev_set_rx_mode, + .ndo_change_rx_flags = nbl_dev_netdev_change_rx_flags, + .ndo_set_mac_address = nbl_dev_netdev_set_mac, + .ndo_vlan_rx_add_vid = nbl_dev_netdev_rx_add_vid, + .ndo_vlan_rx_kill_vid = nbl_dev_netdev_rx_kill_vid, + .ndo_features_check = nbl_dev_netdev_features_check, + .ndo_tx_timeout = nbl_dev_netdev_tx_timeout, + .ndo_change_mtu = nbl_dev_netdev_change_mtu, + .ndo_get_phys_port_name = nbl_dev_ndo_get_phys_port_name, + .ndo_get_port_parent_id = nbl_dev_ndo_get_port_parent_id, +}; + +static int nbl_dev_setup_netops_leonis(void *priv, struct net_device *netdev, + struct nbl_init_param *param) +{ + netdev->netdev_ops = &netdev_ops_leonis_pf; + + return 0; +} + +static void nbl_dev_remove_netops(struct net_device *netdev) +{ + netdev->netdev_ops = NULL; +} + +static void nbl_dev_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_drvinfo(netdev, drvinfo); +} + +static int nbl_dev_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_module_eeprom(netdev, eeprom, data); +} + +static int nbl_dev_get_module_info(struct net_device *netdev, struct ethtool_modinfo *info) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_module_info(netdev, info); +} + +static int nbl_dev_get_eeprom_len(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_eeprom_length(netdev); +} + +static int nbl_dev_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_eeprom(netdev, eeprom, bytes); +} + +static void nbl_dev_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops 
*serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_strings(netdev, stringset, data); +} + +static int nbl_dev_get_sset_count(struct net_device *netdev, int sset) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_sset_count(netdev, sset); +} + +static void nbl_dev_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_ethtool_stats(netdev, stats, data); +} + +static void nbl_dev_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_channels(netdev, channels); +} + +static int nbl_dev_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_channels(netdev, channels); +} + +static u32 nbl_dev_get_link(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_link(netdev); +} + +static int +nbl_dev_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_ksettings(netdev, cmd); +} + +static int +nbl_dev_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_ksettings(netdev, cmd); +} + +static void nbl_dev_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_ringparam(netdev, ringparam, k_ringparam, extack); +} + +static int nbl_dev_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_ringparam(netdev, ringparam, k_ringparam, extack); +} + +static int nbl_dev_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct 
kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_coalesce(netdev, ec, kernel_ec, extack); +} + +static int nbl_dev_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_coalesce(netdev, ec, kernel_ec, extack); +} + +static int nbl_dev_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxnfc(netdev, cmd, rule_locs); +} + +static u32 nbl_dev_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh_indir_size(netdev); +} + +static u32 nbl_dev_get_rxfh_key_size(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh_key_size(netdev); +} + +static int nbl_dev_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh(netdev, indir, key, hfunc); +} + +static u32 nbl_dev_get_msglevel(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_msglevel(netdev); +} + +static void nbl_dev_set_msglevel(struct net_device *netdev, u32 msglevel) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->set_msglevel(netdev, msglevel); +} + +static int nbl_dev_get_regs_len(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_regs_len(netdev); +} + +static void nbl_dev_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_ethtool_dump_regs(netdev, regs, p); +} + +static int nbl_dev_get_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce 
*ec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_per_queue_coalesce(netdev, q_num, ec); +} + +static int nbl_dev_set_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_per_queue_coalesce(netdev, q_num, ec); +} + +static void nbl_dev_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->self_test(netdev, eth_test, data); +} + +static u32 nbl_dev_get_priv_flags(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_priv_flags(netdev); +} + +static int nbl_dev_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_priv_flags(netdev, priv_flags); +} + +static int nbl_dev_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_pause_param(netdev, param); +} + +static void nbl_dev_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_pause_param(netdev, param); +} + +static int nbl_dev_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_fec_param(netdev, fec); +} + +static int nbl_dev_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_fec_param(netdev, fec); +} + +static int nbl_dev_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *ts_info) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_ts_info(netdev, ts_info); +} + +static int nbl_dev_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct nbl_adapter *adapter = 
NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_phys_id(netdev, state); +} + +static int nbl_dev_nway_reset(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->nway_reset(netdev); +} + +static const struct ethtool_ops ethtool_ops_leonis_pf = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES | + ETHTOOL_COALESCE_TX_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_drvinfo = nbl_dev_get_drvinfo, + .get_module_eeprom = nbl_dev_get_module_eeprom, + .get_module_info = nbl_dev_get_module_info, + .get_eeprom_len = nbl_dev_get_eeprom_len, + .get_eeprom = nbl_dev_get_eeprom, + .get_strings = nbl_dev_get_strings, + .get_sset_count = nbl_dev_get_sset_count, + .get_ethtool_stats = nbl_dev_get_ethtool_stats, + .get_channels = nbl_dev_get_channels, + .set_channels = nbl_dev_set_channels, + .get_link = nbl_dev_get_link, + .get_link_ksettings = nbl_dev_get_link_ksettings, + .set_link_ksettings = nbl_dev_set_link_ksettings, + .get_ringparam = nbl_dev_get_ringparam, + .set_ringparam = nbl_dev_set_ringparam, + .get_coalesce = nbl_dev_get_coalesce, + .set_coalesce = nbl_dev_set_coalesce, + .get_rxnfc = nbl_dev_get_rxnfc, + .get_rxfh_indir_size = nbl_dev_get_rxfh_indir_size, + .get_rxfh_key_size = nbl_dev_get_rxfh_key_size, + .get_rxfh = nbl_dev_get_rxfh, + .get_msglevel = nbl_dev_get_msglevel, + .set_msglevel = nbl_dev_set_msglevel, + .get_regs_len = nbl_dev_get_regs_len, + .get_regs = nbl_dev_get_regs, + .get_per_queue_coalesce = nbl_dev_get_per_queue_coalesce, + .set_per_queue_coalesce = nbl_dev_set_per_queue_coalesce, + .self_test = nbl_dev_self_test, + .get_priv_flags = nbl_dev_get_priv_flags, + .set_priv_flags = nbl_dev_set_priv_flags, + .set_pauseparam = nbl_dev_set_pauseparam, + .get_pauseparam = nbl_dev_get_pauseparam, + .set_fecparam = nbl_dev_set_fecparam, + .get_fecparam = nbl_dev_get_fecparam, + .get_ts_info = nbl_dev_get_ts_info, + .set_phys_id = nbl_dev_set_phys_id, + .nway_reset = nbl_dev_nway_reset, +}; + +static int nbl_dev_setup_ethtool_ops_leonis(void *priv, struct net_device *netdev, + struct nbl_init_param *param) +{ + netdev->ethtool_ops = ðtool_ops_leonis_pf; + + return 0; +} + +static void nbl_dev_remove_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = NULL; +} + +void nbl_dev_set_eth_mac_addr(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, netdev->dev_addr); + serv_ops->set_eth_mac_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + mac, NBL_COMMON_TO_ETH_ID(common)); +} + +static int nbl_dev_cfg_netdev(struct net_device *netdev, struct nbl_dev_mgt *dev_mgt, + struct nbl_init_param *param, + struct nbl_register_net_result *register_result) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net_ops *net_dev_ops = NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt); + int ret = 0; + + if (param->pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->hw_features |= nbl_features_to_netdev_features(register_result->hw_features); + netdev->features 
|= nbl_features_to_netdev_features(register_result->features); + netdev->vlan_features |= netdev->features; + + SET_DEV_MIN_MTU(netdev, ETH_MIN_MTU); + SET_DEV_MAX_MTU(netdev, register_result->max_mtu); + netdev->mtu = min_t(u16, register_result->max_mtu, NBL_DEFAULT_MTU); + + if (is_valid_ether_addr(register_result->mac)) + eth_hw_addr_set(netdev, register_result->mac); + else + eth_hw_addr_random(netdev); + + ether_addr_copy(netdev->perm_addr, netdev->dev_addr); + + serv_ops->set_spoof_check_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev->perm_addr); + + netdev->needed_headroom = serv_ops->get_tx_headroom(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + ret = net_dev_ops->setup_netdev_ops(dev_mgt, netdev, param); + if (ret) + goto set_ops_fail; + + ret = net_dev_ops->setup_ethtool_ops(dev_mgt, netdev, param); + if (ret) + goto set_ethtool_fail; + + nbl_dev_set_eth_mac_addr(dev_mgt, netdev); + + return 0; + +set_ethtool_fail: + nbl_dev_remove_netops(netdev); +set_ops_fail: + return ret; +} + +static void nbl_dev_reset_netdev(struct net_device *netdev) +{ + nbl_dev_remove_ethtool_ops(netdev); + nbl_dev_remove_netops(netdev); +} + +static int nbl_dev_register_net(struct nbl_dev_mgt *dev_mgt, + struct nbl_register_net_result *register_result) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(NBL_DEV_MGT_TO_COMMON(dev_mgt)); + struct resource *res; + u16 pf_bdf; + u64 pf_bar_start; + u64 vf_bar_start, vf_bar_size; + u16 total_vfs, offset, stride; + int pos; + u32 val; + struct nbl_register_net_param register_param = {0}; + int ret = 0; + + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &val); + pf_bar_start = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + 4, &val); + pf_bar_start |= ((u64)val << 32); + + register_param.pf_bar_start = pf_bar_start; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pf_bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride); + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vfs); + + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR, &val); + vf_bar_start = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, &val); + vf_bar_start |= ((u64)val << 32); + + res = &pdev->resource[PCI_IOV_RESOURCES]; + vf_bar_size = resource_size(res); + + if (total_vfs) { + register_param.pf_bdf = pf_bdf; + register_param.vf_bar_start = vf_bar_start; + register_param.vf_bar_size = vf_bar_size; + register_param.total_vfs = total_vfs; + register_param.offset = offset; + register_param.stride = stride; + } + } + + ret = serv_ops->register_net(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + ®ister_param, register_result); + + if (!register_result->tx_queue_num || !register_result->rx_queue_num) + return -EIO; + + return ret; +} + +void nbl_dev_unregister_net(struct nbl_adapter *adapter) +{ + struct nbl_service_ops_tbl *serv_ops_tbl = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + int ret; + + ret = serv_ops_tbl->ops->unregister_net(serv_ops_tbl->priv); + if (ret) + dev_err(dev, "unregister net failed\n"); +} + +static u16 nbl_dev_vsi_alloc_queue(struct nbl_dev_net *net_dev, u16 queue_num) +{ + struct nbl_dev_vsi_controller *vsi_ctrl = &net_dev->vsi_ctrl; + u16 queue_offset = 0; + + if (vsi_ctrl->queue_free_offset + queue_num > 
net_dev->total_queue_num) + return -ENOSPC; + + queue_offset = vsi_ctrl->queue_free_offset; + vsi_ctrl->queue_free_offset += queue_num; + + return queue_offset; +} + +static int nbl_dev_vsi_common_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct nbl_dev_vsi *vsi) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int ret = 0; + + vsi->queue_offset = nbl_dev_vsi_alloc_queue(NBL_DEV_MGT_TO_NET_DEV(dev_mgt), + vsi->queue_num); + + /* Tell serv & res layer the mapping from vsi to queue_id */ + ret = serv_ops->register_vsi_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index, + vsi->vsi_id, vsi->queue_offset, vsi->queue_num); + return ret; +} + +static void nbl_dev_vsi_common_remove(struct nbl_dev_mgt *dev_mgt, struct nbl_dev_vsi *vsi) +{ +} + +static int nbl_dev_vsi_common_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_dev_vsi *vsi) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + int ret; + + vsi->napi_netdev = netdev; + + ret = serv_ops->setup_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "Setup q2vsi failed\n"); + goto set_q2vsi_fail; + } + + ret = serv_ops->setup_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "Setup q2vsi failed\n"); + goto set_rss_fail; + } + + ret = serv_ops->enable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + if (ret) { + dev_err(dev, "Enable napis failed\n"); + goto enable_napi_fail; + } + + return 0; + +enable_napi_fail: + serv_ops->remove_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_rss_fail: + serv_ops->remove_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_q2vsi_fail: + return ret; +} + +static void nbl_dev_vsi_common_stop(struct nbl_dev_mgt *dev_mgt, struct nbl_dev_vsi *vsi) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->disable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + serv_ops->remove_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + serv_ops->remove_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +} + +static int nbl_dev_vsi_data_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + int ret = 0; + + ret = nbl_dev_register_net(dev_mgt, &vsi->register_result); + if (ret) + return ret; + + vsi->queue_num = vsi->register_result.tx_queue_num; + vsi->queue_size = vsi->register_result.queue_size; + + nbl_debug(common, NBL_DEBUG_VSI, "Data vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + + return 0; +} + +static int nbl_dev_vsi_data_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_data_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} + +static int nbl_dev_vsi_data_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_vsi *vsi = 
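The per-VSI queue bookkeeping above is a plain bump allocator over the function's queue space: nbl_dev_vsi_alloc_queue hands each VSI the current free offset and advances it by queue_num. Note that the function is declared to return u16 while the overflow branch returns -ENOSPC, so the error value is not distinguishable from a large offset at the call site. A minimal standalone sketch of the same bookkeeping (not part of this patch), with the return type widened to int purely so the failure case is observable:

#include <stdio.h>

struct vsi_queue_ctrl {
	unsigned short total_queue_num;   /* queues available on the function */
	unsigned short queue_free_offset; /* next unused queue index */
};

/* Hand out a contiguous block of queues; returns the block's first index,
 * or -1 when the request does not fit (sketch of the -ENOSPC case).
 */
static int vsi_alloc_queue(struct vsi_queue_ctrl *ctrl, unsigned short queue_num)
{
	int offset;

	if (ctrl->queue_free_offset + queue_num > ctrl->total_queue_num)
		return -1;

	offset = ctrl->queue_free_offset;
	ctrl->queue_free_offset += queue_num;
	return offset;
}

int main(void)
{
	struct vsi_queue_ctrl ctrl = { .total_queue_num = 16 };

	printf("data vsi offset: %d\n", vsi_alloc_queue(&ctrl, 8));  /* 0 */
	printf("user vsi offset: %d\n", vsi_alloc_queue(&ctrl, 8));  /* 8 */
	printf("extra vsi offset: %d\n", vsi_alloc_queue(&ctrl, 1)); /* -1, no space */
	return 0;
}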
(struct nbl_dev_vsi *)vsi_data; + int ret; + + ret = serv_ops->start_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, vsi->vsi_id); + if (ret) { + dev_err(dev, "Set netdev flow table failed\n"); + goto set_flow_fail; + } + + if (!NBL_COMMON_TO_VF_CAP(common)) { + ret = serv_ops->set_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "Set netdev lldp flow failed\n"); + goto set_lldp_fail; + } + + vsi->feature.has_lldp = true; + + ret = serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, true); + if (ret) { + dev_err(dev, "Set netdev lacp flow failed\n"); + goto set_lacp_fail; + } + + vsi->feature.has_lacp = true; + } + + ret = nbl_dev_vsi_common_start(dev_mgt, netdev, vsi); + if (ret) { + dev_err(dev, "Vsi common start failed\n"); + goto common_start_fail; + } + + return 0; + +common_start_fail: + if (!NBL_COMMON_TO_VF_CAP(common)) + serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id, + false); +set_lacp_fail: + if (!NBL_COMMON_TO_VF_CAP(common)) + serv_ops->remove_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_lldp_fail: + serv_ops->stop_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_flow_fail: + return ret; +} + +static void nbl_dev_vsi_data_stop(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_stop(dev_mgt, vsi); + + if (!NBL_COMMON_TO_VF_CAP(common)) { + serv_ops->remove_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + vsi->feature.has_lldp = false; + serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id, + false); + vsi->feature.has_lacp = false; + } + + serv_ops->stop_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +} + +static int nbl_dev_vsi_data_netdev_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + vsi->netdev = netdev; + return nbl_dev_cfg_netdev(netdev, dev_mgt, param, &vsi->register_result); +} + +static void nbl_dev_vsi_data_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_reset_netdev(vsi->netdev); +} + +static int nbl_dev_vsi_ctrl_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_debug(common, NBL_DEBUG_VSI, "Ctrl vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + return 0; +} + +static int nbl_dev_vsi_ctrl_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_ctrl_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} + +static int nbl_dev_vsi_ctrl_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int ret = 0; + + ret = 
nbl_dev_vsi_common_start(dev_mgt, netdev, vsi); + if (ret) + goto start_fail; + + /* For ctrl vsi, open it after create, for that we don't have ndo_open ops. */ + ret = serv_ops->vsi_open(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, + vsi->index, vsi->queue_num, 1); + if (ret) + goto open_fail; + + return ret; + +open_fail: + nbl_dev_vsi_common_stop(dev_mgt, vsi); +start_fail: + return ret; +} + +static void nbl_dev_vsi_ctrl_stop(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->vsi_stop(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + nbl_dev_vsi_common_stop(dev_mgt, vsi); +} + +static int nbl_dev_vsi_ctrl_netdev_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_ctrl_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ +} + +static int nbl_dev_vsi_user_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_user_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &vsi->queue_num, + &vsi->queue_size, NBL_COMMON_TO_VSI_ID(common)); + + nbl_debug(common, NBL_DEBUG_VSI, "User vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + return 0; +} + +static int nbl_dev_vsi_user_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_user_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} + +static int nbl_dev_vsi_user_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_user_stop(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ +} + +static int nbl_dev_vsi_user_netdev_build(struct nbl_dev_mgt *dev_mgt, + struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_user_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ +} + +static struct nbl_dev_vsi_tbl vsi_tbl[NBL_VSI_MAX] = { + [NBL_VSI_DATA] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_data_register, + .setup = nbl_dev_vsi_data_setup, + .remove = nbl_dev_vsi_data_remove, + .start = nbl_dev_vsi_data_start, + .stop = nbl_dev_vsi_data_stop, + .netdev_build = nbl_dev_vsi_data_netdev_build, + .netdev_destroy = nbl_dev_vsi_data_netdev_destroy, + }, + .vf_support = true, + .only_nic_support = false, + .in_kernel = true, + }, + [NBL_VSI_CTRL] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_ctrl_register, + .setup = nbl_dev_vsi_ctrl_setup, + .remove = nbl_dev_vsi_ctrl_remove, + .start = nbl_dev_vsi_ctrl_start, + .stop = nbl_dev_vsi_ctrl_stop, + .netdev_build = nbl_dev_vsi_ctrl_netdev_build, + .netdev_destroy = nbl_dev_vsi_ctrl_netdev_destroy, + }, + .vf_support = false, + .only_nic_support = true, + .in_kernel = true, + }, + [NBL_VSI_USER] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_user_register, + .setup = nbl_dev_vsi_user_setup, + .remove = nbl_dev_vsi_user_remove, + .start = nbl_dev_vsi_user_start, 
+ .stop = nbl_dev_vsi_user_stop, + .netdev_build = nbl_dev_vsi_user_netdev_build, + .netdev_destroy = nbl_dev_vsi_user_netdev_destroy, + }, + .vf_support = false, + .only_nic_support = true, + .in_kernel = false, + }, +}; + +static int nbl_dev_vsi_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) +{ + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_vsi *vsi = NULL; + int i; + + net_dev->vsi_ctrl.queue_num = 0; + net_dev->vsi_ctrl.queue_free_offset = 0; + + /* Build all vsi, and alloc vsi_id for each of them */ + for (i = 0; i < NBL_VSI_MAX; i++) { + if ((param->caps.is_vf && !vsi_tbl[i].vf_support) || + (!param->caps.is_nic && vsi_tbl[i].only_nic_support)) + continue; + + vsi = devm_kzalloc(NBL_DEV_MGT_TO_DEV(dev_mgt), sizeof(*vsi), GFP_KERNEL); + if (!vsi) + goto malloc_vsi_fail; + + vsi->ops = &vsi_tbl[i].vsi_ops; + vsi->vsi_id = serv_ops->get_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), 0, i); + vsi->index = i; + vsi->in_kernel = vsi_tbl[i].in_kernel; + + net_dev->vsi_ctrl.vsi_list[i] = vsi; + } + + return 0; + +malloc_vsi_fail: + while (--i + 1) { + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->vsi_ctrl.vsi_list[i]); + net_dev->vsi_ctrl.vsi_list[i] = NULL; + } + + return -ENOMEM; +} + +static void nbl_dev_vsi_destroy(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + int i; + + for (i = 0; i < NBL_VSI_MAX; i++) + if (net_dev->vsi_ctrl.vsi_list[i]) { + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->vsi_ctrl.vsi_list[i]); + net_dev->vsi_ctrl.vsi_list[i] = NULL; + } +} + +static struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi = NULL; + int i = 0; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = net_dev->vsi_ctrl.vsi_list[i]; + if (vsi && vsi->index == NBL_VSI_DATA) + return vsi; + } + + return NULL; +} + +static int nbl_dev_vsi_handle_switch_event(u16 type, void *event_data, void *callback_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev = net_dev->netdev; + struct nbl_netdev_priv *net_priv = netdev_priv(netdev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_event_dev_mode_switch_data *data = + (struct nbl_event_dev_mode_switch_data *)event_data; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *from_vsi = NULL, *to_vsi = NULL; + int op = data->op; + + switch (op) { + case NBL_DEV_KERNEL_TO_USER: + from_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; + to_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + break; + case NBL_DEV_USER_TO_KERNEL: + from_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + to_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; + break; + default: + nbl_err(common, NBL_DEBUG_VSI, "Unknown switch op %d", op); + return -ENOENT; + } + + net_priv->default_vsi_index = to_vsi->index; + net_priv->default_vsi_id = to_vsi->vsi_id; + + data->ret = serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + from_vsi->vsi_id, + to_vsi->vsi_id); + if (data->ret) { + net_priv->default_vsi_index = from_vsi->index; + net_priv->default_vsi_id = from_vsi->vsi_id; + } + + return 0; +} + +static struct nbl_dev_net_ops netdev_ops[NBL_PRODUCT_MAX] = { + { + .setup_netdev_ops = nbl_dev_setup_netops_leonis, 
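vsi_tbl makes VSI construction table-driven: nbl_dev_vsi_build walks the table and skips entries whose vf_support / only_nic_support flags do not match the probed capabilities, so a VF ends up with only the data VSI while a PF on the NIC product also gets the ctrl and user VSIs. A condensed standalone illustration of that filter (not part of this patch; names and types simplified, not the driver's real structures):

#include <stdbool.h>
#include <stdio.h>

enum vsi_type { VSI_DATA, VSI_CTRL, VSI_USER, VSI_MAX };

struct vsi_tbl_entry {
	const char *name;
	bool vf_support;       /* may be instantiated on a VF            */
	bool only_nic_support; /* requires the non-VF NIC capabilities   */
};

static const struct vsi_tbl_entry tbl[VSI_MAX] = {
	[VSI_DATA] = { "data", true,  false },
	[VSI_CTRL] = { "ctrl", false, true  },
	[VSI_USER] = { "user", false, true  },
};

/* Mirror of the build loop's filter: skip entries the capabilities rule out. */
static void build_vsis(bool is_vf, bool is_nic)
{
	for (int i = 0; i < VSI_MAX; i++) {
		if ((is_vf && !tbl[i].vf_support) ||
		    (!is_nic && tbl[i].only_nic_support))
			continue;
		printf("building %s vsi (index %d)\n", tbl[i].name, i);
	}
}

int main(void)
{
	puts("PF on NIC:");
	build_vsis(false, true);
	puts("VF:");
	build_vsis(true, true);
	return 0;
}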
+ .setup_ethtool_ops = nbl_dev_setup_ethtool_ops_leonis, + }, +}; + +static void nbl_det_setup_net_dev_ops(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) +{ + NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt) = &netdev_ops[param->product_type]; +} + +static int nbl_dev_setup_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net **net_dev = &NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_dev_vsi *vsi; + u16 total_queue_num = 0, kernel_queue_num = 0, user_queue_num = 0; + int i, ret = 0; + + *net_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_net), GFP_KERNEL); + if (!*net_dev) + return -ENOMEM; + + ret = nbl_dev_vsi_build(dev_mgt, param); + if (ret) + goto vsi_build_fail; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; + + if (!vsi) + continue; + + ret = vsi->ops->register_vsi(dev_mgt, param, vsi); + if (ret) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "Vsi %d register failed", vsi->index); + goto vsi_register_fail; + } + + total_queue_num += vsi->queue_num; + if (vsi->in_kernel) + kernel_queue_num += vsi->queue_num; + else + user_queue_num += vsi->queue_num; + } + + /* This must before vsi_setup, or else no queue can be alloced */ + (*net_dev)->total_queue_num = total_queue_num; + (*net_dev)->kernel_queue_num = kernel_queue_num; + (*net_dev)->user_queue_num = user_queue_num; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; + + if (!vsi) + continue; + + ret = vsi->ops->setup(dev_mgt, param, vsi); + if (ret) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "Vsi %d setup failed", vsi->index); + goto vsi_setup_fail; + } + } + + nbl_dev_register_net_irq(dev_mgt, kernel_queue_num); + + nbl_det_setup_net_dev_ops(dev_mgt, param); + + return 0; + +vsi_setup_fail: + while (--i + 1) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; + + if (!vsi) + continue; + + vsi->ops->remove(dev_mgt, vsi); + } +vsi_register_fail: + nbl_dev_vsi_destroy(dev_mgt); +vsi_build_fail: + devm_kfree(dev, *net_dev); + return ret; +} + +static void nbl_dev_remove_net_dev(struct nbl_adapter *adapter) +{ + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net **net_dev = &NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev; + struct nbl_dev_vsi *vsi; + int i = 0; + + if (!*net_dev) + return; + + netdev = (*net_dev)->netdev; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; + + if (!vsi) + continue; + + vsi->ops->remove(dev_mgt, vsi); + } + nbl_dev_vsi_destroy(dev_mgt); + + nbl_dev_unregister_net(adapter); + + devm_kfree(dev, *net_dev); + *net_dev = NULL; +} + +static int nbl_dev_setup_dev_mgt(struct nbl_common_info *common, struct nbl_dev_mgt **dev_mgt) +{ + *dev_mgt = devm_kzalloc(NBL_COMMON_TO_DEV(common), sizeof(struct nbl_dev_mgt), GFP_KERNEL); + if (!*dev_mgt) + return -ENOMEM; + + NBL_DEV_MGT_TO_COMMON(*dev_mgt) = common; + return 0; +} + +static void nbl_dev_remove_dev_mgt(struct nbl_common_info *common, struct nbl_dev_mgt **dev_mgt) +{ + devm_kfree(NBL_COMMON_TO_DEV(common), *dev_mgt); + *dev_mgt = NULL; +} + +static void nbl_dev_remove_ops(struct device *dev, struct nbl_dev_ops_tbl **dev_ops_tbl) +{ + devm_kfree(dev, *dev_ops_tbl); + *dev_ops_tbl = NULL; +} + +static int nbl_dev_setup_ops(struct device *dev, struct nbl_dev_ops_tbl **dev_ops_tbl, + struct nbl_adapter *adapter) +{ + *dev_ops_tbl = 
devm_kzalloc(dev, sizeof(struct nbl_dev_ops_tbl), GFP_KERNEL); + if (!*dev_ops_tbl) + return -ENOMEM; + + NBL_DEV_OPS_TBL_TO_OPS(*dev_ops_tbl) = &dev_ops; + NBL_DEV_OPS_TBL_TO_PRIV(*dev_ops_tbl) = adapter; + + return 0; +} + +int nbl_dev_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); + struct nbl_service_ops_tbl *serv_ops_tbl = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + struct nbl_channel_ops_tbl *chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + int ret = 0; + + ret = nbl_dev_setup_dev_mgt(common, dev_mgt); + if (ret) + goto setup_mgt_fail; + + NBL_DEV_MGT_TO_SERV_OPS_TBL(*dev_mgt) = serv_ops_tbl; + NBL_DEV_MGT_TO_CHAN_OPS_TBL(*dev_mgt) = chan_ops_tbl; + + ret = nbl_dev_setup_common_dev(adapter, param); + if (ret) + goto setup_common_dev_fail; + + if (param->caps.has_ctrl) { + ret = nbl_dev_setup_ctrl_dev(adapter, param); + if (ret) + goto setup_ctrl_dev_fail; + } + + ret = nbl_dev_setup_net_dev(adapter, param); + if (ret) + goto setup_net_dev_fail; + + ret = nbl_dev_setup_ops(dev, dev_ops_tbl, adapter); + if (ret) + goto setup_ops_fail; + + return 0; + +setup_ops_fail: + nbl_dev_remove_net_dev(adapter); +setup_net_dev_fail: + nbl_dev_remove_ctrl_dev(adapter); +setup_ctrl_dev_fail: + nbl_dev_remove_common_dev(adapter); +setup_common_dev_fail: + nbl_dev_remove_dev_mgt(common, dev_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_dev_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); + + nbl_dev_remove_ops(dev, dev_ops_tbl); + + nbl_dev_remove_net_dev(adapter); + nbl_dev_remove_ctrl_dev(adapter); + nbl_dev_remove_common_dev(adapter); + + nbl_dev_remove_dev_mgt(common, dev_mgt); +} + +/* ---------- Dev start process ---------- */ +static int nbl_dev_start_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + int err = 0; + + err = nbl_dev_request_abnormal_irq(dev_mgt); + if (err) + goto abnormal_request_irq_err; + + err = nbl_dev_enable_abnormal_irq(dev_mgt); + if (err) + goto enable_abnormal_irq_err; + + err = nbl_dev_request_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); + if (err) + goto request_adminq_irq_err; + + err = nbl_dev_enable_adminq_irq(dev_mgt); + if (err) + goto enable_adminq_irq_err; + + nbl_dev_ctrl_register_flr_chan_msg(dev_mgt); + + nbl_dev_get_port_attributes(dev_mgt); + nbl_dev_enable_port(dev_mgt, true); + nbl_dev_ctrl_task_start(dev_mgt); + + return 0; + +enable_adminq_irq_err: + nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); +request_adminq_irq_err: + nbl_dev_disable_abnormal_irq(dev_mgt); +enable_abnormal_irq_err: + nbl_dev_free_abnormal_irq(dev_mgt); +abnormal_request_irq_err: + return err; +} + +static void nbl_dev_stop_ctrl_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + + if 
(!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) + return; + + nbl_dev_ctrl_task_stop(dev_mgt); + nbl_dev_enable_port(dev_mgt, false); + nbl_dev_disable_adminq_irq(dev_mgt); + nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); + nbl_dev_disable_abnormal_irq(dev_mgt); + nbl_dev_free_abnormal_irq(dev_mgt); +} + +static void nbl_dev_chan_notify_link_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct net_device *netdev = (struct net_device *)priv; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_chan_param_notify_link_state *link_info; + + link_info = (struct nbl_chan_param_notify_link_state *)data; + + serv_ops->set_netdev_carrier_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + netdev, link_info->link_state); +} + +static void nbl_dev_register_link_state_chan_msg(struct nbl_dev_mgt *dev_mgt, + struct net_device *netdev) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_NOTIFY_LINK_STATE, + nbl_dev_chan_notify_link_state_resp, netdev); +} + +static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct net_device *netdev = net_dev->netdev; + struct nbl_netdev_priv *net_priv; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_vsi *vsi; + struct nbl_event_callback callback = {0}; + u16 net_vector_id; + int ret; + + vsi = nbl_dev_vsi_select(dev_mgt); + if (!vsi) + return -EFAULT; + + netdev = alloc_etherdev_mqs(sizeof(struct nbl_netdev_priv), vsi->queue_num, vsi->queue_num); + if (!netdev) { + dev_err(dev, "Alloc net device failed\n"); + ret = -ENOMEM; + goto alloc_netdev_fail; + } + + SET_NETDEV_DEV(netdev, dev); + net_priv = netdev_priv(netdev); + net_priv->adapter = adapter; + nbl_dev_set_netdev_priv(netdev, vsi); + + net_dev->netdev = netdev; + common->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + serv_ops->set_mask_en(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), 1); + + /* Alloc all queues. + * One problem is we now must use the queue_size of data_vsi for all queues. 
+ */ + ret = serv_ops->alloc_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, + net_dev->kernel_queue_num, net_dev->kernel_queue_num, + net_priv->queue_size); + if (ret) { + dev_err(dev, "Alloc rings failed\n"); + goto alloc_rings_fail; + } + + net_vector_id = msix_info->serv_info[NBL_MSIX_NET_TYPE].base_vector_id; + ret = serv_ops->setup_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + vsi->vsi_id, net_dev->total_queue_num, net_vector_id); + if (ret) { + dev_err(dev, "Set queue map failed\n"); + goto set_queue_fail; + } + + ret = serv_ops->setup_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev); + if (ret) { + dev_err(dev, "setup net mgt failed\n"); + goto setup_net_mgt_fail; + } + + nbl_dev_register_link_state_chan_msg(dev_mgt, netdev); + + ret = vsi->ops->netdev_build(dev_mgt, param, netdev, vsi); + if (ret) { + dev_err(dev, "Build netdev failed, selected vsi %d\n", vsi->index); + goto build_netdev_fail; + } + + ret = vsi->ops->start(dev_mgt, netdev, vsi); + if (ret) { + dev_err(dev, "Start vsi failed, selected vsi %d\n", vsi->index); + goto start_vsi_fail; + } + + ret = nbl_dev_request_net_irq(dev_mgt); + if (ret) { + dev_err(dev, "request irq failed\n"); + goto request_irq_fail; + } + + netif_carrier_off(netdev); + ret = register_netdev(netdev); + if (ret) { + dev_err(dev, "Register netdev failed\n"); + goto register_netdev_fail; + } + + if (!param->caps.is_vf) { + callback.callback = nbl_dev_vsi_handle_switch_event; + callback.callback_data = dev_mgt; + nbl_event_register(NBL_EVENT_DEV_MODE_SWITCH, &callback, + NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } + + set_bit(NBL_DOWN, adapter->state); + + return 0; + +register_netdev_fail: + nbl_dev_free_net_irq(dev_mgt); +request_irq_fail: + vsi->ops->stop(dev_mgt, vsi); +start_vsi_fail: + vsi->ops->netdev_destroy(dev_mgt, vsi); +build_netdev_fail: + serv_ops->remove_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +setup_net_mgt_fail: + serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_queue_fail: + serv_ops->free_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +alloc_rings_fail: + free_netdev(netdev); +alloc_netdev_fail: + return ret; +} + +static void nbl_dev_stop_net_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_event_callback callback = {0}; + struct nbl_dev_vsi *vsi; + struct net_device *netdev; + struct nbl_netdev_priv *net_priv; + + if (!net_dev) + return; + + netdev = net_dev->netdev; + net_priv = netdev_priv(netdev); + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; + if (!vsi) + return; + + if (!common->is_vf) { + callback.callback = nbl_dev_vsi_handle_switch_event; + callback.callback_data = dev_mgt; + nbl_event_unregister(NBL_EVENT_DEV_MODE_SWITCH, &callback, + NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } + + unregister_netdev(netdev); + + vsi->ops->netdev_destroy(dev_mgt, vsi); + vsi->ops->stop(dev_mgt, vsi); + + nbl_dev_free_net_irq(dev_mgt); + + serv_ops->remove_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + serv_ops->free_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + free_netdev(netdev); +} + +static int nbl_dev_resume_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) 
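nbl_dev_start_net_dev brings the netdev up strictly bottom-up (rings, queue mapping, resource management, netdev build, VSI start, IRQs, register_netdev), and each error label unwinds exactly the steps already completed, in reverse order; nbl_dev_stop_net_dev then repeats the same reverse order on the normal teardown path. A compact standalone sketch of that staged bring-up/unwind shape (not part of this patch; the steps are stand-ins, not the real service ops):

#include <stdio.h>

/* Each step returns 0 on success, nonzero on failure (stand-ins only). */
static int alloc_rings(void)      { puts("alloc rings");     return 0; }
static int setup_queues(void)     { puts("setup queues");    return 0; }
static int register_netdev_(void) { puts("register netdev"); return 1; /* simulate failure */ }

static void remove_queues(void)   { puts("remove queues"); }
static void free_rings(void)      { puts("free rings"); }

/* Bring up in order; unwind only what already succeeded, in reverse. */
static int start_net_dev(void)
{
	int ret;

	ret = alloc_rings();
	if (ret)
		goto alloc_rings_fail;

	ret = setup_queues();
	if (ret)
		goto setup_queues_fail;

	ret = register_netdev_();
	if (ret)
		goto register_fail;

	return 0;

register_fail:
	remove_queues();
setup_queues_fail:
	free_rings();
alloc_rings_fail:
	return ret;
}

int main(void)
{
	return start_net_dev();
}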
+{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev; + int ret = 0; + + if (!net_dev) + return 0; + + netdev = net_dev->netdev; + + ret = nbl_dev_request_net_irq(dev_mgt); + if (ret) + dev_err(dev, "request irq failed\n"); + + netif_device_attach(netdev); + return ret; +} + +static void nbl_dev_suspend_net_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev; + + if (!net_dev) + return; + + netdev = net_dev->netdev; + netif_device_detach(netdev); + nbl_dev_free_net_irq(dev_mgt); +} + +/* ---------- Devlink config ---------- */ +static void nbl_dev_devlink_free(void *devlink_ptr) +{ + devlink_free((struct devlink *)devlink_ptr); +} + +static int nbl_dev_setup_devlink(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) +{ + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct devlink *devlink; + struct devlink_ops *devlink_ops; + struct nbl_devlink_priv *priv; + int ret = 0; + + if (param->caps.is_vf) + return 0; + + devlink_ops = devm_kzalloc(dev, sizeof(*devlink_ops), GFP_KERNEL); + if (!devlink_ops) + return -ENOMEM; + + devlink_ops->info_get = serv_ops->get_devlink_info; + + if (param->caps.has_ctrl) + devlink_ops->flash_update = serv_ops->update_devlink_flash; + + devlink = devlink_alloc(devlink_ops, sizeof(*priv), dev); + + if (!devlink) + return -ENOMEM; + + common_dev->devlink_ops = devlink_ops; + + if (devm_add_action(dev, nbl_dev_devlink_free, devlink)) { + devlink_free(devlink); + return -EFAULT; + } + priv = devlink_priv(devlink); + priv->priv = NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt); + priv->dev_mgt = dev_mgt; + + devlink_register(devlink); + + common_dev->devlink = devlink; + return ret; +} + +static void nbl_dev_remove_devlink(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + + if (common_dev->devlink) { + devlink_unregister(common_dev->devlink); + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), common_dev->devlink_ops); + } +} + +static int nbl_dev_start_common_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int ret = 0; + + ret = nbl_dev_configure_msix_map(dev_mgt); + if (ret) + goto config_msix_map_err; + + ret = nbl_dev_init_interrupt_scheme(dev_mgt); + if (ret) + goto init_interrupt_scheme_err; + + ret = nbl_dev_request_mailbox_irq(dev_mgt); + if (ret) + goto mailbox_request_irq_err; + + ret = nbl_dev_enable_mailbox_irq(dev_mgt); + if (ret) + goto enable_mailbox_irq_err; + + ret = nbl_dev_setup_devlink(dev_mgt, param); + if (ret) + goto setup_devlink_err; + + if (!param->caps.is_vf && + serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_HWMON_TEMP_CAP)) { + ret = nbl_dev_setup_hwmon(adapter); + if (ret) + goto setup_hwmon_err; + } + + if (nbl_dev_should_chan_keepalive(dev_mgt)) + nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + return 0; + +setup_hwmon_err: + nbl_dev_remove_devlink(dev_mgt); +setup_devlink_err: + nbl_dev_disable_mailbox_irq(dev_mgt); 
+enable_mailbox_irq_err: + nbl_dev_free_mailbox_irq(dev_mgt); +mailbox_request_irq_err: + nbl_dev_clear_interrupt_scheme(dev_mgt); +init_interrupt_scheme_err: + nbl_dev_destroy_msix_map(dev_mgt); +config_msix_map_err: + return ret; +} + +void nbl_dev_stop_common_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + + if (nbl_dev_should_chan_keepalive(dev_mgt)) + nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + nbl_dev_remove_hwmon(adapter); + nbl_dev_remove_devlink(dev_mgt); + nbl_dev_free_mailbox_irq(dev_mgt); + nbl_dev_disable_mailbox_irq(dev_mgt); + nbl_dev_clear_interrupt_scheme(dev_mgt); + nbl_dev_destroy_msix_map(dev_mgt); +} + +static int nbl_dev_resume_common_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + int ret = 0; + + ret = nbl_dev_request_mailbox_irq(dev_mgt); + if (ret) + return ret; + + if (nbl_dev_should_chan_keepalive(dev_mgt)) + nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + return 0; +} + +void nbl_dev_suspend_common_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + + if (nbl_dev_should_chan_keepalive(dev_mgt)) + nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + nbl_dev_free_mailbox_irq(dev_mgt); +} + +int nbl_dev_start(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + int ret = 0; + + ret = nbl_dev_start_common_dev(adapter, param); + if (ret) + goto start_common_dev_fail; + + if (param->caps.has_ctrl) { + ret = nbl_dev_start_ctrl_dev(adapter, param); + if (ret) + goto start_ctrl_dev_fail; + } + + ret = nbl_dev_start_net_dev(adapter, param); + if (ret) + goto start_net_dev_fail; + + if (param->caps.has_user) + nbl_dev_start_user_dev(adapter); + + return 0; + +start_net_dev_fail: + nbl_dev_stop_ctrl_dev(adapter); +start_ctrl_dev_fail: + nbl_dev_stop_common_dev(adapter); +start_common_dev_fail: + return ret; +} + +void nbl_dev_stop(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + + nbl_dev_stop_user_dev(adapter); + nbl_dev_stop_ctrl_dev(adapter); + nbl_dev_stop_net_dev(adapter); + nbl_dev_stop_common_dev(adapter); +} + +int nbl_dev_resume(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_init_param *param = &adapter->init_param; + int ret = 0; + + ret = nbl_dev_resume_common_dev(adapter, param); + if (ret) + goto start_common_dev_fail; + + if (param->caps.has_ctrl) { + ret = nbl_dev_start_ctrl_dev(adapter, param); + if (ret) + goto start_ctrl_dev_fail; + } + + ret = nbl_dev_resume_net_dev(adapter, param); + if (ret) + goto start_net_dev_fail; + + return 0; + +start_net_dev_fail: + nbl_dev_stop_ctrl_dev(adapter); +start_ctrl_dev_fail: + nbl_dev_stop_common_dev(adapter); +start_common_dev_fail: + return ret; +} + +int nbl_dev_suspend(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + + nbl_dev_stop_ctrl_dev(adapter); + nbl_dev_suspend_net_dev(adapter); + nbl_dev_suspend_common_dev(adapter); + + return 0; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h new file mode 100644 index 000000000000..5f9a8a658a65 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 
nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEV_H_ +#define _NBL_DEV_H_ + +#include "nbl_core.h" +#include "nbl_dev_user.h" + +#define NBL_DEV_MGT_TO_COMMON(dev_mgt) ((dev_mgt)->common) +#define NBL_DEV_MGT_TO_DEV(dev_mgt) NBL_COMMON_TO_DEV(NBL_DEV_MGT_TO_COMMON(dev_mgt)) +#define NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) ((dev_mgt)->common_dev) +#define NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) ((dev_mgt)->ctrl_dev) +#define NBL_DEV_MGT_TO_NET_DEV(dev_mgt) ((dev_mgt)->net_dev) +#define NBL_DEV_MGT_TO_USER_DEV(dev_mgt) ((dev_mgt)->user_dev) +#define NBL_DEV_COMMON_TO_MSIX_INFO(dev_common) (&(dev_common)->msix_info) +#define NBL_DEV_CTRL_TO_TASK_INFO(dev_ctrl) (&(dev_ctrl)->task_info) +#define NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt) ((dev_mgt)->net_dev->ops) + +#define NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt) ((dev_mgt)->serv_ops_tbl) +#define NBL_DEV_MGT_TO_SERV_OPS(dev_mgt) (NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt)->ops) +#define NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt) (NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt)->priv) +#define NBL_DEV_MGT_TO_RES_PT_OPS(adapter) (&(NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt)->pt_ops)) +#define NBL_DEV_MGT_TO_CHAN_OPS_TBL(dev_mgt) ((dev_mgt)->chan_ops_tbl) +#define NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt) (NBL_DEV_MGT_TO_CHAN_OPS_TBL(dev_mgt)->ops) +#define NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt) (NBL_DEV_MGT_TO_CHAN_OPS_TBL(dev_mgt)->priv) + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + +#define NBL_STRING_NAME_LEN (32) +#define NBL_DEFAULT_MTU (1500) + +#define NBL_MAX_CARDS 16 + +#define NBL_KEEPALIVE_TIME_CYCLE (10 * HZ) + +enum nbl_dev_mode_switch_op { + NBL_DEV_KERNEL_TO_USER, + NBL_DEV_USER_TO_KERNEL, +}; + +struct nbl_task_info { + struct nbl_adapter *adapter; + struct nbl_dev_mgt *dev_mgt; + struct work_struct fw_hb_task; + struct delayed_work fw_reset_task; + struct work_struct clean_adminq_task; + struct work_struct adapt_desc_gother_task; + struct work_struct clean_abnormal_irq_task; + struct work_struct recovery_abnormal_task; + + struct timer_list serv_timer; + unsigned long serv_timer_period; + + bool fw_resetting; + bool timer_setup; +}; + +enum nbl_msix_serv_type { + /* virtio_dev has a config vector_id, and the vector_id need is 0 */ + NBL_MSIX_VIRTIO_TYPE = 0, + NBL_MSIX_NET_TYPE, + NBL_MSIX_MAILBOX_TYPE, + NBL_MSIX_ABNORMAL_TYPE, + NBL_MSIX_ADMINDQ_TYPE, + NBL_MSIX_RDMA_TYPE, + NBL_MSIX_TYPE_MAX + +}; + +struct nbl_msix_serv_info { + u16 num; + u16 base_vector_id; + /* true: hw report msix, hw need to mask actively */ + bool hw_self_mask_en; +}; + +struct nbl_msix_info { + struct nbl_msix_serv_info serv_info[NBL_MSIX_TYPE_MAX]; + struct msix_entry *msix_entries; +}; + +struct nbl_dev_common { + struct nbl_dev_mgt *dev_mgt; + struct device *hwmon_dev; + struct nbl_msix_info msix_info; + char mailbox_name[NBL_STRING_NAME_LEN]; + // for ctrl-dev/net-dev mailbox recv msg + struct work_struct clean_mbx_task; + + struct devlink_ops *devlink_ops; + struct devlink *devlink; +}; + +struct nbl_dev_ctrl { + struct nbl_task_info task_info; +}; + +struct nbl_dev_vsi_controller { + u16 queue_num; + u16 queue_free_offset; + void *vsi_list[NBL_VSI_MAX]; +}; + +struct nbl_dev_net_ops { + int (*setup_netdev_ops)(void *priv, struct net_device *netdev, + struct nbl_init_param *param); + int (*setup_ethtool_ops)(void *priv, struct net_device *netdev, + struct nbl_init_param *param); +}; + +struct nbl_dev_net { + struct net_device *netdev; + struct nbl_dev_net_ops *ops; + struct nbl_dev_vsi_controller vsi_ctrl; + u16 total_queue_num; 
+ u16 kernel_queue_num; + u16 user_queue_num; + u8 eth_id; + u8 resv; +}; + +struct nbl_dev_user_iommu_group { + struct mutex dma_tree_lock; /* lock dma tree */ + struct list_head group_next; + struct kref kref; + struct rb_root dma_tree; + struct iommu_group *iommu_group; + struct device *dev; + struct vfio_device *vdev; +}; + +struct nbl_dev_user { + struct vfio_device vdev; + struct device *mdev; + struct notifier_block iommu_notifier; + struct device *dev; + struct nbl_adapter *adapter; + struct nbl_dev_user_iommu_group *group; + void *shm_msg_ring; + int minor; + bool iommu_status; + bool remap_status; + int network_type; + atomic_t open_cnt; +}; + +struct nbl_dev_mgt { + struct nbl_common_info *common; + struct nbl_service_ops_tbl *serv_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_dev_common *common_dev; + struct nbl_dev_ctrl *ctrl_dev; + struct nbl_dev_net *net_dev; + struct nbl_dev_user *user_dev; +}; + +struct nbl_dev_vsi_feature { + u16 has_lldp:1; + u16 has_lacp:1; + u16 rsv:14; +}; + +struct nbl_dev_vsi_ops { + int (*register_vsi)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data); + int (*setup)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data); + void (*remove)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); + int (*start)(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, void *vsi_data); + void (*stop)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); + int (*netdev_build)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data); + void (*netdev_destroy)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); +}; + +struct nbl_dev_vsi { + struct nbl_dev_vsi_ops *ops; + struct net_device *netdev; + struct net_device *napi_netdev; + struct nbl_register_net_result register_result; + struct nbl_dev_vsi_feature feature; + u16 vsi_id; + u16 queue_offset; + u16 queue_num; + u16 queue_size; + u16 in_kernel; + u8 index; + bool enable; +}; + +struct nbl_dev_vsi_tbl { + struct nbl_dev_vsi_ops vsi_ops; + bool vf_support; + bool only_nic_support; + u16 in_kernel; +}; + +#define NBL_DEV_BOARD_ID_MAX NBL_DRIVER_DEV_MAX +struct nbl_dev_board_id_entry { + u16 bus; + u8 refcount; + bool valid; +}; + +struct nbl_dev_board_id_table { + struct nbl_dev_board_id_entry entry[NBL_DEV_BOARD_ID_MAX]; +}; + +int nbl_dev_setup_hwmon(struct nbl_adapter *adapter); +void nbl_dev_remove_hwmon(struct nbl_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c new file mode 100644 index 000000000000..9b5e1bd9fc07 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c @@ -0,0 +1,1377 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ +#include "nbl_dev.h" +#include "nbl_service.h" + +extern int device_driver_attach(struct device_driver *drv, struct device *dev); + +static struct nbl_userdev { + struct cdev cdev; + struct class *cls; + struct idr cidr; + dev_t cdevt; + struct mutex clock; /* lock character device */ + struct list_head glist; + struct mutex glock; /* lock iommu group list */ + bool success; +} nbl_userdev; + +struct nbl_vfio_batch { + unsigned long *pages_out; + unsigned long *pages_in; + int size; + int offset; + struct page **h_page; +}; + +struct nbl_userdev_dma { + struct rb_node node; + dma_addr_t iova; + unsigned long vaddr; + size_t size; + unsigned long pfn; + unsigned int ref_cnt; +}; + +bool nbl_dma_iommu_status(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + + if (dev->iommu_group && iommu_get_domain_for_dev(dev)) + return 1; + + return 0; +} + +bool nbl_dma_remap_status(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return 0; + + if (domain->type & IOMMU_DOMAIN_IDENTITY) + return 0; + + return 1; +} + +static char *user_cdevnode(const struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "nbl_userdev/%s", dev_name(dev)); +} + +static void nbl_user_change_kernel_network(struct nbl_dev_user *user) +{ + struct nbl_adapter *adapter = user->adapter; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_event_dev_mode_switch_data data = {0}; + struct net_device *netdev = net_dev->netdev; + + if (user->network_type == NBL_KERNEL_NETWORK) + return; + + rtnl_lock(); + clear_bit(NBL_USER, adapter->state); + + data.op = NBL_DEV_USER_TO_KERNEL; + nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + if (data.ret) + goto unlock; + + user->network_type = NBL_KERNEL_NETWORK; + netdev_info(netdev, "network changes to kernel space\n"); + +unlock: + rtnl_unlock(); +} + +static int nbl_user_change_user_network(struct nbl_dev_user *user) +{ + struct nbl_adapter *adapter = user->adapter; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct net_device *netdev = net_dev->netdev; + struct nbl_event_dev_mode_switch_data data = {0}; + int ret = 0; + + rtnl_lock(); + + data.op = NBL_DEV_KERNEL_TO_USER; + nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + if (data.ret) + goto unlock; + + set_bit(NBL_USER, adapter->state); + user->network_type = NBL_USER_NETWORK; + netdev_info(netdev, "network changes to user\n"); + +unlock: + rtnl_unlock(); + + return ret; +} + +static int nbl_cdev_open(struct inode *inode, struct file *filep) +{ + struct nbl_adapter *p; + struct nbl_dev_mgt *dev_mgt; + struct nbl_dev_user *user; + int opened; + + mutex_lock(&nbl_userdev.clock); + p = idr_find(&nbl_userdev.cidr, iminor(inode)); + mutex_unlock(&nbl_userdev.clock); + + if (!p) + return -ENODEV; + + dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(p); + user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + opened = atomic_cmpxchg(&user->open_cnt, 0, 1); + if (opened) + return -EBUSY; + + filep->private_data = p; + + 
return 0; +} + +static int nbl_cdev_release(struct inode *inode, struct file *filp) +{ + struct nbl_adapter *adapter = filp->private_data; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + + chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); + nbl_user_change_kernel_network(user); + atomic_set(&user->open_cnt, 0); + + return 0; +} + +static void nbl_userdev_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void nbl_userdev_mmap_close(struct vm_area_struct *vma) +{ +} + +static vm_fault_t nbl_userdev_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + vm_fault_t ret = VM_FAULT_NOPAGE; + + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + ret = VM_FAULT_SIGBUS; + + return ret; +} + +static const struct vm_operations_struct nbl_userdev_mmap_ops = { + .open = nbl_userdev_mmap_open, + .close = nbl_userdev_mmap_close, + .fault = nbl_userdev_mmap_fault, +}; + +static int nbl_userdev_common_mmap(struct nbl_adapter *adapter, struct vm_area_struct *vma) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct pci_dev *pdev = adapter->pdev; + unsigned int index; + u64 phys_len, req_len, req_start, pgoff; + int ret; + + index = vma->vm_pgoff >> (NBL_DEV_USER_PCI_OFFSET_SHIFT - PAGE_SHIFT); + pgoff = vma->vm_pgoff & ((1U << (NBL_DEV_USER_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + + req_len = vma->vm_end - vma->vm_start; + req_start = pgoff << PAGE_SHIFT; + + if (index == NBL_DEV_SHM_MSG_RING_INDEX) + phys_len = NBL_USER_DEV_SHMMSGRING_SIZE; + else + phys_len = PAGE_ALIGN(pci_resource_len(pdev, 0)); + + if (req_start + req_len > phys_len) + return -EINVAL; + + if (index == NBL_DEV_SHM_MSG_RING_INDEX) { + struct page *page = virt_to_page((void *)((unsigned long)user->shm_msg_ring + + (pgoff << PAGE_SHIFT))); + vma->vm_pgoff = pgoff; + ret = remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), + req_len, vma->vm_page_prot); + return ret; + } + + vma->vm_private_data = adapter; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = (pci_resource_start(pdev, 0) >> PAGE_SHIFT) + pgoff; + + vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_ops = &nbl_userdev_mmap_ops; + + return 0; +} + +static int nbl_cdev_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct nbl_adapter *adapter = filep->private_data; + + return nbl_userdev_common_mmap(adapter, vma); +} + +static int nbl_userdev_register_net(struct nbl_adapter *adapter, void *resp) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_register_net_result *result = (struct nbl_register_net_result *)resp; + struct nbl_dev_vsi *vsi; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + + result->tx_queue_num = vsi->queue_num; + result->rx_queue_num = vsi->queue_num; + result->rdma_enable = 0; + result->queue_offset = vsi->queue_offset; + + return 0; +} + +static int nbl_userdev_alloc_txrx_queues(struct nbl_adapter *adapter, void *resp) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + 
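The common mmap handler splits vm_pgoff into a region index and an in-region page offset: index = vm_pgoff >> (NBL_DEV_USER_PCI_OFFSET_SHIFT - PAGE_SHIFT) selects either the shared message ring or BAR0, and the remaining low bits give the page offset that is checked against that region's length. A small standalone helper showing the same encode/decode arithmetic (not part of this patch); the 40-bit shift and 4 KiB page size below are assumptions for illustration, not values taken from the driver's headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: the real constants live in nbl_dev_user.h. */
#define PAGE_SHIFT_SK        12   /* 4 KiB pages */
#define PCI_OFFSET_SHIFT_SK  40   /* assumed region-index shift */

/* Userspace packs (region, byte offset) into one mmap file offset... */
static uint64_t encode_mmap_offset(unsigned int region, uint64_t offset)
{
	return ((uint64_t)region << PCI_OFFSET_SHIFT_SK) + offset;
}

/* ...and the driver recovers them from vm_pgoff exactly as in the patch. */
static void decode_vm_pgoff(uint64_t vm_pgoff, unsigned int *region, uint64_t *pgoff)
{
	*region = vm_pgoff >> (PCI_OFFSET_SHIFT_SK - PAGE_SHIFT_SK);
	*pgoff = vm_pgoff & ((1ULL << (PCI_OFFSET_SHIFT_SK - PAGE_SHIFT_SK)) - 1);
}

int main(void)
{
	uint64_t off = encode_mmap_offset(1, 0x2000); /* region 1, 8 KiB in */
	unsigned int region;
	uint64_t pgoff;

	decode_vm_pgoff(off >> PAGE_SHIFT_SK, &region, &pgoff);
	printf("region %u, byte offset 0x%llx\n",
	       region, (unsigned long long)(pgoff << PAGE_SHIFT_SK));
	return 0;
}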
struct nbl_chan_param_alloc_txrx_queues *result; + struct nbl_dev_vsi *vsi; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + result = (struct nbl_chan_param_alloc_txrx_queues *)resp; + result->queue_num = vsi->queue_num; + + return 0; +} + +static int nbl_userdev_get_vsi_id(struct nbl_adapter *adapter, void *resp) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_chan_param_get_vsi_id *result; + struct nbl_dev_vsi *vsi; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + result = (struct nbl_chan_param_get_vsi_id *)resp; + result->vsi_id = vsi->vsi_id; + + return 0; +} + +static void nbl_userdev_translate_register_vsi2q(struct nbl_chan_send_info *chan_send) +{ + struct nbl_chan_param_register_vsi2q *param = chan_send->arg; + + param->vsi_index = NBL_VSI_USER; +} + +static void nbl_userdev_translate_clear_queues(struct nbl_chan_send_info *chan_send) +{ + chan_send->msg_type = NBL_CHAN_MSG_REMOVE_RSS; +} + +static long nbl_userdev_channel_ioctl(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_dev_user_channel_msg *msg; + void *resp; + int ret = 0; + + msg = vmalloc(sizeof(*msg)); + if (!msg) + return -ENOMEM; + + if (copy_from_user(msg, (void __user *)arg, sizeof(*msg))) { + vfree(msg); + return -EFAULT; + } + + resp = (unsigned char *)msg->data + msg->arg_len; + resp = (void *)ALIGN((u64)resp, 4); + NBL_CHAN_SEND(chan_send, msg->dst_id, msg->msg_type, msg->data, msg->arg_len, + resp, msg->ack_length, msg->ack); + + dev_dbg(&adapter->pdev->dev, "msg_type %u, arg_len %u, request %llx, resp %llx\n", + msg->msg_type, msg->arg_len, (u64)msg->data, (u64)resp); + + switch (msg->msg_type) { + case NBL_CHAN_MSG_REGISTER_NET: + ret = nbl_userdev_register_net(adapter, resp); + break; + case NBL_CHAN_MSG_ALLOC_TXRX_QUEUES: + ret = nbl_userdev_alloc_txrx_queues(adapter, resp); + break; + case NBL_CHAN_MSG_GET_VSI_ID: + ret = nbl_userdev_get_vsi_id(adapter, resp); + break; + case NBL_CHAN_MSG_ADD_MACVLAN: + WARN_ON(1); + break; + case NBL_CHAN_MSG_DEL_MACVLAN: + case NBL_CHAN_MSG_UNREGISTER_NET: + case NBL_CHAN_MSG_ADD_MULTI_RULE: + case NBL_CHAN_MSG_DEL_MULTI_RULE: + case NBL_CHAN_MSG_FREE_TXRX_QUEUES: + case NBL_CHAN_MSG_CLEAR_FLOW: + break; + case NBL_CHAN_MSG_CLEAR_QUEUE: + nbl_userdev_translate_clear_queues(&chan_send); + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + break; + case NBL_CHAN_MSG_REGISTER_VSI2Q: + nbl_userdev_translate_register_vsi2q(&chan_send); + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + break; + default: + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + break; + } + + msg->ack_err = ret; + ret = copy_to_user((void __user *)arg, msg, sizeof(*msg)); + + vfree(msg); + + return ret; +} + +static long nbl_userdev_switch_network(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + int timeout = 50; + int type; + + if (get_user(type, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), + "switch network get type failed\n"); + return -EFAULT; + } + + if (type == user->network_type) + return 0; + + while 
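The NBL_DEV_USER_CHANNEL ioctl uses one buffer for both directions: the request occupies the first arg_len bytes of data, the driver writes any reply immediately after it rounded up to a 4-byte boundary, and ack_err carries the completion status. The sketch below (not part of this patch) shows that calling convention from the userspace side; the struct layout, payload size, and ioctl request number are assumptions for illustration only, not the driver's UAPI.

/* Hypothetical userspace view of the channel message; the real layout is
 * defined by the driver's UAPI header and is not reproduced here.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#define NBL_DEV_USER_CHANNEL_REQ 0x4e01u   /* placeholder request number */

struct user_channel_msg {
	uint16_t msg_type;
	uint16_t dst_id;
	uint32_t arg_len;     /* bytes of request payload in data[] */
	uint32_t ack_length;  /* space reserved for the reply       */
	uint32_t ack;         /* nonzero: caller wants a reply      */
	int32_t  ack_err;     /* filled in by the driver            */
	uint8_t  data[4096];  /* request, then 4-byte-aligned reply */
};

int channel_send(int fd, struct user_channel_msg *msg,
		 uint16_t type, const void *req, uint32_t len)
{
	msg->msg_type = type;
	msg->arg_len = len;
	msg->ack = 1;
	msg->ack_length = 256;
	memcpy(msg->data, req, len);

	if (ioctl(fd, NBL_DEV_USER_CHANNEL_REQ, msg) < 0)
		return -1;
	return msg->ack_err;
}

/* The reply, if any, starts at data + arg_len rounded up to 4 bytes. */
void *channel_resp(struct user_channel_msg *msg)
{
	return msg->data + ((msg->arg_len + 3) & ~3u);
}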
(test_bit(NBL_RESETTING, adapter->state)) { + timeout--; + if (!timeout) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), + "Timeout while resetting in user change state\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* todolist: concurreny about adapter->state */ + if (type == NBL_USER_NETWORK) + nbl_user_change_user_network(user); + else + nbl_user_change_kernel_network(user); + + return 0; +} + +static long nbl_userdev_get_ifindex(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev = net_dev->netdev; + int ifindex, ret; + + ifindex = netdev->ifindex; + ret = copy_to_user((void __user *)arg, &ifindex, sizeof(ifindex)); + return ret; +} + +static long nbl_userdev_clear_eventfd(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); + + return 0; +} + +static long nbl_userdev_set_listener(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + int msgtype; + + if (get_user(msgtype, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get listener msgtype failed\n"); + return -EFAULT; + } + + chan_ops->set_listener_msgtype(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), msgtype); + + return 0; +} + +static long nbl_userdev_set_eventfd(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct eventfd_ctx *ctx; + struct fd eventfd; + int fd; + long ret = 0; + + if (get_user(fd, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get user fd failed\n"); + return -EFAULT; + } + + eventfd = fdget(fd); + if (!eventfd.file) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get eventfd failed\n"); + return -EBADF; + } + + ctx = eventfd_ctx_fileget(eventfd.file); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get eventfd ctx failed\n"); + return ret; + } + + chan_ops->set_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), user->shm_msg_ring, ctx); + + return ret; +} + +static long nbl_userdev_get_bar_size(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + size_t size = pci_resource_len(adapter->pdev, 0); + u8 __iomem *hw_addr; + int ret; + + hw_addr = serv_ops->get_hw_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &size); + ret = copy_to_user((void __user *)arg, &size, sizeof(size)); + + return ret; +} + +static long nbl_userdev_common_ioctl(struct nbl_adapter *adapter, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + + switch (cmd) { + case NBL_DEV_USER_CHANNEL: + ret = nbl_userdev_channel_ioctl(adapter, arg); + break; + case NBL_DEV_USER_MAP_DMA: + case NBL_DEV_USER_UNMAP_DMA: + break; + case NBL_DEV_USER_SWITCH_NETWORK: + ret = nbl_userdev_switch_network(adapter, arg); + break; + case NBL_DEV_USER_GET_IFINDEX: + 
ret = nbl_userdev_get_ifindex(adapter, arg); + break; + case NBL_DEV_USER_SET_EVENTFD: + ret = nbl_userdev_set_eventfd(adapter, arg); + break; + case NBL_DEV_USER_CLEAR_EVENTFD: + ret = nbl_userdev_clear_eventfd(adapter, arg); + break; + case NBL_DEV_USER_SET_LISTENER: + ret = nbl_userdev_set_listener(adapter, arg); + break; + case NBL_DEV_USER_GET_BAR_SIZE: + ret = nbl_userdev_get_bar_size(adapter, arg); + break; + default: + break; + } + + return ret; +} + +static long nbl_cdev_unlock_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + struct nbl_adapter *adapter = filep->private_data; + + return nbl_userdev_common_ioctl(adapter, cmd, arg); +} + +static ssize_t nbl_vfio_read(struct vfio_device *vdev, char __user *buf, + size_t count, loff_t *ppos) +{ + return -EFAULT; +} + +static ssize_t nbl_vfio_write(struct vfio_device *vdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + return count; +} + +#define NBL_VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(unsigned long)) + +static int nbl_vfio_batch_init(struct nbl_vfio_batch *batch) +{ + batch->offset = 0; + batch->size = 0; + + batch->pages_in = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!batch->pages_in) + return -ENOMEM; + + batch->pages_out = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!batch->pages_out) { + free_page((unsigned long)batch->pages_in); + return -ENOMEM; + } + + batch->h_page = kzalloc(NBL_VFIO_BATCH_MAX_CAPACITY * sizeof(struct page *), GFP_KERNEL); + if (!batch->h_page) { + free_page((unsigned long)batch->pages_in); + free_page((unsigned long)batch->pages_out); + return -ENOMEM; + } + + return 0; +} + +static void nbl_vfio_batch_fini(struct nbl_vfio_batch *batch) +{ + if (batch->pages_in) + free_page((unsigned long)batch->pages_in); + + if (batch->pages_out) + free_page((unsigned long)batch->pages_out); + + kfree(batch->h_page); +} + +static struct nbl_userdev_dma *nbl_userdev_find_dma(struct nbl_dev_user_iommu_group *group, + dma_addr_t start, size_t size) +{ + struct rb_node *node = group->dma_tree.rb_node; + + while (node) { + struct nbl_userdev_dma *dma = rb_entry(node, struct nbl_userdev_dma, node); + + if (start + size <= dma->vaddr) + node = node->rb_left; + else if (start >= dma->vaddr + dma->size) + node = node->rb_right; + else + return dma; + } + + return NULL; +} + +static void nbl_userdev_link_dma(struct nbl_dev_user_iommu_group *group, + struct nbl_userdev_dma *new) +{ + struct rb_node **link = &group->dma_tree.rb_node, *parent = NULL; + struct nbl_userdev_dma *dma; + + while (*link) { + parent = *link; + dma = rb_entry(parent, struct nbl_userdev_dma, node); + + if (new->vaddr + new->size <= dma->vaddr) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + rb_link_node(&new->node, parent, link); + rb_insert_color(&new->node, &group->dma_tree); +} + +static void nbl_userdev_remove_dma(struct nbl_dev_user_iommu_group *group, + struct nbl_userdev_dma *dma) +{ + struct nbl_vfio_batch batch; + long npage, batch_pages; + unsigned long vaddr; + int ret, caps; + unsigned long *ppfn, pfn; + int i = 0; + + dev_dbg(group->dev, "dma remove: vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", + dma->vaddr, dma->iova, dma->size); + iommu_unmap(iommu_get_domain_for_dev(group->dev), dma->iova, dma->size); + + ret = nbl_vfio_batch_init(&batch); + if (ret) { + caps = 1; + ppfn = &pfn; + } else { + caps = NBL_VFIO_BATCH_MAX_CAPACITY; + ppfn = batch.pages_in; + } + + npage = dma->size >> PAGE_SHIFT; + vaddr = dma->vaddr; + + while (npage) { + if (npage >= caps) + batch_pages 
= caps; + else + batch_pages = npage; + + ppfn[0] = vaddr >> PAGE_SHIFT; + for (i = 1; i < batch_pages; i++) + ppfn[i] = ppfn[i - 1] + 1; + + vfio_unpin_pages(group->vdev, vaddr, batch_pages); + dev_dbg(group->dev, "unpin pages 0x%lx, npages %ld, ret %d\n", + ppfn[0], batch_pages, ret); + npage -= batch_pages; + vaddr += (batch_pages << PAGE_SHIFT); + } + + nbl_vfio_batch_fini(&batch); + rb_erase(&dma->node, &group->dma_tree); + kfree(dma); +} + +static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long arg) +{ + struct nbl_dev_user_dma_map map; + struct nbl_adapter *adapter = user->adapter; + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + struct nbl_vfio_batch batch; + struct nbl_userdev_dma *dma; + struct page *h_page; + unsigned long minsz, pfn_base = 0, pfn; + unsigned long vaddr, vfn; + dma_addr_t iova; + u32 mask = NBL_DEV_USER_DMA_MAP_FLAG_READ | NBL_DEV_USER_DMA_MAP_FLAG_WRITE; + size_t size; + long npage, batch_pages, pinned = 0; + int i, ret = 0; + phys_addr_t phys; + + minsz = offsetofend(struct nbl_dev_user_dma_map, size); + + if (copy_from_user(&map, (void __user *)arg, minsz)) + return -EFAULT; + + if (map.argsz < minsz || map.flags & ~mask) + return -EINVAL; + + npage = map.size >> PAGE_SHIFT; + vaddr = map.vaddr; + iova = map.iova; + + if (!npage) + return ret; + + mutex_lock(&user->group->dma_tree_lock); + /* rb-tree find */ + dma = nbl_userdev_find_dma(user->group, vaddr, map.size); + if (dma && dma->iova == iova && dma->size == map.size) { + vfn = vaddr >> PAGE_SHIFT; + ret = vfio_pin_pages(&user->vdev, vaddr, 1, IOMMU_READ | IOMMU_WRITE, &h_page); + if (ret <= 0) { + dev_err(dev, "vfio_pin_pages failed %d\n", ret); + goto mutext_unlock; + } + + pfn = page_to_pfn(h_page); + ret = 0; + vfio_unpin_pages(&user->vdev, vaddr, 1); + + if (pfn != dma->pfn) { + dev_err(dev, "multiple dma pfn not equal, new pfn %lu, dma pfn %lu\n", + pfn, dma->pfn); + ret = -EINVAL; + goto mutext_unlock; + } + + dev_info(dev, "existing dma info, ref_cnt++\n"); + dma->ref_cnt++; + goto mutext_unlock; + } else if (dma) { + dev_info(dev, "multiple dma not equal\n"); + ret = -EINVAL; + goto mutext_unlock; + } + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) { + ret = -ENOMEM; + goto mutext_unlock; + } + + if (nbl_vfio_batch_init(&batch)) { + kfree(dma); + ret = -ENOMEM; + goto mutext_unlock; + } + + while (npage) { + if (batch.size == 0) { + if (npage >= NBL_VFIO_BATCH_MAX_CAPACITY) + batch_pages = NBL_VFIO_BATCH_MAX_CAPACITY; + else + batch_pages = npage; + batch.pages_in[0] = vaddr >> PAGE_SHIFT; + for (i = 1; i < batch_pages; i++) + batch.pages_in[i] = batch.pages_in[i - 1] + 1; + + ret = vfio_pin_pages(&user->vdev, vaddr, batch_pages, + IOMMU_READ | IOMMU_WRITE, batch.h_page); + + dev_dbg(dev, "page %ld pages, return %d\n", batch_pages, batch.size); + if (ret <= 0) { + dev_err(dev, "pin page failed\n"); + goto unwind; + } + + for (i = 0; i < batch_pages; i++) + batch.pages_out[i] = page_to_pfn(batch.h_page[i]); + + batch.offset = 0; + batch.size = ret; + if (!pfn_base) { + pfn_base = batch.pages_out[batch.offset]; + dma->pfn = batch.pages_out[batch.offset]; + } + } + + while (batch.size) { + pfn = batch.pages_out[batch.offset]; + if (pfn == (pfn_base + pinned)) { + pinned++; + vaddr += PAGE_SIZE; + batch.offset++; + batch.size--; + npage--; + continue; + } + + size = pinned << PAGE_SHIFT; + phys = pfn_base << PAGE_SHIFT; + + ret = iommu_map(iommu_get_domain_for_dev(dev), iova, phys, + size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, 
GFP_KERNEL); + + if (ret) { + dev_err(dev, "iommu_map failed\n"); + goto unwind; + } + dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx,\n" + "size 0x%llx\n", (u64)iova, (u64)phys, (u64)size); + pfn_base = pfn; + pinned = 0; + iova += size; + } + } + + if (pinned) { + size = pinned << PAGE_SHIFT; + phys = pfn_base << PAGE_SHIFT; + + ret = iommu_map(iommu_get_domain_for_dev(dev), iova, phys, + size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); + + if (ret) { + dev_err(dev, "iommu_map failed\n"); + goto unwind; + } + dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx,\n" + "size 0x%llx\n", (u64)iova, (u64)phys, (u64)size); + } + nbl_vfio_batch_fini(&batch); + + dma->iova = map.iova; + dma->size = map.size; + dma->vaddr = map.vaddr; + dma->ref_cnt = 1; + nbl_userdev_link_dma(user->group, dma); + + dev_info(dev, "dma map info: vaddr=0x%llx, iova=0x%llx, size=0x%llx\n", + (u64)map.vaddr, (u64)map.iova, (u64)map.size); + mutex_unlock(&user->group->dma_tree_lock); + + return ret; + +unwind: + if (iova > map.iova) + iommu_unmap(iommu_get_domain_for_dev(dev), map.iova, iova - map.iova); + + if (batch.size) + vfio_unpin_pages(&user->vdev, vaddr, batch.size); + + npage = (vaddr - map.vaddr) >> PAGE_SHIFT; + vaddr = map.vaddr; + + while (npage) { + if (npage >= NBL_VFIO_BATCH_MAX_CAPACITY) + batch_pages = NBL_VFIO_BATCH_MAX_CAPACITY; + else + batch_pages = npage; + + batch.pages_in[0] = vaddr >> PAGE_SHIFT; + for (i = 1; i < batch_pages; i++) + batch.pages_in[i] = batch.pages_in[i - 1] + 1; + + vfio_unpin_pages(&user->vdev, vaddr, batch_pages); + npage -= batch_pages; + vaddr += (batch_pages << PAGE_SHIFT); + } + nbl_vfio_batch_fini(&batch); + +mutext_unlock: + mutex_unlock(&user->group->dma_tree_lock); + + return ret; +} + +static long nbl_userdev_dma_unmap_ioctl(struct nbl_dev_user *user, unsigned long arg) +{ + struct nbl_adapter *adapter = user->adapter; + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + struct nbl_dev_user_dma_unmap unmap; + struct nbl_userdev_dma *dma; + unsigned long minsz; + + minsz = offsetofend(struct nbl_dev_user_dma_unmap, size); + + if (copy_from_user(&unmap, (void __user *)arg, minsz)) + return -EFAULT; + + if (unmap.argsz < minsz) + return -EINVAL; + + dev_info(dev, "dma unmap info: vaddr=0x%llx, iova=0x%llx, size=0x%llx\n", + (u64)unmap.vaddr, (u64)unmap.iova, (u64)unmap.size); + + mutex_lock(&user->group->dma_tree_lock); + dma = nbl_userdev_find_dma(user->group, unmap.vaddr, unmap.size); + /* unmmap pages: rb-tree lock */ + if (dma) { + if (dma->vaddr != unmap.vaddr || dma->iova != unmap.iova || dma->size != unmap.size) + dev_err(dev, "dma unmap not equal, unmap vaddr 0x%llx, iova 0x%llx,\n" + "size 0x%llx, dma rbtree vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", + unmap.vaddr, unmap.iova, unmap.size, + dma->vaddr, dma->iova, dma->size); + dma->ref_cnt--; + if (!dma->ref_cnt) + nbl_userdev_remove_dma(user->group, dma); + } + mutex_unlock(&user->group->dma_tree_lock); + + return 0; +} + +static long nbl_vfio_ioctl(struct vfio_device *vdev, unsigned int cmd, unsigned long arg) +{ + struct nbl_dev_user *user; + long ret; + + user = container_of(vdev, struct nbl_dev_user, vdev); + switch (cmd) { + case NBL_DEV_USER_MAP_DMA: + ret = nbl_userdev_dma_map_ioctl(user, arg); + break; + case NBL_DEV_USER_UNMAP_DMA: + ret = nbl_userdev_dma_unmap_ioctl(user, arg); + break; + default: + ret = nbl_userdev_common_ioctl(user->adapter, cmd, arg); + break; + } + + return ret; +} + +static int nbl_vfio_mmap(struct vfio_device *vdev, struct 
vm_area_struct *vma) +{ + struct nbl_dev_user *user; + + user = container_of(vdev, struct nbl_dev_user, vdev); + return nbl_userdev_common_mmap(user->adapter, vma); +} + +static void nbl_vfio_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length) +{ + struct nbl_dev_user *user = container_of(vdev, struct nbl_dev_user, vdev); + struct nbl_userdev_dma *dma; + + dev_info(user->group->dev, "vdev notifyier iova 0x%llx, size 0x%llx\n", + iova, length); + + mutex_lock(&user->group->dma_tree_lock); + dma = nbl_userdev_find_dma(user->group, (dma_addr_t)iova, (size_t)length); + if (dma) + nbl_userdev_remove_dma(user->group, dma); + mutex_unlock(&user->group->dma_tree_lock); +} + +static void nbl_userdev_group_get(struct nbl_dev_user_iommu_group *group) +{ + kref_get(&group->kref); +} + +static void nbl_userdev_release_group(struct kref *kref) +{ + struct nbl_dev_user_iommu_group *group; + struct rb_node *node; + + group = container_of(kref, struct nbl_dev_user_iommu_group, kref); + list_del(&group->group_next); + mutex_unlock(&nbl_userdev.glock); + while ((node = rb_first(&group->dma_tree))) + nbl_userdev_remove_dma(group, rb_entry(node, struct nbl_userdev_dma, node)); + + iommu_group_put(group->iommu_group); + kfree(group); +} + +static void nbl_userdev_group_put(struct nbl_dev_user_iommu_group *group) +{ + kref_put_mutex(&group->kref, nbl_userdev_release_group, &nbl_userdev.glock); +} + +static struct nbl_dev_user_iommu_group * + nbl_userdev_group_get_from_iommu(struct iommu_group *iommu_group) +{ + struct nbl_dev_user_iommu_group *group; + + mutex_lock(&nbl_userdev.glock); + list_for_each_entry(group, &nbl_userdev.glist, group_next) { + if (group->iommu_group == iommu_group) { + nbl_userdev_group_get(group); + mutex_unlock(&nbl_userdev.glock); + return group; + } + } + + mutex_unlock(&nbl_userdev.glock); + + return NULL; +} + +static +struct nbl_dev_user_iommu_group *nbl_userdev_create_group(struct iommu_group *iommu_group, + struct device *dev, + struct vfio_device *vdev) +{ + struct nbl_dev_user_iommu_group *group, *tmp; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + kref_init(&group->kref); + mutex_init(&group->dma_tree_lock); + group->iommu_group = iommu_group; + group->dma_tree = RB_ROOT; + group->dev = dev; + group->vdev = vdev; + + mutex_lock(&nbl_userdev.glock); + list_for_each_entry(tmp, &nbl_userdev.glist, group_next) { + if (tmp->iommu_group == iommu_group) { + nbl_userdev_group_get(tmp); + mutex_unlock(&nbl_userdev.glock); + kfree(group); + return tmp; + } + } + + list_add(&group->group_next, &nbl_userdev.glist); + mutex_unlock(&nbl_userdev.glock); + + return group; +} + +static int nbl_vfio_open(struct vfio_device *vdev) +{ + struct nbl_dev_user *user; + struct nbl_dev_user_iommu_group *group; + struct iommu_group *iommu_group; + struct nbl_adapter *adapter; + struct pci_dev *pdev; + int ret = 0, opened; + + user = container_of(vdev, struct nbl_dev_user, vdev); + adapter = user->adapter; + pdev = adapter->pdev; + + opened = atomic_cmpxchg(&user->open_cnt, 0, 1); + if (opened) + return -EBUSY; + + /* add iommu group list */ + iommu_group = iommu_group_get(&pdev->dev); + if (!iommu_group) { + dev_err(&pdev->dev, "nbl vfio open failed\n"); + ret = -EINVAL; + goto clear_open_cnt; + } + + group = nbl_userdev_group_get_from_iommu(iommu_group); + if (!group) { + group = nbl_userdev_create_group(iommu_group, &pdev->dev, vdev); + if (IS_ERR(group)) { + iommu_group_put(iommu_group); + ret = PTR_ERR(group); + goto clear_open_cnt; + } + } else { 
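For scale: NBL_VFIO_BATCH_MAX_CAPACITY is PAGE_SIZE / sizeof(unsigned long), i.e. 4096 / 8 = 512 entries per batch on a 4 KiB-page build, so each vfio_pin_pages()/vfio_unpin_pages() round above covers at most 512 pages (2 MiB) of user memory. The map path additionally coalesces runs of physically contiguous pfns, so iommu_map() is issued once per contiguous extent rather than once per page.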
+ iommu_group_put(iommu_group); + } + + user->group = group; + + dev_info(&pdev->dev, "nbl vfio open\n"); + + return ret; + +clear_open_cnt: + atomic_set(&user->open_cnt, 0); + return ret; +} + +static void nbl_vfio_close(struct vfio_device *vdev) +{ + struct nbl_dev_user *user; + struct nbl_adapter *adapter; + struct pci_dev *pdev; + struct nbl_dev_mgt *dev_mgt; + struct nbl_channel_ops *chan_ops; + + user = container_of(vdev, struct nbl_dev_user, vdev); + adapter = user->adapter; + pdev = adapter->pdev; + dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (user->group) + nbl_userdev_group_put(user->group); + user->group = NULL; + + chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); + nbl_user_change_kernel_network(user); + atomic_set(&user->open_cnt, 0); + + dev_info(&pdev->dev, "nbl vfio close\n"); +} + +static const struct vfio_device_ops nbl_vfio_dev_ops = { + .name = "vfio-nbl", + .open_device = nbl_vfio_open, + .close_device = nbl_vfio_close, + .read = nbl_vfio_read, + .write = nbl_vfio_write, + .ioctl = nbl_vfio_ioctl, + .mmap = nbl_vfio_mmap, + .dma_unmap = nbl_vfio_dma_unmap, + .bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, +}; + +static const struct file_operations nbl_cdev_fops = { + .owner = THIS_MODULE, + .open = nbl_cdev_open, + .unlocked_ioctl = nbl_cdev_unlock_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = nbl_cdev_release, + .mmap = nbl_cdev_mmap, +}; + +static struct mdev_driver nbl_mdev_driver = { + .device_api = VFIO_DEVICE_API_PCI_STRING, + .driver = { + .name = "nbl_mdev", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + }, +}; + +static int nbl_bus_probe(struct device *dev) +{ + struct mdev_driver *drv = + container_of(dev->driver, struct mdev_driver, driver); + + if (!drv->probe) + return 0; + return drv->probe(to_mdev_device(dev)); +} + +static void nbl_bus_remove(struct device *dev) +{ + struct mdev_driver *drv = + container_of(dev->driver, struct mdev_driver, driver); + + if (drv->remove) + drv->remove(to_mdev_device(dev)); +} + +static int nbl_bus_match(struct device *dev, struct device_driver *drv) +{ + return 0; +} + +static struct bus_type nbl_bus_type = { + .name = "nbl_bus_type", + .probe = nbl_bus_probe, + .remove = nbl_bus_remove, + .match = nbl_bus_match, +}; + +static void nbl_mdev_device_release(struct device *dev) +{ + dev_info(dev, "nbl mdev device release\n"); +} + +void nbl_dev_start_user_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *cdev = NULL, *mdev; + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common); + struct nbl_dev_user *user; + void *shm_msg_ring; + bool iommu_status = 0, remap_status = 0; + int minor = 0, ret; + + if (!nbl_userdev.success) + return; + + if (!dev_is_dma_coherent(dev)) + return; + + if (dma_get_mask(dev) != DMA_BIT_MASK(64)) + return; + + iommu_status = nbl_dma_iommu_status(pdev); + remap_status = nbl_dma_remap_status(pdev); + + /* iommu passthrough */ + if (iommu_status && !remap_status) { + if (common->dma_dev == common->dev) + return; + remap_status = 1; + } + + shm_msg_ring = kzalloc(NBL_USER_DEV_SHMMSGRING_SIZE, GFP_KERNEL); + if (!shm_msg_ring) + return; + + user = 
devm_kzalloc(dev, sizeof(struct nbl_dev_user), GFP_KERNEL); + if (!user) { + kfree(shm_msg_ring); + return; + } + + if (remap_status) { + /* mdev init */ + mdev = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL); + if (!mdev) { + kfree(shm_msg_ring); + return; + } + + device_initialize(mdev); + mdev->parent = dev; + + mdev->bus = &nbl_bus_type; + + mdev->release = nbl_mdev_device_release; + + ret = dev_set_name(mdev, pci_name(pdev)); + if (ret) { + dev_info(dev, "mdev set name failed\n"); + goto free_dev; + } + + ret = device_add(mdev); + if (ret) { + dev_err(dev, "mdev add failed\n"); + goto free_dev; + } + dev_info(dev, "MDEV: created\n"); + + devm_kfree(dev, user); + + user = vfio_alloc_device(nbl_dev_user, vdev, mdev, &nbl_vfio_dev_ops); + if (IS_ERR(user)) { + device_del(mdev); + goto free_dev; + } + + ret = vfio_register_emulated_iommu_dev(&user->vdev); + if (ret) { + vfio_put_device(&user->vdev); + device_del(mdev); + goto free_dev; + } + + user->mdev = mdev; + mdev->driver = &nbl_mdev_driver.driver; + } else { + mutex_lock(&nbl_userdev.clock); + minor = idr_alloc(&nbl_userdev.cidr, adapter, 1, MINORMASK + 1, GFP_KERNEL); + if (minor < 0) { + dev_err(dev, "alloc userdev dev minor failed\n"); + mutex_unlock(&nbl_userdev.clock); + goto free_dev; + } + + cdev = device_create(nbl_userdev.cls, NULL, MKDEV(MAJOR(nbl_userdev.cdevt), minor), + NULL, pci_name(pdev)); + if (IS_ERR(cdev)) { + dev_err(dev, "device create failed\n"); + idr_remove(&nbl_userdev.cidr, minor); + mutex_unlock(&nbl_userdev.clock); + goto free_dev; + } + mutex_unlock(&nbl_userdev.clock); + user->dev = cdev; + user->minor = minor; + } + + user->shm_msg_ring = shm_msg_ring; + user->adapter = adapter; + user->iommu_status = iommu_status; + user->remap_status = remap_status; + atomic_set(&user->open_cnt, 0); + user->network_type = NBL_KERNEL_NETWORK; + + NBL_DEV_MGT_TO_USER_DEV(dev_mgt) = user; + + return; + +free_dev: + devm_kfree(dev, mdev); + kfree(shm_msg_ring); +} + +void nbl_dev_stop_user_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *mdev; + + if (!user) + return; + + while (atomic_read(&user->open_cnt)) { + dev_info(dev, "userdev application need quit!\n"); + msleep(2000); + } + + kfree(user->shm_msg_ring); + + if (user->remap_status) { + mdev = user->mdev; + vfio_unregister_group_dev(&user->vdev); + vfio_put_device(&user->vdev); + mdev->driver = NULL; + device_del(mdev); + devm_kfree(dev, mdev); + } else if (user->dev) { + mutex_lock(&nbl_userdev.clock); + device_destroy(nbl_userdev.cls, MKDEV(MAJOR(nbl_userdev.cdevt), user->minor)); + user->dev = NULL; + mutex_unlock(&nbl_userdev.clock); + devm_kfree(dev, user); + } + + NBL_DEV_MGT_TO_USER_DEV(dev_mgt) = NULL; +} + +void nbl_dev_user_module_init(void) +{ + int ret; + + idr_init(&nbl_userdev.cidr); + mutex_init(&nbl_userdev.clock); + mutex_init(&nbl_userdev.glock); + INIT_LIST_HEAD(&nbl_userdev.glist); + + nbl_userdev.cls = class_create("nbl_userdev"); + if (IS_ERR(nbl_userdev.cls)) { + pr_err("nbl_userdev class alloc failed\n"); + goto err_create_cls; + } + + nbl_userdev.cls->devnode = user_cdevnode; + + ret = alloc_chrdev_region(&nbl_userdev.cdevt, 0, MINORMASK + 1, "nbl_userdev"); + if (ret) { + pr_err("nbl_userdev alloc chrdev region failed\n"); + goto err_alloc_chrdev; + } + + 
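The user-device bring-up above selects between two interfaces based on the DMA/IOMMU state of the function. A condensed sketch of that decision follows; nbl_userdev_mode() and the enum are hypothetical names introduced only for illustration, and the reading of nbl_dma_iommu_status()/nbl_dma_remap_status() (IOMMU present / DMA remapping active) is inferred from how they are used here.

/*
 * Hypothetical helper summarizing the checks in nbl_dev_start_user_dev()
 * (the nbl_userdev.success module-state check is omitted here).
 */
enum nbl_userdev_mode {
	NBL_USERDEV_MODE_NONE,	/* no user device is created */
	NBL_USERDEV_MODE_CDEV,	/* plain character device */
	NBL_USERDEV_MODE_VFIO,	/* mdev + emulated VFIO device */
};

static enum nbl_userdev_mode nbl_userdev_mode(struct nbl_common_info *common,
					      struct pci_dev *pdev)
{
	struct device *dev = NBL_COMMON_TO_DEV(common);
	bool iommu = nbl_dma_iommu_status(pdev);
	bool remap = nbl_dma_remap_status(pdev);

	if (!dev_is_dma_coherent(dev) || dma_get_mask(dev) != DMA_BIT_MASK(64))
		return NBL_USERDEV_MODE_NONE;

	/* IOMMU present but no remapping (passthrough): the patch only
	 * continues when common->dma_dev differs from common->dev, and
	 * then treats the function like the remapping case.
	 */
	if (iommu && !remap) {
		if (common->dma_dev == common->dev)
			return NBL_USERDEV_MODE_NONE;
		remap = true;
	}

	return remap ? NBL_USERDEV_MODE_VFIO : NBL_USERDEV_MODE_CDEV;
}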
cdev_init(&nbl_userdev.cdev, &nbl_cdev_fops); + ret = cdev_add(&nbl_userdev.cdev, nbl_userdev.cdevt, MINORMASK + 1); + if (ret) { + pr_err("nbl_userdev cdev add failed\n"); + goto err_cdev_add; + } + + nbl_userdev.success = 1; + pr_info("user_module init success\n"); + + return; + +err_cdev_add: + unregister_chrdev_region(nbl_userdev.cdevt, MINORMASK + 1); +err_alloc_chrdev: + class_destroy(nbl_userdev.cls); + nbl_userdev.cls = NULL; +err_create_cls: + return; +} + +void nbl_dev_user_module_destroy(void) +{ + if (nbl_userdev.success) { + idr_destroy(&nbl_userdev.cidr); + cdev_del(&nbl_userdev.cdev); + unregister_chrdev_region(nbl_userdev.cdevt, MINORMASK + 1); + class_destroy(nbl_userdev.cls); + nbl_userdev.cls = NULL; + nbl_userdev.success = 0; + } +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h new file mode 100644 index 000000000000..8e757fd1b156 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEV_USER_H_ +#define _NBL_DEV_USER_H_ + +#define NBL_DEV_USER_TYPE ('n') + +#define NBL_DEV_USER_PCI_OFFSET_SHIFT 40 +#define NBL_DEV_USER_OFFSET_TO_INDEX(off) ((off) >> NBL_DEV_USER_PCI_OFFSET_SHIFT) +#define NBL_DEV_USER_INDEX_TO_OFFSET(index) ((u64)(index) << NBL_DEV_USER_PCI_OFFSET_SHIFT) +#define NBL_DEV_SHM_MSG_RING_INDEX (6) + +/* 8192 ioctl mailbox msg */ +struct nbl_dev_user_channel_msg { + u16 msg_type; + u16 dst_id; + u32 arg_len; + u32 ack_err; + u16 ack_length; + u16 ack; + u32 data[2044]; +}; + +#define NBL_DEV_USER_CHANNEL _IO(NBL_DEV_USER_TYPE, 0) + +struct nbl_dev_user_dma_map { + u32 argsz; + u32 flags; +#define NBL_DEV_USER_DMA_MAP_FLAG_READ BIT(0) /* readable from device */ +#define NBL_DEV_USER_DMA_MAP_FLAG_WRITE BIT(1) /* writable from device */ + u64 vaddr; /* Process virtual address */ + u64 iova; /* IO virtual address */ + u64 size; /* Size of mapping (bytes) */ +}; + +#define NBL_DEV_USER_MAP_DMA _IO(NBL_DEV_USER_TYPE, 1) + +struct nbl_dev_user_dma_unmap { + u32 argsz; + u32 flags; + u64 vaddr; + u64 iova; /* IO virtual address */ + u64 size; /* Size of mapping (bytes) */ +}; + +#define NBL_DEV_USER_UNMAP_DMA _IO(NBL_DEV_USER_TYPE, 2) + +#define NBL_KERNEL_NETWORK 0 +#define NBL_USER_NETWORK 1 + +#define NBL_DEV_USER_SWITCH_NETWORK _IO(NBL_DEV_USER_TYPE, 3) + +#define NBL_DEV_USER_GET_IFINDEX _IO(NBL_DEV_USER_TYPE, 4) + +#define NBL_DEV_USER_SET_EVENTFD _IO(NBL_DEV_USER_TYPE, 5) + +#define NBL_DEV_USER_CLEAR_EVENTFD _IO(NBL_DEV_USER_TYPE, 6) + +#define NBL_DEV_USER_SET_LISTENER _IO(NBL_DEV_USER_TYPE, 7) + +#define NBL_DEV_USER_GET_BAR_SIZE _IO(NBL_DEV_USER_TYPE, 8) + +void nbl_dev_start_user_dev(struct nbl_adapter *adapter); +void nbl_dev_stop_user_dev(struct nbl_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c new file mode 100644 index 000000000000..396f8a306832 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c @@ -0,0 +1,5262 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
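nbl_dev_user.h above defines a small, VFIO-style user ABI: an argsz/flags convention, _IO('n', ...) ioctl numbers, and a 40-bit shifted mmap offset for selecting BAR regions. A minimal userspace sketch of the DMA map/unmap calls follows; it is illustrative only: the device node and the IOVA are placeholders, the struct layouts are copied from the header, and in the remapping mode these two ioctls are served by the emulated VFIO device's ioctl handler rather than by the plain character device.

/* cc -o nbl_dma_demo nbl_dma_demo.c ; run as: ./nbl_dma_demo <device-node> */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Copied from nbl_dev_user.h for the purpose of this sketch. */
#define NBL_DEV_USER_TYPE		'n'
#define NBL_DEV_USER_MAP_DMA		_IO(NBL_DEV_USER_TYPE, 1)
#define NBL_DEV_USER_UNMAP_DMA		_IO(NBL_DEV_USER_TYPE, 2)
#define NBL_DEV_USER_DMA_MAP_FLAG_READ	(1u << 0)
#define NBL_DEV_USER_DMA_MAP_FLAG_WRITE	(1u << 1)
#define NBL_DEV_USER_PCI_OFFSET_SHIFT	40

struct nbl_dev_user_dma_map {
	uint32_t argsz;
	uint32_t flags;
	uint64_t vaddr;
	uint64_t iova;
	uint64_t size;
};

struct nbl_dev_user_dma_unmap {
	uint32_t argsz;
	uint32_t flags;
	uint64_t vaddr;
	uint64_t iova;
	uint64_t size;
};

int main(int argc, char **argv)
{
	size_t len = 2 * 1024 * 1024;
	struct nbl_dev_user_dma_map map = { .argsz = sizeof(map) };
	struct nbl_dev_user_dma_unmap unmap = { .argsz = sizeof(unmap) };
	void *buf;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDWR);	/* device fd provided by the setup */
	if (fd < 0)
		return 1;

	/* page-aligned, faulted-in buffer for the driver to pin */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0, len);

	map.flags = NBL_DEV_USER_DMA_MAP_FLAG_READ | NBL_DEV_USER_DMA_MAP_FLAG_WRITE;
	map.vaddr = (uintptr_t)buf;
	map.iova  = 0x100000;		/* illustrative IOVA chosen by the caller */
	map.size  = len;
	if (ioctl(fd, NBL_DEV_USER_MAP_DMA, &map))
		perror("NBL_DEV_USER_MAP_DMA");

	/* BAR region N would be selected by passing an mmap offset of
	 * (uint64_t)N << NBL_DEV_USER_PCI_OFFSET_SHIFT, as the
	 * NBL_DEV_USER_INDEX_TO_OFFSET() macro suggests. */

	unmap.vaddr = map.vaddr;
	unmap.iova  = map.iova;
	unmap.size  = map.size;
	ioctl(fd, NBL_DEV_USER_UNMAP_DMA, &unmap);

	munmap(buf, len);
	close(fd);
	return 0;
}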
+ * Author: + */ + +#include "nbl_dispatch.h" + +static int nbl_disp_chan_add_macvlan_req(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_add_macvlan param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt || !mac) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(param.mac, mac, sizeof(param.mac)); + param.vlan = vlan; + param.vsi = vsi; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_MACVLAN, ¶m, sizeof(param), + NULL, 0, 1); + + if (chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) + return -EFAULT; + + return 0; +} + +static void nbl_disp_chan_add_macvlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_add_macvlan *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_add_macvlan *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, + param->vlan, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_MACVLAN, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_MACVLAN); +} + +static void nbl_disp_chan_del_macvlan_req(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_del_macvlan param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt || !mac) + return; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(param.mac, mac, sizeof(param.mac)); + param.vlan = vlan; + param.vsi = vsi; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_MACVLAN, ¶m, sizeof(param), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_macvlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_del_macvlan *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_del_macvlan *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->mac, param->vlan, param->vsi); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MACVLAN, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_add_multi_rule_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + 
struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_MULTI_RULE, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_multi_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_MULTI_RULE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_del_multi_rule_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_MULTI_RULE, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_multi_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MULTI_RULE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_multi_group_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_MULTI_GROUP, + NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_multi_group_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + 
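From here on, nbl_dispatch.c repeats one mailbox pattern per operation: the _req side packs its arguments into an nbl_chan_param_* structure and sends the matching NBL_CHAN_MSG_* to the managing PF via NBL_CHAN_SEND()/send_msg(), while the _resp side (registered on the receiving function) casts the payload back, invokes the resource op under NBL_OPS_CALL_LOCK(), and replies with NBL_CHAN_ACK()/send_ack(). A skeletal template of that shape is sketched below; NBL_CHAN_MSG_DO_FOO, struct nbl_chan_param_do_foo and res_ops->do_foo are hypothetical placeholders, and the macro argument order simply mirrors the existing calls.

struct nbl_chan_param_do_foo {	/* wire format for the hypothetical op */
	u16 vsi_id;
};

static int nbl_disp_chan_do_foo_req(void *priv, u16 vsi_id)
{
	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
	struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
	struct nbl_chan_param_do_foo param = { .vsi_id = vsi_id };
	struct nbl_chan_send_info chan_send;

	/* payload in, optional response buffer, trailing 1 as in the
	 * existing requests
	 */
	NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DO_FOO,
		      &param, sizeof(param), NULL, 0, 1);
	return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
}

static void nbl_disp_chan_do_foo_resp(void *priv, u16 src_id, u16 msg_id,
				      void *data, u32 data_len)
{
	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
	struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
	struct nbl_chan_param_do_foo *param = (struct nbl_chan_param_do_foo *)data;
	struct nbl_chan_ack_info chan_ack;
	int err = NBL_CHAN_RESP_OK;
	int ret;

	ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->do_foo,
				NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id);
	if (ret)
		err = NBL_CHAN_RESP_ERR;

	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DO_FOO, msg_id, err, NULL, 0);
	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
}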
NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_MULTI_GROUP, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_multi_group_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_MULTI_GROUP, + NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_multi_group_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_MULTI_GROUP, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_register_net_req(void *priv, + struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_register_net_info param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + int ret = 0; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.pf_bar_start = register_param->pf_bar_start; + param.pf_bdf = register_param->pf_bdf; + param.vf_bar_start = register_param->vf_bar_start; + param.vf_bar_size = register_param->vf_bar_size; + param.total_vfs = register_param->total_vfs; + param.offset = register_param->offset; + param.stride = register_param->stride; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REGISTER_NET, ¶m, sizeof(param), + (void *)register_result, sizeof(*register_result), 1); + + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return ret; +} + +static void nbl_disp_chan_register_net_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_register_net_info *param; + struct nbl_register_net_result result = {0}; + struct nbl_register_net_param register_param = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_register_net_info *)data; + + register_param.pf_bar_start = param->pf_bar_start; + register_param.pf_bdf = param->pf_bdf; + register_param.vf_bar_start = param->vf_bar_start; + register_param.vf_bar_size = param->vf_bar_size; + register_param.total_vfs = param->total_vfs; + register_param.offset = param->offset; + register_param.stride = param->stride; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, ®ister_param, 
&result); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_NET, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id:%d\n", + ret, NBL_CHAN_MSG_REGISTER_NET, src_id); +} + +static int nbl_disp_unregister_net(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0); +} + +static int nbl_disp_chan_unregister_net_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNREGISTER_NET, NULL, 0, NULL, 0, 1); + + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_unregister_net_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_NET, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id:%d\n", + ret, NBL_CHAN_MSG_UNREGISTER_NET, src_id); +} + +static int nbl_disp_chan_alloc_txrx_queues_req(void *priv, u16 vsi_id, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_alloc_txrx_queues param = {0}; + struct nbl_chan_param_alloc_txrx_queues result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.queue_num = queue_num; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, ¶m, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return 0; +} + +static void nbl_disp_chan_alloc_txrx_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_alloc_txrx_queues *param; + struct nbl_chan_param_alloc_txrx_queues result = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_alloc_txrx_queues *)data; + result.queue_num = param->queue_num; + + 
NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->queue_num); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, + msg_id, err, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_free_txrx_queues_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_FREE_TXRX_QUEUES, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_txrx_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->free_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_FREE_TXRX_QUEUES, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_register_vsi2q_req(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_vsi2q param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_index = vsi_index; + param.vsi_id = vsi_id; + param.queue_offset = queue_offset; + param.queue_num = queue_num; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REGISTER_VSI2Q, ¶m, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_register_vsi2q_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_vsi2q *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_register_vsi2q *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_index, param->vsi_id, param->queue_offset, param->queue_num); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_VSI2Q, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_q2vsi_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, 
NBL_CHAN_MSG_SETUP_Q2VSI, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_q2vsi_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_Q2VSI, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_q2vsi_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_Q2VSI, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_q2vsi_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_q2vsi, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_Q2VSI, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_rss_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_RSS, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_rss_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_RSS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_rss_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = 
NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_RSS, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_rss_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_rss, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_RSS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_queue_req(void *priv, struct nbl_txrx_queue_param *queue_param, + bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(¶m.queue_param, queue_param, sizeof(param.queue_param)); + param.is_tx = is_tx; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_QUEUE, ¶m, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_setup_queue *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ¶m->queue_param, param->is_tx); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_QUEUE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_all_queues_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_all_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_all_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + 
NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_cfg_dsch_req(void *priv, u16 vsi_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_dsch param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.vld = vld; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_DSCH, ¶m, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_dsch_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_dsch *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_cfg_dsch *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vld); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_DSCH, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_cqs param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.real_qps = real_qps; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_CQS, ¶m, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_cqs_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_cqs *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_setup_cqs *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->real_qps); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_CQS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_cqs_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_REMOVE_CQS, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void 
nbl_disp_chan_remove_cqs_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_CQS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_promisc_mode(void *priv, u16 vsi_id, u16 mode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_promisc_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mode); + return ret; +} + +static int nbl_disp_chan_set_promisc_mode_req(void *priv, u16 vsi_id, u16 mode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_promisc_mode param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.mode = mode; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_PROSISC_MODE, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_promisc_mode_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_promisc_mode *param = NULL; + int err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_promisc_mode *)data; + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_promisc_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->mode); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PROSISC_MODE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_set_spoof_check_addr_req(void *priv, u16 vsi_id, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_spoof_check_addr param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + ether_addr_copy(param.mac, mac); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_spoof_check_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_spoof_check_addr 
*param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_spoof_check_addr *)data; + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_spoof_check_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->mac); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_set_vf_spoof_check_req(void *priv, u16 vsi_id, int vf_id, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_vf_spoof_check param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.vf_id = vf_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_vf_spoof_check_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_vf_spoof_check *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_vf_spoof_check *)data; + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_vf_spoof_check, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, + param->vf_id, param->enable); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_base_mac_addr_req(void *priv, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_BASE_MAC_ADDR, + NULL, 0, mac, ETH_ALEN, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_base_mac_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 mac[ETH_ALEN]; + + NBL_OPS_CALL(res_ops->get_base_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BASE_MAC_ADDR, msg_id, err, + mac, ETH_ALEN); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_firmware_version_req(void *priv, char *firmware_verion, u8 max_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + 
struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, NULL, 0, + firmware_verion, max_len, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_firmware_version_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + char firmware_verion[ETHTOOL_FWVERS_LEN] = ""; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL(res_ops->get_firmware_version, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), firmware_verion)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "get emp version failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, msg_id, err, + firmware_verion, ETHTOOL_FWVERS_LEN); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, src_id); +} + +static int nbl_disp_get_queue_err_stats(void *priv, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_queue_err_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + 0, queue_id, queue_err_stats, is_tx)); +} + +static int nbl_disp_chan_get_queue_err_stats_req(void *priv, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, + bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_queue_err_stats param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.queue_id = queue_id; + param.is_tx = is_tx; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, ¶m, + sizeof(param), queue_err_stats, sizeof(*queue_err_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_queue_err_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_queue_err_stats *param; + struct nbl_chan_ack_info chan_ack; + struct nbl_queue_err_stats queue_err_stats = { 0 }; + int err = NBL_CHAN_RESP_OK; + int ret; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_get_queue_err_stats *)data; + + ret = NBL_OPS_CALL(res_ops->get_queue_err_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->queue_id, + &queue_err_stats, param->is_tx)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get queue err stats_resp failed with ret: %d\n", 
ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, msg_id, err, + &queue_err_stats, sizeof(queue_err_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, src_id); +} + +static void nbl_disp_chan_get_coalesce_req(void *priv, u16 vector_id, + struct ethtool_coalesce *ec) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_COALESCE, &vector_id, sizeof(vector_id), + ec, sizeof(*ec), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_coalesce_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + struct ethtool_coalesce ec = { 0 }; + u16 vector_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + vector_id = *(u16 *)data; + + NBL_OPS_CALL(res_ops->get_coalesce, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + vector_id, &ec)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_COALESCE, msg_id, ret, + &ec, sizeof(ec)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_set_coalesce_req(void *priv, u16 vector_id, + u16 vector_num, u16 pnum, u16 rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_coalesce param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.local_vector_id = vector_id; + param.vector_num = vector_num; + param.rx_max_coalesced_frames = pnum; + param.rx_coalesce_usecs = rate; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_COALESCE, ¶m, sizeof(param), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_coalesce_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_coalesce *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_coalesce *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_coalesce, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->local_vector_id, + param->vector_num, param->rx_max_coalesced_frames, + param->rx_coalesce_usecs); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_COALESCE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_rxfh_indir_size_req(void *priv, u16 vsi_id, u32 *rxfh_indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt 
*)priv;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_chan_send_info chan_send = {0};
+	struct nbl_common_info *common;
+
+	chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+
+	NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE,
+		      &vsi_id, sizeof(vsi_id), rxfh_indir_size, sizeof(u32), 1);
+	chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_chan_get_rxfh_indir_size_resp(void *priv, u16 src_id, u16 msg_id,
+						    void *data, u32 data_len)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_chan_ack_info chan_ack;
+	u32 rxfh_indir_size = 0;
+	int ret = NBL_CHAN_RESP_OK;
+	u16 vsi_id;
+
+	res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+	chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+
+	vsi_id = *(u16 *)data;
+	NBL_OPS_CALL(res_ops->get_rxfh_indir_size,
+		     (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, &rxfh_indir_size));
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE, msg_id,
+		     ret, &rxfh_indir_size, sizeof(u32));
+	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+}
+
+static void nbl_disp_chan_get_rxfh_indir_req(void *priv, u16 vsi_id, u32 *indir, u32 indir_size)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_chan_param_get_rxfh_indir param = {0};
+	struct nbl_chan_send_info chan_send = {0};
+	struct nbl_common_info *common;
+
+	chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+
+	param.vsi_id = vsi_id;
+	param.rxfh_indir_size = indir_size;
+
+	NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_RXFH_INDIR, &param,
+		      sizeof(param), indir, indir_size * sizeof(u32), 1);
+	chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_chan_get_rxfh_indir_resp(void *priv, u16 src_id, u16 msg_id,
+					       void *data, u32 data_len)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_resource_ops *res_ops;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_chan_param_get_rxfh_indir *param;
+	struct nbl_chan_ack_info chan_ack;
+	u32 *indir;
+	int ret = NBL_CHAN_RESP_OK;
+
+	res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+	chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	param = (struct nbl_chan_param_get_rxfh_indir *)data;
+
+	indir = kcalloc(param->rxfh_indir_size, sizeof(u32), GFP_KERNEL);
+	if (!indir)
+		return;
+
+	NBL_OPS_CALL(res_ops->get_rxfh_indir,
+		     (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, indir));
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_INDIR, msg_id, ret,
+		     indir, param->rxfh_indir_size * sizeof(u32));
+	chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+
+	kfree(indir);
+}
+
+static void nbl_disp_chan_get_rxfh_rss_key_req(void *priv, u8 *rss_key, u32 rss_key_len)
+{
+	struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_chan_send_info chan_send = {0};
+	struct nbl_common_info *common;
+
+	chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+
+	NBL_CHAN_SEND(chan_send, common->mgt_pf,
+		      NBL_CHAN_MSG_GET_RXFH_RSS_KEY, &rss_key_len,
+		      sizeof(rss_key_len), rss_key, rss_key_len, 1);
+	chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_chan_get_rxfh_rss_key_resp(void *priv, u16 src_id, u16 msg_id,
+						 void
*data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 *rss_key; + int ret = NBL_CHAN_RESP_OK; + u32 rss_key_len; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + rss_key_len = *(u32 *)data; + + rss_key = kzalloc(rss_key_len, GFP_KERNEL); + NBL_OPS_CALL(res_ops->get_rxfh_rss_key, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rss_key)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_RSS_KEY, msg_id, ret, + rss_key, rss_key_len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(rss_key); +} + +static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u8 *rss_alg_sel, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, ð_id, + sizeof(eth_id), rss_alg_sel, sizeof(u8), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_rxfh_rss_alg_sel_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 rss_alg_sel, eth_id; + int ret = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + eth_id = *(u8 *)data; + + NBL_OPS_CALL(res_ops->get_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &rss_alg_sel, eth_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, msg_id, ret, + &rss_alg_sel, sizeof(rss_alg_sel)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_phy_caps_req(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PHY_CAPS, ð_id, + sizeof(eth_id), phy_caps, sizeof(*phy_caps), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_phy_caps_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + struct nbl_phy_caps phy_caps = { 0 }; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + eth_id = *(u8 *)data; + + NBL_OPS_CALL(res_ops->get_phy_caps, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &phy_caps)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PHY_CAPS, msg_id, ret, + &phy_caps, sizeof(phy_caps)); + + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_phy_state_req(void *priv, u8 eth_id, struct nbl_phy_state 
*phy_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PHY_STATE, ð_id, + sizeof(eth_id), phy_state, sizeof(*phy_state), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_phy_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + struct nbl_phy_state phy_state = { 0 }; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + eth_id = *(u8 *)data; + + NBL_OPS_CALL(res_ops->get_phy_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &phy_state)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PHY_STATE, msg_id, ret, + &phy_state, sizeof(phy_state)); + + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_set_sfp_state_req(void *priv, u8 eth_id, u8 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_sfp_state param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.state = state; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_SFP_STATE, ¶m, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_sfp_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_sfp_state *param; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_sfp_state *)data; + + ret = NBL_OPS_CALL(res_ops->set_sfp_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->state)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "set sfp state failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_SFP_STATE, msg_id, err, NULL, 0); + + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_SET_SFP_STATE, src_id); +} + +static u64 nbl_disp_chan_get_real_hw_addr_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + u64 addr = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_REAL_HW_ADDR, &vsi_id, + sizeof(vsi_id), &addr, sizeof(addr), 
1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return addr; +} + +static void nbl_disp_chan_get_real_hw_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; + u64 addr; + + vsi_id = *(u16 *)data; + addr = NBL_OPS_CALL(res_ops->get_real_hw_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REAL_HW_ADDR, msg_id, + ret, &addr, sizeof(addr)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_chan_get_function_id_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + u16 func_id = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_FUNCTION_ID, &vsi_id, + sizeof(vsi_id), &func_id, sizeof(func_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return func_id; +} + +static void nbl_disp_chan_get_function_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id, func_id; + + vsi_id = *(u16 *)data; + + func_id = NBL_OPS_CALL(res_ops->get_function_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FUNCTION_ID, msg_id, + ret, &func_id, sizeof(func_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_real_bdf_req(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_result_get_real_bdf result = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REAL_BDF, &vsi_id, + sizeof(vsi_id), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *bus = result.bus; + *dev = result.dev; + *function = result.function; +} + +static void nbl_disp_chan_get_real_bdf_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_result_get_real_bdf result = {0}; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + NBL_OPS_CALL(res_ops->get_real_bdf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, + &result.bus, &result.dev, &result.function)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REAL_BDF, msg_id, + ret, 
&result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_get_mbx_irq_num_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int result = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MBX_IRQ_NUM, NULL, 0, + &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_mbx_irq_num_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int result, ret = NBL_CHAN_RESP_OK; + + result = NBL_OPS_CALL(res_ops->get_mbx_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MBX_IRQ_NUM, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_clear_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CLEAR_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_clear_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u16 *vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_FLOW, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_clear_queues_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CLEAR_QUEUE, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_clear_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u16 *vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); 
+ + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_QUEUE, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_chan_get_vsi_id_req(void *priv, u16 func_id, u16 type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vsi_id param = {0}; + struct nbl_chan_param_get_vsi_id result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.type = type; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VSI_ID, ¶m, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.vsi_id; +} + +static void nbl_disp_chan_get_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vsi_id *param; + struct nbl_chan_param_get_vsi_id result; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_get_vsi_id *)data; + + result.vsi_id = NBL_OPS_CALL(res_ops->get_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->type)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_ID, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_VSI_ID); +} + +static void nbl_disp_chan_get_eth_id_req(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_eth_id param = {0}; + struct nbl_chan_param_get_eth_id result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_ID, ¶m, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *eth_mode = result.eth_mode; + *eth_id = result.eth_id; +} + +static void nbl_disp_chan_get_eth_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_eth_id *param; + struct nbl_chan_param_get_eth_id result = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_get_eth_id *)data; + + NBL_OPS_CALL(res_ops->get_eth_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, + &result.eth_mode, &result.eth_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_ID, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, 
"channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_ID); +} + +static int nbl_disp_alloc_rings(void *priv, struct net_device *netdev, u16 tx_num, + u16 rx_num, u16 tx_desc_num, u16 rx_desc_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->alloc_rings, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), netdev, tx_num, + rx_num, tx_desc_num, rx_desc_num)); + return ret; +} + +static void nbl_disp_remove_rings(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->remove_rings, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static dma_addr_t nbl_disp_start_tx_ring(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + dma_addr_t addr = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + addr = NBL_OPS_CALL(res_ops->start_tx_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return addr; +} + +static void nbl_disp_stop_tx_ring(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->stop_tx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static dma_addr_t nbl_disp_start_rx_ring(void *priv, u8 ring_index, bool use_napi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + dma_addr_t addr = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + addr = NBL_OPS_CALL(res_ops->start_rx_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, use_napi)); + + return addr; +} + +static void nbl_disp_stop_rx_ring(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->stop_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static void nbl_disp_kick_rx_ring(void *priv, u16 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->kick_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); +} + +static int nbl_disp_dump_ring(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->dump_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); + return ret; +} + +static int nbl_disp_dump_ring_stats(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->dump_ring_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); + return ret; +} + +static struct napi_struct *nbl_disp_get_vector_napi(void *priv, u16 index) +{ 
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_vector_napi, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); +} + +static void nbl_disp_set_vector_info(void *priv, u8 *irq_enable_base, + u32 irq_data, u16 index, bool mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->set_vector_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + irq_enable_base, irq_data, index, mask_en)); +} + +static void nbl_disp_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->register_vsi_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, ring_offset, ring_num)); +} + +static void nbl_disp_get_res_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_resource_pt_ops, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pt_ops)); +} + +static int nbl_disp_register_net(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, + register_param, register_result); + return ret; +} + +static int nbl_disp_alloc_txrx_queues(void *priv, u16 vsi_id, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, queue_num); + return ret; +} + +static void nbl_disp_free_txrx_queues(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->free_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, vsi_id, + queue_offset, queue_num); +} + +static int nbl_disp_setup_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_remove_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_q2vsi, 
+ NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_setup_rss(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_remove_rss(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_rss, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, is_tx); + return ret; +} + +static void nbl_disp_remove_all_queues(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_all_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_cfg_dsch(void *priv, u16 vsi_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vld); + return ret; +} + +static int nbl_disp_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, real_qps); + return ret; +} + +static void nbl_disp_remove_cqs(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_enable_msix_irq(void *priv, u16 global_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->enable_msix_irq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), global_vector_id)); + return ret; +} + +static u8 *nbl_disp_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return NULL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_msix_irq_enable_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), global_vector_id, irq_data)); +} + +static int nbl_disp_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt || !mac) + return -EINVAL; + + res_ops = 
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); + return ret; +} + +static void nbl_disp_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt || !mac) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); +} + +static int nbl_disp_add_multi_rule(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return ret; +} + +static void nbl_disp_del_multi_rule(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); +} + +static int nbl_disp_setup_multi_group(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_remove_multi_group(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_get_net_stats(void *priv, struct nbl_stats *net_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_net_stats, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), net_stats)); +} + +static void nbl_disp_get_private_stat_len(void *priv, u32 *len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_len, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), len); +} + +static void nbl_disp_get_private_stat_data(void *priv, u32 eth_id, u64 *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_data, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, data); +} + +static void nbl_disp_get_private_stat_data_req(void *priv, u32 eth_id, u64 *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_private_stat_data param = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.data_len = data_len; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_STATS, ¶m, + sizeof(param), data, data_len, 1); + 
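+	/*
+	 * The caller's data buffer is registered as the reply area of the
+	 * request, so the responder's ack payload (the private stats) appears
+	 * to land directly in it.  The trailing 1 in NBL_CHAN_SEND presumably
+	 * marks the message as expecting an ack, as in every other request in
+	 * this file.
+	 */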
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_private_stat_data_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_private_stat_data *param; + struct nbl_chan_ack_info chan_ack; + u64 *recv_data; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_get_private_stat_data *)data; + recv_data = kmalloc(param->data_len, GFP_ATOMIC); + if (!recv_data) { + dev_err(dev, "Allocate memory to private_stat_data failed\n"); + return; + } + + NBL_OPS_CALL(res_ops->get_private_stat_data, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, recv_data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_STATS, msg_id, + ret, recv_data, param->data_len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(recv_data); +} + +static void nbl_disp_fill_private_stat_strings(void *priv, u8 *strings) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->fill_private_stat_strings, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), strings); +} + +static u16 nbl_disp_get_max_desc_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_max_desc_num, ()); + return ret; +} + +static u16 nbl_disp_get_min_desc_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_min_desc_num, ()); + return ret; +} + +static int nbl_disp_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_spoof_check_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mac); + return ret; +} + +static int nbl_disp_set_vf_spoof_check(void *priv, u16 vsi_id, int vf_id, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_vf_spoof_check, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id, enable); + return ret; +} + +static void nbl_disp_get_base_mac_addr(void *priv, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_base_mac_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac); +} + +static u16 nbl_disp_get_tx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_tx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return ret; +} + +static u16 
nbl_disp_get_rx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_rx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return ret; +} + +static void nbl_disp_set_tx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_tx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); +} + +static void nbl_disp_set_rx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_rx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); +} + +static void nbl_disp_get_queue_stats(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_queue_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_id, queue_stats, is_tx)); +} + +static void nbl_disp_get_firmware_version(void *priv, char *firmware_verion, u8 max_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_firmware_version, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), firmware_verion)); + if (ret) + dev_err(dev, "get emp version failed with ret: %d\n", ret); +} + +static int nbl_disp_get_driver_info(void *priv, struct nbl_driver_info *driver_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_driver_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), driver_info)); +} + +static void nbl_disp_get_coalesce(void *priv, u16 vector_id, + struct ethtool_coalesce *ec) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_coalesce, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, ec)); +} + +static void nbl_disp_set_coalesce(void *priv, u16 vector_id, u16 vector_num, u16 pnum, u16 rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_coalesce, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, + vector_num, pnum, rate); +} + +static void nbl_disp_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_indir_size, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, rxfh_indir_size)); +} + +static void nbl_disp_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops 
= NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_rss_key_size, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rxfh_rss_key_size)); +} + +static void nbl_disp_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_indir, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, indir)); +} + +static void nbl_disp_get_rxfh_rss_key(void *priv, u8 *rss_key, u32 key_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_rss_key, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rss_key)); +} + +static void nbl_disp_get_rxfh_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), alg_sel, eth_id)); +} + +static void nbl_disp_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_phy_caps, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, phy_caps)); +} + +static void nbl_disp_get_phy_state(void *priv, u8 eth_id, struct nbl_phy_state *phy_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_phy_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, phy_state)); +} + +static int nbl_disp_set_sfp_state(void *priv, u8 eth_id, u8 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_sfp_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, state)); + return ret; +} + +static int nbl_disp_init_chip_module(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->init_chip_module, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_queue_init(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->queue_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_vsi_init(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->vsi_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + 
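+	/*
+	 * Local variant of configure_msix_map: the resource op is invoked
+	 * directly, passing 0 where the channel response handler below passes
+	 * the requester's src_id; other functions reach the same op through
+	 * the _req/_resp pair that follows.
+	 */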
if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, num_net_msix, + num_others_msix, net_msix_mask_en); + return ret; +} + +static int nbl_disp_chan_configure_msix_map_req(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_msix_map param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.num_net_msix = num_net_msix; + param.num_others_msix = num_others_msix; + param.msix_mask_en = net_msix_mask_en; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_configure_msix_map_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_msix_map *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_cfg_msix_map *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + param->num_net_msix, param->num_others_msix, param->msix_mask_en); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP); +} + +static int nbl_disp_chan_destroy_msix_map_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DESTROY_MSIX_MAP, + NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_destroy_msix_map_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_msix_map *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_cfg_msix_map *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_MSIX_MAP, 
msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DESTROY_MSIX_MAP); +} + +static int nbl_disp_chan_enable_mailbox_irq_req(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_enable_mailbox_irq param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vector_id = vector_id; + param.enable_msix = enable_msix; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_enable_mailbox_irq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_enable_mailbox_irq *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_enable_mailbox_irq *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + param->vector_id, param->enable_msix); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ); +} + +static u16 nbl_disp_chan_get_global_vector_req(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_global_vector param = {0}; + struct nbl_chan_param_get_global_vector result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.vector_id = local_vector_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, ¶m, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.vector_id; +} + +static void nbl_disp_chan_get_global_vector_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_global_vector *param; + struct nbl_chan_param_get_global_vector result; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_get_global_vector *)data; + + result.vector_id = 
NBL_OPS_CALL(res_ops->get_global_vector, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->vector_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_GLOBAL_VECTOR); +} + +static int nbl_disp_destroy_msix_map(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0); + return ret; +} + +static int nbl_disp_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, enable_msix); + return ret; +} + +static int nbl_disp_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->enable_abnormal_irq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + return ret; +} + +static int nbl_disp_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->enable_adminq_irq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + return ret; +} + +static u16 nbl_disp_get_global_vector(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_global_vector, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + return ret; +} + +static u16 nbl_disp_get_msix_entry_id(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_msix_entry_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + return ret; +} + +static void nbl_disp_dump_flow(void *priv, struct seq_file *m) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); +} + +static u16 nbl_disp_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_vsi_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + 
func_id, type)); +} + +static void nbl_disp_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_eth_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, eth_mode, eth_id)); +} + +static int nbl_disp_chan_add_lag_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LAG_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LAG_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_LAG_FLOW); +} + +static int nbl_disp_add_lag_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_chan_del_lag_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LAG_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lag_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + *(u16 *)data); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LAG_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DEL_LAG_FLOW); +} + +static void nbl_disp_del_lag_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, 
res_ops->del_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_chan_add_lldp_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LLDP_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LLDP_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_LLDP_FLOW); +} + +static int nbl_disp_add_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_chan_del_lldp_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LLDP_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + *(u16 *)data); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LLDP_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DEL_LLDP_FLOW); +} + +static void nbl_disp_del_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static u32 nbl_disp_get_tx_headroom(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u32 
ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_tx_headroom, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static u8 __iomem *nbl_disp_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u8 __iomem *addr = NULL; + + addr = NBL_OPS_CALL(res_ops->get_hw_addr, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), size)); + return addr; +} + +static u64 nbl_disp_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u64 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_real_hw_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + return ret; +} + +static u16 nbl_disp_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_function_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + return ret; +} + +static void nbl_disp_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_real_bdf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, bus, dev, function)); +} + +static bool nbl_disp_check_fw_heartbeat(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = false; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->check_fw_heartbeat, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static bool nbl_disp_check_fw_reset(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->check_fw_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_lock(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_lock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_unlock(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_unlock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_prepare(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_prepare, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_image(void *priv, u32 module, const u8 *data, size_t len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_image, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), module, data, len)); +} + +static int nbl_disp_flash_activate(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return 
NBL_OPS_CALL(res_ops->flash_activate, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_eth_loopback(void *priv, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u8 eth_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; + + return NBL_OPS_CALL(res_ops->setup_loopback, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable)); +} + +static int nbl_disp_chan_set_eth_loopback_req(void *priv, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_eth_loopback param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_port_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_LOOPBACK, ¶m, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_eth_loopback_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_eth_loopback *param; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_set_eth_loopback *)data; + ret = NBL_OPS_CALL(res_ops->setup_loopback, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_port_id, param->enable)); + if (ret) + dev_err(dev, "setup loopback adminq failed with ret: %d\n", ret); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_LOOPBACK, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_SET_ETH_LOOPBACK); +} + +static struct sk_buff *nbl_disp_clean_rx_lb_test(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->clean_rx_lb_test, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static u32 nbl_disp_check_active_vf(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->check_active_vf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0)); +} + +static u32 nbl_disp_chan_check_active_vf_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_DISP_MGT_TO_DEV(disp_mgt); + u32 active_vf_num = 0; + int ret; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CHECK_ACTIVE_VF, NULL, 0, + &active_vf_num, sizeof(active_vf_num), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + dev_err(dev, "channel check active vf send msg failed with 
ret: %d\n", ret); + + return active_vf_num; +} + +static void nbl_disp_chan_check_active_vf_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u32 active_vf_num; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + active_vf_num = NBL_OPS_CALL(res_ops->check_active_vf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CHECK_ACTIVE_VF, + msg_id, err, &active_vf_num, sizeof(active_vf_num)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_SET_ETH_LOOPBACK); +} + +static u32 nbl_disp_get_adminq_tx_buf_size(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + return chan_ops->get_adminq_tx_buf_size(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt)); +} + +static bool nbl_disp_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + cap_type)); + return has_cap; +} + +static bool nbl_disp_chan_get_product_flex_cap_req(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + bool has_cap = false; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, &cap_type, + sizeof(cap_type), &has_cap, sizeof(has_cap), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return has_cap; +} + +static void nbl_disp_chan_get_product_flex_cap_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + enum nbl_flex_cap_type *cap_type = (enum nbl_flex_cap_type *)data; + struct nbl_chan_ack_info chan_ack = {0}; + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *cap_type)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, msg_id, + NBL_CHAN_RESP_OK, &has_cap, sizeof(has_cap)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static bool nbl_disp_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_fix_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + cap_type)); + return has_cap; +} + +static int nbl_disp_get_mbx_irq_num(void *priv) +{ + struct 
nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_mbx_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_adminq_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_adminq_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_abnormal_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_abnormal_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_clear_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_clear_queues(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static u16 nbl_disp_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_qid)); +} + +static u16 +nbl_disp_chan_get_vsi_global_qid_req(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_vsi_qid_info param = {0}; + struct nbl_chan_send_info chan_send; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param.vsi_id = vsi_id; + param.local_qid = local_qid; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void +nbl_disp_chan_get_vsi_global_qid_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_vsi_qid_info *param; + struct nbl_chan_ack_info chan_ack; + u16 global_qid; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_vsi_qid_info *)data; + global_qid = NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->local_qid)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, + msg_id, global_qid, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_board_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_board_port_info board_info = {0}; + + res_ops = 
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_board_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &board_info)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_INFO, + msg_id, 0, &board_info, sizeof(board_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_port_attributes(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_port_attributes, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + if (ret) + dev_err(dev, "get port attributes failed with ret: %d\n", ret); + + return ret; +} + +static int nbl_disp_update_ring_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->update_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_ring_num(void *priv, struct nbl_fw_cmd_ring_num_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_enable_port(void *priv, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->enable_port, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable)); + if (ret) + dev_err(dev, "enable port failed with ret: %d\n", ret); + + return ret; +} + +static void nbl_disp_chan_recv_port_notify_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + res_ops->recv_port_notify(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data); +} + +static int nbl_disp_get_port_state(void *priv, u8 eth_id, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_port_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, port_state)); + return ret; +} + +static int nbl_disp_chan_get_port_state_req(void *priv, u8 eth_id, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_PORT_STATE, &eth_id, sizeof(eth_id), + port_state, sizeof(*port_state), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_port_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err
= NBL_CHAN_RESP_OK; + struct nbl_port_state info = {0}; + int ret = 0; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = NBL_OPS_CALL(res_ops->get_port_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PORT_STATE, msg_id, err, + &info, sizeof(info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_port_advertising(void *priv, + struct nbl_port_advertising *port_advertising) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->set_port_advertising, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), port_advertising)); + return ret; +} + +static int nbl_disp_chan_set_port_advertising_req(void *priv, + struct nbl_port_advertising *port_advertising) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_PORT_ADVERTISING, + port_advertising, sizeof(*port_advertising), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_port_advertising_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_port_advertising *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_port_advertising *)data; + + ret = res_ops->set_port_advertising(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PORT_ADVERTISING, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_info(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_module_info(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, info); +} + +static int nbl_disp_chan_get_module_info_req(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_MODULE_INFO, &eth_id, + sizeof(eth_id), info, sizeof(*info), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_module_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct
nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + struct ethtool_modinfo info; + int ret = 0; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_module_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_INFO, msg_id, err, + &info, sizeof(info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_eeprom(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eeprom, data); +} + +static int nbl_disp_chan_get_module_eeprom_req(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_module_eeprom param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + memcpy(&param.eeprom, eeprom, sizeof(struct ethtool_eeprom)); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MODULE_EEPROM, &param, + sizeof(param), data, eeprom->len, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_module_eeprom_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_module_eeprom *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + struct ethtool_eeprom *eeprom; + u8 *recv_data; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_get_module_eeprom *)data; + eth_id = param->eth_id; + eeprom = &param->eeprom; + recv_data = kmalloc(eeprom->len, GFP_ATOMIC); + if (!recv_data) { + dev_err(dev, "Allocate memory to store module eeprom failed\n"); + return; + } + + ret = res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, eeprom, recv_data); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "Get module eeprom failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_EEPROM, msg_id, err, + recv_data, eeprom->len); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_MODULE_EEPROM, src_id); + kfree(recv_data); +} + +static int nbl_disp_get_link_state(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + /* if res_ops->get_link_state() is not implemented, default to eth link up */ + if (res_ops->get_link_state) + ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), +
eth_id, eth_link_info); + else + eth_link_info->link_status = 1; + + return ret; +} + +static int nbl_disp_chan_get_link_state_req(void *priv, u8 eth_id, + struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_LINK_STATE, &eth_id, + sizeof(eth_id), eth_link_info, sizeof(*eth_link_info), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_link_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + struct nbl_eth_link_info eth_link_info = {0}; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, &eth_link_info); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_STATE, msg_id, err, + &eth_link_info, sizeof(eth_link_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_reg_dump(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, len)); +} + +static void nbl_disp_chan_get_reg_dump_req(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + u32 *result = NULL; + + result = kmalloc(len, GFP_KERNEL); + if (!result) + return; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP, &len, sizeof(len), + result, len, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + memcpy(data, result, len); + kfree(result); +} + +static void nbl_disp_chan_get_reg_dump_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u32 *result = NULL; + u32 len = 0; + + len = *(u32 *)data; + result = kmalloc(len, GFP_KERNEL); + if (!result) + return; + + NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), result, len)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP, msg_id, err, result, len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + kfree(result); +} + +static int nbl_disp_get_reg_dump_len(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return
NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_get_reg_dump_len_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + int result = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP_LEN, NULL, 0, + &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_reg_dump_len_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int result = 0; + + result = NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP_LEN, msg_id, err, + &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_eth_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, eth_id)); +} + +static int nbl_disp_chan_set_eth_mac_addr_req(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_eth_mac_addr param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(param.mac, mac, sizeof(param.mac)); + param.eth_id = eth_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_eth_mac_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_eth_mac_addr *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_eth_mac_addr *)data; + + ret = NBL_OPS_CALL(res_ops->set_eth_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->eth_id)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_SET_ETH_MAC_ADDR); +} + +static u32 nbl_disp_get_chip_temperature(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops =
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_chip_temperature(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static u32 nbl_disp_chan_get_chip_temperature_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + u32 chip_temperature = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, NULL, 0, + &chip_temperature, sizeof(chip_temperature), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return chip_temperature; +} + +static void nbl_disp_chan_get_chip_temperature_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u32 chip_temperature = 0; + + chip_temperature = NBL_OPS_CALL(res_ops->get_chip_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, msg_id, + ret, &chip_temperature, sizeof(chip_temperature)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u32 nbl_disp_get_chip_temperature_max(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_chip_temperature_max(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static u32 nbl_disp_get_chip_temperature_crit(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_chip_temperature_crit(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_get_module_temperature(void *priv, u8 eth_id, + enum nbl_module_temp_type type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_module_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, type)); +} + +static int nbl_disp_chan_get_module_temperature_req(void *priv, u8 eth_id, + enum nbl_module_temp_type type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int module_temp; + struct nbl_chan_param_get_module_tempetature param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.eth_id = eth_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, + &param, sizeof(param), &module_temp, sizeof(module_temp), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return module_temp; +} + +static void nbl_disp_chan_get_module_temperature_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int module_temp; + struct
nbl_chan_param_get_module_tempetature *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_get_module_tempetature *)data; + module_temp = NBL_OPS_CALL(res_ops->get_module_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->type)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, msg_id, + ret, &module_temp, sizeof(module_temp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnormal_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->process_abnormal_event(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), abnormal_info); +} + +static void nbl_disp_adapt_desc_gother(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->adapt_desc_gother, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_flr_clear_net(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_queues(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_flows(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_flows, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_interrupt(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_interrupt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_unmask_all_interrupts(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unmask_all_interrupts, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_keep_alive_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_KEEP_ALIVE, + NULL, 0, NULL, 0, 1); + + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_keep_alive_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + + NBL_CHAN_ACK(chan_ack, src_id,
NBL_CHAN_MSG_KEEP_ALIVE, msg_id, + 0, NULL, 0); + + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_user_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, + u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_USER_QUEUE_INFO, + &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); + + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = result.queue_num; + *queue_size = result.queue_size; + } +} + +static void nbl_disp_chan_get_user_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL(res_ops->get_user_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, + &result.queue_size, *(u16 *)data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_user_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); +} + +static int nbl_disp_ctrl_port_led(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->ctrl_port_led, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, led_ctrl, led_reg)); +} + +static int nbl_disp_chan_ctrl_port_led_req(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, + u32 *led_reg) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_ctrl_port_led param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.led_status = led_ctrl; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CTRL_PORT_LED, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_ctrl_port_led_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_ctrl_port_led *param = {0}; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_ctrl_port_led 
*)data; + ret = NBL_OPS_CALL(res_ops->ctrl_port_led, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->led_status, NULL)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CTRL_PORT_LED, msg_id, + ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_passthrough_fw_cmd(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->passthrough_fw_cmd, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result)); +} + +static int nbl_disp_nway_reset(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->nway_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id)); +} + +static int nbl_disp_chan_nway_reset_req(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_NWAY_RESET, + ð_id, sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_nway_reset_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 *eth_id; + int ret = NBL_CHAN_RESP_OK; + + eth_id = (u8 *)data; + ret = NBL_OPS_CALL(res_ops->nway_reset, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NWAY_RESET, msg_id, + ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_get_vf_base_vsi_id(void *priv, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); +} + +static u16 nbl_disp_chan_get_vf_base_vsi_id_req(void *priv, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + u16 vf_base_vsi_id = 0; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, + NULL, 0, &vf_base_vsi_id, sizeof(vf_base_vsi_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return vf_base_vsi_id; +} + +static void nbl_disp_chan_get_vf_base_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vf_base_vsi_id; + + vf_base_vsi_id 
= NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, msg_id, + ret, &vf_base_vsi_id, sizeof(vf_base_vsi_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_get_intr_suppress_level(void *priv, u64 pkt_rates, u16 last_level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_intr_suppress_level, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pkt_rates, last_level)); +} + +static void nbl_disp_set_intr_suppress_level(void *priv, u16 vector_id, u16 vector_num, u16 level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), common->mgt_pf, + vector_id, vector_num, level); +} + +static void nbl_disp_chan_set_intr_suppress_level_req(void *priv, u16 vector_id, + u16 vector_num, u16 level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_intr_suppress_level param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.local_vector_id = vector_id; + param.vector_num = vector_num; + param.level = level; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, + ¶m, sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_intr_suppress_level_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_intr_suppress_level *param; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_intr_suppress_level *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->local_vector_id, + param->vector_num, param->level); +} + +static int nbl_disp_get_p4_info(void *priv, char *verify_code) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), verify_code)); +} + +static int nbl_disp_load_p4(void *priv, struct nbl_load_p4_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->load_p4, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_load_p4_default(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->load_p4_default, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_get_p4_used_req(void *priv) +{ + struct nbl_dispatch_mgt 
*disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int p4_type; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_P4_USED, + NULL, 0, &p4_type, sizeof(p4_type), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return p4_type; +} + +static void nbl_disp_chan_get_p4_used_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int p4_type; + + p4_type = NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_P4_USED, msg_id, + ret, &p4_type, sizeof(p4_type)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_p4_used(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_p4_used(void *priv, int p4_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), p4_type)); +} + +static int nbl_disp_chan_get_board_id_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int result = -1; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_BOARD_ID, + NULL, 0, &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_board_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK, result = -1; + + result = NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_ID, + msg_id, ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_board_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static dma_addr_t nbl_disp_restore_abnormal_ring(void *priv, int ring_index, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->restore_abnormal_ring, + 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type));
+}
+
+static int nbl_disp_restart_abnormal_ring(void *priv, int ring_index, int type)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ return NBL_OPS_CALL(res_ops->restart_abnormal_ring,
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type));
+}
+
+static int nbl_disp_chan_restore_hw_queue_req(void *priv, u16 vsi_id, u16 local_queue_id,
+ dma_addr_t dma, int type)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+ struct nbl_chan_param_restore_hw_queue param = {0};
+ struct nbl_chan_send_info chan_send = {0};
+
+ param.vsi_id = vsi_id;
+ param.local_queue_id = local_queue_id;
+ param.dma = dma;
+ param.type = type;
+
+ NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common),
+ NBL_CHAN_MSG_RESTORE_HW_QUEUE, &param, sizeof(param), NULL, 0, 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_chan_restore_hw_queue_resp(void *priv, u16 src_id, u16 msg_id,
+ void *data, u32 data_len)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_param_restore_hw_queue *param = NULL;
+ struct nbl_chan_ack_info chan_ack;
+ int ret = NBL_CHAN_RESP_OK;
+
+ param = (struct nbl_chan_param_restore_hw_queue *)data;
+
+ NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ param->vsi_id, param->local_queue_id, param->dma, param->type);
+ NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTORE_HW_QUEUE, msg_id, ret, NULL, 0);
+ chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+}
+
+static int nbl_disp_restore_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id,
+ dma_addr_t dma, int type)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue,
+ NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ vsi_id, local_queue_id, dma, type);
+}
+
+static u16 nbl_disp_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ return NBL_OPS_CALL(res_ops->get_local_queue_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ vsi_id, global_queue_id));
+}
+
+static int nbl_disp_set_bridge_mode(void *priv, u16 bmode)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+
+ return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode,
+ NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ NBL_COMMON_TO_MGT_PF(common), bmode);
+}
+
+static int nbl_disp_chan_set_bridge_mode_req(void *priv, u16 bmode)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+ struct nbl_chan_send_info chan_send = {0};
+
+ NBL_CHAN_SEND(chan_send,
NBL_COMMON_TO_MGT_PF(common),
+ NBL_CHAN_MSG_SET_BRIDGE_MODE, &bmode, sizeof(bmode), NULL, 0, 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_chan_set_bridge_mode_resp(void *priv, u16 src_id, u16 msg_id,
+ void *data, u32 data_len)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_ack_info chan_ack;
+ int ret = NBL_CHAN_RESP_OK;
+ u16 *bmode;
+
+ bmode = (u16 *)data;
+ NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode,
+ NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, *bmode);
+ NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_BRIDGE_MODE,
+ msg_id, ret, NULL, 0);
+ chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+}
+
+static u16 nbl_disp_get_vf_function_id(void *priv, u16 vsi_id, int vf_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ return NBL_OPS_CALL(res_ops->get_vf_function_id,
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id));
+}
+
+static u16 nbl_disp_chan_get_vf_function_id_req(void *priv, u16 vsi_id, int vf_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_send_info chan_send = {0};
+ struct nbl_chan_param_get_vf_func_id param;
+ struct nbl_common_info *common;
+ u16 func_id = 0;
+
+ common = NBL_DISP_MGT_TO_COMMON(disp_mgt);
+ param.vsi_id = vsi_id;
+ param.vf_id = vf_id;
+
+ NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common),
+ NBL_CHAN_MSG_GET_VF_FUNCTION_ID, &param,
+ sizeof(param), &func_id, sizeof(func_id), 1);
+ chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+
+ return func_id;
+}
+
+static void nbl_disp_chan_get_vf_function_id_resp(void *priv, u16 src_id, u16 msg_id,
+ void *data, u32 data_len)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_param_get_vf_func_id *param;
+ struct nbl_chan_ack_info chan_ack;
+ int ret = NBL_CHAN_RESP_OK;
+ u16 func_id;
+
+ param = (struct nbl_chan_param_get_vf_func_id *)data;
+ func_id = NBL_OPS_CALL(res_ops->get_vf_function_id,
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vf_id));
+ NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, msg_id,
+ ret, &func_id, sizeof(func_id));
+ chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack);
+}
+
+/* NBL_DISP_SET_OPS(disp_op_name, res_func, ctrl_lvl, msg_type, msg_req, msg_resp)
+ * ctrl_lvl defines when this disp_op should go directly to res_op instead of sending a channel msg.
+ * + * Use X Macros to reduce codes in channel_op and disp_op setup/remove + */ +#define NBL_DISP_OPS_TBL \ +do { \ + NBL_DISP_SET_OPS(init_chip_module, nbl_disp_init_chip_module, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_resource_pt_ops, nbl_disp_get_res_pt_ops, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(queue_init, nbl_disp_queue_init, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(vsi_init, nbl_disp_vsi_init, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(configure_msix_map, nbl_disp_configure_msix_map, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, \ + nbl_disp_chan_configure_msix_map_req, \ + nbl_disp_chan_configure_msix_map_resp); \ + NBL_DISP_SET_OPS(destroy_msix_map, nbl_disp_destroy_msix_map, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DESTROY_MSIX_MAP, \ + nbl_disp_chan_destroy_msix_map_req, \ + nbl_disp_chan_destroy_msix_map_resp); \ + NBL_DISP_SET_OPS(enable_mailbox_irq, nbl_disp_enable_mailbox_irq, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, \ + nbl_disp_chan_enable_mailbox_irq_req, \ + nbl_disp_chan_enable_mailbox_irq_resp); \ + NBL_DISP_SET_OPS(enable_abnormal_irq, nbl_disp_enable_abnormal_irq, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(enable_adminq_irq, nbl_disp_enable_adminq_irq, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_global_vector, nbl_disp_get_global_vector, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, \ + nbl_disp_chan_get_global_vector_req, \ + nbl_disp_chan_get_global_vector_resp); \ + NBL_DISP_SET_OPS(get_msix_entry_id, nbl_disp_get_msix_entry_id, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(alloc_rings, nbl_disp_alloc_rings, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(remove_rings, nbl_disp_remove_rings, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(start_tx_ring, nbl_disp_start_tx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(stop_tx_ring, nbl_disp_stop_tx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(start_rx_ring, nbl_disp_start_rx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(stop_rx_ring, nbl_disp_stop_rx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(kick_rx_ring, nbl_disp_kick_rx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(dump_ring, nbl_disp_dump_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(dump_ring_stats, nbl_disp_dump_ring_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_vector_napi, nbl_disp_get_vector_napi, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_vector_info, nbl_disp_set_vector_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_vsi_ring, nbl_disp_register_vsi_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_net, nbl_disp_register_net, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_NET, \ + nbl_disp_chan_register_net_req, nbl_disp_chan_register_net_resp); \ + NBL_DISP_SET_OPS(unregister_net, nbl_disp_unregister_net, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNREGISTER_NET, \ + nbl_disp_chan_unregister_net_req, nbl_disp_chan_unregister_net_resp); \ + NBL_DISP_SET_OPS(alloc_txrx_queues, nbl_disp_alloc_txrx_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, \ + 
nbl_disp_chan_alloc_txrx_queues_req, \ + nbl_disp_chan_alloc_txrx_queues_resp); \ + NBL_DISP_SET_OPS(free_txrx_queues, nbl_disp_free_txrx_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_TXRX_QUEUES, \ + nbl_disp_chan_free_txrx_queues_req, \ + nbl_disp_chan_free_txrx_queues_resp); \ + NBL_DISP_SET_OPS(register_vsi2q, nbl_disp_register_vsi2q, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_VSI2Q, \ + nbl_disp_chan_register_vsi2q_req, \ + nbl_disp_chan_register_vsi2q_resp); \ + NBL_DISP_SET_OPS(setup_q2vsi, nbl_disp_setup_q2vsi, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_Q2VSI, \ + nbl_disp_chan_setup_q2vsi_req, \ + nbl_disp_chan_setup_q2vsi_resp); \ + NBL_DISP_SET_OPS(remove_q2vsi, nbl_disp_remove_q2vsi, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_Q2VSI, \ + nbl_disp_chan_remove_q2vsi_req, \ + nbl_disp_chan_remove_q2vsi_resp); \ + NBL_DISP_SET_OPS(setup_rss, nbl_disp_setup_rss, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_RSS, \ + nbl_disp_chan_setup_rss_req, \ + nbl_disp_chan_setup_rss_resp); \ + NBL_DISP_SET_OPS(remove_rss, nbl_disp_remove_rss, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_RSS, \ + nbl_disp_chan_remove_rss_req, \ + nbl_disp_chan_remove_rss_resp); \ + NBL_DISP_SET_OPS(setup_queue, nbl_disp_setup_queue, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_QUEUE, \ + nbl_disp_chan_setup_queue_req, nbl_disp_chan_setup_queue_resp); \ + NBL_DISP_SET_OPS(remove_all_queues, nbl_disp_remove_all_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, \ + nbl_disp_chan_remove_all_queues_req, \ + nbl_disp_chan_remove_all_queues_resp); \ + NBL_DISP_SET_OPS(cfg_dsch, nbl_disp_cfg_dsch, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_DSCH, \ + nbl_disp_chan_cfg_dsch_req, nbl_disp_chan_cfg_dsch_resp); \ + NBL_DISP_SET_OPS(setup_cqs, nbl_disp_setup_cqs, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_CQS, \ + nbl_disp_chan_setup_cqs_req, nbl_disp_chan_setup_cqs_resp); \ + NBL_DISP_SET_OPS(remove_cqs, nbl_disp_remove_cqs, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_CQS, \ + nbl_disp_chan_remove_cqs_req, nbl_disp_chan_remove_cqs_resp); \ + NBL_DISP_SET_OPS(enable_msix_irq, nbl_disp_enable_msix_irq, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_msix_irq_enable_info, nbl_disp_get_msix_irq_enable_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(add_macvlan, nbl_disp_add_macvlan, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_MACVLAN, \ + nbl_disp_chan_add_macvlan_req, nbl_disp_chan_add_macvlan_resp); \ + NBL_DISP_SET_OPS(del_macvlan, nbl_disp_del_macvlan, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_MACVLAN, \ + nbl_disp_chan_del_macvlan_req, nbl_disp_chan_del_macvlan_resp); \ + NBL_DISP_SET_OPS(add_multi_rule, nbl_disp_add_multi_rule, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_MULTI_RULE, \ + nbl_disp_chan_add_multi_rule_req, nbl_disp_chan_add_multi_rule_resp); \ + NBL_DISP_SET_OPS(del_multi_rule, nbl_disp_del_multi_rule, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_MULTI_RULE, \ + nbl_disp_chan_del_multi_rule_req, nbl_disp_chan_del_multi_rule_resp); \ + NBL_DISP_SET_OPS(setup_multi_group, nbl_disp_setup_multi_group, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_MULTI_GROUP, \ + nbl_disp_chan_setup_multi_group_req, \ + nbl_disp_chan_setup_multi_group_resp); \ + NBL_DISP_SET_OPS(remove_multi_group, nbl_disp_remove_multi_group, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_MULTI_GROUP, \ + nbl_disp_chan_remove_multi_group_req, \ + nbl_disp_chan_remove_multi_group_resp); \ + NBL_DISP_SET_OPS(dump_flow, nbl_disp_dump_flow, \ + 
NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_vsi_id, nbl_disp_get_vsi_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VSI_ID, \ + nbl_disp_chan_get_vsi_id_req, nbl_disp_chan_get_vsi_id_resp); \ + NBL_DISP_SET_OPS(get_eth_id, nbl_disp_get_eth_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_ID, \ + nbl_disp_chan_get_eth_id_req, nbl_disp_chan_get_eth_id_resp); \ + NBL_DISP_SET_OPS(add_lldp_flow, nbl_disp_add_lldp_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_LLDP_FLOW, \ + nbl_disp_chan_add_lldp_flow_req, nbl_disp_chan_add_lldp_flow_resp); \ + NBL_DISP_SET_OPS(del_lldp_flow, nbl_disp_del_lldp_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_LLDP_FLOW, \ + nbl_disp_chan_del_lldp_flow_req, nbl_disp_chan_del_lldp_flow_resp); \ + NBL_DISP_SET_OPS(add_lag_flow, nbl_disp_add_lag_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_LAG_FLOW, \ + nbl_disp_chan_add_lag_flow_req, nbl_disp_chan_add_lag_flow_resp); \ + NBL_DISP_SET_OPS(del_lag_flow, nbl_disp_del_lag_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_LAG_FLOW, \ + nbl_disp_chan_del_lag_flow_req, nbl_disp_chan_del_lag_flow_resp); \ + NBL_DISP_SET_OPS(set_promisc_mode, nbl_disp_set_promisc_mode, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PROSISC_MODE, \ + nbl_disp_chan_set_promisc_mode_req, \ + nbl_disp_chan_set_promisc_mode_resp); \ + NBL_DISP_SET_OPS(set_spoof_check_addr, nbl_disp_set_spoof_check_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, \ + nbl_disp_chan_set_spoof_check_addr_req, \ + nbl_disp_chan_set_spoof_check_addr_resp); \ + NBL_DISP_SET_OPS(set_vf_spoof_check, nbl_disp_set_vf_spoof_check, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, \ + nbl_disp_chan_set_vf_spoof_check_req, \ + nbl_disp_chan_set_vf_spoof_check_resp); \ + NBL_DISP_SET_OPS(get_base_mac_addr, nbl_disp_get_base_mac_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_BASE_MAC_ADDR, \ + nbl_disp_chan_get_base_mac_addr_req, \ + nbl_disp_chan_get_base_mac_addr_resp); \ + NBL_DISP_SET_OPS(get_tx_headroom, nbl_disp_get_tx_headroom, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_firmware_version, nbl_disp_get_firmware_version, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, \ + nbl_disp_chan_get_firmware_version_req, \ + nbl_disp_chan_get_firmware_version_resp); \ + NBL_DISP_SET_OPS(get_driver_info, nbl_disp_get_driver_info, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_queue_stats, nbl_disp_get_queue_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_queue_err_stats, nbl_disp_get_queue_err_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, \ + nbl_disp_chan_get_queue_err_stats_req, \ + nbl_disp_chan_get_queue_err_stats_resp); \ + NBL_DISP_SET_OPS(get_net_stats, nbl_disp_get_net_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_private_stat_len, nbl_disp_get_private_stat_len, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_private_stat_data, nbl_disp_get_private_stat_data, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_STATS, \ + nbl_disp_get_private_stat_data_req, \ + nbl_disp_chan_get_private_stat_data_resp); \ + NBL_DISP_SET_OPS(fill_private_stat_strings, nbl_disp_fill_private_stat_strings, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_max_desc_num, nbl_disp_get_max_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_min_desc_num, nbl_disp_get_min_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + 
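/* The descriptor-number ops here are NBL_DISP_CTRL_LVL_NET with no \
+ * channel message (msg_type -1): nbl_disp_setup_ctrl_lvl() wires them \
+ * straight to the res_ops implementation, or leaves them NULL when the \
+ * net ctrl level is not enabled. \
+ */ \ +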
NBL_DISP_SET_OPS(get_tx_desc_num, nbl_disp_get_tx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_rx_desc_num, nbl_disp_get_rx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_tx_desc_num, nbl_disp_set_tx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_rx_desc_num, nbl_disp_set_rx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_eth_loopback, nbl_disp_set_eth_loopback, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_ETH_LOOPBACK, \ + nbl_disp_chan_set_eth_loopback_req, \ + nbl_disp_chan_set_eth_loopback_resp); \ + NBL_DISP_SET_OPS(clean_rx_lb_test, nbl_disp_clean_rx_lb_test, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_coalesce, nbl_disp_get_coalesce, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_COALESCE, \ + nbl_disp_chan_get_coalesce_req, \ + nbl_disp_chan_get_coalesce_resp); \ + NBL_DISP_SET_OPS(set_coalesce, nbl_disp_set_coalesce, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_COALESCE, \ + nbl_disp_chan_set_coalesce_req, \ + nbl_disp_chan_set_coalesce_resp); \ + NBL_DISP_SET_OPS(get_intr_suppress_level, nbl_disp_get_intr_suppress_level, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_intr_suppress_level, nbl_disp_set_intr_suppress_level, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, \ + nbl_disp_chan_set_intr_suppress_level_req, \ + nbl_disp_chan_set_intr_suppress_level_resp); \ + NBL_DISP_SET_OPS(get_rxfh_indir_size, nbl_disp_get_rxfh_indir_size, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE, \ + nbl_disp_chan_get_rxfh_indir_size_req, \ + nbl_disp_chan_get_rxfh_indir_size_resp); \ + NBL_DISP_SET_OPS(get_rxfh_rss_key_size, nbl_disp_get_rxfh_rss_key_size, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_rxfh_indir, nbl_disp_get_rxfh_indir, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_INDIR, \ + nbl_disp_chan_get_rxfh_indir_req, nbl_disp_chan_get_rxfh_indir_resp); \ + NBL_DISP_SET_OPS(get_rxfh_rss_key, nbl_disp_get_rxfh_rss_key, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_KEY, \ + nbl_disp_chan_get_rxfh_rss_key_req, \ + nbl_disp_chan_get_rxfh_rss_key_resp); \ + NBL_DISP_SET_OPS(get_rxfh_rss_alg_sel, nbl_disp_get_rxfh_rss_alg_sel, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, \ + nbl_disp_chan_get_rxfh_rss_alg_sel_req, \ + nbl_disp_chan_get_rxfh_rss_alg_sel_resp); \ + NBL_DISP_SET_OPS(get_hw_addr, nbl_disp_get_hw_addr, \ + NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_real_hw_addr, nbl_disp_get_real_hw_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REAL_HW_ADDR, \ + nbl_disp_chan_get_real_hw_addr_req, \ + nbl_disp_chan_get_real_hw_addr_resp); \ + NBL_DISP_SET_OPS(get_function_id, nbl_disp_get_function_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FUNCTION_ID, \ + nbl_disp_chan_get_function_id_req, nbl_disp_chan_get_function_id_resp);\ + NBL_DISP_SET_OPS(get_real_bdf, nbl_disp_get_real_bdf, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REAL_BDF, \ + nbl_disp_chan_get_real_bdf_req, nbl_disp_chan_get_real_bdf_resp); \ + NBL_DISP_SET_OPS(check_fw_heartbeat, nbl_disp_check_fw_heartbeat, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(check_fw_reset, nbl_disp_check_fw_reset, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_lock, nbl_disp_flash_lock, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_unlock, nbl_disp_flash_unlock, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, 
NULL); \ + NBL_DISP_SET_OPS(flash_prepare, nbl_disp_flash_prepare, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_image, nbl_disp_flash_image, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_activate, nbl_disp_flash_activate, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_phy_caps, nbl_disp_get_phy_caps, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PHY_CAPS, \ + nbl_disp_chan_get_phy_caps_req, \ + nbl_disp_chan_get_phy_caps_resp); \ + NBL_DISP_SET_OPS(get_phy_state, nbl_disp_get_phy_state, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PHY_STATE, \ + nbl_disp_chan_get_phy_state_req, \ + nbl_disp_chan_get_phy_state_resp); \ + NBL_DISP_SET_OPS(set_sfp_state, nbl_disp_set_sfp_state, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_SFP_STATE, \ + nbl_disp_chan_set_sfp_state_req, \ + nbl_disp_chan_set_sfp_state_resp); \ + NBL_DISP_SET_OPS(passthrough_fw_cmd, nbl_disp_passthrough_fw_cmd, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(check_active_vf, nbl_disp_check_active_vf, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CHECK_ACTIVE_VF, \ + nbl_disp_chan_check_active_vf_req, \ + nbl_disp_chan_check_active_vf_resp); \ + NBL_DISP_SET_OPS(get_adminq_tx_buf_size, nbl_disp_get_adminq_tx_buf_size, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_product_flex_cap, nbl_disp_get_product_flex_cap, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, \ + nbl_disp_chan_get_product_flex_cap_req, \ + nbl_disp_chan_get_product_flex_cap_resp); \ + NBL_DISP_SET_OPS(get_product_fix_cap, nbl_disp_get_product_fix_cap, \ + NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_mbx_irq_num, nbl_disp_get_mbx_irq_num, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MBX_IRQ_NUM, \ + nbl_disp_chan_get_mbx_irq_num_req, \ + nbl_disp_chan_get_mbx_irq_num_resp); \ + NBL_DISP_SET_OPS(get_adminq_irq_num, nbl_disp_get_adminq_irq_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_abnormal_irq_num, nbl_disp_get_abnormal_irq_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(clear_flow, nbl_disp_clear_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_FLOW, \ + nbl_disp_chan_clear_flow_req, nbl_disp_chan_clear_flow_resp); \ + NBL_DISP_SET_OPS(clear_queues, nbl_disp_clear_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_QUEUE, \ + nbl_disp_chan_clear_queues_req, nbl_disp_chan_clear_queues_resp); \ + NBL_DISP_SET_OPS(get_reg_dump, nbl_disp_get_reg_dump, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REG_DUMP, \ + nbl_disp_chan_get_reg_dump_req, \ + nbl_disp_chan_get_reg_dump_resp); \ + NBL_DISP_SET_OPS(get_reg_dump_len, nbl_disp_get_reg_dump_len, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REG_DUMP_LEN, \ + nbl_disp_chan_get_reg_dump_len_req, \ + nbl_disp_chan_get_reg_dump_len_resp); \ + NBL_DISP_SET_OPS(get_p4_info, nbl_disp_get_p4_info, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(load_p4, nbl_disp_load_p4, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(load_p4_default, nbl_disp_load_p4_default, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_p4_used, nbl_disp_get_p4_used, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_P4_USED, \ + nbl_disp_chan_get_p4_used_req, nbl_disp_chan_get_p4_used_resp); \ + NBL_DISP_SET_OPS(set_p4_used, nbl_disp_set_p4_used, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_board_id, nbl_disp_get_board_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_BOARD_ID, \ + 
nbl_disp_chan_get_board_id_req, nbl_disp_chan_get_board_id_resp); \ + NBL_DISP_SET_OPS(restore_abnormal_ring, nbl_disp_restore_abnormal_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(restart_abnormal_ring, nbl_disp_restart_abnormal_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(restore_hw_queue, nbl_disp_restore_hw_queue, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_RESTORE_HW_QUEUE, \ + nbl_disp_chan_restore_hw_queue_req, \ + nbl_disp_chan_restore_hw_queue_resp); \ + NBL_DISP_SET_OPS(get_local_queue_id, nbl_disp_get_local_queue_id, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_vsi_global_queue_id, nbl_disp_get_vsi_global_qid, \ + NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, \ + nbl_disp_chan_get_vsi_global_qid_req, \ + nbl_disp_chan_get_vsi_global_qid_resp); \ + NBL_DISP_SET_OPS(get_port_attributes, nbl_disp_get_port_attributes, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(update_ring_num, nbl_disp_update_ring_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_ring_num, nbl_disp_set_ring_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(enable_port, nbl_disp_enable_port, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(dummy_func, NULL, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADMINQ_PORT_NOTIFY, \ + NULL, \ + nbl_disp_chan_recv_port_notify_resp); \ + NBL_DISP_SET_OPS(get_port_state, nbl_disp_get_port_state, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PORT_STATE, \ + nbl_disp_chan_get_port_state_req, \ + nbl_disp_chan_get_port_state_resp); \ + NBL_DISP_SET_OPS(set_port_advertising, nbl_disp_set_port_advertising, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PORT_ADVERTISING, \ + nbl_disp_chan_set_port_advertising_req, \ + nbl_disp_chan_set_port_advertising_resp); \ + NBL_DISP_SET_OPS(get_module_info, nbl_disp_get_module_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MODULE_INFO, \ + nbl_disp_chan_get_module_info_req, \ + nbl_disp_chan_get_module_info_resp); \ + NBL_DISP_SET_OPS(get_module_eeprom, nbl_disp_get_module_eeprom, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MODULE_EEPROM, \ + nbl_disp_chan_get_module_eeprom_req, \ + nbl_disp_chan_get_module_eeprom_resp); \ + NBL_DISP_SET_OPS(get_link_state, nbl_disp_get_link_state, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_STATE, \ + nbl_disp_chan_get_link_state_req, \ + nbl_disp_chan_get_link_state_resp); \ + NBL_DISP_SET_OPS(set_eth_mac_addr, nbl_disp_set_eth_mac_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, \ + nbl_disp_chan_set_eth_mac_addr_req, \ + nbl_disp_chan_set_eth_mac_addr_resp); \ + NBL_DISP_SET_OPS(get_chip_temperature, nbl_disp_get_chip_temperature, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, \ + nbl_disp_chan_get_chip_temperature_req, \ + nbl_disp_chan_get_chip_temperature_resp); \ + NBL_DISP_SET_OPS(get_chip_temperature_max, nbl_disp_get_chip_temperature_max, \ + NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_chip_temperature_crit, nbl_disp_get_chip_temperature_crit, \ + NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_module_temperature, nbl_disp_get_module_temperature, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, \ + nbl_disp_chan_get_module_temperature_req, \ + nbl_disp_chan_get_module_temperature_resp); \ + NBL_DISP_SET_OPS(process_abnormal_event, nbl_disp_process_abnormal_event, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + 
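/* Each NBL_DISP_SET_OPS() entry in this table is expanded under two \
+ * different definitions of NBL_DISP_SET_OPS: nbl_disp_setup_msg() \
+ * registers msg_resp as the channel handler for msg_type (when \
+ * msg_type >= 0), and nbl_disp_setup_ctrl_lvl() points the dispatch op \
+ * at res_func (direct call) when the entry's ctrl-lvl bit is set, or \
+ * at msg_req (channel request) otherwise. \
+ */ \ +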
NBL_DISP_SET_OPS(adapt_desc_gother, nbl_disp_adapt_desc_gother, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_net, nbl_disp_flr_clear_net, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_queues, nbl_disp_flr_clear_queues, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_flows, nbl_disp_flr_clear_flows, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_interrupt, nbl_disp_flr_clear_interrupt, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(unmask_all_interrupts, nbl_disp_unmask_all_interrupts, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(keep_alive, nbl_disp_keep_alive_req, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_KEEP_ALIVE, \ + nbl_disp_keep_alive_req, \ + nbl_disp_chan_keep_alive_resp); \ + NBL_DISP_SET_OPS(ctrl_port_led, nbl_disp_ctrl_port_led, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CTRL_PORT_LED, \ + nbl_disp_chan_ctrl_port_led_req, nbl_disp_chan_ctrl_port_led_resp); \ + NBL_DISP_SET_OPS(nway_reset, nbl_disp_nway_reset, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_NWAY_RESET, \ + nbl_disp_chan_nway_reset_req, nbl_disp_chan_nway_reset_resp); \ + NBL_DISP_SET_OPS(get_user_queue_info, nbl_disp_get_user_queue_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, \ + nbl_disp_chan_get_user_queue_info_req, \ + nbl_disp_chan_get_user_queue_info_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_BOARD_INFO, NULL, \ + nbl_disp_chan_get_board_info_resp); \ + NBL_DISP_SET_OPS(get_vf_base_vsi_id, nbl_disp_get_vf_base_vsi_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, \ + nbl_disp_chan_get_vf_base_vsi_id_req, \ + nbl_disp_chan_get_vf_base_vsi_id_resp); \ + NBL_DISP_SET_OPS(set_bridge_mode, nbl_disp_set_bridge_mode, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_BRIDGE_MODE, \ + nbl_disp_chan_set_bridge_mode_req, \ + nbl_disp_chan_set_bridge_mode_resp); \ + NBL_DISP_SET_OPS(get_vf_function_id, nbl_disp_get_vf_function_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, \ + nbl_disp_chan_get_vf_function_id_req, \ + nbl_disp_chan_get_vf_function_id_resp); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_disp_setup_msg(struct nbl_dispatch_mgt *disp_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int ret = 0; + + if (!chan_ops->check_queue_exist(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return 0; + + mutex_init(&disp_mgt->ops_mutex_lock); + spin_lock_init(&disp_mgt->ops_spin_lock); + disp_mgt->ops_lock_required = true; + +#define NBL_DISP_SET_OPS(disp_op, res_func, ctrl_lvl, msg_type, msg_req, msg_resp) \ +do { \ + typeof(msg_type) _msg_type = (msg_type); \ + if (_msg_type >= 0) \ + ret += chan_ops->register_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), \ + _msg_type, msg_resp, disp_mgt); \ +} while (0) + NBL_DISP_OPS_TBL; +#undef NBL_DISP_SET_OPS + + return ret; +} + +/* Ctrl lvl means that if a certain level is set, then all disp_ops that decleared this lvl + * will go directly to res_ops, rather than send a channel msg, and vice versa. 
+ */ +static int nbl_disp_setup_ctrl_lvl(struct nbl_dispatch_mgt *disp_mgt, u32 lvl) +{ + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_DISP_MGT_TO_DISP_OPS(disp_mgt); + + set_bit(lvl, disp_mgt->ctrl_lvl); + +#define NBL_DISP_SET_OPS(disp_op, res_func, ctrl, msg_type, msg_req, msg_resp) \ +do { \ + disp_ops->NBL_NAME(disp_op) = test_bit(ctrl, disp_mgt->ctrl_lvl) ? res_func : msg_req; ;\ +} while (0) + NBL_DISP_OPS_TBL; +#undef NBL_DISP_SET_OPS + + return 0; +} + +static int nbl_disp_setup_disp_mgt(struct nbl_common_info *common, + struct nbl_dispatch_mgt **disp_mgt) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + *disp_mgt = devm_kzalloc(dev, sizeof(struct nbl_dispatch_mgt), GFP_KERNEL); + if (!*disp_mgt) + return -ENOMEM; + + NBL_DISP_MGT_TO_COMMON(*disp_mgt) = common; + return 0; +} + +static void nbl_disp_remove_disp_mgt(struct nbl_common_info *common, + struct nbl_dispatch_mgt **disp_mgt) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, *disp_mgt); + *disp_mgt = NULL; +} + +static void nbl_disp_remove_ops(struct device *dev, struct nbl_dispatch_ops_tbl **disp_ops_tbl) +{ + devm_kfree(dev, NBL_DISP_OPS_TBL_TO_OPS(*disp_ops_tbl)); + devm_kfree(dev, *disp_ops_tbl); + *disp_ops_tbl = NULL; +} + +static int nbl_disp_setup_ops(struct device *dev, struct nbl_dispatch_ops_tbl **disp_ops_tbl, + struct nbl_dispatch_mgt *disp_mgt) +{ + struct nbl_dispatch_ops *disp_ops; + + *disp_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_dispatch_ops_tbl), GFP_KERNEL); + if (!*disp_ops_tbl) + return -ENOMEM; + + disp_ops = devm_kzalloc(dev, sizeof(struct nbl_dispatch_ops), GFP_KERNEL); + if (!disp_ops) + return -ENOMEM; + + NBL_DISP_OPS_TBL_TO_OPS(*disp_ops_tbl) = disp_ops; + NBL_DISP_OPS_TBL_TO_PRIV(*disp_ops_tbl) = disp_mgt; + + return 0; +} + +int nbl_disp_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dispatch_mgt **disp_mgt = + (struct nbl_dispatch_mgt **)&NBL_ADAPTER_TO_DISP_MGT(adapter); + struct nbl_dispatch_ops_tbl **disp_ops_tbl = &NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + struct nbl_resource_ops_tbl *res_ops_tbl = NBL_ADAPTER_TO_RES_OPS_TBL(adapter); + struct nbl_channel_ops_tbl *chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + int ret = 0; + + ret = nbl_disp_setup_disp_mgt(common, disp_mgt); + if (ret) + goto setup_mgt_fail; + + ret = nbl_disp_setup_ops(dev, disp_ops_tbl, *disp_mgt); + if (ret) + goto setup_ops_fail; + + NBL_DISP_MGT_TO_RES_OPS_TBL(*disp_mgt) = res_ops_tbl; + NBL_DISP_MGT_TO_CHAN_OPS_TBL(*disp_mgt) = chan_ops_tbl; + NBL_DISP_MGT_TO_DISP_OPS_TBL(*disp_mgt) = *disp_ops_tbl; + + ret = nbl_disp_setup_msg(*disp_mgt); + if (ret) + goto setup_msg_fail; + + if (param->caps.has_ctrl || param->caps.has_factory_ctrl) { + ret = nbl_disp_setup_ctrl_lvl(*disp_mgt, NBL_DISP_CTRL_LVL_MGT); + if (ret) + goto setup_msg_fail; + } + + if (param->caps.has_net || param->caps.has_factory_ctrl) { + ret = nbl_disp_setup_ctrl_lvl(*disp_mgt, NBL_DISP_CTRL_LVL_NET); + if (ret) + goto setup_msg_fail; + } + + ret = nbl_disp_setup_ctrl_lvl(*disp_mgt, NBL_DISP_CTRL_LVL_ALWAYS); + if (ret) + goto setup_msg_fail; + + return 0; + +setup_msg_fail: + nbl_disp_remove_ops(dev, disp_ops_tbl); +setup_ops_fail: + nbl_disp_remove_disp_mgt(common, disp_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_disp_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter 
*)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_dispatch_mgt **disp_mgt; + struct nbl_dispatch_ops_tbl **disp_ops_tbl; + + if (!adapter) + return; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + disp_mgt = (struct nbl_dispatch_mgt **)&NBL_ADAPTER_TO_DISP_MGT(adapter); + disp_ops_tbl = &NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + + nbl_disp_remove_ops(dev, disp_ops_tbl); + + nbl_disp_remove_disp_mgt(common, disp_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h new file mode 100644 index 000000000000..a37a106d603a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DISPATCH_H_ +#define _NBL_DISPATCH_H_ + +#include "nbl_core.h" + +#define NBL_DISP_MGT_TO_COMMON(disp_mgt) ((disp_mgt)->common) +#define NBL_DISP_MGT_TO_DEV(disp_mgt) NBL_COMMON_TO_DEV(NBL_DISP_MGT_TO_COMMON(disp_mgt)) + +#define NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt) ((disp_mgt)->res_ops_tbl) +#define NBL_DISP_MGT_TO_RES_OPS(disp_mgt) (NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt)->ops) +#define NBL_DISP_MGT_TO_RES_PRIV(disp_mgt) (NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt)->priv) +#define NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt) ((disp_mgt)->chan_ops_tbl) +#define NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt) (NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt)->ops) +#define NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt) (NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt)->priv) +#define NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt) ((disp_mgt)->disp_ops_tbl) +#define NBL_DISP_MGT_TO_DISP_OPS(disp_mgt) (NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt)->ops) +#define NBL_DISP_MGT_TO_DISP_PRIV(disp_mgt) (NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt)->priv) + +#define NBL_OPS_CALL_LOCK(disp_mgt, func, ...) \ +({ \ + typeof(disp_mgt) _disp_mgt = (disp_mgt); \ + typeof(func) _func = (func); \ + u64 ret = 0; \ + \ + if (_disp_mgt->ops_lock_required) \ + mutex_lock(&_disp_mgt->ops_mutex_lock); \ + \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \ + (!_func) ? 0 : _func(__VA_ARGS__), \ + ret = __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \ + 0, \ + (!_func) ? 0 : _func(__VA_ARGS__) \ + ) \ + ); \ + \ + if (_disp_mgt->ops_lock_required) \ + mutex_unlock(&_disp_mgt->ops_mutex_lock); \ + \ + (typeof(_func(__VA_ARGS__))) ret; \ +}) + +#define NBL_OPS_CALL_SPIN_LOCK(disp_mgt, func, ...) \ +({ \ + typeof(disp_mgt) _disp_mgt = (disp_mgt); \ + typeof(func) _func = (func); \ + u64 ret = 0; \ + \ + if (_disp_mgt->ops_lock_required) \ + spin_lock(&_disp_mgt->ops_spin_lock); \ + \ + __builtin_choose_expr( \ + /* Check if the func has void return value */ \ + __builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \ + (!_func) ? 0 : _func(__VA_ARGS__), \ + ret = __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \ + 0, \ + (!_func) ? 
0 : _func(__VA_ARGS__) \ + ) \ + ); \ + \ + if (_disp_mgt->ops_lock_required) \ + spin_unlock(&_disp_mgt->ops_spin_lock); \ + \ + (typeof(_func(__VA_ARGS__))) ret; \ +}) + +struct nbl_dispatch_mgt { + struct nbl_common_info *common; + struct nbl_resource_ops_tbl *res_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_dispatch_ops_tbl *disp_ops_tbl; + DECLARE_BITMAP(ctrl_lvl, NBL_DISP_CTRL_LVL_MAX); + /* use for the caller not in interrupt */ + struct mutex ops_mutex_lock; + /* use for the caller is in interrupt or other can't sleep thread */ + spinlock_t ops_spin_lock; + bool ops_lock_required; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c new file mode 100644 index 000000000000..56b987fc028a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c @@ -0,0 +1,2028 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_ethtool.h" + +enum NBL_STATS_TYPE { + NBL_NETDEV_STATS, + NBL_ETH_STATS, + NBL_STATS, + NBL_PRIV_STATS, + NBL_STATS_TYPE_MAX +}; + +struct nbl_ethtool_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +static const char nbl_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "EEPROM test (offline)", + "Interrupt test (offline)", + "Loopback test (offline)", + "Link test (on/offline)", +}; + +enum nbl_ethtool_test_id { + NBL_ETH_TEST_REG = 0, + NBL_ETH_TEST_EEPROM, + NBL_ETH_TEST_INTR, + NBL_ETH_TEST_LOOP, + NBL_ETH_TEST_LINK, + NBL_ETH_TEST_MAX +}; + +#define NBL_TEST_LEN (sizeof(nbl_gstrings_test) / ETH_GSTRING_LEN) + +#define NBL_NETDEV_STAT(_name, stat_m, stat_n) { \ + .stat_string = _name, \ + .type = NBL_NETDEV_STATS, \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, stat_m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, stat_n) \ +} + +#define NBL_STAT(_name, stat_m, stat_n) { \ + .stat_string = _name, \ + .type = NBL_STATS, \ + .sizeof_stat = sizeof_field(struct nbl_stats, stat_m), \ + .stat_offset = offsetof(struct nbl_stats, stat_n) \ +} + +#define NBL_PRIV_STAT(_name, stat_m, stat_n) { \ + .stat_string = _name, \ + .type = NBL_PRIV_STATS, \ + .sizeof_stat = sizeof_field(struct nbl_priv_stats, stat_m), \ + .stat_offset = offsetof(struct nbl_priv_stats, stat_n) \ +} + +static const struct nbl_ethtool_stats nbl_gstrings_stats[] = { + NBL_NETDEV_STAT("rx_packets", rx_packets, rx_packets), + NBL_NETDEV_STAT("tx_packets", tx_packets, tx_packets), + NBL_NETDEV_STAT("rx_bytes", rx_bytes, rx_bytes), + NBL_NETDEV_STAT("tx_bytes", tx_bytes, tx_bytes), + NBL_STAT("tx_multicast", tx_multicast_packets, tx_multicast_packets), + NBL_STAT("tx_unicast", tx_unicast_packets, tx_unicast_packets), + NBL_STAT("rx_multicast", rx_multicast_packets, rx_multicast_packets), + NBL_STAT("rx_unicast", rx_unicast_packets, rx_unicast_packets), + NBL_NETDEV_STAT("rx_errors", rx_errors, rx_errors), + NBL_NETDEV_STAT("tx_errors", tx_errors, tx_errors), + NBL_NETDEV_STAT("rx_dropped", rx_dropped, rx_dropped), + NBL_NETDEV_STAT("tx_dropped", tx_dropped, tx_dropped), + NBL_NETDEV_STAT("eth_multicast", multicast, multicast), + NBL_NETDEV_STAT("collisions", collisions, collisions), + NBL_NETDEV_STAT("rx_over_errors", rx_over_errors, rx_over_errors), + NBL_NETDEV_STAT("rx_crc_errors", rx_crc_errors, rx_crc_errors), + NBL_NETDEV_STAT("rx_frame_errors", rx_frame_errors, rx_frame_errors), + 
NBL_NETDEV_STAT("rx_fifo_errors", rx_fifo_errors, rx_fifo_errors), + NBL_NETDEV_STAT("rx_missed_errors", rx_missed_errors, rx_missed_errors), + NBL_NETDEV_STAT("tx_aborted_errors", tx_aborted_errors, tx_aborted_errors), + NBL_NETDEV_STAT("tx_carrier_errors", tx_carrier_errors, tx_carrier_errors), + NBL_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors, tx_fifo_errors), + NBL_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors, tx_heartbeat_errors), + + NBL_STAT("tso_packets", tso_packets, tso_packets), + NBL_STAT("tso_bytes", tso_bytes, tso_bytes), + NBL_STAT("tx_csum_packets", tx_csum_packets, tx_csum_packets), + NBL_STAT("rx_csum_packets", rx_csum_packets, rx_csum_packets), + NBL_STAT("rx_csum_errors", rx_csum_errors, rx_csum_errors), + NBL_STAT("tx_busy", tx_busy, tx_busy), + NBL_STAT("tx_dma_busy", tx_dma_busy, tx_dma_busy), + NBL_STAT("tx_skb_free", tx_skb_free, tx_skb_free), + NBL_STAT("tx_desc_addr_err_cnt", tx_desc_addr_err_cnt, tx_desc_addr_err_cnt), + NBL_STAT("tx_desc_len_err_cnt", tx_desc_len_err_cnt, tx_desc_len_err_cnt), + NBL_STAT("rx_desc_addr_err_cnt", rx_desc_addr_err_cnt, rx_desc_addr_err_cnt), + NBL_STAT("rx_alloc_buf_err_cnt", rx_alloc_buf_err_cnt, rx_alloc_buf_err_cnt), + NBL_STAT("rx_cache_reuse", rx_cache_reuse, rx_cache_reuse), + NBL_STAT("rx_cache_full", rx_cache_full, rx_cache_full), + NBL_STAT("rx_cache_empty", rx_cache_empty, rx_cache_empty), + NBL_STAT("rx_cache_busy", rx_cache_busy, rx_cache_busy), + NBL_STAT("rx_cache_waive", rx_cache_waive, rx_cache_waive), + + NBL_PRIV_STAT("total_dvn_pkt_drop_cnt", total_dvn_pkt_drop_cnt, total_dvn_pkt_drop_cnt), + NBL_PRIV_STAT("total_uvn_stat_pkt_drop", total_uvn_stat_pkt_drop, total_uvn_stat_pkt_drop), +}; + +#define NBL_GLOBAL_STATS_LEN ARRAY_SIZE(nbl_gstrings_stats) + +struct nbl_priv_flags_info { + u8 supported_by_capability; + u8 supported_modify; + enum nbl_fix_cap_type capability_type; + char flag_name[ETH_GSTRING_LEN]; +}; + +static const struct nbl_priv_flags_info nbl_gstrings_priv_flags[NBL_ADAPTER_FLAGS_MAX] = { + {1, 0, NBL_P4_CAP, "P4-default"}, + {0, 1, 0, "link-down-on-close"}, + {0, 0, 0, "mini-driver"}, +}; + +#define NBL_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(nbl_gstrings_priv_flags) + +static void nbl_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_netdev_priv *priv; + struct nbl_driver_info driver_info; + char firmware_version[ETHTOOL_FWVERS_LEN] = {' '}; + + memset(&driver_info, 0, sizeof(driver_info)); + + priv = netdev_priv(netdev); + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + disp_ops->get_firmware_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + firmware_version, ETHTOOL_FWVERS_LEN); + if (disp_ops->get_driver_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &driver_info)) + strscpy(drvinfo->version, driver_info.driver_version, sizeof(drvinfo->version)); + else + strscpy(drvinfo->version, NBL_DRIVER_VERSION, sizeof(drvinfo->version)); + strscpy(drvinfo->fw_version, firmware_version, sizeof(drvinfo->fw_version)); + strscpy(drvinfo->driver, NBL_DRIVER_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + + drvinfo->regdump_len = 0; +} + +static void nbl_stats_fill_strings(struct net_device *netdev, u8 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = 
NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + char *p = (char *)data; + unsigned int i; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + for (i = 0; i < NBL_GLOBAL_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_gstrings_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < vsi_info->active_ring_num; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_descs", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_dvn_pkt_drop_cnt", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_tx_timeout_cnt", i); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < vsi_info->active_ring_num; i++) { + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_descs", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_uvn_stat_pkt_drop", i); + p += ETH_GSTRING_LEN; + } + if (!common->is_vf) + disp_ops->fill_private_stat_strings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), p); +} + +static void nbl_priv_flags_fill_strings(struct net_device *netdev, u8 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_gstrings_priv_flags[i].flag_name); + p += ETH_GSTRING_LEN; + } +} + +static void nbl_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, nbl_gstrings_test, NBL_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + nbl_stats_fill_strings(netdev, data); + break; + case ETH_SS_PRIV_FLAGS: + nbl_priv_flags_fill_strings(netdev, data); + break; + default: + break; + } +} + +static int nbl_sset_fill_count(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + u32 total_queues, private_len = 0, extra_per_queue_entry = 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + total_queues = vsi_info->active_ring_num * 2; + if (!common->is_vf) + disp_ops->get_private_stat_len(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &private_len); + + /* For dvn drop and tx_timeout */ + extra_per_queue_entry = total_queues + vsi_info->active_ring_num; + + return NBL_GLOBAL_STATS_LEN + total_queues * + (sizeof(struct nbl_queue_stats) / sizeof(u64)) + + extra_per_queue_entry + 
private_len; +} + +static int nbl_sset_fill_priv_flags_count(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + unsigned int i; + int count = 0; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + count++; + } + + return count; +} + +static int nbl_get_sset_count(struct net_device *netdev, int sset) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + switch (sset) { + case ETH_SS_TEST: + if (NBL_COMMON_TO_VF_CAP(common)) + return -EOPNOTSUPP; + else + return NBL_TEST_LEN; + case ETH_SS_STATS: + return nbl_sset_fill_count(netdev); + case ETH_SS_PRIV_FLAGS: + if (NBL_COMMON_TO_VF_CAP(common)) + return -EOPNOTSUPP; + else + return nbl_sset_fill_priv_flags_count(netdev); + default: + return -EOPNOTSUPP; + } +} + +void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, bool ethtool) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + struct net_device *netdev; + struct nbl_netdev_priv *net_priv; + struct nbl_serv_ring_vsi_info *vsi_info; + u64 last_tx_packets; + u64 last_rx_packets; + u64 last_get_stats_jiffies, time_diff; + u64 tx_packets, rx_packets; + u64 tx_rates, rx_rates, pkt_rates; + u16 local_vector_id, vector_num; + u16 intr_suppress_level; + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + netdev = net_resource_mgt->netdev; + net_priv = netdev_priv(netdev); + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + last_tx_packets = net_resource_mgt->stats.tx_packets; + last_rx_packets = net_resource_mgt->stats.rx_packets; + last_get_stats_jiffies = net_resource_mgt->get_stats_jiffies; + disp_ops->get_net_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &net_resource_mgt->stats); + /* ethtool -S don't adaptive interrupt suppression param */ + if (!vsi_info->itr_dynamic || ethtool) + return; + + tx_packets = net_resource_mgt->stats.tx_packets; + rx_packets = net_resource_mgt->stats.rx_packets; + time_diff = jiffies - last_get_stats_jiffies; + + net_resource_mgt->get_stats_jiffies = jiffies; + tx_rates = (tx_packets - last_tx_packets) / time_diff * HZ; + rx_rates = (rx_packets - last_rx_packets) / time_diff * HZ; + pkt_rates = max_t(u64, tx_rates, rx_rates); + + intr_suppress_level = + disp_ops->get_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), pkt_rates, + ring_mgt->vectors->intr_suppress_level); + if (intr_suppress_level != ring_mgt->vectors->intr_suppress_level) { + local_vector_id = ring_mgt->vectors[vsi_info->ring_offset].local_vector_id; + vector_num = vsi_info->ring_num; + disp_ops->set_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_vector_id, vector_num, + intr_suppress_level); + ring_mgt->vectors->intr_suppress_level = intr_suppress_level; + } +} + +void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct net_device *netdev; + struct 
nbl_netdev_priv *net_priv; + struct nbl_adapter *adapter; + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + netdev = net_resource_mgt->netdev; + net_priv = netdev_priv(netdev); + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + + if (!test_bit(NBL_RUNNING, adapter->state) || + test_bit(NBL_RESETTING, adapter->state)) + return; + + nbl_serv_adjust_interrpt_param(serv_mgt, ethtool); + netdev->stats.tx_packets = net_resource_mgt->stats.tx_packets; + netdev->stats.tx_bytes = net_resource_mgt->stats.tx_bytes; + + netdev->stats.rx_packets = net_resource_mgt->stats.rx_packets; + netdev->stats.rx_bytes = net_resource_mgt->stats.rx_bytes; + + /* net_device_stats */ + netdev->stats.rx_errors = 0; + netdev->stats.tx_errors = 0; + netdev->stats.rx_dropped = 0; + netdev->stats.tx_dropped = 0; + netdev->stats.multicast = 0; + netdev->stats.rx_length_errors = 0; +} + +static void +nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct rtnl_link_stats64 temp_stats; + struct rtnl_link_stats64 *net_stats; + struct nbl_stats *nbl_stats; + struct nbl_priv_stats *nbl_priv_stats; + struct nbl_queue_stats queue_stats = { 0 }; + struct nbl_queue_err_stats queue_err_stats = { 0 }; + struct nbl_serv_ring_vsi_info *vsi_info; + u32 private_len = 0; + char *p = NULL; + int i, j, k; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + nbl_serv_update_stats(serv_mgt, true); + net_stats = dev_get_stats(netdev, &temp_stats); + nbl_stats = (struct nbl_stats *)((char *)net_resource_mgt + + offsetof(struct nbl_serv_net_resource_mgt, stats)); + + nbl_priv_stats = (struct nbl_priv_stats *)((char *)net_resource_mgt + + offsetof(struct nbl_serv_net_resource_mgt, priv_stats)); + + i = NBL_GLOBAL_STATS_LEN; + nbl_priv_stats->total_dvn_pkt_drop_cnt = 0; + nbl_priv_stats->total_uvn_stat_pkt_drop = 0; + for (j = 0; j < vsi_info->active_ring_num; j++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, &queue_stats, true); + disp_ops->get_queue_err_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, &queue_err_stats, true); + data[i] = queue_stats.packets; + data[i + 1] = queue_stats.bytes; + data[i + 2] = queue_stats.descs; + data[i + 3] = queue_err_stats.dvn_pkt_drop_cnt; + data[i + 4] = ring_mgt->tx_rings[vsi_info->ring_offset + j].tx_timeout_count; + nbl_priv_stats->total_dvn_pkt_drop_cnt += queue_err_stats.dvn_pkt_drop_cnt; + i += 5; + } + + for (j = 0; j < vsi_info->active_ring_num; j++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, &queue_stats, false); + disp_ops->get_queue_err_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, &queue_err_stats, false); + data[i] = queue_stats.packets; + data[i + 1] = queue_stats.bytes; + data[i + 2] = queue_stats.descs; + data[i + 3] = queue_err_stats.uvn_stat_pkt_drop; + nbl_priv_stats->total_uvn_stat_pkt_drop += queue_err_stats.uvn_stat_pkt_drop; + i += 4; + } + + for (k = 0; k < NBL_GLOBAL_STATS_LEN; k++) { + switch (nbl_gstrings_stats[k].type) { + case NBL_NETDEV_STATS: + p = (char *)net_stats + nbl_gstrings_stats[k].stat_offset; + break; + case NBL_STATS: + p = (char *)nbl_stats + nbl_gstrings_stats[k].stat_offset; + 
break; + case NBL_PRIV_STATS: + p = (char *)nbl_priv_stats + nbl_gstrings_stats[k].stat_offset; + break; + default: + data[k] = 0; + continue; + } + data[k] = (nbl_gstrings_stats[k].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + if (!common->is_vf) { + disp_ops->get_private_stat_len(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &private_len); + disp_ops->get_private_stat_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &data[i], + private_len * sizeof(u64)); + } +} + +static int nbl_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int err; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + err = disp_ops->get_module_eeprom(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), eeprom, data); + + return err; +} + +static int nbl_get_module_info(struct net_device *netdev, struct ethtool_modinfo *info) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int err; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + err = disp_ops->get_module_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), info); + + if (err) + err = -EIO; + + return err; +} + +int nbl_get_eeprom_length(struct net_device *netdev) +{ + return NBL_EEPROM_LENGTH; +} + +int nbl_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EINVAL; +} + +static void nbl_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + channels->max_combined = vsi_info->ring_num; + channels->combined_count = vsi_info->active_ring_num; + channels->max_rx = 0; + channels->max_tx = 0; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->max_other = 0; +} + +static int nbl_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(netdev); + struct nbl_serv_ring_vsi_info *vsi_info; + u16 queue_pairs = channels->combined_count; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + /* We don't support separate rx/tx channels. + * We don't allow setting 'other' channels. + */ + if (channels->rx_count || channels->tx_count || channels->other_count) + return -EINVAL; + + if (queue_pairs > vsi_info->ring_num || queue_pairs == 0) + return -EINVAL; + + vsi_info->active_ring_num = queue_pairs; + + netif_set_real_num_tx_queues(netdev, queue_pairs); + netif_set_real_num_rx_queues(netdev, queue_pairs); + + disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), queue_pairs); + + return 0; +} + +static u32 nbl_get_link(struct net_device *netdev) +{ + return netif_carrier_ok(netdev) ? 
1 : 0;
+}
+
+static void nbl_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_map)
+{
+	if (modes & BIT(NBL_PORT_CAP_AUTONEG))
+		__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, ethtool_modes_map);
+
+	if (modes & BIT(NBL_PORT_CAP_FEC_NONE))
+		__set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_FEC_RS))
+		__set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_FEC_BASER))
+		__set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, ethtool_modes_map);
+
+	if ((modes & BIT(NBL_PORT_CAP_RX_PAUSE)) && (modes & BIT(NBL_PORT_CAP_TX_PAUSE))) {
+		__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, ethtool_modes_map);
+	} else if ((modes & BIT(NBL_PORT_CAP_RX_PAUSE)) && !(modes & BIT(NBL_PORT_CAP_TX_PAUSE))) {
+		__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, ethtool_modes_map);
+		__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, ethtool_modes_map);
+	} else if (!(modes & BIT(NBL_PORT_CAP_RX_PAUSE)) && (modes & BIT(NBL_PORT_CAP_TX_PAUSE))) {
+		__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, ethtool_modes_map);
+	}
+
+	if (modes & BIT(NBL_PORT_CAP_1000BASE_T)) {
+		__set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ethtool_modes_map);
+		__set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ethtool_modes_map);
+	}
+	if (modes & BIT(NBL_PORT_CAP_1000BASE_X))
+		__set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_10GBASE_T))
+		__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_10GBASE_KR))
+		__set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_10GBASE_SR))
+		__set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_25GBASE_KR))
+		__set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_25GBASE_SR))
+		__set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_25GBASE_CR))
+		__set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_KR2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_SR2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_CR2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50G_AUI2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_KR_PAM4))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_SR_PAM4))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50G_AUI_PAM4))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_CR_PAM4))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_100GBASE_KR4))
+		__set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_100GBASE_SR4))
+		__set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_100GBASE_CR4))
+		__set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_100G_AUI4))
+		__set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_100G_CAUI4))
__set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100G_AUI2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, ethtool_modes_map); +} + +static int nbl_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_port_state port_state = {0}; + u32 advertising_speed = 0; + int ret = 0; + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + } else { + cmd->base.autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (port_state.link_state) { + cmd->base.speed = port_state.link_speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + advertising_speed = net_resource_mgt->configured_speed ? 
+ net_resource_mgt->configured_speed : cmd->base.speed; + + switch (port_state.port_type) { + case NBL_PORT_TYPE_UNKNOWN: + cmd->base.port = PORT_OTHER; + break; + case NBL_PORT_TYPE_FIBRE: + __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, cmd->link_modes.advertising); + cmd->base.port = PORT_FIBRE; + break; + case NBL_PORT_TYPE_COPPER: + __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, cmd->link_modes.advertising); + cmd->base.port = PORT_DA; + break; + default: + cmd->base.port = PORT_OTHER; + } + } + + if (!cmd->base.autoneg) { + port_state.port_advertising &= ~NBL_PORT_CAP_SPEED_MASK; + switch (advertising_speed) { + case SPEED_1000: + port_state.port_advertising |= NBL_PORT_CAP_SPEED_1G_MASK; + break; + case SPEED_10000: + port_state.port_advertising |= NBL_PORT_CAP_SPEED_10G_MASK; + break; + case SPEED_25000: + port_state.port_advertising |= NBL_PORT_CAP_SPEED_25G_MASK; + break; + case SPEED_50000: + port_state.port_advertising |= NBL_PORT_CAP_SPEED_50G_MASK; + break; + case SPEED_100000: + port_state.port_advertising |= NBL_PORT_CAP_SPEED_100G_MASK; + break; + default: + break; + } + } + + nbl_link_modes_to_ethtool(port_state.port_caps, cmd->link_modes.supported); + nbl_link_modes_to_ethtool(port_state.port_advertising, cmd->link_modes.advertising); + nbl_link_modes_to_ethtool(port_state.port_lp_advertising, cmd->link_modes.lp_advertising); + + __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, cmd->link_modes.supported); + __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, cmd->link_modes.supported); + return 0; +} + +static u32 nbl_conver_portrate_to_speed(u8 port_rate) +{ + switch (port_rate) { + case NBL_PORT_MAX_RATE_1G: + return SPEED_1000; + case NBL_PORT_MAX_RATE_10G: + return SPEED_10000; + case NBL_PORT_MAX_RATE_25G: + return SPEED_25000; + case NBL_PORT_MAX_RATE_100G: + case NBL_PORT_MAX_RATE_100G_PAM4: + return SPEED_100000; + default: + return SPEED_25000; + } + + /* default set 25G */ + return SPEED_25000; +} + +static u32 nbl_conver_fw_rate_to_speed(u8 fw_port_max_speed) +{ + switch (fw_port_max_speed) { + case NBL_FW_PORT_SPEED_10G: + return SPEED_10000; + case NBL_FW_PORT_SPEED_25G: + return SPEED_25000; + case NBL_FW_PORT_SPEED_50G: + return SPEED_50000; + case NBL_FW_PORT_SPEED_100G: + return SPEED_100000; + default: + return SPEED_25000; + } + + /* default set 25G */ + return SPEED_25000; +} + +static int nbl_set_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_phy_state *phy_state; + struct nbl_phy_caps *phy_caps; + struct nbl_port_state port_state = {0}; + struct nbl_port_advertising port_advertising = {0}; + u32 autoneg = 0; + u32 speed, fw_speed, module_speed, max_speed; + u64 speed_advert = 0; + u8 active_fec = 0; + int ret = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + phy_state = &net_resource_mgt->phy_state; + phy_caps = &net_resource_mgt->phy_caps; + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not inplace\n"); + return -EINVAL; + } + + if (cmd->base.autoneg) { + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_AUTONEG))) { + netdev_err(netdev, "autoneg is not 
support\n"); + return -EOPNOTSUPP; + } + } + + if (cmd->base.duplex == DUPLEX_HALF) { + netdev_err(netdev, "half duplex is not support\n"); + return -EOPNOTSUPP; + } + + autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + speed = cmd->base.speed; + fw_speed = nbl_conver_fw_rate_to_speed(port_state.fw_port_max_speed); + module_speed = nbl_conver_portrate_to_speed(port_state.port_max_rate); + max_speed = fw_speed > module_speed ? module_speed : fw_speed; + if (speed == SPEED_UNKNOWN) + speed = max_speed; + + if (speed > max_speed) { + netdev_err(netdev, "speed %d is not support, exit\n", cmd->base.speed); + return -EINVAL; + } + + speed_advert = nbl_speed_to_link_mode(speed, cmd->base.autoneg); + speed_advert &= port_state.port_caps; + if (!speed_advert) { + netdev_err(netdev, "speed %d is not support, exit\n", cmd->base.speed); + return -EINVAL; + } + + if (cmd->base.autoneg) + speed = max_speed; + + if (cmd->base.autoneg) { + switch (net_resource_mgt->configured_fec) { + case ETHTOOL_FEC_OFF: + active_fec = NBL_PORT_FEC_OFF; + break; + case ETHTOOL_FEC_BASER: + active_fec = NBL_PORT_FEC_BASER; + break; + case ETHTOOL_FEC_RS: + active_fec = NBL_PORT_FEC_RS; + break; + default: + active_fec = NBL_PORT_FEC_AUTO; + } + } else { + /* when change speed, we should set appropriate fec mode */ + switch (speed) { + case SPEED_1000: + active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE; + net_resource_mgt->configured_fec = ETHTOOL_FEC_OFF; + break; + case SPEED_10000: + active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE; + net_resource_mgt->configured_fec = ETHTOOL_FEC_OFF; + break; + case SPEED_25000: + active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; + net_resource_mgt->configured_fec = ETHTOOL_FEC_RS; + break; + case SPEED_50000: + case SPEED_100000: + active_fec = NBL_ETH_100G_DEFAULT_FEC_MODE; + net_resource_mgt->configured_fec = ETHTOOL_FEC_RS; + break; + default: + active_fec = NBL_PORT_FEC_RS; + net_resource_mgt->configured_fec = ETHTOOL_FEC_RS; + } + } + + port_advertising.eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + port_advertising.speed_advert = speed_advert; + port_advertising.autoneg = cmd->base.autoneg; + port_advertising.active_fec = active_fec; + + /* update speed */ + ret = disp_ops->set_port_advertising(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &port_advertising); + if (ret) { + netdev_err(netdev, "set autoneg %d speed %d failed %d\n", + cmd->base.autoneg, cmd->base.speed, ret); + return -EIO; + } + + net_resource_mgt->configured_speed = speed; + + return 0; +} + +static void nbl_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dispatch_mgt *disp_mgt = NBL_ADAPTER_TO_DISP_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + u16 max_desc_num; + + max_desc_num = disp_ops->get_max_desc_num(disp_mgt); + ringparam->tx_max_pending = max_desc_num; + ringparam->rx_max_pending = max_desc_num; + ringparam->tx_pending = disp_ops->get_tx_desc_num(disp_mgt, 0); + ringparam->rx_pending = disp_ops->get_rx_desc_num(disp_mgt, 0); +} + +static int nbl_check_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + u16 max_desc_num, u16 min_desc_num) +{ + /* check if tx_pending is out of range or power of 2 */ + if (ringparam->tx_pending > max_desc_num || + ringparam->tx_pending < min_desc_num) { + 
netdev_err(netdev, "Tx descriptors requested: %d, out of range[%d-%d]\n", + ringparam->tx_pending, min_desc_num, max_desc_num); + return -EINVAL; + } + if (ringparam->tx_pending & (ringparam->tx_pending - 1)) { + netdev_err(netdev, "Tx descriptors requested: %d is not power of 2\n", + ringparam->tx_pending); + return -EINVAL; + } + + /* check if rx_pending is out of range or power of 2 */ + if (ringparam->rx_pending > max_desc_num || + ringparam->rx_pending < min_desc_num) { + netdev_err(netdev, "Rx descriptors requested: %d, out of range[%d-%d]\n", + ringparam->rx_pending, min_desc_num, max_desc_num); + return -EINVAL; + } + if (ringparam->rx_pending & (ringparam->rx_pending - 1)) { + netdev_err(netdev, "Rx descriptors requested: %d is not power of 2\n", + ringparam->rx_pending); + return -EINVAL; + } + + if (ringparam->rx_jumbo_pending || ringparam->rx_mini_pending) { + netdev_err(netdev, "rx_jumbo_pending or rx_mini_pending is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int nbl_pre_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dispatch_mgt *disp_mgt = NBL_ADAPTER_TO_DISP_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + int timeout = 50; + + if (ringparam->rx_pending == disp_ops->get_rx_desc_num(disp_mgt, 0) && + ringparam->tx_pending == disp_ops->get_tx_desc_num(disp_mgt, 0)) { + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); + return 0; + } + + while (test_and_set_bit(NBL_RESETTING, adapter->state)) { + timeout--; + if (!timeout) { + netdev_err(netdev, "Timeout while resetting in set ringparam\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* configure params later */ + return 1; +} + +static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dispatch_mgt *disp_mgt = NBL_ADAPTER_TO_DISP_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + u16 max_desc_num, min_desc_num; + u16 new_tx_count, new_rx_count; + int was_running; + int i; + int err; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + max_desc_num = disp_ops->get_max_desc_num(disp_mgt); + min_desc_num = disp_ops->get_min_desc_num(disp_mgt); + err = nbl_check_set_ringparam(netdev, ringparam, max_desc_num, min_desc_num); + if (err < 0) + return err; + + err = nbl_pre_set_ringparam(netdev, ringparam); + /* if either error occur or nothing to change, return */ + if (err <= 0) + return err; + + new_tx_count = ringparam->tx_pending; + new_rx_count = ringparam->rx_pending; + + netdev_info(netdev, "set tx_desc_num:%d, rx_desc_num:%d\n", new_tx_count, new_rx_count); + + was_running = netif_running(netdev); + + if (was_running) { + err = nbl_serv_netdev_stop(netdev); + if (err) { + netdev_err(netdev, "Netdev stop failed while setting ringparam\n"); + clear_bit(NBL_RESETTING, adapter->state); + return err; + } + } + + ring_mgt->tx_desc_num = new_tx_count; + ring_mgt->rx_desc_num = new_rx_count; + + for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) + 
disp_ops->set_tx_desc_num(disp_mgt, i, new_tx_count); + + for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) + disp_ops->set_rx_desc_num(disp_mgt, i, new_rx_count); + + if (was_running) { + err = nbl_serv_netdev_open(netdev); + if (err) { + netdev_err(netdev, "Netdev open failed after setting ringparam\n"); + clear_bit(NBL_RESETTING, adapter->state); + return err; + } + } + + clear_bit(NBL_RESETTING, adapter->state); + + return 0; +} + +static int nbl_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + int ret = -EOPNOTSUPP; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = vsi_info->active_ring_num; + ret = 0; + break; + default: + break; + } + + return ret; +} + +static u32 nbl_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 rxfh_indir_size = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + + return rxfh_indir_size; +} + +static u32 nbl_get_rxfh_key_size(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + u32 rxfh_rss_key_size = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_rxfh_rss_key_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &rxfh_rss_key_size); + + return rxfh_rss_key_size; +} + +static int nbl_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 rxfh_key_size = 0; + u32 rxfh_indir_size = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + disp_ops->get_rxfh_rss_key_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &rxfh_key_size); + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + + if (indir) + disp_ops->get_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), indir, rxfh_indir_size); + if (key) + disp_ops->get_rxfh_rss_key(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), key, rxfh_key_size); + if (hfunc) + disp_ops->get_rxfh_rss_alg_sel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + hfunc, NBL_COMMON_TO_ETH_ID(serv_mgt->common)); + + return 0; +} + +static u32 nbl_get_msglevel(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + u32 debug_lvl = common->debug_lvl; + + if (debug_lvl) + netdev_dbg(netdev, "nbl debug_lvl: 0x%08X\n", debug_lvl); + + return common->msg_enable; +} + +static void nbl_set_msglevel(struct net_device *netdev, u32 msglevel) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + + if (NBL_DEBUG_USER & msglevel) + common->debug_lvl = msglevel; + else + common->msg_enable = msglevel; +} + +static int nbl_get_regs_len(struct 
net_device *netdev)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_reg_dump_len(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static void nbl_get_ethtool_dump_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_reg_dump(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), p, regs->len);
+}
+
+static int nbl_get_per_queue_coalesce(struct net_device *netdev,
+				      u32 q_num, struct ethtool_coalesce *ec)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	u16 local_vector_id, configured_usecs;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+
+	if (q_num >= vsi_info->ring_offset + vsi_info->ring_num) {
+		netdev_err(netdev, "q_num %d is too large\n", q_num);
+		return -EINVAL;
+	}
+
+	local_vector_id = ring_mgt->vectors[q_num + vsi_info->ring_offset].local_vector_id;
+	configured_usecs = ring_mgt->vectors[q_num + vsi_info->ring_offset].intr_rate_usecs;
+	disp_ops->get_coalesce(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), local_vector_id, ec);
+
+	if (vsi_info->itr_dynamic) {
+		ec->use_adaptive_tx_coalesce = 1;
+		ec->use_adaptive_rx_coalesce = 1;
+	} else {
+		if (configured_usecs) {
+			ec->tx_coalesce_usecs = configured_usecs;
+			ec->rx_coalesce_usecs = configured_usecs;
+		}
+	}
+	return 0;
+}
+
+static int __nbl_set_per_queue_coalesce(struct net_device *netdev,
+					u32 q_num, struct ethtool_coalesce *ec)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	struct ethtool_coalesce ec_local = {0};
+	u16 local_vector_id, pnum, rate;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+
+	if (q_num >= vsi_info->ring_offset + vsi_info->ring_num) {
+		netdev_err(netdev, "q_num %d is too large\n", q_num);
+		return -EINVAL;
+	}
+
+	if (ec->rx_max_coalesced_frames > U16_MAX) {
+		netdev_err(netdev, "rx_frames %d out of range: [0 - %d]\n",
+			   ec->rx_max_coalesced_frames, U16_MAX);
+		return -EINVAL;
+	}
+
+	if (ec->rx_coalesce_usecs > U16_MAX) {
+		netdev_err(netdev, "rx_usecs %d out of range: [0 - %d]\n",
+			   ec->rx_coalesce_usecs, U16_MAX);
+		return -EINVAL;
+	}
+
+	if (ec->tx_max_coalesced_frames != ec->rx_max_coalesced_frames ||
+	    ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) {
+		netdev_err(netdev, "tx and rx share the same interrupt, so rx params must equal tx params\n");
+		return -EINVAL;
+	}
+
+	if (ec->use_adaptive_tx_coalesce != ec->use_adaptive_rx_coalesce) {
+		netdev_err(netdev, "rx and tx adaptive coalescing must be configured to the same value\n");
+		return -EINVAL;
+	}
+
+	if (vsi_info->itr_dynamic) {
+		nbl_get_per_queue_coalesce(netdev, q_num, &ec_local);
+		if (ec_local.rx_coalesce_usecs != ec->rx_coalesce_usecs ||
+		    ec_local.rx_max_coalesced_frames != ec->rx_max_coalesced_frames) {
+			netdev_err(netdev,
+				   "interrupt throttling cannot be changed while adaptive coalescing is enabled\n");
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	local_vector_id = ring_mgt->vectors[q_num + vsi_info->ring_offset].local_vector_id;
+	pnum = (u16)ec->tx_max_coalesced_frames;
+	rate = (u16)ec->tx_coalesce_usecs;
+	ring_mgt->vectors[q_num + vsi_info->ring_offset].intr_rate_usecs = rate;
+
+	disp_ops->set_coalesce(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), local_vector_id,
+			       1, pnum, rate);
+	return 0;
+}
+
+static int nbl_set_per_queue_coalesce(struct net_device *netdev,
+				      u32 q_num, struct ethtool_coalesce *ec)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+
+	if (vsi_info->itr_dynamic != (!!ec->use_adaptive_rx_coalesce)) {
+		netdev_err(netdev, "modifying adaptive coalescing per queue is not supported\n");
+		return -EINVAL;
+	}
+
+	return __nbl_set_per_queue_coalesce(netdev, q_num, ec);
+}
+
+static int nbl_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
+			    struct kernel_ethtool_coalesce *kernel_ec,
+			    struct netlink_ext_ack *extack)
+{
+	u32 q_num = 0;
+
+	return nbl_get_per_queue_coalesce(netdev, q_num, ec);
+}
+
+static int nbl_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
+			    struct kernel_ethtool_coalesce *kernel_ec,
+			    struct netlink_ext_ack *extack)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	struct ethtool_coalesce ec_local = {0};
+	u16 local_vector_id;
+	u16 intr_suppress_level;
+	u16 q_num;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+
+	if (ec->rx_max_coalesced_frames > U16_MAX) {
+		netdev_err(netdev, "rx_frames %d out of range: [0 - %d]\n",
+			   ec->rx_max_coalesced_frames, U16_MAX);
+		return -EINVAL;
+	}
+
+	if (ec->rx_coalesce_usecs > U16_MAX) {
+		netdev_err(netdev, "rx_usecs %d out of range: [0 - %d]\n",
+			   ec->rx_coalesce_usecs, U16_MAX);
+		return -EINVAL;
+	}
+
+	if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames) {
+		netdev_err(netdev, "rx_frames and tx_frames must be configured to the same value\n");
+		return -EINVAL;
+	}
+
+	if (ec->rx_coalesce_usecs != ec->tx_coalesce_usecs) {
+		netdev_err(netdev, "rx_usecs and tx_usecs must be configured to the same value\n");
+		return -EINVAL;
+	}
+
+	if (ec->use_adaptive_tx_coalesce != ec->use_adaptive_rx_coalesce) {
+		netdev_err(netdev, "rx and tx adaptive coalescing must be configured to the same value\n");
+		return -EINVAL;
+	}
+
+	if (vsi_info->itr_dynamic && ec->use_adaptive_rx_coalesce) {
+		nbl_get_per_queue_coalesce(netdev, 0, &ec_local);
+		if (ec_local.rx_coalesce_usecs != ec->rx_coalesce_usecs ||
+		    ec_local.rx_max_coalesced_frames != ec->rx_max_coalesced_frames) {
+			netdev_err(netdev,
+				   "interrupt throttling cannot be changed while adaptive coalescing is enabled\n");
+			return -EINVAL;
+		}
+	}
+
+	if (ec->use_adaptive_rx_coalesce) {
+		vsi_info->itr_dynamic = true;
+		local_vector_id = ring_mgt->vectors[vsi_info->ring_offset].local_vector_id;
+		intr_suppress_level = ring_mgt->vectors->intr_suppress_level;
+		disp_ops->set_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						  local_vector_id, vsi_info->ring_num,
+						  intr_suppress_level);
+	} else {
+		vsi_info->itr_dynamic = false;
+		for (q_num = 0; q_num < vsi_info->ring_num; q_num++)
+			__nbl_set_per_queue_coalesce(netdev,
+						     vsi_info->ring_offset + q_num,
+						     ec);
+	}
+
+	return 0;
+}
+
+static u64 nbl_link_test(struct net_device *netdev)
+{
+	bool link_up;
+
+	/* TODO: will be obtained from emp in a later version */
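+	/* Until then, return 0 so the link test reports pass; nbl_self_test()
+	 * flags NBL_ETH_TEST_LINK as failed only when this value is non-zero.
+	 */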
link_up = 0; + + return link_up; +} + +static int nbl_loopback_setup_rings(struct nbl_adapter *adapter, struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + + return nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, 1, 0); +} + +static void nbl_loopback_free_rings(struct nbl_adapter *adapter, struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); +} + +static void nbl_loopback_create_skb(struct sk_buff *skb, u32 size) +{ + if (!skb) + return; + + memset(skb->data, NBL_SELF_TEST_PADDING_DATA_1, size); + size >>= 1; + memset(&skb->data[size], NBL_SELF_TEST_PADDING_DATA_2, size); + skb->data[size + NBL_SELF_TEST_POS_2] = NBL_SELF_TEST_BYTE_1; + skb->data[size + NBL_SELF_TEST_POS_3] = NBL_SELF_TEST_BYTE_2; +} + +static s32 nbl_loopback_check_skb(struct sk_buff *skb, u32 size) +{ + size >>= 1; + + if (skb->data[NBL_SELF_TEST_POS_1] != NBL_SELF_TEST_PADDING_DATA_1 || + skb->data[size + NBL_SELF_TEST_POS_2] != NBL_SELF_TEST_BYTE_1 || + skb->data[size + NBL_SELF_TEST_POS_3] != NBL_SELF_TEST_BYTE_2) + return -1; + + return 0; +} + +static s32 nbl_loopback_run_test(struct net_device *netdev) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_resource_pt_ops *pt_ops = NBL_ADAPTER_TO_RES_PT_OPS(adapter); + struct sk_buff *skb_tx[NBL_SELF_TEST_PKT_NUM] = {NULL}, *skb_rx; + u32 size = NBL_SELF_TEST_BUFF_SIZE; + u32 count; + u32 tx_count = 0; + s32 result = 0; + int i; + + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) { + skb_tx[i] = alloc_skb(size, GFP_KERNEL); + if (!skb_tx[i]) + goto alloc_skb_faied; + + nbl_loopback_create_skb(skb_tx[i], size); + skb_put(skb_tx[i], size); + skb_tx[i]->queue_mapping = 0; + } + + count = min_t(u16, serv_mgt->ring_mgt.tx_desc_num, NBL_SELF_TEST_PKT_NUM); + count = min_t(u16, serv_mgt->ring_mgt.rx_desc_num, count); + + for (i = 0; i < count; i++) { + skb_get(skb_tx[i]); + if (pt_ops->self_test_xmit(skb_tx[i], netdev) != NETDEV_TX_OK) + netdev_err(netdev, "Fail to tx lb skb %p", skb_tx[i]); + else + tx_count++; + } + + if (tx_count < count) { + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) + kfree_skb(skb_tx[i]); + result |= BIT(NBL_LB_ERR_TX_FAIL); + return result; + } + + /* Wait for rx packets loopback */ + msleep(1000); + + for (i = 0; i < tx_count; i++) { + skb_rx = NULL; + skb_rx = disp_ops->clean_rx_lb_test(NBL_ADAPTER_TO_DISP_MGT(adapter), 0); + if (!skb_rx) { + netdev_err(netdev, "Fail to rx lb skb, should rx %d but fail on %d", + tx_count, i); + break; + } + if (nbl_loopback_check_skb(skb_rx, size)) { + netdev_err(netdev, "Fail to check lb skb %d(%p)", i, skb_rx); + kfree(skb_rx); + break; + } + kfree(skb_rx); + } + + if (i != tx_count) + result |= BIT(NBL_LB_ERR_RX_FAIL); + + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) + kfree_skb(skb_tx[i]); + + return result; + +alloc_skb_faied: + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) { + if (skb_tx[i]) + kfree_skb(skb_tx[i]); + } + result |= BIT(NBL_LB_ERR_SKB_ALLOC); + return result; +} + +static u64 nbl_loopback_test(struct net_device *netdev) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct 
nbl_serv_ring_mgt *ring_mgt = &serv_mgt->ring_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + u8 origin_num_txq, origin_num_rxq, origin_active_q; + u64 result = 0; + + /* In loopback test, we only need one queue */ + origin_num_txq = ring_mgt->tx_ring_num; + origin_num_rxq = ring_mgt->rx_ring_num; + origin_active_q = vsi_info->active_ring_num; + ring_mgt->tx_ring_num = NBL_SELF_TEST_Q_NUM; + ring_mgt->rx_ring_num = NBL_SELF_TEST_Q_NUM; + + if (nbl_loopback_setup_rings(adapter, netdev)) { + netdev_err(netdev, "Fail to setup rings"); + result |= BIT(NBL_LB_ERR_RING_SETUP); + goto lb_setup_rings_failed; + } + + if (disp_ops->set_eth_loopback(NBL_ADAPTER_TO_DISP_MGT(adapter), NBL_ETH_LB_ON)) { + netdev_err(netdev, "Fail to setup lb on"); + result |= BIT(NBL_LB_ERR_LB_MODE_SETUP); + goto set_eth_lb_failed; + } + + result |= nbl_loopback_run_test(netdev); + + if (disp_ops->set_eth_loopback(NBL_ADAPTER_TO_DISP_MGT(adapter), NBL_ETH_LB_OFF)) { + netdev_err(netdev, "Fail to setup lb off"); + result |= BIT(NBL_LB_ERR_LB_MODE_SETUP); + goto set_eth_lb_failed; + } + +set_eth_lb_failed: + nbl_loopback_free_rings(adapter, netdev); +lb_setup_rings_failed: + ring_mgt->tx_ring_num = origin_num_txq; + ring_mgt->rx_ring_num = origin_num_rxq; + vsi_info->active_ring_num = origin_active_q; + + return result; +} + +static u32 nbl_mailbox_check_active_vf(struct nbl_adapter *adapter) +{ + struct nbl_dispatch_ops_tbl *disp_ops_tbl = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + + return disp_ops_tbl->ops->check_active_vf(NBL_ADAPTER_TO_DISP_MGT(adapter)); +} + +static void nbl_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + bool if_running = netif_running(netdev); + u32 active_vf; + s64 cur_time = 0; + int ret; + + cur_time = ktime_get_real_seconds(); + + /* test too frequently will cause to fail */ + if (cur_time - priv->last_st_time < NBL_SELF_TEST_TIME_GAP) { + /* pass by defalut */ + netdev_info(netdev, "Self test too fast, pass by default!"); + data[NBL_ETH_TEST_REG] = 0; + data[NBL_ETH_TEST_EEPROM] = 0; + data[NBL_ETH_TEST_INTR] = 0; + data[NBL_ETH_TEST_LOOP] = 0; + data[NBL_ETH_TEST_LINK] = 0; + return; + } + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + active_vf = nbl_mailbox_check_active_vf(adapter); + + if (active_vf) { + netdev_err(netdev, "Cannot perform offline test when VFs are active"); + data[NBL_ETH_TEST_REG] = 1; + data[NBL_ETH_TEST_EEPROM] = 1; + data[NBL_ETH_TEST_INTR] = 1; + data[NBL_ETH_TEST_LOOP] = 1; + data[NBL_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + /* If online, take if offline */ + if (if_running) { + ret = nbl_serv_netdev_stop(netdev); + if (ret) { + netdev_err(netdev, "Could not stop device %s, err %d\n", + pci_name(adapter->pdev), ret); + goto netdev_stop_failed; + } + } + + set_bit(NBL_TESTING, adapter->state); + + data[NBL_ETH_TEST_LINK] = nbl_link_test(netdev); + data[NBL_ETH_TEST_EEPROM] = 0; + data[NBL_ETH_TEST_INTR] = 0; + data[NBL_ETH_TEST_LOOP] = nbl_loopback_test(netdev); + data[NBL_ETH_TEST_REG] = 0; + + if (data[NBL_ETH_TEST_LINK] || + data[NBL_ETH_TEST_EEPROM] || + data[NBL_ETH_TEST_INTR] || + data[NBL_ETH_TEST_LOOP] || + data[NBL_ETH_TEST_REG]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(NBL_TESTING, adapter->state); + if (if_running) { + ret = 
nbl_serv_netdev_open(netdev); + if (ret) { + netdev_err(netdev, "Could not open device %s, err %d\n", + pci_name(adapter->pdev), ret); + } + } + } else { + /* Online test */ + data[NBL_ETH_TEST_LINK] = nbl_link_test(netdev); + + if (data[NBL_ETH_TEST_LINK]) + eth_test->flags |= ETH_TEST_FL_FAILED; + /* Only test offlined; pass by default */ + data[NBL_ETH_TEST_EEPROM] = 0; + data[NBL_ETH_TEST_INTR] = 0; + data[NBL_ETH_TEST_LOOP] = 0; + data[NBL_ETH_TEST_REG] = 0; + } + +netdev_stop_failed: + priv->last_st_time = ktime_get_real_seconds(); +} + +static u32 nbl_get_priv_flags(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u32 ret_flags = 0; + unsigned int i; + int count = 0; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + + if (test_bit(i, serv_mgt->flags)) + ret_flags |= BIT(count); + count++; + } + + netdev_dbg(netdev, "get priv flag: 0x%08x, mgt flags: 0x%08x.\n", + ret_flags, *(u32 *)serv_mgt->flags); + + return ret_flags; +} + +static int nbl_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + unsigned int i; + int count = 0; + u32 new_flags = 0; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + + if (!nbl_gstrings_priv_flags[i].supported_modify && + (!((priv_flags & BIT(count))) != !test_bit(i, serv_mgt->flags))) { + netdev_err(netdev, "set priv flag: 0x%08x, flag %s not support modify\n", + priv_flags, nbl_gstrings_priv_flags[i].flag_name); + return -EOPNOTSUPP; + } + + if (priv_flags & BIT(count)) + new_flags |= BIT(i); + count++; + } + *serv_mgt->flags = new_flags; + + netdev_dbg(netdev, "set priv flag: 0x%08x, mgt flags: 0x%08x.\n", + priv_flags, *(u32 *)serv_mgt->flags); + + return 0; +} + +static int nbl_set_pause_param(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_phy_state *phy_state; + struct nbl_phy_caps *phy_caps; + struct nbl_port_state port_state = {0}; + struct nbl_port_advertising port_advertising = {0}; + u32 autoneg = 0; + /* cannot set default 0, 0 means pause donot change */ + u8 active_fc = NBL_PORT_TXRX_PAUSE_OFF; + int ret = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + phy_state = &net_resource_mgt->phy_state; + phy_caps = &net_resource_mgt->phy_caps; + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not inplace\n"); + return -EINVAL; + } + + autoneg = 
(port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (param->autoneg == AUTONEG_ENABLE) { + netdev_info(netdev, "pause autoneg is not support\n"); + return -EOPNOTSUPP; + } + + /* check if the pause mode is changed */ + if (param->rx_pause == !!(port_state.active_fc & NBL_PORT_RX_PAUSE) && + param->tx_pause == !!(port_state.active_fc & NBL_PORT_TX_PAUSE)) { + netdev_info(netdev, "pause param is not changed\n"); + return 0; + } + + if (param->rx_pause) + active_fc |= NBL_PORT_RX_PAUSE; + + if (param->tx_pause) + active_fc |= NBL_PORT_TX_PAUSE; + + port_advertising.eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + port_advertising.active_fc = active_fc; + port_advertising.autoneg = autoneg; + + /* update pause mode */ + ret = disp_ops->set_port_advertising(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &port_advertising); + if (ret) { + netdev_err(netdev, "pause mode set failed %d\n", ret); + return ret; + } + + return 0; +} + +static void nbl_get_pause_param(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_port_state port_state = {0}; + int ret = 0; + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return; + } + + param->autoneg = AUTONEG_DISABLE; + param->rx_pause = !!(port_state.active_fc & NBL_PORT_RX_PAUSE); + param->tx_pause = !!(port_state.active_fc & NBL_PORT_TX_PAUSE); +} + +static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fec) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_port_state port_state = {0}; + struct nbl_port_advertising port_advertising = {0}; + u32 fec_mode = fec->fec; + u8 active_fec = 0; + u8 autoneg; + int ret = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not inplace\n"); + return -EINVAL; + } + + autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (fec_mode == ETHTOOL_FEC_OFF) + fec_mode = ETHTOOL_FEC_NONE; + + /* check if the fec mode is supported */ + if (fec_mode == ETHTOOL_FEC_NONE) { + active_fec = NBL_PORT_FEC_OFF; + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_NONE))) { + netdev_err(netdev, "unsupported fec mode off\n"); + return -EOPNOTSUPP; + } + } + if (fec_mode == ETHTOOL_FEC_RS) { + active_fec = NBL_PORT_FEC_RS; + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_RS))) { + netdev_err(netdev, "unsupported fec mode RS\n"); + return -EOPNOTSUPP; + } + } + if (fec_mode == ETHTOOL_FEC_BASER) { + active_fec = NBL_PORT_FEC_BASER; + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_BASER))) { + netdev_err(netdev, "unsupported fec mode BaseR\n"); + return -EOPNOTSUPP; + } + } + if (fec_mode == ETHTOOL_FEC_AUTO) { + active_fec = NBL_PORT_FEC_AUTO; + if (!autoneg) { + netdev_err(netdev, "unsupported fec mode auto\n"); + return -EOPNOTSUPP; + } + } + + if (fec_mode == net_resource_mgt->configured_fec) { + netdev_err(netdev, "fec mode is not changed\n"); + return 0; + } + + if (fec_mode == ETHTOOL_FEC_RS) { + if (port_state.link_speed == 10000) { + netdev_err(netdev, "speed 10G cannot set fec RS, only can set fec baseR\n"); + return -EINVAL; + } + } + + net_resource_mgt->configured_fec = fec_mode; + + port_advertising.eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + port_advertising.active_fec = active_fec; + port_advertising.autoneg = autoneg; + + /* update fec mode */ + ret = disp_ops->set_port_advertising(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &port_advertising); + if (ret) { + netdev_err(netdev, "fec mode set failed %d\n", ret); + return ret; + } + + return 0; +} + +static int nbl_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_port_state port_state = {0}; + u32 fec = 0; + u32 active_fec = 0; + u8 autoneg = 0; + int ret = 0; + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, " Optical module is not inplace\n"); + return -EINVAL; + } + + autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (port_state.active_fec == NBL_PORT_FEC_OFF) + active_fec = ETHTOOL_FEC_OFF; + if (port_state.active_fec == NBL_PORT_FEC_RS) + active_fec = ETHTOOL_FEC_RS; + if (port_state.active_fec == NBL_PORT_FEC_BASER) + active_fec = ETHTOOL_FEC_BASER; + + if (net_resource_mgt->configured_fec) + fec = net_resource_mgt->configured_fec; + else + fec = active_fec; + + fecparam->fec = fec; + fecparam->active_fec = active_fec; + + return 0; +} + +static int nbl_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + static u32 led_ctrl_reg; + enum nbl_led_reg_ctrl led_ctrl_op; + u8 eth_id; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + led_ctrl_op = NBL_LED_REG_ACTIVE; + break; + case ETHTOOL_ID_ON: + led_ctrl_op = NBL_LED_REG_ON; + break; + case ETHTOOL_ID_OFF: + led_ctrl_op = NBL_LED_REG_OFF; + break; + case ETHTOOL_ID_INACTIVE: + led_ctrl_op = NBL_LED_REG_INACTIVE; + break; + default: + return 0; + } + return disp_ops->ctrl_port_led(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, led_ctrl_op, &led_ctrl_reg); +} + +static int nbl_nway_reset(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_port_state port_state = {0}; + int ret; + u8 eth_id; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not inplace\n"); + return -EOPNOTSUPP; + } + + net_resource_mgt->configured_fec = 0; + net_resource_mgt->configured_speed = + nbl_conver_portrate_to_speed(port_state.port_max_rate); + + return disp_ops->nway_reset(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +/* NBL_SERV_ETHTOOL_OPS_TBL(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. 
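+ *
+ * For example (illustrative), with the local NBL_SERV_SET_ETHTOOL_OPS
+ * definition in nbl_serv_setup_ethtool_ops() below, the entry
+ * NBL_SERV_SET_ETHTOOL_OPS(get_link, nbl_get_link) expands to roughly
+ * serv_ops->NBL_NAME(get_link) = nbl_get_link; so adding or removing an
+ * ethtool op only requires a change to this table.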
+ */ +#define NBL_SERV_ETHTOOL_OPS_TBL \ +do { \ + NBL_SERV_SET_ETHTOOL_OPS(get_drvinfo, nbl_get_drvinfo); \ + NBL_SERV_SET_ETHTOOL_OPS(get_strings, nbl_get_strings); \ + NBL_SERV_SET_ETHTOOL_OPS(get_sset_count, nbl_get_sset_count); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ethtool_stats, nbl_get_ethtool_stats); \ + NBL_SERV_SET_ETHTOOL_OPS(get_module_eeprom, nbl_get_module_eeprom); \ + NBL_SERV_SET_ETHTOOL_OPS(get_module_info, nbl_get_module_info); \ + NBL_SERV_SET_ETHTOOL_OPS(get_eeprom_length, nbl_get_eeprom_length); \ + NBL_SERV_SET_ETHTOOL_OPS(get_eeprom, nbl_get_eeprom); \ + NBL_SERV_SET_ETHTOOL_OPS(get_channels, nbl_get_channels); \ + NBL_SERV_SET_ETHTOOL_OPS(set_channels, nbl_set_channels); \ + NBL_SERV_SET_ETHTOOL_OPS(get_link, nbl_get_link); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ksettings, nbl_get_ksettings); \ + NBL_SERV_SET_ETHTOOL_OPS(set_ksettings, nbl_set_ksettings); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ringparam, nbl_get_ringparam); \ + NBL_SERV_SET_ETHTOOL_OPS(set_ringparam, nbl_set_ringparam); \ + NBL_SERV_SET_ETHTOOL_OPS(get_coalesce, nbl_get_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(set_coalesce, nbl_set_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxnfc, nbl_get_rxnfc); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_indir_size, nbl_get_rxfh_indir_size); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_key_size, nbl_get_rxfh_key_size); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxfh, nbl_get_rxfh); \ + NBL_SERV_SET_ETHTOOL_OPS(get_msglevel, nbl_get_msglevel); \ + NBL_SERV_SET_ETHTOOL_OPS(set_msglevel, nbl_set_msglevel); \ + NBL_SERV_SET_ETHTOOL_OPS(get_regs_len, nbl_get_regs_len); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ethtool_dump_regs, nbl_get_ethtool_dump_regs); \ + NBL_SERV_SET_ETHTOOL_OPS(get_per_queue_coalesce, nbl_get_per_queue_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(set_per_queue_coalesce, nbl_set_per_queue_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(self_test, nbl_self_test); \ + NBL_SERV_SET_ETHTOOL_OPS(get_priv_flags, nbl_get_priv_flags); \ + NBL_SERV_SET_ETHTOOL_OPS(set_priv_flags, nbl_set_priv_flags); \ + NBL_SERV_SET_ETHTOOL_OPS(set_pause_param, nbl_set_pause_param); \ + NBL_SERV_SET_ETHTOOL_OPS(get_pause_param, nbl_get_pause_param); \ + NBL_SERV_SET_ETHTOOL_OPS(set_fec_param, nbl_set_fec_param); \ + NBL_SERV_SET_ETHTOOL_OPS(get_fec_param, nbl_get_fec_param); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ts_info, ethtool_op_get_ts_info); \ + NBL_SERV_SET_ETHTOOL_OPS(set_phys_id, nbl_set_phys_id); \ + NBL_SERV_SET_ETHTOOL_OPS(nway_reset, nbl_nway_reset); \ +} while (0) + +void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops) +{ +#define NBL_SERV_SET_ETHTOOL_OPS(name, func) do {serv_ops->NBL_NAME(name) = func; ; } while (0) + NBL_SERV_ETHTOOL_OPS_TBL; +#undef NBL_SERV_SET_ETHTOOL_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h new file mode 100644 index 000000000000..23bcf51688ff --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_ETHTOOL_H_ +#define _NBL_ETHTOOL_H_ + +#include "nbl_service.h" + +#define NBL_SELF_TEST_TIME_GAP 5 /* 5 seconds */ +#define NBL_SELF_TEST_BUFF_SIZE 128 +#define NBL_SELF_TEST_PADDING_DATA_1 0xFF +#define NBL_SELF_TEST_PADDING_DATA_2 0xA5 +#define NBL_SELF_TEST_POS_1 3 +#define NBL_SELF_TEST_POS_2 10 +#define NBL_SELF_TEST_POS_3 12 +#define NBL_SELF_TEST_BYTE_1 0xBE +#define NBL_SELF_TEST_BYTE_2 0xAF +#define NBL_SELF_TEST_PKT_NUM 32 + +#define NBL_SELF_TEST_Q_NUM 1 + +enum nbl_eth_lb_enable { + NBL_ETH_LB_OFF, + NBL_ETH_LB_ON, +}; + +enum nbl_ethtool_lb_test_err_code { + NBL_LB_ERR_NON = 0, + NBL_LB_ERR_RING_SETUP, + NBL_LB_ERR_LB_MODE_SETUP, + NBL_LB_ERR_SKB_ALLOC, + NBL_LB_ERR_TX_FAIL, + NBL_LB_ERR_RX_FAIL +}; + +void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool); +void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops_tbl); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c new file mode 100644 index 000000000000..420335c5ca5b --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include +#include +#include +#include +#include "nbl_hwmon.h" + +static const char * const nbl_hwmon_sensor_name[] = { + "Sensor0", + "Module0", + "Module1", + "Module2", + "Module3", +}; + +static umode_t nbl_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + return NBL_HWMON_VISIBLE; +} + +static int nbl_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct nbl_adapter *adapter = dev_get_drvdata(dev); + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + u32 temp; + + switch (channel) { + case NBL_HWMON_CHIP_SENSOR: + switch (attr) { + case hwmon_temp_input: + temp = serv_ops->get_chip_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + *val = (temp & NBL_HWMON_TEMP_MAP) * NBL_HWMON_TEMP_UNIT; + return 0; + case hwmon_temp_max: + temp = serv_ops->get_chip_temperature_max + (NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + *val = temp * NBL_HWMON_TEMP_UNIT; + return 0; + case hwmon_temp_crit: + temp = serv_ops->get_chip_temperature_crit + (NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + *val = temp * NBL_HWMON_TEMP_UNIT; + return 0; + case hwmon_temp_highest: + temp = serv_ops->get_chip_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + *val = (temp >> NBL_HWMON_TEMP_OFF) * NBL_HWMON_TEMP_UNIT; + return 0; + default: + return -EOPNOTSUPP; + } + case NBL_HWMON_LIGHT_MODULE: + switch (attr) { + case hwmon_temp_input: + temp = serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, NBL_MODULE_TEMP); + *val = temp * NBL_HWMON_TEMP_UNIT; + return 0; + case hwmon_temp_max: + temp = serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, NBL_MODULE_TEMP_MAX); + *val = temp * NBL_HWMON_TEMP_UNIT; + return 0; + case hwmon_temp_crit: + temp = serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, NBL_MODULE_TEMP_CRIT); + *val = temp * NBL_HWMON_TEMP_UNIT; + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static 
__maybe_unused int nbl_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct nbl_adapter *adapter = dev_get_drvdata(dev); + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 func_id = NBL_COMMON_TO_PCI_FUNC_ID(common); + + switch (channel) { + case NBL_HWMON_CHIP_SENSOR: + *str = nbl_hwmon_sensor_name[channel]; + return 0; + case NBL_HWMON_LIGHT_MODULE: + *str = nbl_hwmon_sensor_name[channel + func_id]; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_channel_info *nbl_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_HIGHEST | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_LABEL), + NULL +}; + +static const struct hwmon_ops nbl_hwmon_ops = { + .is_visible = nbl_hwmon_is_visible, + .read = nbl_hwmon_read, + .read_string = nbl_hwmon_read_string, +}; + +static const struct hwmon_chip_info nbl_hwmon_chip_info = { + .ops = &nbl_hwmon_ops, + .info = nbl_hwmon_info, +}; + +int nbl_dev_setup_hwmon(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + + common_dev->hwmon_dev = hwmon_device_register_with_info(dev, "nbl", adapter, + &nbl_hwmon_chip_info, NULL); + + return PTR_ERR_OR_ZERO(common_dev->hwmon_dev); +} + +void nbl_dev_remove_hwmon(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + + if (common_dev->hwmon_dev) + hwmon_device_unregister(common_dev->hwmon_dev); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h new file mode 100644 index 000000000000..61f7ef29731d --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_HWMON_H +#define _NBL_HWMON_H + +#include "nbl_dev.h" + +#define NBL_HWMON_TEMP_MAP 0x000001FF +#define NBL_HWMON_TEMP_UNIT 1000 +#define NBL_HWMON_TEMP_OFF 16 +#define NBL_HWMON_VISIBLE 0444 +#define NBL_HWMON_CHIP_SENSOR 0 +#define NBL_HWMON_LIGHT_MODULE 1 + +#endif /*_NBL_HWMON_H*/ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c new file mode 100644 index 000000000000..fa17690c2b38 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c @@ -0,0 +1,3108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_ethtool.h" + +static void nbl_serv_set_link_state(struct nbl_service_mgt *serv_mgt, struct net_device *netdev); + +static void nbl_serv_set_queue_param(struct nbl_serv_ring *ring, u16 desc_num, + struct nbl_txrx_queue_param *param, u16 vsi_id, + u16 global_vector_id) +{ + param->vsi_id = vsi_id; + param->dma = ring->dma; + param->desc_num = desc_num; + param->local_queue_id = ring->local_queue_id / 2; + param->global_vector_id = global_vector_id; + param->intr_en = 1; + param->intr_mask = 1; + param->extend_header = 1; + param->rxcsum = 1; + param->split = 0; +} + +/** + * In virtio mode, the emulator triggers the configuration of + * txrx_registers only based on tx_ring, so the rx_info needs + * to be delivered first before the tx_info can be delivered. + */ +int nbl_serv_setup_queues(struct nbl_service_mgt *serv_mgt, struct nbl_serv_ring_vsi_info *vsi_info) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_txrx_queue_param param = {0}; + struct nbl_serv_ring *ring; + struct nbl_serv_vector *vector; + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i, ret = 0; + + for (i = start; i < end; i++) { + vector = &ring_mgt->vectors[i]; + ring = &ring_mgt->rx_rings[i]; + nbl_serv_set_queue_param(ring, ring_mgt->rx_desc_num, ¶m, + vsi_info->vsi_id, vector->global_vector_id); + + ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ¶m, false); + if (ret) + return ret; + } + + for (i = start; i < end; i++) { + vector = &ring_mgt->vectors[i]; + ring = &ring_mgt->tx_rings[i]; + + nbl_serv_set_queue_param(ring, ring_mgt->tx_desc_num, ¶m, + vsi_info->vsi_id, vector->global_vector_id); + + ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), ¶m, true); + if (ret) + return ret; + } + + return 0; +} + +void nbl_serv_flush_rx_queues(struct nbl_service_mgt *serv_mgt, u16 ring_offset, u16 ring_num) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int i; + + for (i = ring_offset; i < ring_offset + ring_num; i++) + disp_ops->kick_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); +} + +int nbl_serv_setup_rings(struct nbl_service_mgt *serv_mgt, struct net_device *netdev, + struct nbl_serv_ring_vsi_info *vsi_info, bool use_napi) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i, ret = 0; + + for (i = start; i < end; i++) { + ring_mgt->tx_rings[i].dma = + disp_ops->start_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); + if (!ring_mgt->tx_rings[i].dma) { + netdev_err(netdev, "Fail to start tx ring %d", i); + ret = -EFAULT; + break; + } + } + if (i != end) { + while (--i + 1 > start) + disp_ops->stop_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); + goto tx_err; + } + + for (i = start; i < end; i++) { + ring_mgt->rx_rings[i].dma = + disp_ops->start_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i, use_napi); + if (!ring_mgt->rx_rings[i].dma) { + netdev_err(netdev, "Fail to start rx ring %d", i); + ret = -EFAULT; + break; + } + } + if (i != end) { + while (--i + 1 > start) + disp_ops->stop_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); + goto rx_err; + } + + return 0; + +rx_err: + for (i = start; i < end; i++) + disp_ops->stop_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); 
+tx_err: + return ret; +} + +void nbl_serv_stop_rings(struct nbl_service_mgt *serv_mgt, + struct nbl_serv_ring_vsi_info *vsi_info) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i; + + for (i = start; i < end; i++) + disp_ops->stop_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); + + for (i = start; i < end; i++) + disp_ops->stop_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); +} + +static int nbl_serv_set_tx_rings(struct nbl_serv_ring_mgt *ring_mgt, + struct net_device *netdev, struct device *dev) +{ + int i; + u16 ring_num = ring_mgt->tx_ring_num; + + ring_mgt->tx_rings = devm_kcalloc(dev, ring_num, sizeof(*ring_mgt->tx_rings), GFP_KERNEL); + if (!ring_mgt->tx_rings) + return -ENOMEM; + + for (i = 0; i < ring_num; i++) + ring_mgt->tx_rings[i].index = i; + + return 0; +} + +static void nbl_serv_remove_tx_ring(struct nbl_serv_ring_mgt *ring_mgt, struct device *dev) +{ + devm_kfree(dev, ring_mgt->tx_rings); + ring_mgt->tx_rings = NULL; +} + +static int nbl_serv_set_rx_rings(struct nbl_serv_ring_mgt *ring_mgt, + struct net_device *netdev, struct device *dev) +{ + int i; + u16 ring_num = ring_mgt->rx_ring_num; + + ring_mgt->rx_rings = devm_kcalloc(dev, ring_num, sizeof(*ring_mgt->rx_rings), GFP_KERNEL); + if (!ring_mgt->rx_rings) + return -ENOMEM; + + for (i = 0; i < ring_num; i++) + ring_mgt->rx_rings[i].index = i; + + return 0; +} + +static void nbl_serv_remove_rx_ring(struct nbl_serv_ring_mgt *ring_mgt, struct device *dev) +{ + devm_kfree(dev, ring_mgt->rx_rings); + ring_mgt->rx_rings = NULL; +} + +static int nbl_serv_set_vectors(struct nbl_service_mgt *serv_mgt, + struct net_device *netdev, struct device *dev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_resource_pt_ops *pt_ops = NBL_ADAPTER_TO_RES_PT_OPS(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int i; + u16 ring_num = ring_mgt->rx_ring_num; + + ring_mgt->vectors = devm_kcalloc(dev, ring_num, sizeof(*ring_mgt->vectors), GFP_KERNEL); + if (!ring_mgt->vectors) + return -ENOMEM; + + for (i = 0; i < ring_num; i++) { + ring_mgt->vectors[i].napi = + disp_ops->get_vector_napi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i); + netif_napi_add(netdev, ring_mgt->vectors[i].napi, pt_ops->napi_poll); + ring_mgt->vectors[i].netdev = netdev; + } + + return 0; +} + +static void nbl_serv_remove_vectors(struct nbl_serv_ring_mgt *ring_mgt, struct device *dev) +{ + int i; + u16 ring_num = ring_mgt->rx_ring_num; + + for (i = 0; i < ring_num; i++) + netif_napi_del(ring_mgt->vectors[i].napi); + + devm_kfree(dev, ring_mgt->vectors); + ring_mgt->vectors = NULL; +} + +static struct nbl_serv_vlan_node *nbl_serv_alloc_vlan_node(void) +{ + struct nbl_serv_vlan_node *vlan_node = NULL; + + vlan_node = kzalloc(sizeof(*vlan_node), GFP_ATOMIC); + if (!vlan_node) + return NULL; + + INIT_LIST_HEAD(&vlan_node->node); + return vlan_node; +} + +static void nbl_serv_free_vlan_node(struct nbl_serv_vlan_node *vlan_node) +{ + kfree(vlan_node); +} + +static struct nbl_serv_submac_node *nbl_serv_alloc_submac_node(void) +{ + struct nbl_serv_submac_node *submac_node = NULL; + + submac_node = kzalloc(sizeof(*submac_node), GFP_ATOMIC); + if (!submac_node) + return NULL; + + INIT_LIST_HEAD(&submac_node->node); + return submac_node; +} + +static void nbl_serv_free_submac_node(struct nbl_serv_submac_node *submac_node) +{ + 
kfree(submac_node); +} + +static void nbl_serv_del_all_vlans(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_vlan_node *vlan_node, *vlan_node_safe; + + list_for_each_entry_safe(vlan_node, vlan_node_safe, &flow_mgt->vlan_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, NBL_COMMON_TO_VSI_ID(common)); + + list_del(&vlan_node->node); + nbl_serv_free_vlan_node(vlan_node); + } +} + +static void nbl_serv_del_all_submacs(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_submac_node *submac_node, *submac_node_safe; + + list_for_each_entry_safe(submac_node, submac_node_safe, &flow_mgt->submac_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, + NBL_DEFAULT_VLAN_ID, NBL_COMMON_TO_VSI_ID(common)); + + list_del(&submac_node->node); + nbl_serv_free_submac_node(submac_node); + } +} + +static int nbl_serv_ipv6_exthdr_num(struct sk_buff *skb, int start, u8 nexthdr) +{ + int exthdr_num = 0; + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + + while (ipv6_ext_hdr(nexthdr)) { + if (nexthdr == NEXTHDR_NONE) + return -1; + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + return -1; + + exthdr_num++; + + if (nexthdr == NEXTHDR_FRAGMENT) + hdrlen = 8; + else if (nexthdr == NEXTHDR_AUTH) + hdrlen = ipv6_authlen(hp); + else + hdrlen = ipv6_optlen(hp); + + nexthdr = hp->nexthdr; + start += hdrlen; + } + + return exthdr_num; +} + +static void nbl_serv_set_sfp_state(void *priv, struct net_device *netdev, u8 eth_id, + bool open, bool is_force) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret = 0; + + if (test_bit(NBL_FLAG_LINK_DOWN_ON_CLOSE, serv_mgt->flags) || is_force) { + if (open) { + ret = disp_ops->set_sfp_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, NBL_SFP_MODULE_ON); + if (ret) + netdev_info(netdev, "Fail to open sfp\n"); + else + netdev_info(netdev, "open sfp\n"); + } else { + ret = disp_ops->set_sfp_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, NBL_SFP_MODULE_OFF); + if (ret) + netdev_info(netdev, "Fail to close sfp\n"); + else + netdev_info(netdev, "close sfp\n"); + } + } +} + +static void nbl_serv_set_netdev_carrier_state(void *priv, struct net_device *netdev, u8 link_state) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + + if (test_bit(NBL_DOWN, adapter->state)) + return; + + if (link_state) { + if (!netif_carrier_ok(netdev)) { + netif_carrier_on(netdev); + netdev_info(netdev, "Set nic link up\n"); + } + } else { + if (netif_carrier_ok(netdev)) { + netif_carrier_off(netdev); + netdev_info(netdev, "Set nic link down\n"); + } + } +} + +static void nbl_serv_set_link_state(struct nbl_service_mgt *serv_mgt, struct net_device *netdev) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + struct nbl_eth_link_info eth_link_info = {0}; + int ret = 0; + + ret = 
disp_ops->get_link_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &eth_link_info); + if (ret) { + netdev_err(netdev, "Fail to get_link_state err %d\n", ret); + eth_link_info.link_status = 1; + } + + nbl_serv_set_netdev_carrier_state(serv_mgt, netdev, eth_link_info.link_status); +} + +int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, + u16 real_qps, bool use_napi) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index]; + int ret = 0; + + if (vsi_info->started) + return 0; + + ret = nbl_serv_setup_rings(serv_mgt, netdev, vsi_info, use_napi); + if (ret) { + netdev_err(netdev, "Fail to setup rings\n"); + goto setup_rings_fail; + } + + ret = nbl_serv_setup_queues(serv_mgt, vsi_info); + if (ret) { + netdev_err(netdev, "Fail to setup queues\n"); + goto setup_queue_fail; + } + nbl_serv_flush_rx_queues(serv_mgt, vsi_info->ring_offset, vsi_info->ring_num); + + ret = disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_info->vsi_id, true); + if (ret) { + netdev_err(netdev, "Fail to setup dsch\n"); + goto setup_dsch_fail; + } + + vsi_info->active_ring_num = real_qps; + ret = disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, real_qps); + if (ret) + goto setup_cqs_fail; + + vsi_info->started = true; + return 0; + +setup_cqs_fail: + disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), false); +setup_dsch_fail: + disp_ops->remove_all_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common)); +setup_queue_fail: + nbl_serv_stop_rings(serv_mgt, vsi_info); +setup_rings_fail: + return ret; +} + +int nbl_serv_vsi_stop(void *priv, u16 vsi_index) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index]; + + if (!vsi_info->started) + return 0; + + vsi_info->started = false; + /* modify default action and rss configuration */ + disp_ops->remove_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id); + + /* disable and reset tx/rx logic queue */ + disp_ops->remove_all_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id); + + /* clear dsch config */ + disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, false); + /* free tx and rx bufs */ + nbl_serv_stop_rings(serv_mgt, vsi_info); + + return 0; +} + +static int nbl_serv_switch_traffic_default_dest(void *priv, u16 from_vsi, u16 to_vsi) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + int ret; + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, from_vsi); + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), 
flow_mgt->mac, + vlan_node->vid, to_vsi); + if (ret) { + netdev_err(dev, "Fail to cfg macvlan on vid %u in vsi switch", + vlan_node->vid); + goto fail; + } + } + + /* trigger submac update */ + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); + + /* arp/nd traffic */ + disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), from_vsi); + ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), to_vsi); + if (ret) + goto add_multi_fail; + + return 0; + +add_multi_fail: + disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), from_vsi); +fail: + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, to_vsi); + disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, from_vsi); + } + + return -EINVAL; +} + +static int nbl_serv_abnormal_event_to_queue(int event_type) +{ + switch (event_type) { + case NBL_ABNORMAL_EVENT_DVN: + return NBL_TX; + case NBL_ABNORMAL_EVENT_UVN: + return NBL_RX; + default: + return event_type; + } +} + +static dma_addr_t nbl_serv_netdev_queue_restore(struct nbl_service_mgt *serv_mgt, + u16 local_queue_id, int type) +{ + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vector *vector = &ring_mgt->vectors[local_queue_id]; + + if (type == NBL_TX) + netif_stop_subqueue(vector->netdev, local_queue_id); + + return disp_ops->restore_abnormal_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_queue_id, type); +} + +static int nbl_serv_netdev_queue_restart(struct nbl_service_mgt *serv_mgt, + u16 local_queue_id, int type) +{ + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vector *vector = &ring_mgt->vectors[local_queue_id]; + + if (type == NBL_TX) + netif_start_subqueue(vector->netdev, local_queue_id); + + return disp_ops->restart_abnormal_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_queue_id, type); +} + +static dma_addr_t nbl_serv_chan_restore_netdev_queue_req(struct nbl_service_mgt *serv_mgt, + u16 local_queue_id, u16 func_id, int type) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_param_restore_queue param = {0}; + struct nbl_chan_send_info chan_send = {0}; + dma_addr_t dma = 0; + int ret = 0; + + param.local_queue_id = local_queue_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, + &param, sizeof(param), &dma, sizeof(dma), 1); + ret = chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); + if (ret) + return 0; + + return dma; +} + +static void nbl_serv_chan_restore_netdev_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_param_restore_queue *param = (struct nbl_chan_param_restore_queue *)data; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + struct nbl_chan_ack_info chan_ack; + dma_addr_t dma = 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + if 
(param->local_queue_id < vsi_info->ring_offset || + param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num || + !vsi_info->ring_num) + return; + + dma = nbl_serv_netdev_queue_restore(serv_mgt, param->local_queue_id, param->type); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, msg_id, + NBL_CHAN_RESP_OK, &dma, sizeof(dma)); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} + +static int nbl_serv_chan_restart_netdev_queue_req(struct nbl_service_mgt *serv_mgt, + u16 local_queue_id, u16 func_id, int type) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_param_restart_queue param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.local_queue_id = local_queue_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); +} + +static void nbl_serv_chan_restart_netdev_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_param_restart_queue *param = (struct nbl_chan_param_restart_queue *)data; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + struct nbl_chan_ack_info chan_ack; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + if (param->local_queue_id < vsi_info->ring_offset || + param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num || + !vsi_info->ring_num) + return; + + nbl_serv_netdev_queue_restart(serv_mgt, param->local_queue_id, param->type); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} + +static void nbl_serv_restore_queue(struct nbl_service_mgt *serv_mgt, u16 vsi_id, + u16 local_queue_id, u16 type, bool dif_err) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 func_id = disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u16 global_queue_id; + dma_addr_t dma = 0; + int ret = 0; + + dma = nbl_serv_chan_restore_netdev_queue_req(serv_mgt, local_queue_id, func_id, type); + if (!dma) + return; + + ret = disp_ops->restore_hw_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, + local_queue_id, dma, type); + if (ret) + return; + + nbl_serv_chan_restart_netdev_queue_req(serv_mgt, local_queue_id, func_id, type); + + if (dif_err && type == NBL_TX) { + global_queue_id = + disp_ops->get_vsi_global_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, local_queue_id); + nbl_info(common, NBL_DEBUG_COMMON, + "dvn int_status:0, queue_id:%d\n", global_queue_id); + } +} + +static void nbl_serv_handle_tx_timeout(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, tx_timeout); + struct nbl_service_mgt *serv_mgt = serv_net_resource_mgt->serv_mgt; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + int i = 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) { + if 
(ring_mgt->tx_rings[i].need_recovery) { + nbl_serv_restore_queue(serv_mgt, vsi_info->vsi_id, i, NBL_TX, false); + ring_mgt->tx_rings[i].need_recovery = false; + } + } +} + +int nbl_serv_netdev_open(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_ring_vsi_info *vsi_info; + int num_cpus, real_qps, ret = 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (!test_bit(NBL_DOWN, adapter->state)) + return -EBUSY; + + netdev_info(netdev, "Nbl open\n"); + netif_carrier_off(netdev); + + nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), true, false); + + if (vsi_info->active_ring_num) { + real_qps = vsi_info->active_ring_num; + } else { + num_cpus = num_online_cpus(); + real_qps = num_cpus > vsi_info->ring_num ? vsi_info->ring_num : num_cpus; + } + + ret = nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, real_qps, 1); + if (ret) + goto vsi_open_fail; + + ret = netif_set_real_num_tx_queues(netdev, real_qps); + if (ret) + goto setup_real_qps_fail; + ret = netif_set_real_num_rx_queues(netdev, real_qps); + if (ret) + goto setup_real_qps_fail; + + netif_tx_start_all_queues(netdev); + clear_bit(NBL_DOWN, adapter->state); + set_bit(NBL_RUNNING, adapter->state); + nbl_serv_set_link_state(serv_mgt, netdev); + + netdev_info(netdev, "Nbl open ok!\n"); + + return 0; + +setup_real_qps_fail: + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); +vsi_open_fail: + return ret; +} + +int nbl_serv_netdev_stop(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_ring_vsi_info *vsi_info; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (!test_bit(NBL_RUNNING, adapter->state)) + return -EBUSY; + + netdev_info(netdev, "Nbl stop\n"); + set_bit(NBL_DOWN, adapter->state); + clear_bit(NBL_RUNNING, adapter->state); + + nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), false, false); + + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); + + netdev_info(netdev, "Nbl stop ok!\n"); + + return 0; +} + +static int nbl_serv_change_mtu(struct net_device *netdev, int new_mtu) +{ + netdev->mtu = new_mtu; + return 0; +} + +static int nbl_serv_set_mac(struct net_device *dev, void *p) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + struct sockaddr *addr = p; + struct nbl_netdev_priv *priv = netdev_priv(dev); + u16 vsi_id = priv->default_vsi_id; + int ret = 0; + + if (!is_valid_ether_addr(addr->sa_data)) { + netdev_err(dev, "Attempt to set an invalid MAC address %pM\n", addr->sa_data); + return -EADDRNOTAVAIL; + } + + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) + return 0; + + list_for_each_entry(vlan_node, 
&flow_mgt->vlan_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, vsi_id); + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, + vlan_node->vid, vsi_id); + if (ret) { + netdev_err(dev, "Fail to cfg macvlan on vid %u", vlan_node->vid); + goto fail; + } + } + + disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, addr->sa_data); + + ether_addr_copy(flow_mgt->mac, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); + + if (!NBL_COMMON_TO_VF_CAP(common)) + disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + addr->sa_data, NBL_COMMON_TO_ETH_ID(common)); + + return 0; +fail: + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, + vlan_node->vid, vsi_id); + disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, vsi_id); + } + return -EAGAIN; +} + +static int nbl_serv_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_serv_vlan_node *vlan_node; + u16 vsi_id = priv->default_vsi_id; + int ret = 0; + + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev for proto 0x%04x, vid %u.", + be16_to_cpu(proto), vid); + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev vid %u.", vlan_node->vid); + if (vlan_node->vid == vid) + return 0; + } + + vlan_node = nbl_serv_alloc_vlan_node(); + if (!vlan_node) + return -EAGAIN; + + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, vid, vsi_id); + if (ret) { + nbl_serv_free_vlan_node(vlan_node); + return -EAGAIN; + } + + vlan_node->vid = vid; + list_add(&vlan_node->node, &flow_mgt->vlan_list); + + return 0; +} + +static int nbl_serv_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_serv_vlan_node *vlan_node; + u16 vsi_id = priv->default_vsi_id; + + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev for proto 0x%04x, vid %u.", + be16_to_cpu(proto), vid); + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev vid %u.", vlan_node->vid); + if (vlan_node->vid == vid) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vid, vsi_id); + + list_del(&vlan_node->node); + nbl_serv_free_vlan_node(vlan_node); + + break; + } + } + + return 0; +} + +static void nbl_serv_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct nbl_queue_stats queue_stats = { 0 }; + struct nbl_adapter *adapter = 
NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + u16 start, end; + int i; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + start = vsi_info->ring_offset; + end = vsi_info->ring_offset + vsi_info->ring_num; + + if (!stats) + return; + + for (i = start; i < end; i++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + i, &queue_stats, true); + stats->tx_packets += queue_stats.packets; + stats->tx_bytes += queue_stats.bytes; + } + + for (i = start; i < end; i++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + i, &queue_stats, false); + stats->rx_packets += queue_stats.packets; + stats->rx_bytes += queue_stats.bytes; + } + + stats->multicast = 0; + stats->rx_errors = 0; + stats->tx_errors = 0; + stats->rx_length_errors = 0; + stats->rx_crc_errors = 0; + stats->rx_frame_errors = 0; + stats->rx_dropped = 0; + stats->tx_dropped = 0; +} + +static void nbl_modify_submacs(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct netdev_hw_addr *ha; + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); + struct nbl_serv_submac_node *submac_node; + int uc_count, i, ret = 0; + u16 vsi_id = priv->default_vsi_id; + u8 *buf = NULL; + u16 len; + + spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); + uc_count = netdev_uc_count(net_resource_mgt->netdev); + + if (uc_count) { + len = uc_count * ETH_ALEN; + buf = kzalloc(len, GFP_ATOMIC); + + if (!buf) { + spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + return; + } + + i = 0; + netdev_hw_addr_list_for_each(ha, &net_resource_mgt->netdev->uc) { + if (i >= len) + break; + memcpy(&buf[i], ha->addr, ETH_ALEN); + i += ETH_ALEN; + } + + net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_MODIFY_MAC_FILTER; + } + spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + + nbl_serv_del_all_submacs(serv_mgt); + + for (i = 0; i < uc_count; i++) { + submac_node = nbl_serv_alloc_submac_node(); + if (!submac_node) + break; + + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &buf[i * ETH_ALEN], + 0, vsi_id); + if (ret) { + nbl_serv_free_submac_node(submac_node); + break; + } + + ether_addr_copy(submac_node->mac, &buf[i * ETH_ALEN]); + list_add(&submac_node->node, &flow_mgt->submac_list); + } + + kfree(buf); +} + +static void nbl_modify_promisc_mode(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 mode = 0; + + spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (net_resource_mgt->curr_promiscuout_mode & (IFF_PROMISC | IFF_ALLMULTI)) + mode = 1; + + net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->default_vsi_id, mode); +} + +static struct nbl_mac_filter *nbl_find_filter(struct nbl_adapter *adapter, const u8 *macaddr) 
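+/* nbl_find_filter()/nbl_add_filter()/nbl_addr_sync()/nbl_addr_unsync() below keep
+ * a driver-side list of the netdev's unicast addresses so that nbl_serv_set_rx_mode()
+ * can detect additions and removals and schedule the rx_mode_async work; they are
+ * invoked from __dev_uc_sync() with mac_vlan_list_lock held, which is why plain list
+ * operations and GFP_ATOMIC allocations are used here.
+ */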
+{ + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_mac_filter *f; + + if (!macaddr) + return NULL; + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + list_for_each_entry(f, &net_resource_mgt->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +static void nbl_free_filter(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_mac_filter *f; + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &net_resource_mgt->mac_filter_list) { + f = list_entry(pos, struct nbl_mac_filter, list); + list_del(&f->list); + kfree(f); + } +} + +static struct nbl_mac_filter *nbl_add_filter(struct nbl_adapter *adapter, const u8 *macaddr) +{ + struct nbl_mac_filter *f; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + if (!macaddr) + return NULL; + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + f = nbl_find_filter(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + list_add_tail(&f->list, &net_resource_mgt->mac_filter_list); + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; + } + + return f; +} + +static int nbl_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct nbl_adapter *adapter; + struct nbl_mac_filter *f; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + f = nbl_find_filter(adapter, addr); + if (f) { + list_del(&f->list); + kfree(f); + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; + } + + return 0; +} + +static int nbl_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct nbl_adapter *adapter; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + if (nbl_add_filter(adapter, addr)) + return 0; + else + return -ENOMEM; +} + +static bool nbl_serv_promisc_mode_changed(struct net_device *dev) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + return (net_resource_mgt->curr_promiscuout_mode ^ dev->flags) + & (IFF_PROMISC | IFF_ALLMULTI); +} + +static void nbl_serv_set_rx_mode(struct net_device *dev) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); + __dev_uc_sync(dev, nbl_addr_sync, nbl_addr_unsync); + spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + + if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) { /* only pf support */ + spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (nbl_serv_promisc_mode_changed(dev)) { + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + 
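+			/* Only the required-work flags are recorded here under the
+			 * lock; the actual hardware update is deferred to the
+			 * rx_mode_async task, which applies MAC filter and promisc
+			 * changes via nbl_modify_submacs()/nbl_modify_promisc_mode()
+			 * outside of this atomic context.
+			 */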
net_resource_mgt->curr_promiscuout_mode = dev->flags; + } + spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + } + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} + +static void nbl_serv_change_rx_flags(struct net_device *dev, int flag) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (nbl_serv_promisc_mode_changed(dev)) { + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + net_resource_mgt->curr_promiscuout_mode = dev->flags; + } + spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} + +static netdev_features_t +nbl_serv_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) +{ + u32 l2_l3_hrd_len = 0, l4_hrd_len = 0, total_hrd_len = 0; + u8 l4_proto = 0; + __be16 protocol, frag_off; + int ret; + unsigned char *exthdr; + unsigned int offset = 0; + int nexthdr = 0; + int exthdr_num = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL. + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 256 bytes or bigger than 16383 bytes. If it is then we need to drop support for GSO. 
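+	 *
+	 * Summary of the checks below: the header chain is parsed (IPv4, or IPv6
+	 * with at most two supported extension headers and no routing/destination
+	 * headers), the total L2+L3+L4 header length is bounded (up to 255 bytes
+	 * for checksum offload, 42..128 bytes for TSO), and encapsulated frames
+	 * fall back to software; outside those limits either the GSO bit alone or
+	 * all checksum/GSO feature bits are cleared for this skb.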
+ */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < NBL_TX_TSO_MSS_MIN || + skb_shinfo(skb)->gso_size > NBL_TX_TSO_MSS_MAX)) + features &= ~NETIF_F_GSO_MASK; + + l2_l3_hrd_len = (u32)(skb_transport_header(skb) - skb->data); + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (protocol == htons(ETH_P_IP)) { + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) { + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); + if (ret < 0) + goto out_rm_features; + } + + /* IPV6 extension headers + * (1) do not support routing and destination extension headers + * (2) support at most 2 extension headers + */ + nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_ROUTING, NULL, NULL); + if (nexthdr == NEXTHDR_ROUTING) { + netdev_info(dev, "skb contains ipv6 routing ext header\n"); + goto out_rm_features; + } + + nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_DEST, NULL, NULL); + if (nexthdr == NEXTHDR_DEST) { + netdev_info(dev, "skb contains ipv6 dest ext header\n"); + goto out_rm_features; + } + + exthdr_num = nbl_serv_ipv6_exthdr_num(skb, exthdr - skb->data, ip.v6->nexthdr); + if (exthdr_num < 0 || exthdr_num > 2) { + netdev_info(dev, "skb ipv6 exthdr_num:%d\n", exthdr_num); + goto out_rm_features; + } + } else { + goto out_rm_features; + } + + switch (l4_proto) { + case IPPROTO_TCP: + l4_hrd_len = (l4.tcp->doff) * 4; + break; + case IPPROTO_UDP: + l4_hrd_len = sizeof(struct udphdr); + break; + case IPPROTO_SCTP: + l4_hrd_len = sizeof(struct sctphdr); + break; + default: + goto out_rm_features; + } + + total_hrd_len = l2_l3_hrd_len + l4_hrd_len; + + // TX checksum offload supports a total header len in [0, 255] + if (total_hrd_len > NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX) + goto out_rm_features; + + // TSO supports a total header len in [42, 128] + if (total_hrd_len < NBL_TX_TSO_L2L3L4_HDR_LEN_MIN || + total_hrd_len > NBL_TX_TSO_L2L3L4_HDR_LEN_MAX) + features &= ~NETIF_F_GSO_MASK; + + if (skb->encapsulation) + goto out_rm_features; + + return features; + +out_rm_features: + return features & ~(NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_MASK); +} + +static void nbl_serv_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].need_recovery = true; + ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].tx_timeout_count++; + + nbl_warn(common, NBL_DEBUG_QUEUE, "TX timeout on queue %d", txqueue); + + nbl_common_queue_work(&NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->tx_timeout, false, false); +} + +static int nbl_serv_get_phys_port_name(struct net_device *dev, char *name, size_t len) +{ + struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(dev); + u8 pf_id; + + pf_id = common->eth_id; + if ((NBL_COMMON_TO_ETH_MODE(common) == NBL_TWO_ETHERNET_PORT) && common->eth_id == 2) + pf_id = 1; + + if (snprintf(name, len, "p%u", pf_id) >= len) + return -EINVAL; + return 0; +} + +static int 
nbl_serv_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u8 mac[ETH_ALEN]; + + disp_ops->get_base_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac); + + ppid->id_len = ETH_ALEN; + memcpy(&ppid->id, mac, ppid->id_len); + + return 0; +} + +static int nbl_serv_register_net(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int p4_type, ret = 0; + + ret = disp_ops->register_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + register_param, register_result); + if (ret) + return ret; + + p4_type = disp_ops->get_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + switch (p4_type) { + case NBL_P4_DEFAULT: + set_bit(NBL_FLAG_P4_DEFAULT, serv_mgt->flags); + break; + default: + nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_CUSTOMIZED_P4, + "Unknown P4 type %d", p4_type); + } + + return 0; +} + +static int nbl_serv_unregister_net(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->unregister_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_setup_txrx_queues(void *priv, u16 vsi_id, u16 queue_num, u16 net_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vector *vector; + int i, ret = 0; + + /* Clear cfgs, in case this function exited abnormaly last time */ + disp_ops->clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + + /* queue_num include user&kernel queue */ + ret = disp_ops->alloc_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, queue_num); + if (ret) + return -EFAULT; + + /* ring_mgt->tx_ring_number only for kernel use */ + for (i = 0; i < ring_mgt->tx_ring_num; i++) { + ring_mgt->tx_rings[i].local_queue_id = NBL_PAIR_ID_GET_TX(i); + ring_mgt->rx_rings[i].local_queue_id = NBL_PAIR_ID_GET_RX(i); + + vector = &ring_mgt->vectors[i]; + vector->local_vector_id = i + net_vector_id; + vector->global_vector_id = + disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, vector->local_vector_id); + vector->irq_enable_base = + disp_ops->get_msix_irq_enable_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector->global_vector_id, + &vector->irq_data); + + disp_ops->set_vector_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector->irq_enable_base, + vector->irq_data, i, + ring_mgt->net_msix_mask_en); + } + + return 0; +} + +static void nbl_serv_remove_txrx_queues(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->free_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_setup_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + 
struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->setup_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_remove_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->remove_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_setup_rss(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->setup_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_remove_rss(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->remove_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_alloc_rings(void *priv, struct net_device *netdev, + u16 tx_num, u16 rx_num, u16 desc_num) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct device *dev; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ring_mgt->tx_ring_num = tx_num; + ring_mgt->rx_ring_num = rx_num; + ring_mgt->tx_desc_num = desc_num; + ring_mgt->rx_desc_num = desc_num; + + ret = disp_ops->alloc_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), netdev, + tx_num, rx_num, ring_mgt->tx_desc_num, + ring_mgt->rx_desc_num); + if (ret) + goto alloc_rings_fail; + + ret = nbl_serv_set_tx_rings(ring_mgt, netdev, dev); + if (ret) + goto set_tx_fail; + ret = nbl_serv_set_rx_rings(ring_mgt, netdev, dev); + if (ret) + goto set_rx_fail; + + ret = nbl_serv_set_vectors(serv_mgt, netdev, dev); + if (ret) + goto set_vectors_fail; + + return 0; + +set_vectors_fail: + nbl_serv_remove_rx_ring(ring_mgt, dev); +set_rx_fail: + nbl_serv_remove_tx_ring(ring_mgt, dev); +set_tx_fail: + disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +alloc_rings_fail: + return ret; +} + +static void nbl_serv_free_rings(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct device *dev; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + nbl_serv_remove_vectors(ring_mgt, dev); + nbl_serv_remove_rx_ring(ring_mgt, dev); + nbl_serv_remove_tx_ring(ring_mgt, dev); + + disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_enable_napis(void *priv, u16 vsi_index) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index]; + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i; + + for (i = start; i < end; i++) + napi_enable(ring_mgt->vectors[i].napi); + + return 0; +} + +static void nbl_serv_disable_napis(void *priv, u16 vsi_index) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = 
&ring_mgt->vsi_info[vsi_index]; + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i; + + for (i = start; i < end; i++) + napi_disable(ring_mgt->vectors[i].napi); +} + +static void nbl_serv_set_mask_en(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt; + + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + + ring_mgt->net_msix_mask_en = enable; +} + +static int nbl_serv_start_net_flow(void *priv, struct net_device *netdev, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + int ret = 0; + + /* Clear cfgs, in case this function exited abnormaly last time */ + disp_ops->clear_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + + if (!list_empty(&flow_mgt->vlan_list)) + return -ECONNRESET; + + ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + if (ret) + goto add_multi_fail; + + vlan_node = nbl_serv_alloc_vlan_node(); + if (!vlan_node) + goto alloc_fail; + + ether_addr_copy(flow_mgt->mac, netdev->dev_addr); + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + NBL_DEFAULT_VLAN_ID, vsi_id); + if (ret) + goto add_macvlan_fail; + + vlan_node->vid = 0; + + list_add(&vlan_node->node, &flow_mgt->vlan_list); + return 0; + +add_macvlan_fail: + nbl_serv_free_vlan_node(vlan_node); +alloc_fail: + disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +add_multi_fail: + return ret; +} + +static void nbl_serv_stop_net_flow(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + + nbl_serv_del_all_vlans(serv_mgt); + nbl_serv_del_all_submacs(serv_mgt); + + disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + + disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, -1, false); + memset(flow_mgt->mac, 0, sizeof(flow_mgt->mac)); +} + +static int nbl_serv_set_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_remove_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_start_mgt_flow(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->setup_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_stop_mgt_flow(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->remove_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static u32 nbl_serv_get_tx_headroom(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + 
struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_tx_headroom(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +/* + * This op gets the flexible product capability from the ctrl device; if this + * device has no manager capability, it needs to fetch the capability from the + * ctrl device over the channel. + */ +static bool nbl_serv_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_product_flex_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + cap_type); +} + +/* + * This op gets the fixed product capability from the resource layer; the + * capability is fixed by the product type, so there is no need to query the + * ctrl device. + */ +static bool nbl_serv_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + cap_type); +} + +static int nbl_serv_init_chip(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + struct device *dev; + int ret = 0; + + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + dev = NBL_COMMON_TO_DEV(common); + + ret = disp_ops->init_chip_module(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "init_chip_module failed\n"); + goto module_init_fail; + } + + ret = disp_ops->queue_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "queue_init failed\n"); + goto queue_init_fail; + } + + ret = disp_ops->vsi_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "vsi_init failed\n"); + goto vsi_init_fail; + } + + return 0; + +vsi_init_fail: +queue_init_fail: +module_init_fail: + return ret; +} + +static int nbl_serv_destroy_chip(void *p) +{ + return 0; +} + +static int nbl_serv_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->configure_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), num_net_msix, + num_others_msix, net_msix_mask_en); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_destroy_msix_map(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->destroy_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_mailbox_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + 
ret = disp_ops->enable_abnormal_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static irqreturn_t nbl_serv_clean_rings(int __always_unused irq, void *data) +{ + struct nbl_serv_vector *vector = (struct nbl_serv_vector *)data; + + napi_schedule_irqoff(vector->napi); + + return IRQ_HANDLED; +} + +static int nbl_serv_request_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_ring *tx_ring, *rx_ring; + struct nbl_serv_vector *vector; + u32 irq_num; + int i, ret = 0; + + for (i = 0; i < ring_mgt->tx_ring_num; i++) { + tx_ring = &ring_mgt->tx_rings[i]; + rx_ring = &ring_mgt->rx_rings[i]; + vector = &ring_mgt->vectors[i]; + vector->tx_ring = tx_ring; + vector->rx_ring = rx_ring; + + irq_num = msix_info->msix_entries[i].vector; + snprintf(vector->name, sizeof(vector->name) - 1, "%s%03d-%s-%02u", "NBL", + NBL_COMMON_TO_VSI_ID(common), "TxRx", i); + ret = devm_request_irq(dev, irq_num, nbl_serv_clean_rings, 0, + vector->name, vector); + if (ret) { + nbl_err(common, NBL_DEBUG_INTR, + "TxRx Queue %u requests MSIX irq failed %d", i, ret); + goto request_irq_err; + } + } + + net_resource_mgt->num_net_msix = msix_info->msix_num; + + return 0; + +request_irq_err: + while (--i + 1) { + vector = &ring_mgt->vectors[i]; + + irq_num = msix_info->msix_entries[i].vector; + devm_free_irq(dev, irq_num, vector); + } + return ret; +} + +static void nbl_serv_free_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_vector *vector; + u32 irq_num; + int i; + + for (i = 0; i < ring_mgt->tx_ring_num; i++) { + vector = &ring_mgt->vectors[i]; + + irq_num = msix_info->msix_entries[i].vector; + devm_free_irq(dev, irq_num, vector); + } +} + +static u16 nbl_serv_get_global_vector(void *priv, u16 local_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), local_vector_id); +} + +static u16 nbl_serv_get_msix_entry_id(void *priv, u16 local_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_msix_entry_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), local_vector_id); +} + +static u16 nbl_serv_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, type); +} + +static void nbl_serv_get_eth_id(void 
*priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_eth_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, eth_mode, eth_id); +} + +static void nbl_serv_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_user_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + queue_num, queue_size, vsi_id); +} + +static int nbl_serv_enable_lag_protocol(void *priv, u16 vsi_id, bool lag_en) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret = 0; + + if (lag_en) + ret = disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + else + disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + + return ret; +} + +static void nbl_serv_net_stats_update_task(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, net_stats_update); + struct nbl_service_mgt *serv_mgt; + + serv_mgt = serv_net_resource_mgt->serv_mgt; + + nbl_serv_update_stats(serv_mgt, false); +} + +static void nbl_serv_rx_mode_async_task(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, rx_mode_async); + + if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_MODIFY_MAC_FILTER) + nbl_modify_submacs(serv_net_resource_mgt); + + if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE) + nbl_modify_promisc_mode(serv_net_resource_mgt); +} + +static void nbl_serv_net_task_service_timer(struct timer_list *t) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = + from_timer(net_resource_mgt, t, serv_timer); + + mod_timer(&net_resource_mgt->serv_timer, + round_jiffies(net_resource_mgt->serv_timer_period + jiffies)); + nbl_common_queue_work(&net_resource_mgt->net_stats_update, false, false); +} + +static void nbl_serv_setup_flow_mgt(struct nbl_serv_flow_mgt *flow_mgt) +{ + INIT_LIST_HEAD(&flow_mgt->vlan_list); + INIT_LIST_HEAD(&flow_mgt->submac_list); +} + +static void nbl_serv_register_restore_netdev_queue(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, + nbl_serv_chan_restore_netdev_queue_resp, serv_mgt); + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, + nbl_serv_chan_restart_netdev_queue_resp, serv_mgt); +} + +static void nbl_serv_remove_net_resource_mgt(void *priv) +{ + struct device *dev; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + dev = NBL_COMMON_TO_DEV(common); + + if (net_resource_mgt) { + del_timer_sync(&net_resource_mgt->serv_timer); + nbl_common_release_task(&net_resource_mgt->rx_mode_async); + 
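+		/* Teardown order matters here: the periodic service timer is
+		 * stopped first so it can no longer queue net_stats_update work,
+		 * then the async tasks are released, and only afterwards are the
+		 * MAC filter list and the management structure freed.
+		 */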
nbl_common_release_task(&net_resource_mgt->net_stats_update); + nbl_common_release_task(&net_resource_mgt->tx_timeout); + nbl_free_filter(net_resource_mgt); + devm_kfree(dev, net_resource_mgt); + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = NULL; + } +} + +static int nbl_serv_phy_init(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_phy_caps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &net_resource_mgt->phy_caps); + + disp_ops->get_phy_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &net_resource_mgt->phy_state); + + return ret; +} + +static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev) +{ + struct device *dev; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + dev = NBL_COMMON_TO_DEV(common); + net_resource_mgt = devm_kzalloc(dev, sizeof(struct nbl_serv_net_resource_mgt), GFP_KERNEL); + if (!net_resource_mgt) + return -ENOMEM; + + net_resource_mgt->netdev = netdev; + net_resource_mgt->serv_mgt = serv_mgt; + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = net_resource_mgt; + + nbl_serv_phy_init(net_resource_mgt); + nbl_serv_register_restore_netdev_queue(serv_mgt); + timer_setup(&net_resource_mgt->serv_timer, nbl_serv_net_task_service_timer, 0); + + net_resource_mgt->serv_timer_period = HZ; + nbl_common_alloc_task(&net_resource_mgt->rx_mode_async, nbl_serv_rx_mode_async_task); + nbl_common_alloc_task(&net_resource_mgt->net_stats_update, nbl_serv_net_stats_update_task); + nbl_common_alloc_task(&net_resource_mgt->tx_timeout, nbl_serv_handle_tx_timeout); + + INIT_LIST_HEAD(&net_resource_mgt->mac_filter_list); + INIT_LIST_HEAD(&net_resource_mgt->indr_dev_priv_list); + spin_lock_init(&net_resource_mgt->mac_vlan_list_lock); + spin_lock_init(&net_resource_mgt->current_netdev_promisc_flags_lock); + net_resource_mgt->get_stats_jiffies = jiffies; + + mod_timer(&net_resource_mgt->serv_timer, + round_jiffies(jiffies + net_resource_mgt->serv_timer_period)); + + return 0; +} + +static int nbl_serv_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_adminq_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static u8 __iomem *nbl_serv_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), size); +} + +static u64 nbl_serv_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_real_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static u16 nbl_serv_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = 
NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_real_bdf(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, + bus, dev, function); +} + +static int nbl_serv_get_devlink_info(struct devlink *devlink, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct nbl_devlink_priv *priv = devlink_priv(devlink); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + char firmware_version[NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN] = {0}; + int ret = 0; + + ret = disp_ops->get_firmware_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + firmware_version, sizeof(firmware_version)); + if (ret) + return ret; + + ret = devlink_info_version_fixed_put(req, "FW Version:", firmware_version); + if (ret) + return ret; + + return ret; +} + +/* Why do we need this? + * Because the original function in the kernel cannot handle records whose subvendor and + * subdevice are set to 0xFFFF, so implement a correct match here. + */ +bool nbl_serv_pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record) +{ + struct pci_dev *pdev = to_pci_dev(context->dev); + struct nbl_serv_pldm_pci_record_id id = { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subsystem_vendor = PCI_ANY_ID, + .subsystem_device = PCI_ANY_ID, + }; + struct pldmfw_desc_tlv *desc; + bool ret; + + list_for_each_entry(desc, &record->descs, entry) { + u16 value; + u16 *ptr; + + switch (desc->type) { + case PLDM_DESC_ID_PCI_VENDOR_ID: + ptr = &id.vendor; + break; + case PLDM_DESC_ID_PCI_DEVICE_ID: + ptr = &id.device; + break; + case PLDM_DESC_ID_PCI_SUBVENDOR_ID: + ptr = &id.subsystem_vendor; + break; + case PLDM_DESC_ID_PCI_SUBDEV_ID: + ptr = &id.subsystem_device; + break; + default: + /* Skip unrelated TLVs */ + continue; + } + + value = get_unaligned_le16(desc->data); + /* A value of zero for one of the descriptors is sometimes + * used when the record should ignore this field when matching a + * device, for example if the record applies to any subsystem + * device or vendor. 
+ */ + if (value) + *ptr = (int)value; + else + *ptr = PCI_ANY_ID; + } + + if ((id.vendor == (u16)PCI_ANY_ID || id.vendor == pdev->vendor) && + (id.device == (u16)PCI_ANY_ID || id.device == pdev->device) && + (id.subsystem_vendor == (u16)PCI_ANY_ID || + id.subsystem_vendor == pdev->subsystem_vendor) && + (id.subsystem_device == (u16)PCI_ANY_ID || + id.subsystem_device == pdev->subsystem_device)) + ret = true; + else + ret = false; + + return ret; +} + +static int nbl_serv_send_package_data(struct pldmfw *context, const u8 *data, u16 length) +{ + struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, + context); + struct nbl_service_mgt *serv_mgt = priv->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret = 0; + + nbl_info(common, NBL_DEBUG_DEVLINK, "Send package data"); + + ret = disp_ops->flash_lock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + return ret; + + ret = disp_ops->flash_prepare(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + return 0; +} + +static int nbl_serv_send_component_table(struct pldmfw *context, struct pldmfw_component *component, + u8 transfer_flags) +{ + struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, + context); + struct nbl_service_mgt *serv_mgt = priv->serv_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + nbl_info(common, NBL_DEBUG_DEVLINK, "Send component table, id %d", component->identifier); + + return 0; +} + +static int nbl_serv_flash_component(struct pldmfw *context, struct pldmfw_component *component) +{ + struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, + context); + struct nbl_service_mgt *serv_mgt = priv->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u32 component_crc, calculated_crc; + size_t data_len = component->component_size - NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE; + int ret = 0; + + nbl_info(common, NBL_DEBUG_DEVLINK, "Flash component table, id %d", component->identifier); + + component_crc = *(u32 *)((u8 *)component->component_data + data_len); + calculated_crc = crc32_le(~0, component->component_data, data_len) ^ ~0; + if (component_crc != calculated_crc) { + nbl_err(common, NBL_DEBUG_DEVLINK, "Flash component crc error"); + disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + return -EFAULT; + } + + ret = disp_ops->flash_image(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), component->identifier, + component->component_data, data_len); + if (ret) + disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + return ret; +} + +static int nbl_serv_finalize_update(struct pldmfw *context) +{ + struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, + context); + struct nbl_service_mgt *serv_mgt = priv->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret = 0; + + nbl_info(common, NBL_DEBUG_DEVLINK, "Flash activate"); + + ret = disp_ops->flash_activate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + return ret; +} + +static const struct pldmfw_ops nbl_update_fw_ops = { + .match_record = 
nbl_serv_pldmfw_op_pci_match_record, + .send_package_data = nbl_serv_send_package_data, + .send_component_table = nbl_serv_send_component_table, + .flash_component = nbl_serv_flash_component, + .finalize_update = nbl_serv_finalize_update, +}; + +static int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, + struct netlink_ext_ack *extack) +{ + struct nbl_serv_update_fw_priv priv = {0}; + int ret = 0; + + priv.context.ops = &nbl_update_fw_ops; + priv.context.dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + priv.extack = extack; + priv.serv_mgt = serv_mgt; + + ret = pldmfw_flash_image(&priv.context, fw); + + return ret; +} + +static int nbl_serv_update_devlink_flash(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct nbl_devlink_priv *priv = devlink_priv(devlink); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv; + int ret = 0; + + devlink_flash_update_status_notify(devlink, "Flash start", NULL, 0, 0); + + ret = nbl_serv_update_firmware(serv_mgt, params->fw, extack); + + if (ret) + devlink_flash_update_status_notify(devlink, "Flash failed", NULL, 0, 0); + else + devlink_flash_update_status_notify(devlink, + "Flash finished, please reboot to take effect", + NULL, 0, 0); + return ret; +} + +static u32 nbl_serv_get_adminq_tx_buf_size(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_adminq_tx_buf_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static bool nbl_serv_check_fw_heartbeat(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->check_fw_heartbeat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static bool nbl_serv_check_fw_reset(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->check_fw_reset(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_get_common_irq_num(void *priv, struct nbl_common_irq_num *irq_num) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + irq_num->mbx_irq_num = disp_ops->get_mbx_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_get_ctrl_irq_num(void *priv, struct nbl_ctrl_irq_num *irq_num) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + irq_num->adminq_irq_num = disp_ops->get_adminq_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + irq_num->abnormal_irq_num = + disp_ops->get_abnormal_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static u32 nbl_serv_get_chip_temperature(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_chip_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static u32 nbl_serv_get_chip_temperature_max(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_chip_temperature_max(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static u32 
nbl_serv_get_chip_temperature_crit(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_chip_temperature_crit(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_get_module_temperature(void *priv, u8 eth_id, enum nbl_module_temp_type type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_module_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, type); +} + +static int nbl_serv_get_port_attributes(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->get_port_attributes(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_update_ring_num(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->update_ring_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_enable_port(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_port(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + if (NBL_COMMON_TO_VF_CAP(common)) + return 0; + else + return disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + mac, eth_id); +} + +static void nbl_serv_adapt_desc_gother(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->adapt_desc_gother(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_process_flr(void *priv, u16 vfid) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->flr_clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_flows(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_interrupt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); +} + +static void nbl_serv_recovery_abnormal(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->unmask_all_interrupts(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_keep_alive(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->keep_alive(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_register_vsi_info(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct 
nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ring_mgt->vsi_info[vsi_index].vsi_index = vsi_index; + ring_mgt->vsi_info[vsi_index].vsi_id = vsi_id; + ring_mgt->vsi_info[vsi_index].ring_offset = queue_offset; + ring_mgt->vsi_info[vsi_index].ring_num = queue_num; + if (disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_ITR_DYNAMIC)) + ring_mgt->vsi_info[vsi_index].itr_dynamic = true; + + disp_ops->register_vsi_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_index, queue_offset, queue_num); + + return disp_ops->register_vsi2q(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_index, vsi_id, queue_offset, queue_num); +} + +static int nbl_serv_st_open(struct inode *inode, struct file *filep) +{ + struct nbl_serv_st_mgt *p = container_of(inode->i_cdev, struct nbl_serv_st_mgt, cdev); + + filep->private_data = p; + + return 0; +} + +static ssize_t nbl_serv_st_write(struct file *file, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t nbl_serv_st_read(struct file *file, char __user *ubuf, size_t size, loff_t *ppos) +{ + return 0; +} + +static int nbl_serv_st_release(struct inode *inode, struct file *filp) +{ + return 0; +} + +static int nbl_serv_process_passthrough(struct nbl_service_mgt *serv_mgt, + unsigned int cmd, unsigned long arg) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_passthrough_fw_cmd_param *param = NULL, *result = NULL; + int ret = 0; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto alloc_param_fail; + } + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto alloc_result_fail; + } + + ret = copy_from_user(param, (void __user *)arg, _IOC_SIZE(cmd)); + if (ret) { + nbl_err(common, NBL_DEBUG_ST, "Bad access %d.\n", ret); + ret = -EFAULT; + goto passthrough_fail; + } + + nbl_debug(common, NBL_DEBUG_ST, "Passthrough opcode: %d\n", param->opcode); + + ret = disp_ops->passthrough_fw_cmd(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param, result); + if (ret) + goto passthrough_fail; + + if (copy_to_user((void __user *)arg, result, _IOC_SIZE(cmd))) + ret = -EFAULT; + +passthrough_fail: + kfree(result); +alloc_result_fail: + kfree(param); +alloc_param_fail: + return ret; +} + +static long nbl_serv_st_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct nbl_serv_st_mgt *st_mgt = file->private_data; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)st_mgt->serv_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret = 0; + + if (_IOC_TYPE(cmd) != IOCTL_TYPE) { + nbl_err(common, NBL_DEBUG_ST, "cmd %u, bad magic 0x%x/0x%x.\n", + cmd, _IOC_TYPE(cmd), IOCTL_TYPE); + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) + ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); + if (ret) { + nbl_err(common, NBL_DEBUG_ST, "Bad access.\n"); + return -EFAULT; + } + + switch (cmd) { + case IOCTL_PASSTHROUGH: + ret = nbl_serv_process_passthrough(serv_mgt, cmd, arg); + break; + default: + nbl_err(common, NBL_DEBUG_ST, "Unknown cmd %d.\n", cmd); + return -EFAULT; + } + + return ret; +} + +static const struct file_operations st_ops = { + .owner = THIS_MODULE, + .open = nbl_serv_st_open, + .write = nbl_serv_st_write, + .read = nbl_serv_st_read, + .unlocked_ioctl = nbl_serv_st_unlock_ioctl, + .release = nbl_serv_st_release, +}; 
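+ +/* + * Usage sketch (illustrative only, not part of the driver): the character device registered + * with st_ops above is expected to be driven from userspace roughly as follows. The device + * node path (nblstXXXX_confN, with XXXX the PCI device id and N the board id) is an assumption + * derived from the name format used in nbl_serv_setup_st() below; the parameter layout comes + * from struct nbl_passthrough_fw_cmd_param. + * + *	struct nbl_passthrough_fw_cmd_param param = { .opcode = opcode_of_interest }; + *	int fd = open("/dev/nblst/nblstXXXX_conf0", O_RDWR); + * + *	if (fd >= 0) { + *		if (ioctl(fd, IOCTL_PASSTHROUGH, &param) < 0) + *			perror("IOCTL_PASSTHROUGH"); + *		close(fd); + *	} + */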
+ +static int nbl_serv_alloc_subdev_id(struct nbl_software_tool_table *st_table) +{ + int subdev_id; + + subdev_id = find_first_zero_bit(st_table->devid, NBL_ST_MAX_DEVICE_NUM); + if (subdev_id == NBL_ST_MAX_DEVICE_NUM) + return -ENOSPC; + set_bit(subdev_id, st_table->devid); + + return subdev_id; +} + +static void nbl_serv_free_subdev_id(struct nbl_software_tool_table *st_table, int id) +{ + clear_bit(id, st_table->devid); +} + +static int nbl_serv_setup_st(void *priv, void *st_table_param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt); + struct device *test_device; + char name[NBL_RESTOOL_NAME_LEN] = {0}; + dev_t devid; + int id, subdev_id, ret = 0; + + id = NBL_COMMON_TO_BOARD_ID(common); + + subdev_id = nbl_serv_alloc_subdev_id(st_table); + if (subdev_id < 0) + goto alloc_subdev_id_fail; + + devid = MKDEV(st_table->major, subdev_id); + + if (!NBL_COMMON_TO_PCI_FUNC_ID(common)) + snprintf(name, sizeof(name), "/nblst/nblst%04x_conf%d", + NBL_COMMON_TO_PDEV(common)->device, id); + else + snprintf(name, sizeof(name), "/nblst/nblst%04x_conf%d.%d", + NBL_COMMON_TO_PDEV(common)->device, id, NBL_COMMON_TO_PCI_FUNC_ID(common)); + + st_mgt = devm_kzalloc(NBL_COMMON_TO_DEV(common), sizeof(*st_mgt), GFP_KERNEL); + if (!st_mgt) + goto malloc_fail; + + st_mgt->serv_mgt = serv_mgt; + + st_mgt->major = MAJOR(devid); + st_mgt->minor = MINOR(devid); + st_mgt->devno = devid; + st_mgt->subdev_id = subdev_id; + + cdev_init(&st_mgt->cdev, &st_ops); + ret = cdev_add(&st_mgt->cdev, devid, 1); + if (ret) + goto cdev_add_fail; + + test_device = device_create(st_table->cls, NULL, st_mgt->devno, NULL, name); + if (IS_ERR(test_device)) { + ret = -EBUSY; + goto device_create_fail; + } + + NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = st_mgt; + return 0; + +device_create_fail: + cdev_del(&st_mgt->cdev); +cdev_add_fail: + devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt); +malloc_fail: + nbl_serv_free_subdev_id(st_table, subdev_id); +alloc_subdev_id_fail: + return ret; +} + +static void nbl_serv_remove_st(void *priv, void *st_table_param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param; + struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + if (!st_mgt) + return; + + device_destroy(st_table->cls, st_mgt->devno); + cdev_del(&st_mgt->cdev); + + nbl_serv_free_subdev_id(st_table, st_mgt->subdev_id); + + NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = NULL; + devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt); +} + +static void nbl_serv_form_p4_name(struct nbl_common_info *common, int type, char *name, u16 len) +{ + char eth_num[NBL_P4_NAME_LEN] = {0}; + + switch (NBL_COMMON_TO_ETH_MODE(common)) { + case 1: + snprintf(eth_num, sizeof(eth_num), "single"); + break; + case 2: + snprintf(eth_num, sizeof(eth_num), "dual"); + break; + case 4: + snprintf(eth_num, sizeof(eth_num), "quad"); + break; + default: + nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown P4 type %d", type); + return; + } + + switch (type) { + case NBL_P4_DEFAULT: + /* No need to load default p4 file */ + break; + default: + nbl_err(common, NBL_DEBUG_CUSTOMIZED_P4, "Unknown P4 type %d", type); + } +} + +static int 
nbl_serv_load_p4(struct nbl_service_mgt *serv_mgt, + const struct firmware *fw, char *verify_code) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + const struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct elf32_shdr *shdr; + struct nbl_load_p4_param param; + u8 *strtab, *name, *product_code = NULL; + int i, ret = 0; + + if (memcmp(elf_hdr->e_ident, NBL_P4_ELF_IDENT, NBL_P4_ELF_IDENT_LEN)) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Invalid ELF file"); + return -EINVAL; + } + + memset(&param, 0, sizeof(param)); + + shdr = (struct elf32_shdr *)((u8 *)elf_hdr + elf_hdr->e_shoff); + strtab = (u8 *)elf_hdr + shdr[elf_hdr->e_shstrndx].sh_offset; + + for (i = 0; i < elf_hdr->e_shnum; i++) + if (shdr[i].sh_type == SHT_NOTE) { + name = strtab + shdr[i].sh_name; + if (!strncmp(name, NBL_P4_PRODUCT_INFO_SECTION_NAME, + sizeof(NBL_P4_PRODUCT_INFO_SECTION_NAME))) + product_code = (u8 *)elf_hdr + shdr[i].sh_offset; + } + + if (!product_code) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Product code does not exist"); + return -EINVAL; + } + + if (strncmp(product_code, verify_code, NBL_P4_VERIFY_CODE_LEN)) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Invalid product code %32s", + product_code); + return -EINVAL; + } + + param.start = 1; + ret = disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + if (ret) + return ret; + + for (i = 0; i < elf_hdr->e_shnum; i++) + if (shdr[i].sh_type == SHT_PROGBITS && !(shdr[i].sh_flags & SHF_EXECINSTR)) { + if (shdr[i].sh_size > NBL_P4_SECTION_LEN_MAX) { + nbl_warn(common, NBL_DEBUG_CUSTOMIZED_P4, "Section oversize %u", + shdr[i].sh_size); + return -EINVAL; + } + + memset(&param, 0, sizeof(param)); + /* name is used to distinguish configurations, not used for now */ + strscpy(param.name, strtab + shdr[i].sh_name, sizeof(param.name)); + param.addr = shdr[i].sh_addr; + param.size = shdr[i].sh_size; + param.section_index = i; + param.section_offset = 0; + param.data = (u8 *)elf_hdr + shdr[i].sh_offset; + + ret = disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + if (ret) + return ret; + } + + memset(&param, 0, sizeof(param)); + param.end = 1; + ret = disp_ops->load_p4(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param); + if (ret) + return ret; + + return 0; +} + +static __maybe_unused void nbl_serv_load_default_p4(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->load_p4_default(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_init_p4(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + const struct firmware *fw; + char name[NBL_P4_NAME_LEN] = {0}; + char verify_code[NBL_P4_NAME_LEN] = {0}; + int type, ret = 0; + + type = disp_ops->get_p4_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), verify_code); + if (type < 0 || type > NBL_P4_TYPE_MAX) + return -ENOENT; + + if (type == NBL_P4_DEFAULT) + goto out; + + nbl_serv_form_p4_name(common, type, name, sizeof(name)); + ret = firmware_request_nowarn(&fw, name, NBL_SERV_MGT_TO_DEV(serv_mgt)); + if (ret) + goto out; + + ret = nbl_serv_load_p4(serv_mgt, fw, verify_code); + + release_firmware(fw); + +out: + if (type == NBL_P4_DEFAULT || ret) { + nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "Load P4 default"); + nbl_serv_load_default_p4(serv_mgt); + 
disp_ops->set_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), NBL_P4_DEFAULT); + } else { + nbl_info(common, NBL_DEBUG_CUSTOMIZED_P4, "Load P4 %d", type); + disp_ops->set_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type); + } + + /* We always return OK, because at the very least we would use default P4 */ + return 0; +} + +static int nbl_serv_set_spoof_check_addr(void *priv, u8 *mac) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + return disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), mac); +} + +u16 nbl_serv_get_vf_base_vsi_id(void *priv, u16 func_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id); +} + +static int nbl_serv_get_board_id(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_board_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_process_abnormal_event(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_abnormal_event_info abnomal_info; + struct nbl_abnormal_details *detail; + u16 local_queue_id; + int type, i, ret = 0; + + memset(&abnomal_info, 0, sizeof(abnomal_info)); + + ret = disp_ops->process_abnormal_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &abnomal_info); + if (!ret) + return ret; + + for (i = 0; i < NBL_ABNORMAL_EVENT_MAX; i++) { + detail = &abnomal_info.details[i]; + + if (!detail->abnormal) + continue; + + type = nbl_serv_abnormal_event_to_queue(i); + local_queue_id = disp_ops->get_local_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + detail->vsi_id, detail->qid); + if (local_queue_id == U16_MAX) + return 0; + + nbl_serv_restore_queue(serv_mgt, detail->vsi_id, local_queue_id, type, true); + } + + return 0; +} + +static struct nbl_service_ops serv_ops = { + .init_chip = nbl_serv_init_chip, + .destroy_chip = nbl_serv_destroy_chip, + .init_p4 = nbl_serv_init_p4, + + .configure_msix_map = nbl_serv_configure_msix_map, + .destroy_msix_map = nbl_serv_destroy_msix_map, + .enable_mailbox_irq = nbl_serv_enable_mailbox_irq, + .enable_abnormal_irq = nbl_serv_enable_abnormal_irq, + .enable_adminq_irq = nbl_serv_enable_adminq_irq, + .request_net_irq = nbl_serv_request_net_irq, + .free_net_irq = nbl_serv_free_net_irq, + .get_global_vector = nbl_serv_get_global_vector, + .get_msix_entry_id = nbl_serv_get_msix_entry_id, + .get_common_irq_num = nbl_serv_get_common_irq_num, + .get_ctrl_irq_num = nbl_serv_get_ctrl_irq_num, + .get_chip_temperature = nbl_serv_get_chip_temperature, + .get_chip_temperature_max = nbl_serv_get_chip_temperature_max, + .get_chip_temperature_crit = nbl_serv_get_chip_temperature_crit, + .get_module_temperature = nbl_serv_get_module_temperature, + .get_port_attributes = nbl_serv_get_port_attributes, + .update_ring_num = nbl_serv_update_ring_num, + .enable_port = nbl_serv_enable_port, + .set_sfp_state = nbl_serv_set_sfp_state, + + .register_net = nbl_serv_register_net, + .unregister_net = nbl_serv_unregister_net, + .setup_txrx_queues = nbl_serv_setup_txrx_queues, + .remove_txrx_queues = 
nbl_serv_remove_txrx_queues, + .setup_q2vsi = nbl_serv_setup_q2vsi, + .remove_q2vsi = nbl_serv_remove_q2vsi, + .setup_rss = nbl_serv_setup_rss, + .remove_rss = nbl_serv_remove_rss, + .register_vsi_info = nbl_serv_register_vsi_info, + + .alloc_rings = nbl_serv_alloc_rings, + .free_rings = nbl_serv_free_rings, + .enable_napis = nbl_serv_enable_napis, + .disable_napis = nbl_serv_disable_napis, + .set_mask_en = nbl_serv_set_mask_en, + .start_net_flow = nbl_serv_start_net_flow, + .stop_net_flow = nbl_serv_stop_net_flow, + .set_lldp_flow = nbl_serv_set_lldp_flow, + .remove_lldp_flow = nbl_serv_remove_lldp_flow, + .start_mgt_flow = nbl_serv_start_mgt_flow, + .stop_mgt_flow = nbl_serv_stop_mgt_flow, + .get_tx_headroom = nbl_serv_get_tx_headroom, + .get_product_flex_cap = nbl_serv_get_product_flex_cap, + .get_product_fix_cap = nbl_serv_get_product_fix_cap, + .set_spoof_check_addr = nbl_serv_set_spoof_check_addr, + + .vsi_open = nbl_serv_vsi_open, + .vsi_stop = nbl_serv_vsi_stop, + .switch_traffic_default_dest = nbl_serv_switch_traffic_default_dest, + .get_user_queue_info = nbl_serv_get_user_queue_info, + + /* For netdev ops */ + .netdev_open = nbl_serv_netdev_open, + .netdev_stop = nbl_serv_netdev_stop, + .change_mtu = nbl_serv_change_mtu, + .set_mac = nbl_serv_set_mac, + .rx_add_vid = nbl_serv_rx_add_vid, + .rx_kill_vid = nbl_serv_rx_kill_vid, + .get_stats64 = nbl_serv_get_stats64, + .set_rx_mode = nbl_serv_set_rx_mode, + .change_rx_flags = nbl_serv_change_rx_flags, + .features_check = nbl_serv_features_check, + .get_phys_port_name = nbl_serv_get_phys_port_name, + .get_port_parent_id = nbl_serv_get_port_parent_id, + .tx_timeout = nbl_serv_tx_timeout, + + .get_vsi_id = nbl_serv_get_vsi_id, + .get_eth_id = nbl_serv_get_eth_id, + .setup_net_resource_mgt = nbl_serv_setup_net_resource_mgt, + .remove_net_resource_mgt = nbl_serv_remove_net_resource_mgt, + .enable_lag_protocol = nbl_serv_enable_lag_protocol, + .get_hw_addr = nbl_serv_get_hw_addr, + .get_real_hw_addr = nbl_serv_get_real_hw_addr, + .get_function_id = nbl_serv_get_function_id, + .get_real_bdf = nbl_serv_get_real_bdf, + .set_eth_mac_addr = nbl_serv_set_eth_mac_addr, + .process_abnormal_event = nbl_serv_process_abnormal_event, + .adapt_desc_gother = nbl_serv_adapt_desc_gother, + .process_flr = nbl_serv_process_flr, + .get_board_id = nbl_serv_get_board_id, + .recovery_abnormal = nbl_serv_recovery_abnormal, + .keep_alive = nbl_serv_keep_alive, + + .get_devlink_info = nbl_serv_get_devlink_info, + .update_devlink_flash = nbl_serv_update_devlink_flash, + .get_adminq_tx_buf_size = nbl_serv_get_adminq_tx_buf_size, + + .check_fw_heartbeat = nbl_serv_check_fw_heartbeat, + .check_fw_reset = nbl_serv_check_fw_reset, + .set_netdev_carrier_state = nbl_serv_set_netdev_carrier_state, + + .setup_st = nbl_serv_setup_st, + .remove_st = nbl_serv_remove_st, + .get_vf_base_vsi_id = nbl_serv_get_vf_base_vsi_id, +}; + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_serv_setup_serv_mgt(struct nbl_common_info *common, + struct nbl_service_mgt **serv_mgt) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + *serv_mgt = devm_kzalloc(dev, sizeof(struct nbl_service_mgt), GFP_KERNEL); + if (!*serv_mgt) + return -ENOMEM; + + NBL_SERV_MGT_TO_COMMON(*serv_mgt) = common; + nbl_serv_setup_flow_mgt(NBL_SERV_MGT_TO_FLOW_MGT(*serv_mgt)); + + set_bit(NBL_FLAG_MINI_DRIVER, (*serv_mgt)->flags); + + return 0; +} + +static void nbl_serv_remove_serv_mgt(struct nbl_common_info *common, + struct nbl_service_mgt **serv_mgt) +{ + struct 
device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, *serv_mgt); + *serv_mgt = NULL; +} + +static void nbl_serv_remove_ops(struct device *dev, struct nbl_service_ops_tbl **serv_ops_tbl) +{ + devm_kfree(dev, *serv_ops_tbl); + *serv_ops_tbl = NULL; +} + +static int nbl_serv_setup_ops(struct device *dev, struct nbl_service_ops_tbl **serv_ops_tbl, + struct nbl_service_mgt *serv_mgt) +{ + *serv_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_service_ops_tbl), GFP_KERNEL); + if (!*serv_ops_tbl) + return -ENOMEM; + + NBL_SERV_OPS_TBL_TO_OPS(*serv_ops_tbl) = &serv_ops; + nbl_serv_setup_ethtool_ops(&serv_ops); + NBL_SERV_OPS_TBL_TO_PRIV(*serv_ops_tbl) = serv_mgt; + + return 0; +} + +int nbl_serv_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_service_mgt **serv_mgt; + struct nbl_service_ops_tbl **serv_ops_tbl; + struct nbl_dispatch_ops_tbl *disp_ops_tbl; + struct nbl_dispatch_ops *disp_ops; + struct nbl_channel_ops_tbl *chan_ops_tbl; + int ret = 0; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + serv_mgt = (struct nbl_service_mgt **)&NBL_ADAPTER_TO_SERV_MGT(adapter); + serv_ops_tbl = &NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + disp_ops_tbl = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + disp_ops = disp_ops_tbl->ops; + + ret = nbl_serv_setup_serv_mgt(common, serv_mgt); + if (ret) + goto setup_mgt_fail; + + ret = nbl_serv_setup_ops(dev, serv_ops_tbl, *serv_mgt); + if (ret) + goto setup_ops_fail; + + NBL_SERV_MGT_TO_DISP_OPS_TBL(*serv_mgt) = disp_ops_tbl; + NBL_SERV_MGT_TO_CHAN_OPS_TBL(*serv_mgt) = chan_ops_tbl; + disp_ops->get_resource_pt_ops(disp_ops_tbl->priv, &(*serv_ops_tbl)->pt_ops); + + return 0; + +setup_ops_fail: + nbl_serv_remove_serv_mgt(common, serv_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_serv_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_service_mgt **serv_mgt; + struct nbl_service_ops_tbl **serv_ops_tbl; + + if (!adapter) + return; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + serv_mgt = (struct nbl_service_mgt **)&NBL_ADAPTER_TO_SERV_MGT(adapter); + serv_ops_tbl = &NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + + nbl_serv_remove_ops(dev, serv_ops_tbl); + nbl_serv_remove_serv_mgt(common, serv_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h new file mode 100644 index 000000000000..a8f1a6458705 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_SERVICE_H_ +#define _NBL_SERVICE_H_ + +#include +#include +#include "nbl_core.h" + +#define NBL_SERV_MGT_TO_COMMON(serv_mgt) ((serv_mgt)->common) +#define NBL_SERV_MGT_TO_DEV(serv_mgt) NBL_COMMON_TO_DEV(NBL_SERV_MGT_TO_COMMON(serv_mgt)) +#define NBL_SERV_MGT_TO_RING_MGT(serv_mgt) (&(serv_mgt)->ring_mgt) +#define NBL_SERV_MGT_TO_REP_QUEUE_MGT(serv_mgt) ((serv_mgt)->rep_queue_mgt) +#define NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt) (&(serv_mgt)->flow_mgt) +#define NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) ((serv_mgt)->net_resource_mgt) +#define NBL_SERV_MGT_TO_ST_MGT(serv_mgt) ((serv_mgt)->st_mgt) + +#define NBL_SERV_MGT_TO_DISP_OPS_TBL(serv_mgt) ((serv_mgt)->disp_ops_tbl) +#define NBL_SERV_MGT_TO_DISP_OPS(serv_mgt) (NBL_SERV_MGT_TO_DISP_OPS_TBL(serv_mgt)->ops) +#define NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt) (NBL_SERV_MGT_TO_DISP_OPS_TBL(serv_mgt)->priv) + +#define NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt) ((serv_mgt)->chan_ops_tbl) +#define NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt) (NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt)->ops) +#define NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt) (NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt)->priv) + +#define NBL_DEFAULT_VLAN_ID 0 + +#define NBL_TX_TSO_MSS_MIN (256) +#define NBL_TX_TSO_MSS_MAX (16383) +#define NBL_TX_TSO_L2L3L4_HDR_LEN_MIN (42) +#define NBL_TX_TSO_L2L3L4_HDR_LEN_MAX (128) +#define NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX (255) + +#define NBL_FLAG_AQ_MODIFY_MAC_FILTER BIT(0) +#define NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT(1) + +#define NBL_EEPROM_LENGTH (0) + +/* input set */ +#define NBL_MAC_ADDR_LEN_U8 6 + +#define NBL_FLOW_IN_PORT_TYPE_ETH 0x0 +#define NBL_FLOW_IN_PORT_TYPE_LAG 0x400 +#define NBL_FLOW_IN_PORT_TYPE_VSI 0x800 + +#define NBL_FLOW_OUT_PORT_TYPE_VSI 0x0 +#define NBL_FLOW_OUT_PORT_TYPE_ETH 0x10 +#define NBL_FLOW_OUT_PORT_TYPE_LAG 0x20 + +#define SET_DPORT_TYPE_VSI_HOST (0) +#define SET_DPORT_TYPE_VSI_ECPU (1) +#define SET_DPORT_TYPE_ETH_LAG (2) +#define SET_DPORT_TYPE_SP_PORT (3) + +#define NBL_VLAN_SHIFT 8 + +#define NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN 32 +#define NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE 4 + +/* For customized P4 */ +#define NBL_P4_ELF_IDENT "\x7F\x45\x4C\x46\x01\x01\x01\x00" +#define NBL_P4_ELF_IDENT_LEN 8 +#define NBL_P4_SECTION_LEN_MAX 2048 +#define NBL_P4_VERIFY_CODE_LEN 9 +#define NBL_P4_PRODUCT_INFO_SECTION_NAME "product_info" + +enum { + NBL_MGT_SERV_MGT, + NBL_MGT_SERV_RDMA, +}; + +enum { + NBL_NET_SERV_NET, + NBL_NET_SERV_RDMA, +}; + +struct nbl_serv_ring { + dma_addr_t dma; + u16 index; + u16 local_queue_id; + u16 global_queue_id; + bool need_recovery; + u32 tx_timeout_count; +}; + +struct nbl_serv_vector { + char name[32]; + struct net_device *netdev; + u32 irq_data; + u8 *irq_enable_base; + u16 local_vector_id; + u16 global_vector_id; + u16 intr_rate_usecs; + u16 intr_suppress_level; + struct napi_struct *napi; + struct nbl_serv_ring *tx_ring; + struct nbl_serv_ring *rx_ring; +}; + +struct nbl_serv_ring_vsi_info { + u16 vsi_index; + u16 vsi_id; + u16 ring_offset; + u16 ring_num; + u16 active_ring_num; + bool itr_dynamic; + bool started; +}; + +struct nbl_serv_ring_mgt { + struct nbl_serv_ring *tx_rings; + struct nbl_serv_ring *rx_rings; + struct nbl_serv_vector *vectors; + struct nbl_serv_ring_vsi_info vsi_info[NBL_VSI_MAX]; + u16 tx_desc_num; + u16 rx_desc_num; + u16 tx_ring_num; + u16 rx_ring_num; + u16 active_ring_num; + bool net_msix_mask_en; +}; + +struct nbl_serv_vlan_node { + struct list_head node; + u16 vid; +}; + +struct nbl_serv_submac_node { + struct list_head node; + u8 mac[ETH_ALEN]; +}; + 
+struct nbl_serv_flow_mgt { + u8 mac[ETH_ALEN]; + u8 eth; + struct list_head vlan_list; + struct list_head submac_list; +}; + +struct nbl_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; +}; + +enum nbl_adapter_flags { + /* p4 flags must be at the start */ + NBL_FLAG_P4_DEFAULT, + NBL_FLAG_LINK_DOWN_ON_CLOSE, + NBL_FLAG_MINI_DRIVER, + NBL_ADAPTER_FLAGS_MAX +}; + +struct nbl_serv_net_resource_mgt { + struct nbl_service_mgt *serv_mgt; + struct net_device *netdev; + struct work_struct net_stats_update; + struct work_struct rx_mode_async; + struct work_struct tx_timeout; + struct delayed_work watchdog_task; + struct timer_list serv_timer; + unsigned long serv_timer_period; + + /* spinlock_t for rx mode submac */ + spinlock_t mac_vlan_list_lock; + /* spinlock_t for rx mode promisc */ + spinlock_t current_netdev_promisc_flags_lock; + struct list_head mac_filter_list; + struct list_head indr_dev_priv_list; + u32 rxmode_set_required; + u16 curr_promiscuout_mode; + u16 num_net_msix; + + /* stats for netdev */ + u64 get_stats_jiffies; + struct nbl_stats stats; + struct nbl_priv_stats priv_stats; + struct nbl_phy_state phy_state; + struct nbl_phy_caps phy_caps; + u32 configured_speed; + u32 configured_fec; +}; + +#define IOCTL_TYPE 'n' +#define IOCTL_PASSTHROUGH _IOWR(IOCTL_TYPE, 0x01, struct nbl_passthrough_fw_cmd_param) + +#define NBL_RESTOOL_NAME_LEN 32 +struct nbl_serv_st_mgt { + void *serv_mgt; + struct cdev cdev; + int major; + int minor; + dev_t devno; + int subdev_id; +}; + +struct nbl_service_mgt { + struct nbl_common_info *common; + struct nbl_dispatch_ops_tbl *disp_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_serv_ring_mgt ring_mgt; + struct nbl_serv_flow_mgt flow_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_serv_st_mgt *st_mgt; + DECLARE_BITMAP(flags, NBL_ADAPTER_FLAGS_MAX); +}; + +struct nbl_serv_update_fw_priv { + struct pldmfw context; + struct netlink_ext_ack *extack; + struct nbl_service_mgt *serv_mgt; +}; + +struct nbl_serv_pldm_pci_record_id { + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem_device; +}; + +int nbl_serv_netdev_open(struct net_device *netdev); +int nbl_serv_netdev_stop(struct net_device *netdev); +int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, + u16 real_qps, bool use_napi); +int nbl_serv_vsi_stop(void *priv, u16 vsi_index); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c new file mode 100644 index 000000000000..87a3c8e9ebfd --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c @@ -0,0 +1,2418 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_adminq.h" + +static int nbl_res_adminq_update_ring_num(void *priv); + +/* **** FW CMD FILTERS START **** */ + +static int nbl_res_adminq_check_ring_num(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_ring_num_param *param) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u32 sum = 0, pf_real_num = 0, vf_real_num = 0; + int i; + + pf_real_num = NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + vf_real_num = NBL_VSI_VF_REAL_QUEUE_NUM(param->vf_def_max_net_qp_num); + + if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC || vf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + return -EINVAL; + + /* TODO: should we consider when pf_num is 8? 
*/ + for (i = 0; i < NBL_COMMON_TO_ETH_MODE(common); i++) { + pf_real_num = param->net_max_qp_num[i] ? + NBL_VSI_PF_REAL_QUEUE_NUM(param->net_max_qp_num[i]) : + NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + + if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + return -EINVAL; + + sum += pf_real_num; + } + + for (i = NBL_MAX_PF; i < NBL_MAX_FUNC; i++) { + vf_real_num = param->net_max_qp_num[i] ? + NBL_VSI_VF_REAL_QUEUE_NUM(param->net_max_qp_num[i]) : + NBL_VSI_VF_REAL_QUEUE_NUM(param->vf_def_max_net_qp_num); + + if (vf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + return -EINVAL; + + sum += vf_real_num; + } + + if (sum > NBL_MAX_TXRX_QUEUE) + return -EINVAL; + + return 0; +} + +static int nbl_res_fw_cmd_filter_rw_in(struct nbl_resource_mgt *res_mgt, void *data, int len) +{ + struct nbl_chan_resource_write_param *param = (struct nbl_chan_resource_write_param *)data; + struct nbl_fw_cmd_ring_num_param *num_param; + + switch (param->resid) { + case NBL_ADMINQ_PFA_TLV_PFVF_RING_ID: + num_param = (struct nbl_fw_cmd_ring_num_param *)param->data; + return nbl_res_adminq_check_ring_num(res_mgt, num_param); + default: + break; + } + + return 0; +} + +static void nbl_res_adminq_add_cmd_filter_res_write(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_res_fw_cmd_filter filter = {0}; + u16 key = 0; + + key = NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE; + filter.in = nbl_res_fw_cmd_filter_rw_in; + + if (nbl_common_alloc_hash_node(adminq_mgt->cmd_filter, &key, &filter)) + nbl_warn(common, NBL_DEBUG_ADMINQ, "Fail to register res_write in filter"); +} + +/* **** FW CMD FILTERS END **** */ + +static int nbl_res_adminq_set_module_eeprom_info(struct nbl_resource_mgt *res_mgt, + u8 eth_id, + u8 i2c_address, + u8 page, + u8 bank, + u32 offset, + u32 length, + u8 *data) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_module_eeprom_info param = {0}; + u32 xfer_size = 0; + u32 byte_offset = 0; + int data_length = length; + int ret = 0; + + do { + xfer_size = min_t(u32, data_length, NBL_MODULE_EEPRO_WRITE_MAX_LEN); + data_length -= xfer_size; + + param.eth_id = eth_id; + param.i2c_address = i2c_address; + param.page = page; + param.bank = bank; + param.write = 1; + param.offset = offset + byte_offset; + param.length = xfer_size; + memcpy(param.data, data + byte_offset, xfer_size); + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, + &param, sizeof(param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d,\n" + "i2c_address:%d, page:%d, bank:%d, offset:%d, length:%d\n", + ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, + eth_info->logic_eth_id[eth_id], + i2c_address, page, bank, offset + byte_offset, xfer_size); + } + byte_offset += xfer_size; + } while (!ret && data_length > 0); + + return ret; +} + +static int nbl_res_adminq_turn_module_eeprom_page(struct nbl_resource_mgt *res_mgt, + u8 eth_id, u8 page) +{ + int ret; + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + ret = nbl_res_adminq_set_module_eeprom_info(res_mgt, 
eth_id, I2C_DEV_ADDR_A0, 0, 0, + SFF_8636_TURNPAGE_ADDR, 1, &page); + if (ret) { + dev_err(dev, "eth %d set_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + + return ret; +} + +static void nbl_res_get_module_eeprom_page(u32 addr, u8 *upper_page, u8 *offset) +{ + if (addr >= SFF_8638_PAGESIZE) { + *upper_page = (addr - SFF_8638_PAGESIZE) / SFF_8638_PAGESIZE; + *offset = (u8)(addr - (*upper_page * SFF_8638_PAGESIZE)); + } else { + *upper_page = 0; + *offset = addr; + } +} + +static int nbl_res_adminq_get_module_eeprom_info(struct nbl_resource_mgt *res_mgt, + u8 eth_id, + u8 i2c_address, + u8 page, + u8 bank, + u32 offset, + u32 length, + u8 *data) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_module_eeprom_info param = {0}; + u32 xfer_size = 0; + u32 byte_offset = 0; + int data_length = length; + int ret = 0; + + /* read a maximum of 128 bytes each time */ + do { + xfer_size = min_t(u32, data_length, NBL_MAX_PHY_I2C_RESP_SIZE); + data_length -= xfer_size; + + param.eth_id = eth_id; + param.i2c_address = i2c_address; + param.page = page; + param.bank = bank; + param.write = 0; + param.offset = offset + byte_offset; + param.length = xfer_size; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, + &param, sizeof(param), data + byte_offset, xfer_size, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d,\n" + "i2c_address:%d, page:%d, bank:%d, offset:%d, length:%d\n", + ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM, + eth_info->logic_eth_id[eth_id], + i2c_address, page, bank, offset + byte_offset, xfer_size); + } + byte_offset += xfer_size; + } while (!ret && data_length > 0); + + return ret; +} + +static int nbl_res_adminq_flash_read(struct nbl_resource_mgt *res_mgt, u32 bank_id, + u32 offset, u32 len, u8 *data) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_flash_read read_param; + int remain = len, sec_offset = 0, ret = 0; + + while (remain > 0) { + read_param.bank_id = bank_id; + read_param.offset = offset + sec_offset; + read_param.len = remain > NBL_CHAN_FLASH_READ_LEN ? 
NBL_CHAN_FLASH_READ_LEN : + remain; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_READ, &read_param, sizeof(read_param), + data + sec_offset, read_param.len, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "adminq flash read fail on bank %d, offset %d", bank_id, offset); + return ret; + } + + remain -= read_param.len; + sec_offset += read_param.len; + } + + return ret; +} + +static int nbl_res_adminq_flash_erase(struct nbl_resource_mgt *res_mgt, u32 bank_id, + u32 offset, u32 len) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_flash_erase erase_param; + int remain = len, sec_offset = 0, ret = 0; + + while (remain > 0) { + erase_param.bank_id = bank_id; + erase_param.offset = offset + sec_offset; + /* When erase, it must be 4k-aligned, so we always erase 4k each time. */ + erase_param.len = NBL_CHAN_FLASH_ERASE_LEN; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_ERASE, + &erase_param, sizeof(erase_param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "adminq flash erase fail on bank %d, offset %d", + bank_id, erase_param.offset); + return ret; + } + + remain -= erase_param.len; + sec_offset += erase_param.len; + } + + return ret; +} + +static int nbl_res_adminq_flash_write(struct nbl_resource_mgt *res_mgt, u32 bank_id, + u32 offset, u32 len, const u8 *data) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_flash_write *write_param = NULL; + int remain = len, sec_offset = 0, ret = 0; + + write_param = kzalloc(sizeof(*write_param), GFP_KERNEL); + if (!write_param) + return -ENOMEM; + + while (remain > 0) { + write_param->bank_id = bank_id; + write_param->offset = offset + sec_offset; + write_param->len = remain > NBL_CHAN_FLASH_WRITE_LEN ? 
NBL_CHAN_FLASH_WRITE_LEN : + remain; + memcpy(write_param->data, data + sec_offset, write_param->len); + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_WRITE, + write_param, sizeof(*write_param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "adminq flash write fail on bank %d, offset %d", bank_id, offset); + kfree(write_param); + return ret; + } + + remain -= write_param->len; + sec_offset += write_param->len; + } + + kfree(write_param); + return ret; +} + +static int nbl_res_adminq_get_nvm_bank_index(struct nbl_resource_mgt *res_mgt, int *rbank) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_NVM_BANK_INDEX, NULL, 0, rbank, sizeof(*rbank), 1); + return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); +} + +static int nbl_res_adminq_flash_set_nvm_bank(struct nbl_resource_mgt *res_mgt, int rbank, + int bank_id, int op) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 nvmidx; + u8 *idxbuf = NULL; + int ret = 0; + + idxbuf = kzalloc(NBL_ADMINQ_IDX_LEN, GFP_KERNEL); + if (!idxbuf) + return -ENOMEM; + + memset(idxbuf, 0xFF, NBL_ADMINQ_IDX_LEN); + + if (op == NBL_ADMINQ_NVM_BANK_REPAIR) + idxbuf[0] = rbank ? 0xFF : 0x00; + else if (op == NBL_ADMINQ_NVM_BANK_SWITCH) + idxbuf[0] = rbank ? 0x00 : 0xFF; + + idxbuf[1] = 0x5A; + strscpy((char *)&idxbuf[4080], "M181XXSRIS", NBL_ADMINQ_IDX_LEN - 4080); + + ret |= nbl_res_adminq_flash_erase(res_mgt, bank_id, 0, NBL_ADMINQ_IDX_LEN); + ret |= nbl_res_adminq_flash_write(res_mgt, bank_id, 0, NBL_ADMINQ_IDX_LEN, idxbuf); + + ret |= nbl_res_adminq_flash_read(res_mgt, bank_id, 0, sizeof(nvmidx), (u8 *)&nvmidx); + if (ret) + goto out; + + if (op == NBL_ADMINQ_NVM_BANK_SWITCH) + rbank = !rbank; + + if (((nvmidx >> 2) & 1) != rbank) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "S0 update bank index is %d but read back index is %d", + rbank, (nvmidx >> 2) & 1); + ret = -EFAULT; + goto out; + } + +out: + kfree(idxbuf); + return ret; +} + +static int nbl_res_adminq_flash_verify(struct nbl_resource_mgt *res_mgt, int *rbank) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + int verify_bank, sign0, sign1, ret = 0; + + verify_bank = 0; + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK, &verify_bank, sizeof(verify_bank), + &sign0, sizeof(sign0), 1); + ret |= chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + + verify_bank = 1; + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK, &verify_bank, sizeof(verify_bank), + &sign1, sizeof(sign1), 1); + ret |= chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + + sign0 = !sign0; + sign1 = !sign1; + + if (ret || (sign0 != 0 && sign0 != 1) || (sign1 != 0 && sign1 != 1) || (!sign0 && !sign1)) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "Verify signature both invalid, ret %d, sign0 %d, sign1 %d", + ret, sign0, sign1); + return -EFAULT; + } + + if (sign0 != sign1) { + nbl_warn(common, NBL_DEBUG_ADMINQ, "WARN: bank0 and bank1 signature: %s/%s", + sign0 ? "pass" : "fail", sign1 ? 
"pass" : "fail"); + + /* Set rbank to fail bank to because we will switch bank idx next */ + if (sign0) + *rbank = 1; + else if (sign1) + *rbank = 0; + else + return -EFAULT; + } + + return 0; +} + +static int nbl_res_adminq_flash_lock(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + u32 success = 0, ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_FLASH_LOCK, + NULL, 0, &success, sizeof(success), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + return ret; + + return !success; +} + +static int nbl_res_adminq_flash_unlock(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + u32 success = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_UNLOCK, NULL, 0, &success, sizeof(success), 1); + return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); +} + +static int nbl_res_adminq_flash_prepare(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 nvmidx0, nvmidx1; + int rbank, ret = 0; + + ret = nbl_res_adminq_get_nvm_bank_index(res_mgt, &rbank); + if (ret || (rbank != 0 && rbank != 1)) + return -EFAULT; + + ret |= nbl_res_adminq_flash_read(res_mgt, BANKID_SR_BANK0, 0, + sizeof(nvmidx0), (u8 *)&nvmidx0); + ret |= nbl_res_adminq_flash_read(res_mgt, BANKID_SR_BANK1, 0, + sizeof(nvmidx1), (u8 *)&nvmidx1); + if (ret) + return ret; + + if ((((nvmidx0 >> 2) & 1) != rbank)) + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK0, + NBL_ADMINQ_NVM_BANK_REPAIR); + + if ((((nvmidx1 >> 2) & 1) != rbank)) + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK1, + NBL_ADMINQ_NVM_BANK_REPAIR); + + return ret; +} + +static int nbl_res_adminq_flash_image(void *priv, u32 module, const u8 *data, size_t len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + int rbank, write_bank, ret = 0; + + switch (module) { + case NBL_ADMINQ_BANK_INDEX_SPI_BOOT: + ret |= nbl_res_adminq_flash_erase(res_mgt, BANKID_BOOT_BANK, 0, len); + ret |= nbl_res_adminq_flash_write(res_mgt, BANKID_BOOT_BANK, 0, len, data); + + break; + case NBL_ADMINQ_BANK_INDEX_NVM_BANK: + if (nbl_res_adminq_get_nvm_bank_index(res_mgt, &rbank)) + return -EFAULT; + + write_bank = rbank ? 
BANKID_NVM_BANK0 : BANKID_NVM_BANK1; + + ret |= nbl_res_adminq_flash_erase(res_mgt, write_bank, 0, len); + ret |= nbl_res_adminq_flash_write(res_mgt, write_bank, 0, len, data); + + break; + default: + return 0; + } + + return ret; +} + +static int nbl_res_adminq_flash_activate(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + int rbank, ret = 0; + + ret = nbl_res_adminq_get_nvm_bank_index(res_mgt, &rbank); + if (ret || (rbank != 0 && rbank != 1)) + return -EFAULT; + + ret = nbl_res_adminq_flash_verify(res_mgt, &rbank); + if (ret) + return ret; + + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK0, + NBL_ADMINQ_NVM_BANK_SWITCH); + if (ret) + return ret; + + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK1, + NBL_ADMINQ_NVM_BANK_SWITCH); + + return ret; +} + +/* get_emp_version is deprecated, replaced by get_firmware_version, 0x8102 */ +static int nbl_res_adminq_get_firmware_version(void *priv, char *firmware_version) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_nvm_version_resp resp_param; + int ret = 0; + u32 version_type = NBL_FW_VERSION_RUNNING_BANK; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_GET_NVM_VERSION, + &version_type, sizeof(version_type), &resp_param, sizeof(resp_param), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_GET_NVM_VERSION); + return ret; + } + + if (!memcmp(resp_param.magic, FIRMWARE_MAGIC, sizeof(resp_param.magic))) { + snprintf(firmware_version, ETHTOOL_FWVERS_LEN, + "%d.%d.%d build %04d%02d%02d %08x", + BCD2BYTE((resp_param.version >> 16) & 0xFF), + BCD2BYTE((resp_param.version >> 8) & 0xFF), + BCD2BYTE(resp_param.version & 0xFF), + BCD2SHORT((resp_param.build_date >> 16) & 0xFFFF), + BCD2BYTE((resp_param.build_date >> 8) & 0xFF), + BCD2BYTE(resp_param.build_date & 0xFF), + resp_param.build_hash); + } else { + dev_err(dev, "adminq msg firmware version magic check failed\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_res_adminq_set_sfp_state(void *priv, u8 eth_id, u8 state) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + key = NBL_PORT_KEY_MODULE_SWITCH; + if (state) + data = NBL_PORT_SFP_ON + (key << NBL_PORT_KEY_KEY_SHIFT); + else + data = NBL_PORT_SFP_OFF + (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, sfp %s\n", + ret,
NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id], + state ? "on" : "off"); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +int nbl_res_open_sfp(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + return nbl_res_adminq_set_sfp_state(res_mgt, eth_id, NBL_SFP_MODULE_ON); +} + +static int nbl_res_adminq_setup_loopback(void *priv, u32 eth_id, u32 enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + key = NBL_PORT_KEY_LOOPBACK; + if (enable) + data = NBL_PORT_ENABLE_LOOPBACK + (key << NBL_PORT_KEY_KEY_SHIFT); + else + data = NBL_PORT_DISABLE_LOOPBCK + (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, %s eth loopback\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id], + enable ? "enable" : "disable"); + + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static bool nbl_res_adminq_check_fw_heartbeat(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + unsigned long check_time; + unsigned long seq_acked; + + if (adminq_mgt->fw_resetting) { + adminq_mgt->fw_last_hb_seq++; + return false; + } + + check_time = jiffies; + if (time_before(check_time, adminq_mgt->fw_last_hb_time + 5 * HZ)) + return true; + + seq_acked = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (adminq_mgt->fw_last_hb_seq == seq_acked) { + adminq_mgt->fw_last_hb_seq++; + adminq_mgt->fw_last_hb_time = check_time; + phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), adminq_mgt->fw_last_hb_seq); + return true; + } + + return false; +} + +static bool nbl_res_adminq_check_fw_reset(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + unsigned long seq_acked; + + seq_acked = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (adminq_mgt->fw_last_hb_seq != seq_acked) { + phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), adminq_mgt->fw_last_hb_seq); + return false; + } + + adminq_mgt->fw_resetting = false; + wake_up(&adminq_mgt->wait_queue); + return true; +} + +static int nbl_res_adminq_get_port_attributes(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct 
nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 port_caps = 0; + u64 port_advertising = 0; + u64 key = 0; + int eth_id = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + key = NBL_PORT_KEY_CAPABILITIES; + port_caps = 0; + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_READ; + param->data[0] = key << NBL_PORT_KEY_KEY_SHIFT; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, (void *)&port_caps, sizeof(port_caps), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, get_port_caps\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + eth_info->port_caps[eth_id] = port_caps & NBL_PORT_KEY_DATA_MASK; + + dev_info(dev, "ctrl dev get eth %d port caps: %llx\n", + eth_info->logic_eth_id[eth_id], + eth_info->port_caps[eth_id]); + } + + for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + key = NBL_PORT_KEY_ADVERT; + port_advertising = 0; + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_READ; + param->data[0] = key << NBL_PORT_KEY_KEY_SHIFT; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, + (void *)&port_advertising, sizeof(port_advertising), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, port_advertising\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + port_advertising = port_advertising & NBL_PORT_KEY_DATA_MASK; + /* set default FEC mode: auto */ + port_advertising = port_advertising & ~NBL_PORT_CAP_FEC_MASK; + port_advertising += BIT(NBL_PORT_CAP_FEC_RS); + port_advertising += BIT(NBL_PORT_CAP_FEC_BASER); + /* set default pause: tx on, rx on */ + port_advertising = port_advertising & ~NBL_PORT_CAP_PAUSE_MASK; + port_advertising += BIT(NBL_PORT_CAP_TX_PAUSE); + port_advertising += BIT(NBL_PORT_CAP_RX_PAUSE); + eth_info->port_advertising[eth_id] = port_advertising; + + dev_info(dev, "ctrl dev get eth %d port advertising: %llx\n", + eth_info->logic_eth_id[eth_id], + eth_info->port_advertising[eth_id]); + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_enable_port(void *priv, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int eth_id = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + if (enable) { + key = NBL_PORT_KEY_ENABLE; + data = NBL_PORT_FLAG_ENABLE_NOTIFY + (key << NBL_PORT_KEY_KEY_SHIFT); + } else { + key = NBL_PORT_KEY_DISABLE; + data = key << NBL_PORT_KEY_KEY_SHIFT; + } + + for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + 
nbl_res_adminq_set_sfp_state(res_mgt, eth_id, NBL_SFP_MODULE_ON); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, %s port\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id], enable ? "enable" : "disable"); + kfree(param); + return ret; + } + + dev_info(dev, "ctrl dev %s eth %d\n", enable ? "enable" : "disable", + eth_info->logic_eth_id[eth_id]); + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_get_special_port_type(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u8 port_type = NBL_PORT_TYPE_UNKNOWN; + u8 cable_tech = 0; + int ret; + + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, 0); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + port_type = NBL_PORT_TYPE_UNKNOWN; + return port_type; + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, + 0, 0, SFF8636_DEVICE_TECH_OFFSET, + 1, &cable_tech); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + port_type = NBL_PORT_TYPE_UNKNOWN; + return port_type; + } + cable_tech = (cable_tech >> 4) & 0x0f; + switch (cable_tech) { + case SFF8636_TRANSMIT_FIBER_850nm_VCSEL: + case SFF8636_TRANSMIT_FIBER_1310nm_VCSEL: + case SFF8636_TRANSMIT_FIBER_1550nm_VCSEL: + case SFF8636_TRANSMIT_FIBER_1310nm_FP: + case SFF8636_TRANSMIT_FIBER_1310nm_DFB: + case SFF8636_TRANSMIT_FIBER_1550nm_DFB: + case SFF8636_TRANSMIT_FIBER_1310nm_EML: + case SFF8636_TRANSMIT_FIBER_1550nm_EML: + case SFF8636_TRANSMIT_FIBER_1490nm_DFB: + port_type = NBL_PORT_TYPE_FIBRE; + break; + case SFF8636_TRANSMIT_COPPER_UNEQUA: + case SFF8636_TRANSMIT_COPPER_PASSIVE_EQUALIZED: + case SFF8636_TRANSMIT_COPPER_NEAR_FAR_END: + case SFF8636_TRANSMIT_COPPER_FAR_END: + case SFF8636_TRANSMIT_COPPER_NEAR_END: + case SFF8636_TRANSMIT_COPPER_LINEAR_ACTIVE: + port_type = NBL_PORT_TYPE_COPPER; + break; + default: + dev_err(dev, "eth %d unknown port_type\n", eth_info->logic_eth_id[eth_id]); + port_type = NBL_PORT_TYPE_UNKNOWN; + break; + } + return port_type; +} + +static int nbl_res_adminq_get_common_port_type(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u8 data[SFF_8472_CABLE_SPEC_COMP + 1]; + u8 cable_tech = 0; + u8 cable_comp = 0; + u8 port_type = NBL_PORT_TYPE_UNKNOWN; + int ret; + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, 0, + SFF_8472_CABLE_SPEC_COMP + 1, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + port_type = NBL_PORT_TYPE_UNKNOWN; + return port_type; + } + + cable_tech = data[SFF_8472_CABLE_TECHNOLOGY]; + + if (cable_tech & SFF_PASSIVE_CABLE) { + cable_comp = data[SFF_8472_CABLE_SPEC_COMP]; + + /* determine if the port is a copper cable */ + if (cable_comp == SFF_COPPER_UNSPECIFIED || + cable_comp == SFF_COPPER_8431_APPENDIX_E) +
port_type = NBL_PORT_TYPE_COPPER; + else + port_type = NBL_PORT_TYPE_FIBRE; + } else if (cable_tech & SFF_ACTIVE_CABLE) { + cable_comp = data[SFF_8472_CABLE_SPEC_COMP]; + + /* determine if the port is a copper cable */ + if (cable_comp == SFF_COPPER_UNSPECIFIED || + cable_comp == SFF_COPPER_8431_APPENDIX_E || + cable_comp == SFF_COPPER_8431_LIMITING) + port_type = NBL_PORT_TYPE_COPPER; + else + port_type = NBL_PORT_TYPE_FIBRE; + } else { + port_type = NBL_PORT_TYPE_FIBRE; + } + + return port_type; +} + +static int nbl_res_adminq_get_port_type(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) + return nbl_res_adminq_get_special_port_type(res_mgt, eth_id); + + return nbl_res_adminq_get_common_port_type(res_mgt, eth_id); +} + +static s32 nbl_res_adminq_get_module_bitrate(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u8 data[SFF_8472_SIGNALING_RATE_MAX + 1]; + u32 result; + u8 br_nom; + u8 br_max; + u8 identifier; + u8 encoding = 0; + int port_max_rate; + int ret; + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, 0); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return NBL_PORT_MAX_RATE_UNKNOWN; + } + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, 0, + SFF_8472_SIGNALING_RATE_MAX + 1, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return NBL_PORT_MAX_RATE_UNKNOWN; + } + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, + I2C_DEV_ADDR_A0, 0, 0, + SFF_8636_VENDOR_ENCODING, + 1, &encoding); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return NBL_PORT_MAX_RATE_UNKNOWN; + } + } + + br_nom = data[SFF_8472_SIGNALING_RATE]; + br_max = data[SFF_8472_SIGNALING_RATE_MAX]; + identifier = data[SFF_8472_IDENTIFIER]; + + /* sff-8472 section 5.6 */ + if (br_nom == 255) + result = (u32)br_max * 250; + else if (br_nom == 0) + result = 0; + else + result = (u32)br_nom * 100; + + switch (result / 1000) { + case 25: + port_max_rate = NBL_PORT_MAX_RATE_25G; + break; + case 10: + port_max_rate = NBL_PORT_MAX_RATE_10G; + break; + case 1: + port_max_rate = NBL_PORT_MAX_RATE_1G; + break; + default: + port_max_rate = NBL_PORT_MAX_RATE_UNKNOWN; + break; + } + + if (identifier == SFF_IDENTIFIER_QSFP28) + port_max_rate = NBL_PORT_MAX_RATE_100G; + + if (identifier == SFF_IDENTIFIER_PAM4 || encoding == SFF_8636_ENCODING_PAM4) + port_max_rate = NBL_PORT_MAX_RATE_100G_PAM4; + + return port_max_rate; +} + +static void nbl_res_eth_task_schedule(struct nbl_adminq_mgt *adminq_mgt) +{ + nbl_common_queue_work(&adminq_mgt->eth_task, true, false); +} + +static void nbl_res_adminq_recv_port_notify(void *priv, void *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_port_notify *notify; + u8 last_module_inplace = 0; + u8 last_link_state = 0; + int eth_id = 0;
+ + notify = (struct nbl_port_notify *)data; + eth_id = notify->id; + + dev_info(dev, "eth_id:%d link_state:%d, module_inplace:%d, speed:%d, flow_ctrl:%d, fec:%d, advertising:%llx, lp_advertising:%llx\n", + eth_info->logic_eth_id[eth_id], notify->link_state, notify->module_inplace, + notify->speed * 10, notify->flow_ctrl, + notify->fec, notify->advertising, notify->lp_advertising); + + mutex_lock(&adminq_mgt->eth_lock); + + last_module_inplace = eth_info->module_inplace[eth_id]; + last_link_state = eth_info->link_state[eth_id]; + + eth_info->link_state[eth_id] = notify->link_state; + eth_info->module_inplace[eth_id] = notify->module_inplace; + /* when the eth link is down, do not update the speed + * when autoneg is configured to off, ethtool reads the speed and sets it with the disable autoneg command, + * if the eth link is down, the speed from emp is not credible, + * so we need to preserve the last link-up speed. + */ + if (notify->link_state || !eth_info->link_speed[eth_id]) + eth_info->link_speed[eth_id] = notify->speed * 10; + eth_info->active_fc[eth_id] = notify->flow_ctrl; + eth_info->active_fec[eth_id] = notify->fec; + eth_info->port_lp_advertising[eth_id] = notify->lp_advertising; + + if (!last_module_inplace && notify->module_inplace) { + adminq_mgt->module_inplace_changed[eth_id] = 1; + nbl_res_eth_task_schedule(adminq_mgt); + } + + if (last_link_state != notify->link_state) { + adminq_mgt->link_state_changed[eth_id] = 1; + nbl_res_eth_task_schedule(adminq_mgt); + } + + mutex_unlock(&adminq_mgt->eth_lock); +} + +static int nbl_get_highest_bit(u64 advertise) +{ + int highest_bit_pos = 0; + + while (advertise != 0) { + advertise >>= 1; + highest_bit_pos++; + } + + return highest_bit_pos; +} + +static int nbl_res_adminq_set_port_advertising(void *priv, + struct nbl_port_advertising *advertising) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + int highest_bit_pos = 0; + struct nbl_port_key *param; + int param_len = 0; + int eth_id = 0; + u64 key = 0; + u64 data = 0; + u64 new_advert = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + eth_id = advertising->eth_id; + new_advert = eth_info->port_advertising[eth_id]; + + /* set autoneg */ + if (advertising->autoneg != 0) { + new_advert = new_advert | NBL_PORT_CAP_AUTONEG_MASK | NBL_PORT_CAP_PAUSE_MASK; + new_advert |= BIT(NBL_PORT_CAP_AUTONEG); + } else { + new_advert = new_advert & ~NBL_PORT_CAP_AUTONEG_MASK; + } + + if (advertising->active_fc != 0) { + new_advert = new_advert & ~NBL_PORT_CAP_PAUSE_MASK; + if (advertising->active_fc & NBL_PORT_TX_PAUSE) + new_advert |= BIT(NBL_PORT_CAP_TX_PAUSE); + if (advertising->active_fc & NBL_PORT_RX_PAUSE) + new_advert |= BIT(NBL_PORT_CAP_RX_PAUSE); + } + + /* set FEC */ + if (advertising->active_fec != 0) { + new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK; + + /* when ethtool sets FEC_AUTO, we set the default fec mode */ + if (advertising->active_fec == NBL_PORT_FEC_AUTO && !advertising->autoneg) { + advertising->active_fec = NBL_PORT_FEC_OFF; + if (eth_info->link_speed[eth_id] == SPEED_1000) + advertising->active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE; + if (eth_info->link_speed[eth_id] == SPEED_10000) + advertising->active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE; + if (eth_info->link_speed[eth_id] == SPEED_25000)
+ advertising->active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; + } + + if (advertising->active_fec == NBL_PORT_FEC_OFF) + new_advert |= BIT(NBL_PORT_CAP_FEC_NONE); + if (advertising->active_fec == NBL_PORT_FEC_RS) + new_advert |= BIT(NBL_PORT_CAP_FEC_RS); + if (advertising->active_fec == NBL_PORT_FEC_BASER) + new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); + if (advertising->active_fec == NBL_PORT_FEC_AUTO) + new_advert |= NBL_PORT_CAP_FEC_MASK; + } + + /* set speed */ + if (advertising->speed_advert != 0) { + new_advert = (new_advert & (NBL_PORT_CAP_AUTONEG_MASK | NBL_PORT_CAP_FEC_MASK | + NBL_PORT_CAP_PAUSE_MASK)) | advertising->speed_advert; + } + + highest_bit_pos = nbl_get_highest_bit(new_advert); + /* speed 10G only can set fec off or baseR, if set RS we change it to baseR */ + if (highest_bit_pos <= NBL_PORT_CAP_10GBASE_SR && + highest_bit_pos >= NBL_PORT_CAP_10GBASE_T && !advertising->autoneg) { + if (new_advert & BIT(NBL_PORT_CAP_FEC_RS)) { + new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK; + new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); + dev_notice(dev, "speed 10G default set fec baseR, set fec baseR\n"); + dev_notice(dev, "set new_advert:%llx\n", new_advert); + } + } + + if (eth_info->port_max_rate[eth_id] != NBL_PORT_MAX_RATE_100G_PAM4) + new_advert &= ~NBL_PORT_CAP_PAM4_MASK; + else + new_advert |= NBL_PORT_CAP_PAM4_MASK; + + dev_notice(dev, "set NBL_PORT_KEY_ADVERT eth id %d new_advert 0x%llx\n", + eth_info->logic_eth_id[eth_id], new_advert); + + key = NBL_PORT_KEY_ADVERT; + data = new_advert + (key << NBL_PORT_KEY_KEY_SHIFT); + + param->id = advertising->eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, set_port_advertising\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + eth_info->port_advertising[eth_id] = new_advert; + + kfree(param); + return 0; +} + +static int nbl_res_adminq_get_port_state(void *priv, u8 eth_id, struct nbl_port_state *port_state) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + port_state->port_caps = eth_info->port_caps[eth_id]; + port_state->port_advertising = eth_info->port_advertising[eth_id]; + port_state->port_lp_advertising = eth_info->port_lp_advertising[eth_id]; + port_state->link_speed = eth_info->link_speed[eth_id]; + port_state->active_fc = eth_info->active_fc[eth_id]; + port_state->active_fec = eth_info->active_fec[eth_id]; + port_state->link_state = eth_info->link_state[eth_id]; + port_state->module_inplace = eth_info->module_inplace[eth_id]; + port_state->fw_port_max_speed = res_mgt->resource_info->board_info.eth_speed; + if (port_state->module_inplace) { + port_state->port_type = eth_info->port_type[eth_id]; + port_state->port_max_rate = eth_info->port_max_rate[eth_id]; + } else { + port_state->port_caps = port_state->port_caps & ~NBL_PORT_CAP_FEC_MASK; + port_state->port_caps = port_state->port_caps & ~NBL_PORT_CAP_PAUSE_MASK; + port_state->port_caps = port_state->port_caps & ~NBL_PORT_CAP_AUTONEG_MASK; + port_state->port_advertising = + port_state->port_advertising & ~NBL_PORT_CAP_FEC_MASK; + port_state->port_advertising = + port_state->port_advertising 
& ~NBL_PORT_CAP_PAUSE_MASK; + port_state->port_advertising = + port_state->port_advertising & ~NBL_PORT_CAP_AUTONEG_MASK; + } + + return 0; +} + +static int nbl_res_adminq_get_module_info(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + u8 sff8472_rev; + u8 addr_mode; + bool page_swap = false; + u8 module_inplace = 0; /* 1 inplace, 0 not inplace */ + u8 data[SFF_8472_COMPLIANCE + 1]; + int ret; + + module_inplace = eth_info->module_inplace[eth_id]; + if (!module_inplace) { + dev_err(dev, "Optical module of ETH port %u is not inplace\n", + eth_info->logic_eth_id[eth_id]); + return -EIO; + } + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + info->type = ETH_MODULE_SFF_8636; + info->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + return 0; + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, 0, + SFF_8472_COMPLIANCE + 1, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + + sff8472_rev = data[SFF_8472_COMPLIANCE]; + addr_mode = data[SFF_8472_DIAGNOSTIC]; + + /* check if can access page 0xA2 directly, see sff-8472 */ + if (addr_mode & SFF_8472_ADDRESSING_MODE) { + dev_err(dev, "Address change required to access page 0xA2 which is not supported\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == SFF_8472_UNSUPPORTED || page_swap || + !(addr_mode & SFF_DDM_IMPLEMENTED)) { + /* We have an SFP, but it does not support SFF-8472 */ + info->type = ETH_MODULE_SFF_8079; + info->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + info->type = ETH_MODULE_SFF_8472; + info->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + u8 module_inplace = 0; /* 1 inplace, 0 not inplace */ + u32 start = eeprom->offset; + u32 length = eeprom->len; + u8 turn_page, offset; + int ret; + + if (eeprom->len == 0) + return -EINVAL; + + module_inplace = eth_info->module_inplace[eth_id]; + if (!module_inplace) { + dev_err(dev, "Optical module of ETH port %u is not inplace\n", + eth_info->logic_eth_id[eth_id]); + return -EIO; + } + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + while (start < ETH_MODULE_SFF_8636_MAX_LEN) { + length = SFF_8638_PAGESIZE; + if (start + length > ETH_MODULE_SFF_8636_MAX_LEN) + length = ETH_MODULE_SFF_8636_MAX_LEN - start; + + nbl_res_get_module_eeprom_page(start, &turn_page, &offset); + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, turn_page); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, + I2C_DEV_ADDR_A0, 0, 0, + offset, length, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + start += length; + data += length; + length = eeprom->len - length; + } + return 0; + } + + /* 
Read A0 portion of eth EEPROM */ + if (start < ETH_MODULE_SFF_8079_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8079_LEN) + length = ETH_MODULE_SFF_8079_LEN - start; + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, + start, length, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + start += length; + data += length; + length = eeprom->len - length; + } + + /* Read A2 portion of eth EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8079_LEN; + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A2, 0, 0, + start, length, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + } + + return 0; +} + +static int nbl_res_adminq_get_link_state(void *priv, u8 eth_id, + struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + eth_link_info->link_status = eth_info->link_state[eth_id]; + eth_link_info->link_speed = eth_info->link_speed[eth_id]; + + return 0; +} + +static int nbl_res_adminq_get_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + u64 data = 0, key = 0, result = 0; + int param_len = 0, i, ret; + u8 reverse_mac[ETH_ALEN]; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + key = NBL_PORT_KEY_MAC_ADDRESS; + + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_READ; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + memcpy(reverse_mac, &result, ETH_ALEN); + + /*convert mac address*/ + for (i = 0; i < ETH_ALEN; i++) + mac[i] = reverse_mac[ETH_ALEN - 1 - i]; + + kfree(param); + return 0; +} + +int nbl_res_get_eth_mac(struct nbl_resource_mgt *res_mgt, u8 *mac, u8 eth_id) +{ + return nbl_res_adminq_get_eth_mac_addr(res_mgt, mac, eth_id); +} + +static int nbl_res_adminq_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + int i; + u8 reverse_mac[ETH_ALEN]; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + key = NBL_PORT_KEY_MAC_ADDRESS; + + /*convert mac address*/ + for (i = 0; i < ETH_ALEN; i++) + 
reverse_mac[i] = mac[ETH_ALEN - 1 - i]; + + memcpy(&data, reverse_mac, ETH_ALEN); + + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, reverse_mac=0x%x:%x:%x:%x:%x:%x\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id], reverse_mac[0], + reverse_mac[1], reverse_mac[2], reverse_mac[3], + reverse_mac[4], reverse_mac[5]); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_ctrl_port_led(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + key = NBL_PORT_KRY_LED_BLINK; + + switch (led_ctrl) { + case NBL_LED_REG_ACTIVE: + data = 1; + break; + case NBL_LED_REG_INACTIVE: + data = 0; + break; + default: + return 0; + } + + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "ctrl eth %d blink failed", eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_pt_filter_in(struct nbl_resource_mgt *res_mgt, + struct nbl_passthrough_fw_cmd_param *param) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_res_fw_cmd_filter *filter; + + filter = nbl_common_get_hash_node(adminq_mgt->cmd_filter, &param->opcode); + if (filter && filter->in) + return filter->in(res_mgt, param->data, param->in_size); + + return 0; +} + +static int nbl_res_adminq_pt_filter_out(struct nbl_resource_mgt *res_mgt, + struct nbl_passthrough_fw_cmd_param *param) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_res_fw_cmd_filter *filter; + int ret = 0; + + filter = nbl_common_get_hash_node(adminq_mgt->cmd_filter, &param->opcode); + if (filter && filter->out) + ret = filter->out(res_mgt, param->data, param->out_size); + + return 0; +} + +static int nbl_res_adminq_passthrough(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + u8 *in_data = NULL, *out_data = NULL; + int ret = 0; + + ret = nbl_res_adminq_pt_filter_in(res_mgt, param); + if (ret) + return ret; +
+ if (param->in_size) { + in_data = kzalloc(param->in_size, GFP_KERNEL); + if (!in_data) + goto in_data_fail; + memcpy(in_data, param->data, param->in_size); + } + if (param->out_size) { + out_data = kzalloc(param->out_size, GFP_KERNEL); + if (!out_data) + goto out_data_fail; + } + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, param->opcode, + in_data, param->in_size, out_data, param->out_size, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, param->opcode); + goto send_fail; + } + + result->opcode = param->opcode; + result->errcode = ret; + result->out_size = param->out_size; + if (result->out_size) + memcpy(result->data, out_data, param->out_size); + + nbl_res_adminq_pt_filter_out(res_mgt, result); + +send_fail: + kfree(out_data); +out_data_fail: + kfree(in_data); +in_data_fail: + return ret; +} + +static int nbl_res_adminq_update_ring_num(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_read_param *param; + struct nbl_net_ring_num_info *info; + int ret = 0; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto alloc_param_fail; + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto alloc_info_fail; + } + + param->resid = NBL_ADMINQ_PFA_TLV_PFVF_RING_ID; + param->offset = 0; + param->len = sizeof(*info); + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ, + param, sizeof(*param), info, sizeof(*info), 1); + + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ); + goto send_fail; + } + + if (info->pf_def_max_net_qp_num && info->vf_def_max_net_qp_num) + memcpy(&res_info->net_ring_num_info, info, sizeof(res_info->net_ring_num_info)); + +send_fail: + kfree(info); +alloc_info_fail: + kfree(param); +alloc_param_fail: + return ret; +} + +static int nbl_res_adminq_set_ring_num(void *priv, struct nbl_fw_cmd_ring_num_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_write_param *data; + int data_len = sizeof(struct nbl_fw_cmd_ring_num_param); + int ret = 0; + + data = kzalloc(sizeof(*data) + data_len, GFP_KERNEL); + if (!data) + goto alloc_data_fail; + + data->resid = NBL_ADMINQ_PFA_TLV_PFVF_RING_ID; + data->offset = 0; + data->len = data_len; + memcpy(data->data, param, data_len); + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE, + data, sizeof(*data) + data_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d\n", ret); + + kfree(data); +alloc_data_fail: + return ret; +} + +static void nbl_res_adminq_set_eth_speed(struct nbl_resource_mgt *res_mgt, + u8 eth_id, u32 speed, u8 active_fec, u8 autoneg) +{ + struct 
nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_port_advertising port_advertising = {0}; + u64 speed_advert = 0; + + speed_advert = nbl_speed_to_link_mode(speed, autoneg); + speed_advert &= eth_info->port_caps[eth_id]; + + if (!speed_advert) { + dev_err(dev, "eth %d speed %d is not supported, exit\n", + eth_info->logic_eth_id[eth_id], speed); + return; + } + + if (active_fec == NBL_PORT_FEC_OFF) { + if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_NONE))) { + dev_err(dev, "eth %d optical module plugged in, want to set fec mode off, but eth caps %llx do not support it\n", + eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); + } + } + if (active_fec == NBL_PORT_FEC_RS) { + if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_RS))) { + dev_err(dev, "eth %d optical module plugged in, want to set fec mode RS, but eth caps %llx do not support it\n", + eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); + } + } + if (active_fec == NBL_PORT_FEC_BASER) { + if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_BASER))) { + dev_err(dev, "eth %d optical module plugged in, want to set fec mode baseR, but eth caps %llx do not support it\n", + eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); + } + } + if (active_fec == NBL_PORT_FEC_AUTO) { + if (!(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_AUTONEG))) { + dev_err(dev, "eth %d optical module plugged in, want to set fec mode auto, but eth caps %llx do not support it\n", + eth_info->logic_eth_id[eth_id], eth_info->port_caps[eth_id]); + } + } + port_advertising.eth_id = eth_id; + port_advertising.speed_advert = speed_advert; + port_advertising.active_fec = active_fec; + port_advertising.autoneg = autoneg; + dev_info(dev, "eth %d optical module plugged in, set speed_advert:%llx, active_fec:%x, autoneg %d\n", + eth_info->logic_eth_id[eth_id], speed_advert, active_fec, autoneg); + nbl_res_adminq_set_port_advertising(res_mgt, &port_advertising); +} + +static void nbl_res_adminq_recovery_eth(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u8 port_max_rate = 0; + u8 port_type; + u32 port_max_speed = 0; + u8 active_fec = 0; + u8 autoneg = 0; + + if (!eth_info->module_inplace[eth_id]) + return; + + port_max_rate = eth_info->port_max_rate[eth_id]; + + switch (port_max_rate) { + case NBL_PORT_MAX_RATE_1G: + port_max_speed = SPEED_1000; + active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE; + break; + case NBL_PORT_MAX_RATE_10G: + port_max_speed = SPEED_10000; + active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE; + break; + case NBL_PORT_MAX_RATE_25G: + port_max_speed = SPEED_25000; + active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; + break; + case NBL_PORT_MAX_RATE_100G: + case NBL_PORT_MAX_RATE_100G_PAM4: + port_max_speed = SPEED_100000; + active_fec = NBL_ETH_100G_DEFAULT_FEC_MODE; + break; + default: + /* default set 25G */ + port_max_speed = SPEED_25000; + active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; + break; + } + + port_type = eth_info->port_type[eth_id]; + /* copper supports auto-negotiation */ + if (port_type == NBL_PORT_TYPE_COPPER) { + if (port_max_speed >= SPEED_25000) + autoneg = 1; + else + autoneg = 0; /* disable autoneg when 10G module plugged */ + + eth_info->port_caps[eth_id] |= BIT(NBL_PORT_CAP_AUTONEG); + } else { + autoneg = 0; + eth_info->port_caps[eth_id] &= ~BIT_MASK(NBL_PORT_CAP_AUTONEG); + } + /* when optical module is plugged in, we must set default fec */ + nbl_res_adminq_set_eth_speed(res_mgt, eth_id,
port_max_speed, active_fec, autoneg); +} + +static int nbl_res_adminq_nway_reset(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + key = NBL_PORT_KEY_DISABLE; + data = (key << NBL_PORT_KEY_KEY_SHIFT); + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "ctrl eth %d disable failed ret %d\n", + eth_info->logic_eth_id[eth_id], ret); + kfree(param); + return ret; + } + + key = NBL_PORT_KEY_ENABLE; + data = NBL_PORT_FLAG_ENABLE_NOTIFY + (key << NBL_PORT_KEY_KEY_SHIFT); + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param->data[0] = data; + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "ctrl eth %d enable failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + kfree(param); + return ret; + } + + nbl_res_adminq_recovery_eth(res_mgt, eth_id); + + kfree(param); + return 0; +} + +#define ADD_ETH_STATISTICS(name) {#name} +static struct nbl_leonis_eth_stats_info _eth_statistics[] = { + ADD_ETH_STATISTICS(eth_frames_tx), + ADD_ETH_STATISTICS(eth_frames_tx_ok), + ADD_ETH_STATISTICS(eth_frames_tx_badfcs), + ADD_ETH_STATISTICS(eth_unicast_frames_tx_ok), + ADD_ETH_STATISTICS(eth_multicast_frames_tx_ok), + ADD_ETH_STATISTICS(eth_broadcast_frames_tx_ok), + ADD_ETH_STATISTICS(eth_macctrl_frames_tx_ok), + ADD_ETH_STATISTICS(eth_fragment_frames_tx), + ADD_ETH_STATISTICS(eth_fragment_frames_tx_ok), + ADD_ETH_STATISTICS(eth_pause_frames_tx), + ADD_ETH_STATISTICS(eth_pause_macctrl_frames_tx), + ADD_ETH_STATISTICS(eth_pfc_frames_tx), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio0), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio1), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio2), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio3), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio4), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio5), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio6), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio7), + ADD_ETH_STATISTICS(eth_verify_frames_tx), + ADD_ETH_STATISTICS(eth_respond_frames_tx), + ADD_ETH_STATISTICS(eth_frames_tx_64B), + ADD_ETH_STATISTICS(eth_frames_tx_65_to_127B), + ADD_ETH_STATISTICS(eth_frames_tx_128_to_255B), + ADD_ETH_STATISTICS(eth_frames_tx_256_to_511B), + ADD_ETH_STATISTICS(eth_frames_tx_512_to_1023B), + ADD_ETH_STATISTICS(eth_frames_tx_1024_to_1535B), + ADD_ETH_STATISTICS(eth_frames_tx_1536_to_2047B), + ADD_ETH_STATISTICS(eth_frames_tx_2048_to_MAXB), + ADD_ETH_STATISTICS(eth_undersize_frames_tx_goodfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_tx_goodfcs), + ADD_ETH_STATISTICS(eth_undersize_frames_tx_badfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_tx_badfcs), + ADD_ETH_STATISTICS(eth_octets_tx), + ADD_ETH_STATISTICS(eth_octets_tx_ok), + ADD_ETH_STATISTICS(eth_octets_tx_badfcs), + ADD_ETH_STATISTICS(eth_frames_rx), + 
ADD_ETH_STATISTICS(eth_frames_rx_ok), + ADD_ETH_STATISTICS(eth_frames_rx_badfcs), + ADD_ETH_STATISTICS(eth_undersize_frames_rx_goodfcs), + ADD_ETH_STATISTICS(eth_undersize_frames_rx_badfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_rx_goodfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_rx_badfcs), + ADD_ETH_STATISTICS(eth_frames_rx_misc_error), + ADD_ETH_STATISTICS(eth_frames_rx_misc_dropped), + ADD_ETH_STATISTICS(eth_unicast_frames_rx_ok), + ADD_ETH_STATISTICS(eth_multicast_frames_rx_ok), + ADD_ETH_STATISTICS(eth_broadcast_frames_rx_ok), + ADD_ETH_STATISTICS(eth_pause_frames_rx), + ADD_ETH_STATISTICS(eth_pfc_frames_rx), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio0), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio1), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio2), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio3), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio4), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio5), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio6), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio7), + ADD_ETH_STATISTICS(eth_macctrl_frames_rx), + ADD_ETH_STATISTICS(eth_verify_frames_rx_ok), + ADD_ETH_STATISTICS(eth_respond_frames_rx_ok), + ADD_ETH_STATISTICS(eth_fragment_frames_rx_ok), + ADD_ETH_STATISTICS(eth_fragment_rx_smdc_nocontext), + ADD_ETH_STATISTICS(eth_fragment_rx_smds_seq_error), + ADD_ETH_STATISTICS(eth_fragment_rx_smdc_seq_error), + ADD_ETH_STATISTICS(eth_fragment_rx_frag_cnt_error), + ADD_ETH_STATISTICS(eth_frames_assembled_ok), + ADD_ETH_STATISTICS(eth_frames_assembled_error), + ADD_ETH_STATISTICS(eth_frames_rx_64B), + ADD_ETH_STATISTICS(eth_frames_rx_65_to_127B), + ADD_ETH_STATISTICS(eth_frames_rx_128_to_255B), + ADD_ETH_STATISTICS(eth_frames_rx_256_to_511B), + ADD_ETH_STATISTICS(eth_frames_rx_512_to_1023B), + ADD_ETH_STATISTICS(eth_frames_rx_1024_to_1535B), + ADD_ETH_STATISTICS(eth_frames_rx_1536_to_2047B), + ADD_ETH_STATISTICS(eth_frames_rx_2048_to_MAXB), + ADD_ETH_STATISTICS(eth_octets_rx), + ADD_ETH_STATISTICS(eth_octets_rx_ok), + ADD_ETH_STATISTICS(eth_octets_rx_badfcs), + ADD_ETH_STATISTICS(eth_octets_rx_dropped), +}; + +static void nbl_res_adminq_get_private_stat_len(void *priv, u32 *len) +{ + *len = ARRAY_SIZE(_eth_statistics); +} + +static void nbl_res_adminq_get_private_stat_data(void *priv, u32 eth_id, u64 *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + int data_length = sizeof(struct nbl_leonis_eth_stats); + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, + &eth_id, sizeof(eth_id), data, data_length, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq get eth %d stats failed ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); +} + +static void nbl_res_adminq_fill_private_stat_strings(void *priv, u8 *strings) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(_eth_statistics); i++) { + snprintf(strings, ETH_GSTRING_LEN, "%s", _eth_statistics[i].descp); + strings += ETH_GSTRING_LEN; + } +} + +static u32 nbl_convert_temp_type_eeprom_offset(enum nbl_module_temp_type type) +{ + switch (type) { + case NBL_MODULE_TEMP: + return SFF_8636_TEMP; + case NBL_MODULE_TEMP_MAX: + return SFF_8636_TEMP_MAX; + case NBL_MODULE_TEMP_CRIT: + return SFF_8636_TEMP_CIRT; + default: + return SFF_8636_TEMP; + } +} + +static
u32 nbl_convert_temp_type_qsfp28_eeprom_offset(enum nbl_module_temp_type type) +{ + switch (type) { + case NBL_MODULE_TEMP: + return SFF_8636_QSFP28_TEMP; + case NBL_MODULE_TEMP_MAX: + return SFF_8636_QSFP28_TEMP_MAX; + case NBL_MODULE_TEMP_CRIT: + return SFF_8636_QSFP28_TEMP_CIRT; + default: + return SFF_8636_QSFP28_TEMP; + } +} + +static int nbl_res_adminq_get_module_temp_common(struct nbl_resource_mgt *res_mgt, u8 eth_id, + enum nbl_module_temp_type type) +{ + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct ethtool_modinfo info = {0}; + u32 offset; + int temp = 0; + int ret = 0; + + ret = nbl_res_adminq_get_module_info(res_mgt, eth_id, &info); + if (ret) { + dev_err(dev, "get_module_info eth id %d ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + if (info.eeprom_len <= ETH_MODULE_SFF_8079_LEN) + return 0; + + offset = nbl_convert_temp_type_eeprom_offset(type); + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A2, + 0, 0, offset, 1, (u8 *)&temp); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + return temp; +} + +static int nbl_res_adminq_get_module_temp_special(struct nbl_resource_mgt *res_mgt, u8 eth_id, + enum nbl_module_temp_type type) +{ + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u32 addr; + u8 offset, turn_page; + int temp = 0; + int ret = 0; + + addr = nbl_convert_temp_type_qsfp28_eeprom_offset(type); + + nbl_res_get_module_eeprom_page(addr, &turn_page, &offset); + + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, turn_page); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, + 0, 0, offset, 1, (u8 *)&temp); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + return temp; +} + +static int nbl_res_adminq_get_module_temperature(void *priv, u8 eth_id, + enum nbl_module_temp_type type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + if (!eth_info->module_inplace[eth_id]) + return 0; + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) + return nbl_res_adminq_get_module_temp_special(res_mgt, eth_id, type); + else + return nbl_res_adminq_get_module_temp_common(res_mgt, eth_id, type); +} + +static int nbl_res_adminq_load_p4(void *priv, struct nbl_load_p4_param *p4_param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_load_p4 *param; + int ret = 0; + + param = kzalloc(sizeof(*param) + p4_param->size, GFP_KERNEL); + if (!param) + return -ENOMEM; + + param->addr = p4_param->addr; + param->size = p4_param->size; + param->section_index = p4_param->section_index; + param->section_offset = p4_param->section_offset; + param->load_start = p4_param->start; + param->load_end = p4_param->end; + strscpy(param->name, p4_param->name, sizeof(param->name)); + memcpy(param->data, p4_param->data, p4_param->size); + + 
NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_LOAD_P4, + param, sizeof(*param) + p4_param->size, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_LOAD_P4); + + kfree(param); + return ret; +} + +static int nbl_res_adminq_load_p4_default(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_LOAD_P4_DEFAULT, + NULL, 0, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_LOAD_P4_DEFAULT); + + return ret; +} + +/* NBL_ADMINQ_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_ADMINQ_OPS_TBL \ +do { \ + NBL_ADMINQ_SET_OPS(get_firmware_version, nbl_res_adminq_get_firmware_version); \ + NBL_ADMINQ_SET_OPS(flash_lock, nbl_res_adminq_flash_lock); \ + NBL_ADMINQ_SET_OPS(flash_unlock, nbl_res_adminq_flash_unlock); \ + NBL_ADMINQ_SET_OPS(flash_prepare, nbl_res_adminq_flash_prepare); \ + NBL_ADMINQ_SET_OPS(flash_image, nbl_res_adminq_flash_image); \ + NBL_ADMINQ_SET_OPS(flash_activate, nbl_res_adminq_flash_activate); \ + NBL_ADMINQ_SET_OPS(set_sfp_state, nbl_res_adminq_set_sfp_state); \ + NBL_ADMINQ_SET_OPS(setup_loopback, nbl_res_adminq_setup_loopback); \ + NBL_ADMINQ_SET_OPS(check_fw_heartbeat, nbl_res_adminq_check_fw_heartbeat); \ + NBL_ADMINQ_SET_OPS(check_fw_reset, nbl_res_adminq_check_fw_reset); \ + NBL_ADMINQ_SET_OPS(get_port_attributes, nbl_res_adminq_get_port_attributes); \ + NBL_ADMINQ_SET_OPS(update_ring_num, nbl_res_adminq_update_ring_num); \ + NBL_ADMINQ_SET_OPS(set_ring_num, nbl_res_adminq_set_ring_num); \ + NBL_ADMINQ_SET_OPS(enable_port, nbl_res_adminq_enable_port); \ + NBL_ADMINQ_SET_OPS(recv_port_notify, nbl_res_adminq_recv_port_notify); \ + NBL_ADMINQ_SET_OPS(set_port_advertising, nbl_res_adminq_set_port_advertising); \ + NBL_ADMINQ_SET_OPS(get_port_state, nbl_res_adminq_get_port_state); \ + NBL_ADMINQ_SET_OPS(get_module_info, nbl_res_adminq_get_module_info); \ + NBL_ADMINQ_SET_OPS(get_module_eeprom, nbl_res_adminq_get_module_eeprom); \ + NBL_ADMINQ_SET_OPS(get_link_state, nbl_res_adminq_get_link_state); \ + NBL_ADMINQ_SET_OPS(set_eth_mac_addr, nbl_res_adminq_set_eth_mac_addr); \ + NBL_ADMINQ_SET_OPS(ctrl_port_led, nbl_res_adminq_ctrl_port_led); \ + NBL_ADMINQ_SET_OPS(nway_reset, nbl_res_adminq_nway_reset); \ + NBL_ADMINQ_SET_OPS(passthrough_fw_cmd, nbl_res_adminq_passthrough); \ + NBL_ADMINQ_SET_OPS(get_private_stat_len, nbl_res_adminq_get_private_stat_len); \ + NBL_ADMINQ_SET_OPS(get_private_stat_data, nbl_res_adminq_get_private_stat_data); \ + NBL_ADMINQ_SET_OPS(fill_private_stat_strings, nbl_res_adminq_fill_private_stat_strings);\ + NBL_ADMINQ_SET_OPS(get_module_temperature, nbl_res_adminq_get_module_temperature); \ + NBL_ADMINQ_SET_OPS(load_p4, nbl_res_adminq_load_p4); \ + NBL_ADMINQ_SET_OPS(load_p4_default, nbl_res_adminq_load_p4_default); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_adminq_setup_mgt(struct device *dev, struct nbl_adminq_mgt **adminq_mgt) +{ + 
*adminq_mgt = devm_kzalloc(dev, sizeof(struct nbl_adminq_mgt), GFP_KERNEL); + if (!*adminq_mgt) + return -ENOMEM; + + init_waitqueue_head(&(*adminq_mgt)->wait_queue); + return 0; +} + +static void nbl_adminq_remove_mgt(struct device *dev, struct nbl_adminq_mgt **adminq_mgt) +{ + devm_kfree(dev, *adminq_mgt); + *adminq_mgt = NULL; +} + +static int nbl_res_adminq_chan_notify_link_state_req(struct nbl_resource_mgt *res_mgt, + u16 fid, u8 link_state, u32 link_speed) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_notify_link_state link_info = {0}; + + chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + + link_info.link_state = link_state; + link_info.link_speed = link_speed; + NBL_CHAN_SEND(chan_send, fid, NBL_CHAN_MSG_NOTIFY_LINK_STATE, &link_info, + sizeof(link_info), NULL, 0, 0); + return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); +} + +static void nbl_res_adminq_notify_link_state(struct nbl_resource_mgt *res_mgt, u8 eth_id, + u8 link_state) +{ + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_sriov_info *sriov_info; + struct nbl_queue_info *queue_info; + u16 pf_fid = 0, vf_fid = 0, link_speed = 0; + int i = 0, j = 0; + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + if (eth_info->pf_bitmap[eth_id] & BIT(i)) + pf_fid = nbl_res_pfvfid_to_func_id(res_mgt, i, -1); + else + continue; + + sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[pf_fid]; + queue_info = &queue_mgt->queue_info[pf_fid]; + + /* send eth's link state to pf */ + if (queue_info->num_txrx_queues) + nbl_res_adminq_chan_notify_link_state_req(res_mgt, + pf_fid, + link_state, + eth_info->link_speed[eth_id]); + + /* send eth's link state to pf's all vf */ + for (j = 0; j < sriov_info->num_vfs; j++) { + vf_fid = sriov_info->start_vf_func_id + j; + queue_info = &queue_mgt->queue_info[vf_fid]; + if (queue_info->num_txrx_queues) { + link_speed = eth_info->link_speed[eth_id]; + nbl_res_adminq_chan_notify_link_state_req(res_mgt, vf_fid, + link_state, + link_speed); + } + } + } +} + +static void nbl_res_adminq_eth_task(struct work_struct *work) +{ + struct nbl_adminq_mgt *adminq_mgt = container_of(work, struct nbl_adminq_mgt, + eth_task); + struct nbl_resource_mgt *res_mgt = adminq_mgt->res_mgt; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u8 eth_id = 0; + u8 port_max_rate = 0; + u32 port_max_speed = 0; + u8 active_fec = 0; + u8 autoneg = 0; + + for (eth_id = 0 ; eth_id < NBL_MAX_ETHERNET; eth_id++) { + if (adminq_mgt->module_inplace_changed[eth_id]) { + /* module not-inplace, transitions to inplace status */ + /* read module register and set speed, */ + /* set fec mode: 10G default OFF, 25G default RS */ + port_max_rate = nbl_res_adminq_get_module_bitrate(res_mgt, eth_id); + switch (port_max_rate) { + case NBL_PORT_MAX_RATE_1G: + port_max_speed = SPEED_1000; + active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE; + break; + case NBL_PORT_MAX_RATE_10G: + port_max_speed = SPEED_10000; + active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE; + break; + case NBL_PORT_MAX_RATE_25G: + port_max_speed = SPEED_25000; + active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; + break; + case NBL_PORT_MAX_RATE_100G: + case NBL_PORT_MAX_RATE_100G_PAM4: + port_max_speed = SPEED_100000; + active_fec = NBL_ETH_100G_DEFAULT_FEC_MODE; + break; + default: + /* default set 25G */ + port_max_speed = SPEED_25000; + active_fec = 
NBL_ETH_25G_DEFAULT_FEC_MODE; + break; + } + + eth_info->port_max_rate[eth_id] = port_max_rate; + eth_info->port_type[eth_id] = nbl_res_adminq_get_port_type(res_mgt, eth_id); + /* cooper support auto-negotiation */ + if (eth_info->port_type[eth_id] == NBL_PORT_TYPE_COPPER) { + if (port_max_speed >= SPEED_25000) + autoneg = 1; + else + autoneg = 0; /* disable autoneg when 10G module pluged */ + + eth_info->port_caps[eth_id] |= BIT(NBL_PORT_CAP_AUTONEG); + } else { + autoneg = 0; + eth_info->port_caps[eth_id] &= ~BIT_MASK(NBL_PORT_CAP_AUTONEG); + } + + /* when optical module plug in, we must set default fec */ + nbl_res_adminq_set_eth_speed(res_mgt, eth_id, port_max_speed, + active_fec, autoneg); + + adminq_mgt->module_inplace_changed[eth_id] = 0; + } + + mutex_lock(&adminq_mgt->eth_lock); + if (adminq_mgt->link_state_changed[eth_id]) { + /* eth link state changed, notify pf and vf */ + nbl_res_adminq_notify_link_state(res_mgt, eth_id, + eth_info->link_state[eth_id]); + adminq_mgt->link_state_changed[eth_id] = 0; + } + mutex_unlock(&adminq_mgt->eth_lock); + } +} + +static int nbl_res_adminq_setup_cmd_filter(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_hash_tbl_key tbl_key = {0}; + + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(u16), + sizeof(struct nbl_res_fw_cmd_filter), + NBL_RES_FW_CMD_FILTER_MAX, false); + + adminq_mgt->cmd_filter = nbl_common_init_hash_table(&tbl_key); + if (!adminq_mgt->cmd_filter) + return -EFAULT; + + return 0; +} + +static void nbl_res_adminq_remove_cmd_filter(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_hash_tbl_del_key del_key = {0}; + + if (adminq_mgt->cmd_filter) + nbl_common_remove_hash_table(adminq_mgt->cmd_filter, &del_key); + + adminq_mgt->cmd_filter = NULL; +} + +int nbl_adminq_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_adminq_mgt **adminq_mgt = &NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int ret; + + ret = nbl_adminq_setup_mgt(dev, adminq_mgt); + if (ret) + goto setup_mgt_fail; + + (*adminq_mgt)->res_mgt = res_mgt; + + (*adminq_mgt)->fw_last_hb_seq = (u32)phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + INIT_WORK(&(*adminq_mgt)->eth_task, nbl_res_adminq_eth_task); + mutex_init(&(*adminq_mgt)->eth_lock); + + ret = nbl_res_adminq_setup_cmd_filter(res_mgt); + if (ret) + goto set_filter_fail; + + nbl_res_adminq_add_cmd_filter_res_write(res_mgt); + + return 0; + +set_filter_fail: + cancel_work_sync(&((*adminq_mgt)->eth_task)); + nbl_adminq_remove_mgt(dev, adminq_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_adminq_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_adminq_mgt **adminq_mgt = &NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + + if (!(*adminq_mgt)) + return; + + nbl_res_adminq_remove_cmd_filter(res_mgt); + + cancel_work_sync(&((*adminq_mgt)->eth_task)); + nbl_adminq_remove_mgt(dev, adminq_mgt); +} + +int nbl_adminq_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_ADMINQ_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_ADMINQ_OPS_TBL; +#undef NBL_ADMINQ_SET_OPS + + return 0; +} + +void nbl_adminq_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_ADMINQ_SET_OPS(name, 
func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_ADMINQ_OPS_TBL; +#undef NBL_ADMINQ_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h new file mode 100644 index 000000000000..9cd868075827 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_ADMINQ_H_ +#define _NBL_ADMINQ_H_ + +#include "nbl_resource.h" + +/* SPI Bank Index */ +#define BANKID_DESC_BANK 0xA0 +#define BANKID_BOOT_BANK 0xA1 +#define BANKID_SR_BANK0 0xA2 +#define BANKID_SR_BANK1 0xA3 +#define BANKID_OSI_BANK0 0xA4 +#define BANKID_OSI_BANK1 0xA5 +#define BANKID_FSI_BANK0 0xA6 +#define BANKID_FSI_BANK1 0xA7 +#define BANKID_PHY_BANK 0xA8 +#define BANKID_NVM_BANK0 0xA9 +#define BANKID_NVM_BANK1 0xAA +#define BANKID_LOG_BANK 0xAB + +#define NBL_ADMINQ_IDX_LEN 4096 + +#define NBL_MAX_PHY_I2C_RESP_SIZE 128 + +#define I2C_DEV_ADDR_A0 0x50 +#define I2C_DEV_ADDR_A2 0x51 + +/* SFF moudle register addresses: 8 bit valid */ +#define SFF_8472_IDENTIFIER 0x0 +#define SFF_8472_10GB_CAPABILITY 0x3 /* check sff-8472 table 5-3 */ +#define SFF_8472_1GB_CAPABILITY 0x6 /* check sff-8472 table 5-3 */ +#define SFF_8472_CABLE_TECHNOLOGY 0x8 /* check sff-8472 table 5-3 */ +#define SFF_8472_EXTENDED_CAPA 0x24 /* check sff-8024 table 4-4 */ +#define SFF_8472_CABLE_SPEC_COMP 0x3C +#define SFF_8472_DIAGNOSTIC 0x5C /* digital diagnostic monitoring, relates to A2 */ +#define SFF_8472_COMPLIANCE 0x5E /* the specification revision version */ +#define SFF_8472_VENDOR_NAME 0x14 +#define SFF_8472_VENDOR_NAME_LEN 16 /* 16 bytes, from offset 0x14 to offset 0x23 */ +#define SFF_8472_VENDOR_PN 0x28 +#define SFF_8472_VENDOR_PN_LEN 16 +#define SFF_8472_VENDOR_OUI 0x25 /* name and oui cannot all be empty */ +#define SFF_8472_VENDOR_OUI_LEN 3 +#define SFF_8472_SIGNALING_RATE 0xC +#define SFF_8472_SIGNALING_RATE_MAX 0x42 +#define SFF_8472_SIGNALING_RATE_MIN 0x43 +/* optional status/control bits: soft rate select and tx disable */ +#define SFF_8472_OSCB 0x6E +/* extended status/control bits */ +#define SFF_8472_ESCB 0x76 +#define SFF8636_DEVICE_TECH_OFFSET 0x93 + +#define SFF_8636_VENDOR_ENCODING 0x8B +#define SFF_8636_ENCODING_PAM4 0x8 + +/* SFF status code */ +#define SFF_IDENTIFIER_SFP 0x3 +#define SFF_IDENTIFIER_QSFP28 0x11 +#define SFF_IDENTIFIER_PAM4 0x1E +#define SFF_PASSIVE_CABLE 0x4 +#define SFF_ACTIVE_CABLE 0x8 +#define SFF_8472_ADDRESSING_MODE 0x4 +#define SFF_8472_UNSUPPORTED 0x00 +#define SFF_8472_10G_SR_BIT 4 /* 850nm, short reach */ +#define SFF_8472_10G_LR_BIT 5 /* 1310nm, long reach */ +#define SFF_8472_10G_LRM_BIT 6 /* 1310nm, long reach multimode */ +#define SFF_8472_10G_ER_BIT 7 /* 1550nm, extended reach */ +#define SFF_8472_1G_SX_BIT 0 +#define SFF_8472_1G_LX_BIT 1 +#define SFF_8472_1G_CX_BIT 2 +#define SFF_8472_1G_T_BIT 3 +#define SFF_8472_SOFT_TX_DISABLE 6 +#define SFF_8472_SOFT_RATE_SELECT 4 +#define SFF_8472_EMPTY_ASCII 20 +#define SFF_DDM_IMPLEMENTED 0x40 +#define SFF_COPPER_UNSPECIFIED 0 +#define SFF_COPPER_8431_APPENDIX_E 1 +#define SFF_COPPER_8431_LIMITING 4 +#define SFF_8636_TURNPAGE_ADDR (127) +#define SFF_8638_PAGESIZE (128) + +#define SFF_8636_TEMP (0x60) +#define SFF_8636_TEMP_MAX (0x4) +#define SFF_8636_TEMP_CIRT (0x0) + +#define SFF_8636_QSFP28_TEMP (0x16) +#define SFF_8636_QSFP28_TEMP_MAX (0x204) +#define SFF_8636_QSFP28_TEMP_CIRT (0x200) + +/* Firmware version */ 
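+/*
+ * BCD2BYTE()/BCD2SHORT() below decode binary-coded-decimal fields in
+ * which every nibble carries one decimal digit, e.g. BCD2BYTE(0x42)
+ * evaluates to 42 and BCD2SHORT(0x1234) evaluates to 1234.
+ */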
+#define FIRMWARE_MAGIC "M181FWV0" +#define BCD2BYTE(b) ({ typeof(b) _b = (b); \ + (((_b) & 0xF) + (((_b) >> 4) & 0xF) * 10); }) +#define BCD2SHORT(s) ({ typeof(s) _s = (s); \ + (((_s) & 0xF) + (((_s) >> 4) & 0xF) * 10 + \ + (((_s) >> 8) & 0xF) * 100 + (((_s) >> 12) & 0xF) * 1000); }) + +/* VSI fixed number of queues*/ +#define NBL_VSI_PF_REAL_QUEUE_NUM(num) (((num) * 2) + NBL_DEFAULT_REP_HW_QUEUE_NUM) +#define NBL_VSI_VF_REAL_QUEUE_NUM(num) (num) + +#define NBL_ADMINQ_PFA_TLV_PFVF_RING_ID (0x5805) + +enum { + NBL_FW_VERSION_BANK0 = 0, + NBL_FW_VERSION_BANK1 = 1, + NBL_FW_VERSION_RUNNING_BANK = 2, +}; + +enum { + NBL_ADMINQ_NVM_BANK_REPAIR = 0, + NBL_ADMINQ_NVM_BANK_SWITCH, +}; + +enum { + NBL_ADMINQ_BANK_INDEX_SPI_BOOT = 2, + NBL_ADMINQ_BANK_INDEX_NVM_BANK = 3, +}; + +struct nbl_leonis_eth_tx_stats { + u64 frames_txd; + u64 frames_txd_ok; + u64 frames_txd_badfcs; + u64 unicast_frames_txd_ok; + u64 multicast_frames_txd_ok; + u64 broadcast_frames_txd_ok; + u64 macctrl_frames_txd_ok; + u64 fragment_frames_txd; + u64 fragment_frames_txd_ok; + u64 pause_macctrl_frames_txd; + u64 pause_macctrl_toggle_frames_txd; + u64 pfc_macctrl_frames_txd; + u64 pfc_macctrl_toggle_frames_txd_0; + u64 pfc_macctrl_toggle_frames_txd_1; + u64 pfc_macctrl_toggle_frames_txd_2; + u64 pfc_macctrl_toggle_frames_txd_3; + u64 pfc_macctrl_toggle_frames_txd_4; + u64 pfc_macctrl_toggle_frames_txd_5; + u64 pfc_macctrl_toggle_frames_txd_6; + u64 pfc_macctrl_toggle_frames_txd_7; + u64 verify_frames_txd; + u64 respond_frames_txd; + u64 frames_txd_sizerange0; + u64 frames_txd_sizerange1; + u64 frames_txd_sizerange2; + u64 frames_txd_sizerange3; + u64 frames_txd_sizerange4; + u64 frames_txd_sizerange5; + u64 frames_txd_sizerange6; + u64 frames_txd_sizerange7; + u64 undersize_frames_txd_goodfcs; + u64 oversize_frames_txd_goodfcs; + u64 undersize_frames_txd_badfcs; + u64 oversize_frames_txd_badfcs; + u64 octets_txd; + u64 octets_txd_ok; + u64 octets_txd_badfcs; +}; + +struct nbl_leonis_eth_rx_stats { + u64 frames_rxd; + u64 frames_rxd_ok; + u64 frames_rxd_badfcs; + u64 undersize_frames_rxd_goodfcs; + u64 undersize_frames_rxd_badfcs; + u64 oversize_frames_rxd_goodfcs; + u64 oversize_frames_rxd_badfcs; + u64 frames_rxd_misc_error; + u64 frames_rxd_misc_dropped; + u64 unicast_frames_rxd_ok; + u64 multicast_frames_rxd_ok; + u64 broadcast_frames_rxd_ok; + u64 pause_macctrl_frames_rxd; + u64 pfc_macctrl_frames_rxd; + u64 pfc_macctrl_frames_rxd_0; + u64 pfc_macctrl_frames_rxd_1; + u64 pfc_macctrl_frames_rxd_2; + u64 pfc_macctrl_frames_rxd_3; + u64 pfc_macctrl_frames_rxd_4; + u64 pfc_macctrl_frames_rxd_5; + u64 pfc_macctrl_frames_rxd_6; + u64 pfc_macctrl_frames_rxd_7; + u64 macctrl_frames_rxd; + u64 verify_frames_rxd_ok; + u64 respond_frames_rxd_ok; + u64 fragment_frames_rxd_ok; + u64 fragment_frames_rxd_smdc_nocontext; + u64 fragment_frames_rxd_smds_seq_error; + u64 fragment_frames_rxd_smdc_seq_error; + u64 fragment_frames_rxd_frag_cnt_error; + u64 frames_assembled_ok; + u64 frames_assembled_error; + u64 frames_rxd_sizerange0; + u64 frames_rxd_sizerange1; + u64 frames_rxd_sizerange2; + u64 frames_rxd_sizerange3; + u64 frames_rxd_sizerange4; + u64 frames_rxd_sizerange5; + u64 frames_rxd_sizerange6; + u64 frames_rxd_sizerange7; + u64 octets_rxd; + u64 octets_rxd_ok; + u64 octets_rxd_badfcs; + u64 octets_rxd_dropped; +}; + +struct nbl_leonis_eth_stats { + struct nbl_leonis_eth_tx_stats tx_stats; + struct nbl_leonis_eth_rx_stats rx_stats; +}; + +struct nbl_leonis_eth_stats_info { + const char *descp; +}; + +#endif diff --git 
a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h new file mode 100644 index 000000000000..acb68f2bac4b --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h @@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_HW_H_ +#define _NBL_HW_H_ + +#include "nbl_include.h" + +#define NBL_MAX_ETHERNET (4) + +#define NBL_PT_PP0 0 +#define NBL_PT_LEN 3 +#define NBL_TCAM_TABLE_LEN (64) +#define NBL_MCC_ID_INVALID U16_MAX +#define NBL_KT_BYTE_LEN 40 +#define NBL_KT_BYTE_HALF_LEN 20 + +#define NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2 0 +#define NBL_EM0_PT_PHY_UP_LLDP_LACP 1 +#define NBL_EM0_PT_PHY_UP_UNICAST_L2 2 +#define NBL_EM0_PT_PHY_DOWN_UNICAST_L2 3 +#define NBL_EM0_PT_PHY_UP_MULTICAST_L2 4 +#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L2 5 +#define NBL_EM0_PT_PHY_UP_MULTICAST_L3 6 +#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L3 7 +#define NBL_EM0_PT_PHY_DPRBAC_IPV4 8 +#define NBL_EM0_PT_PHY_DPRBAC_IPV6 9 +#define NBL_EM0_PT_PHY_UL4S_IPV4 10 +#define NBL_EM0_PT_PHY_UL4S_IPV6 11 +#define NBL_EM0_PT_PMD_ND_UPCALL 12 + +#define NBL_PP0_PROFILE_ID_MIN (0) +#define NBL_PP0_PROFILE_ID_MAX (15) +#define NBL_PP1_PROFILE_ID_MIN (16) +#define NBL_PP1_PROFILE_ID_MAX (31) +#define NBL_PP2_PROFILE_ID_MIN (32) +#define NBL_PP2_PROFILE_ID_MAX (47) +#define NBL_PP_PROFILE_NUM (16) + +#define NBL_QID_MAP_TABLE_ENTRIES (4096) +#define NBL_EPRO_RSS_RET_TBL_DEPTH (8192 * 2) +#define NBL_EPRO_RSS_ENTRY_SIZE_UNIT (16) + +#define NBL_EPRO_RSS_SK_SIZE 40 +#define NBL_EPRO_RSS_PER_KEY_SIZE 8 +#define NBL_EPRO_RSS_KEY_NUM (NBL_EPRO_RSS_SK_SIZE / NBL_EPRO_RSS_PER_KEY_SIZE) + +enum { + NBL_HT0, + NBL_HT1, + NBL_HT_MAX, +}; + +enum { + NBL_KT_HALF_MODE, + NBL_KT_FULL_MODE, +}; + +#pragma pack(1) +union nbl_action_data { + struct clear_flag_act { + u16 clear_flag:8; + u16 start_offset:5; + u16 rsv:1; + u16 identify:2; + #define NBL_CLEAR_FLAGS_IDENTIFY (0) + } clear_flag; + + struct set_flag_act { + u16 set_flag:8; + u16 start_offset:5; + u16 rsv:1; + u16 identify:2; + #define NBL_SET_FLAGS_IDENTIFY (1) + } set_flag; + + struct set_fwd_type_act { + u16 next_stg:4; + u16 next_stg_vld:1; + u16 fwd_type:3; + u16 fwd_type_vld:1; + u16 cos:3; + u16 set_cos_vld:1; + u16 rsv:1; + u16 identify:2; + #define NBL_SET_FWD_TYPE_IDENTIFY (2) + } set_fwd_type; + + /* FLOW ACTION */ + struct flow_id_act { + u16 flow_id; + } flow_idx; + + struct rss_id_act { + u16 rss_id:10; + u16 rss_tc_en:1; + u16 rsv:5; + } rss_idx; + + struct port_car_act { + u16 car_id:10; + u16 rsv:6; + } port_car; + + struct flow_car_act { + u16 car_id:12; + u16 rsv:4; + } flow_car; + + struct cascade_act_act { + u16 table_id; + } cascade_act; + + struct mirror_id_act { + u16 mirror_id:4; + u16 mirror_mode:2; + #define NBL_MIRROR_MODE_IN (0) + #define NBL_MIRROR_MODE_FLOW (1) + #define NBL_MIRROR_MODE_OUT (2) + uint32_t rsv:10; + } mirror_idx; + + union dport_act { + struct { + /* port_type = SET_DPORT_TYPE_ETH_LAG, set the eth and lag field. 
*/ + u16 dport_info:10; + u16 dport_type:2; + #define FWD_DPORT_TYPE_ETH (0) + #define FWD_DPORT_TYPE_LAG (1) + #define FWD_DPORT_TYPE_VSI (2) + u16 dport_id:4; + #define FWD_DPORT_ID_HOST_TLS (0) + #define FWD_DPORT_ID_ECPU_TLS (1) + #define FWD_DPORT_ID_HOST_RDMA (2) + #define FWD_DPORT_ID_ECPU_RDMA (3) + #define FWD_DPORT_ID_EMP (4) + #define FWD_DPORT_ID_BMC (5) + #define FWD_DPORT_ID_LOOP_BACK (7) + #define FWD_DPORT_ID_ETH0 (8) + #define FWD_DPORT_ID_ETH1 (9) + #define FWD_DPORT_ID_ETH2 (10) + #define FWD_DPORT_ID_ETH3 (11) + } fwd_dport; + + struct { + /* port_type = SET_DPORT_TYPE_ETH_LAG, set the eth and lag field. */ + u16 eth_id:2; + u16 lag_id:2; + u16 eth_vld:1; + u16 lag_vld:1; + u16 rsv:4; + u16 port_type:2; + u16 next_stg_sel:2; + u16 upcall_flag:2; + } down; + + struct { + /* port_type = SET_DPORT_TYPE_VSI_HOST and SET_DPORT_TYPE_VSI_ECPU, + * set the port_id field as the vsi_id. + * port_type = SET_DPORT_TYPE_SP_PORT, set the port_id as the defined + * PORT_TYPE_SP_*. + */ + u16 port_id:10; + #define PORT_TYPE_SP_DROP (0x3FF) + #define PORT_TYPE_SP_GLB_LB (0x3FE) + #define PORT_TYPE_SP_BMC (0x3FD) + #define PORT_TYPE_SP_EMP (0x3FC) + u16 port_type:2; + #define SET_DPORT_TYPE_VSI_HOST (0) + #define SET_DPORT_TYPE_VSI_ECPU (1) + #define SET_DPORT_TYPE_ETH_LAG (2) + #define SET_DPORT_TYPE_SP_PORT (3) + u16 next_stg_sel:2; + #define NEXT_STG_SEL_NONE (0) + #define NEXT_STG_SEL_ACL_S0 (1) + #define NEXT_STG_SEL_EPRO (2) + #define NEXT_STG_SEL_BYPASS (3) + u16 upcall_flag:2; + #define AUX_KEEP_FWD_TYPE (0) + #define AUX_FWD_TYPE_NML_FWD (1) + #define AUX_FWD_TYPE_UPCALL (2) + } up; + } dport; + + struct dqueue_act { + u16 que_id:11; + u16 rsv:5; + } dqueue; + + struct mcc_id_act { + u16 mcc_id:13; + u16 pri:1; + #define NBL_MCC_PRI_HIGH (0) + #define NBL_MCC_PRI_LOW (1) + uint32_t rsv:2; + } mcc_idx; + + struct vni_id_act { + u16 vni_id; + } vni_idx; + + struct stat_flow_id_act { + u16 stat_flow_id:11; + u16 rsv:5; + } stat_flow_idx; + + struct prbac_id_act { + u16 prbac_id; + } prbac_idx; + + struct dp_hash_act { + u16 dp_hash; + } dp_hash_idx; + + struct pri_mdf_dscp_act { + u16 dscp:6; + u16 i_ip_flag:1; + u16 o_ip_flag:1; + u16 off_sel:1; + #define NBL_DSCP_MDF_OFF_SEL_IPV4 (0) + #define NBL_DSCP_MDF_OFF_SEL_IPV6 (1) + u16 rsv:1; + u16 dscp_flag:1; + u16 rsv1:5; + } pri_mdf_dscp; + + struct pri_mdf_vlan_act { + u16 pri:3; + u16 rsv0:3; + u16 i_cvlan_flag:1; + u16 i_svlan_flag:1; + u16 o_cvlan_flag:1; + u16 o_svlan_flag:1; + u16 rsv1:6; + } pri_mdf_vlan; + + struct ttl_mdf_act { + u16 ttl_value:8; + u16 ttl_sub1_flag:1; + u16 rsv:7; + } ttl_mdf; + + struct vlan_mdf_act { + u16 vlan_value; + } vlan_mdf; + + struct dscp_mdf_act { + u16 ecn_value:2; + u16 dscp_value:6; + u16 ecn_en:1; + u16 dscp_en:1; + u16 rsv:6; + } dscp_mdf; + + struct index_value_act { + u16 index; + } index_value; + + struct set_aux_act { + u16 nstg_val:4; + u16 nstg_vld:1; + u16 ftype_val:3; + u16 ftype_vld:1; + u16 pkt_cos_val:3; + u16 pcos_vld:1; + u16 rsv:1; + #define NBL_SET_AUX_CLR_FLG (0) + #define NBL_SET_AUX_SET_FLG (1) + #define NBL_SET_AUX_SET_AUX (2) + u16 sub_id:2; + } set_aux; + + u16 data; +}; + +#pragma pack() + +enum nbl_chan_flow_rule_type { + NBL_FLOW_EPRO_ECPVPT_REG = 0, + NBL_FLOW_EPRO_ECPIPT_REG, + NBL_FLOW_DPED_TAB_TNL_REG, + NBL_FLOW_DPED_REPLACE, + NBL_FLOW_UPED_REPLACE, + NBL_FLOW_DPED_MIRROR_TABLE, + NBL_FLOW_DPED_MIR_CMD_0_TABLE, + NBL_FLOW_EPRO_MT_REG, + NBL_FLOW_EM0_TCAM_TABLE_REG, + NBL_FLOW_EM1_TCAM_TABLE_REG, + NBL_FLOW_EM2_TCAM_TABLE_REG, + NBL_FLOW_EM0_AD_TABLE_REG, + 
NBL_FLOW_EM1_AD_TABLE_REG, + NBL_FLOW_EM2_AD_TABLE_REG, + NBL_FLOW_IPRO_UDL_PKT_FLT_DMAC_REG, + NBL_FLOW_IPRO_UDL_PKT_FLT_CTRL_REG, + NBL_FLOW_ACTION_RAM_TBL, + NBL_FLOW_MCC_TBL_REG, + NBL_FLOW_EPRO_EPT_REG, + NBL_FLOW_IPRO_UP_SRC_PORT_TBL_REG, + NBL_FLOW_UCAR_FLOW_REG, + NBL_FLOW_EPRO_VPT_REG, + NBL_FLOW_UCAR_FLOW_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_GRP_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_GRP_REG, + NBL_FLOW_DSCH_VN_SHA2GRP_MAP_TBL_REG, + NBL_FLOW_DSCH_VN_GRP2SHA_MAP_TBL_REG, + NBL_FLOW_SHAPING_DPORT_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_DPORT_REG, + NBL_FLOW_DSCH_PSHA_EN_ADDR, + NBL_FLOW_UCAR_FLOW_4K_REG, + NBL_FLOW_UCAR_FLOW_4K_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_NET_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_NET_REG, + NBL_FLOW_DSCH_VN_NET2SHA_MAP_TBL_REG, + NBL_FLOW_DSCH_VN_SHA2NET_MAP_TBL_REG, + NBL_FLOW_UCAR_CAR_CTRL_ADDR, + NBL_FLOW_UCAR_GREEN_CELL_ADDR, + NBL_FLOW_UCAR_GREEN_PKT_ADDR, +}; + +enum nbl_chan_flow_mode { + NBL_FLOW_READ_MODE = 0, + NBL_FLOW_WRITE_MODE, + NBL_FLOW_READ_OR_WRITE_MODE, + NBL_FLOW_READ_AND_WRITE_MODE, + NBL_FLOW_READ_OR_AND_WRITE_MODE, +}; + +#define SFF8636_TRANSMIT_FIBER_850nm_VCSEL (0x0) +#define SFF8636_TRANSMIT_FIBER_1310nm_VCSEL (0x1) +#define SFF8636_TRANSMIT_FIBER_1550nm_VCSEL (0x2) +#define SFF8636_TRANSMIT_FIBER_1310nm_FP (0x3) +#define SFF8636_TRANSMIT_FIBER_1310nm_DFB (0x4) +#define SFF8636_TRANSMIT_FIBER_1550nm_DFB (0x5) +#define SFF8636_TRANSMIT_FIBER_1310nm_EML (0x6) +#define SFF8636_TRANSMIT_FIBER_1550nm_EML (0x7) +#define SFF8636_TRANSMIT_FIBER_OTHER (0x8) +#define SFF8636_TRANSMIT_FIBER_1490nm_DFB (0x9) +#define SFF8636_TRANSMIT_COPPER_UNEQUA (0xa) +#define SFF8636_TRANSMIT_COPPER_PASSIVE_EQUALIZED (0xb) +#define SFF8636_TRANSMIT_COPPER_NEAR_FAR_END (0xc) +#define SFF8636_TRANSMIT_COPPER_FAR_END (0xd) +#define SFF8636_TRANSMIT_COPPER_NEAR_END (0xe) +#define SFF8636_TRANSMIT_COPPER_LINEAR_ACTIVE (0xf) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c new file mode 100644 index 000000000000..83c9e99b6891 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c @@ -0,0 +1,1467 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_flow_leonis.h" +#include "nbl_p4_actions.h" + +static u32 nbl_flow_cfg_action_set_dport(u16 upcall_flag, u16 port_type, u16 vsi, u16 next_stg_sel) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = upcall_flag; + set_dport.dport.up.port_type = port_type; + set_dport.dport.up.port_id = vsi; + set_dport.dport.up.next_stg_sel = next_stg_sel; + + return set_dport.data + (NBL_ACT_SET_DPORT << 16); +} + +static u16 nbl_flow_cfg_action_set_dport_mcc_eth(u8 eth) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; + set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; + set_dport.dport.down.lag_vld = 0; + set_dport.dport.down.eth_vld = 1; + set_dport.dport.down.eth_id = eth; + + return set_dport.data; +} + +static u16 nbl_flow_cfg_action_set_dport_mcc_vsi(u16 vsi) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.port_id = vsi; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + + return set_dport.data; +} + +static int nbl_flow_cfg_action_mcc(u16 mcc_id, u32 *action0, u32 *action1) +{ + union nbl_action_data mcc_idx_act = {.data = 0}, set_aux_act = {.data = 0}; + + mcc_idx_act.mcc_idx.mcc_id = mcc_id; + *action0 = (u32)mcc_idx_act.data + (NBL_ACT_SET_MCC << 16); + + set_aux_act.set_aux.sub_id = NBL_SET_AUX_SET_AUX; + set_aux_act.set_aux.nstg_vld = 1; + set_aux_act.set_aux.nstg_val = NBL_NEXT_STG_MCC; + *action1 = (u32)set_aux_act.data + (NBL_ACT_SET_AUX_FIELD << 16); + + return 0; +} + +static int nbl_flow_cfg_action_up_tnl(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_EPRO); + + return 0; +} + +static int nbl_flow_cfg_action_lldp_lacp_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_EPRO); + + return 0; +} + +static int nbl_flow_cfg_action_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_NONE); + + return 0; +} + +static int nbl_flow_cfg_action_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_EPRO); + + return 0; +} + +static int nbl_flow_cfg_action_l2_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_action_l2_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_action_l3_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_action_l3_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_up_tnl_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 
eth_mode) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + u64 dst_mac = 0; + u8 sport; + u8 reverse_mac[ETH_ALEN]; + + nbl_convert_mac(param.mac, reverse_mac); + + memset(kt_data->hash_key, 0x0, sizeof(kt_data->hash_key)); + ether_addr_copy((u8 *)&dst_mac, reverse_mac); + + kt_data->info.dst_mac = dst_mac; + kt_data->info.svlan_id = param.vid; + kt_data->info.template = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_lldp_lacp_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_lldp_lacp_data_u *kt_data = (union nbl_l2_phy_lldp_lacp_data_u *)data; + u8 sport; + + kt_data->info.template = NBL_EM0_PT_PHY_UP_LLDP_LACP; + + kt_data->info.ether_type = param.ether_type; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + u64 dst_mac = 0; + u8 sport; + u8 reverse_mac[ETH_ALEN]; + + nbl_convert_mac(param.mac, reverse_mac); + + memset(kt_data->hash_key, 0x0, sizeof(kt_data->hash_key)); + ether_addr_copy((u8 *)&dst_mac, reverse_mac); + + kt_data->info.dst_mac = dst_mac; + kt_data->info.svlan_id = param.vid; + kt_data->info.template = NBL_EM0_PT_PHY_UP_UNICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_down_data_u *kt_data = (union nbl_l2_phy_down_data_u *)data; + u64 dst_mac = 0; + u8 sport; + u8 reverse_mac[ETH_ALEN]; + + nbl_convert_mac(param.mac, reverse_mac); + + memset(kt_data->hash_key, 0x0, sizeof(kt_data->hash_key)); + ether_addr_copy((u8 *)&dst_mac, reverse_mac); + + kt_data->info.dst_mac = dst_mac; + kt_data->info.svlan_id = param.vid; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_UNICAST_L2; + kt_data->info.padding = 0; + + sport = param.vsi >> 8; + if (eth_mode == NBL_TWO_ETHERNET_PORT) + sport &= 0xFE; + kt_data->info.sport = sport; + + return 0; +} + +static int nbl_flow_cfg_l2_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; + u8 sport; + + kt_data->info.dst_mac = 0xFFFFFFFFFFFF; + kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_l2_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; + u8 sport; + + kt_data->info.dst_mac = 0xFFFFFFFFFFFF; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + if (eth_mode == NBL_TWO_ETHERNET_PORT) + sport &= 0xFE; + kt_data->info.sport = sport; + + return 0; +} + +static int nbl_flow_cfg_l3_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; + u8 sport; + + 
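+	/*
+	 * 33:33:xx:xx:xx:xx is the Ethernet prefix reserved for IPv6
+	 * multicast, so keying dst_mac on 0x3333 selects L3 multicast frames.
+	 */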
kt_data->info.dst_mac = 0x3333; + kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L3; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_l3_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; + u8 sport; + + kt_data->info.dst_mac = 0x3333; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L3; + kt_data->info.padding = 0; + + sport = param.eth; + if (eth_mode == NBL_TWO_ETHERNET_PORT) + sport &= 0xFE; + kt_data->info.sport = sport; + + return 0; +} + +static void nbl_flow_cfg_kt_action_up_tnl(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_lldp_lacp_up(union nbl_common_data_u *data, + u32 action0, u32 action1) +{ + union nbl_l2_phy_lldp_lacp_data_u *kt_data = (union nbl_l2_phy_lldp_lacp_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_up(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_down(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_down_data_u *kt_data = (union nbl_l2_phy_down_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_l2_up(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static void nbl_flow_cfg_kt_action_l2_down(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static void nbl_flow_cfg_kt_action_l3_up(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static void nbl_flow_cfg_kt_action_l3_down(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +#define NBL_FLOW_OPS_ARR_ENTRY(type, action_func, kt_func, kt_action_func) \ + [type] = {.cfg_action = action_func, .cfg_key = kt_func, \ + .cfg_kt_action = kt_action_func} +static const struct nbl_flow_rule_cfg_ops cfg_ops[] = { + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_UP_TNL, + nbl_flow_cfg_action_up_tnl, + nbl_flow_cfg_up_tnl_key_value, + nbl_flow_cfg_kt_action_up_tnl), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_LLDP_LACP_UP, + nbl_flow_cfg_action_lldp_lacp_up, + nbl_flow_cfg_lldp_lacp_up_key_value, + nbl_flow_cfg_kt_action_lldp_lacp_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_UP, + nbl_flow_cfg_action_up, + nbl_flow_cfg_up_key_value, + nbl_flow_cfg_kt_action_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_DOWN, + nbl_flow_cfg_action_down, + nbl_flow_cfg_down_key_value, + nbl_flow_cfg_kt_action_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_UP, + nbl_flow_cfg_action_l2_up, + nbl_flow_cfg_l2_up_key_value, + 
nbl_flow_cfg_kt_action_l2_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_DOWN, + nbl_flow_cfg_action_l2_down, + nbl_flow_cfg_l2_down_key_value, + nbl_flow_cfg_kt_action_l2_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_UP, + nbl_flow_cfg_action_l3_up, + nbl_flow_cfg_l3_up_key_value, + nbl_flow_cfg_kt_action_l3_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_DOWN, + nbl_flow_cfg_action_l3_down, + nbl_flow_cfg_l3_down_key_value, + nbl_flow_cfg_kt_action_l3_down), +}; + +static unsigned long find_two_zero_bit(const unsigned long *addr, unsigned long size) +{ + unsigned long flow_id, next_id; + + flow_id = find_first_zero_bit(addr, size); + next_id = find_next_zero_bit(addr, size, flow_id + 1); + while ((flow_id + 1) != next_id || (flow_id % 2)) { + flow_id = next_id; + next_id = find_next_zero_bit(addr, size, flow_id + 1); + if (next_id == size) + return size; + } + + return flow_id; +} + +static int nbl_flow_alloc_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_fem_entry *flow) +{ + u32 flow_id; + + if (flow->flow_type == NBL_KT_HALF_MODE) { + flow_id = find_first_zero_bit(flow_mgt->flow_id, NBL_MACVLAN_TABLE_LEN); + if (flow_id == NBL_MACVLAN_TABLE_LEN) + return -ENOSPC; + set_bit(flow_id, flow_mgt->flow_id); + } else { + flow_id = find_two_zero_bit(flow_mgt->flow_id, NBL_MACVLAN_TABLE_LEN); + if (flow_id == NBL_MACVLAN_TABLE_LEN) + return -ENOSPC; + set_bit(flow_id, flow_mgt->flow_id); + set_bit(flow_id + 1, flow_mgt->flow_id); + } + + flow->flow_id = flow_id; + return 0; +} + +static void nbl_flow_free_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_fem_entry *flow) +{ + if (flow->flow_id == U16_MAX) + return; + + if (flow->flow_type == NBL_KT_HALF_MODE) { + clear_bit(flow->flow_id, flow_mgt->flow_id); + flow->flow_id = 0xFFFF; + } else { + clear_bit(flow->flow_id, flow_mgt->flow_id); + clear_bit(flow->flow_id + 1, flow_mgt->flow_id); + flow->flow_id = 0xFFFF; + } +} + +static int nbl_flow_alloc_tcam_id(struct nbl_flow_mgt *flow_mgt, + struct nbl_tcam_item *tcam_item) +{ + u32 tcam_id; + + tcam_id = find_first_zero_bit(flow_mgt->tcam_id, NBL_TCAM_TABLE_LEN); + if (tcam_id == NBL_TCAM_TABLE_LEN) + return -ENOSPC; + + set_bit(tcam_id, flow_mgt->tcam_id); + tcam_item->tcam_index = tcam_id; + + return 0; +} + +static void nbl_flow_free_tcam_id(struct nbl_flow_mgt *flow_mgt, + struct nbl_tcam_item *tcam_item) +{ + clear_bit(tcam_item->tcam_index, flow_mgt->tcam_id); + tcam_item->tcam_index = 0; +} + +void nbl_flow_set_mt_input(struct nbl_mt_input *mt_input, union nbl_common_data_u *kt_data, + u8 type, u16 flow_id) +{ + int i; + u16 key_len; + + key_len = ((type) == NBL_KT_HALF_MODE ? 
NBL_KT_BYTE_HALF_LEN : NBL_KT_BYTE_LEN); + for (i = 0; i < key_len; i++) + mt_input->key[i] = kt_data->hash_key[key_len - 1 - i]; + + mt_input->tbl_id = flow_id + NBL_EM_PHY_KT_OFFSET; + mt_input->depth = 0; + mt_input->power = 10; +} + +static void nbl_flow_key_hash(struct nbl_flow_fem_entry *flow, struct nbl_mt_input *mt_input) +{ + u16 ht0_hash = 0; + u16 ht1_hash = 0; + + ht0_hash = NBL_CRC16_CCITT(mt_input->key, NBL_KT_BYTE_LEN); + ht1_hash = NBL_CRC16_IBM(mt_input->key, NBL_KT_BYTE_LEN); + flow->ht0_hash = nbl_hash_transfer(ht0_hash, mt_input->power, mt_input->depth); + flow->ht1_hash = nbl_hash_transfer(ht1_hash, mt_input->power, mt_input->depth); +} + +static bool nbl_pp_ht0_ht1_search(struct nbl_flow_ht_mng *pp_ht0_mng, u16 ht0_hash, + struct nbl_flow_ht_mng *pp_ht1_mng, u16 ht1_hash, + struct nbl_common_info *common) +{ + struct nbl_flow_ht_tbl *node0 = NULL; + struct nbl_flow_ht_tbl *node1 = NULL; + u16 i = 0; + bool is_find = false; + + node0 = pp_ht0_mng->hash_map[ht0_hash]; + if (node0) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node0->key[i].vid && node0->key[i].ht_other_index == ht1_hash) { + is_find = true; + nbl_info(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + node0->key[i].vid, node0->key[i].kt_index); + return is_find; + } + + node1 = pp_ht1_mng->hash_map[ht1_hash]; + if (node1) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node1->key[i].vid && node1->key[i].ht_other_index == ht0_hash) { + is_find = true; + nbl_info(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + node1->key[i].vid, node1->key[i].kt_index); + return is_find; + } + + return is_find; +} + +static bool nbl_flow_check_ht_conflict(struct nbl_flow_ht_mng *pp_ht0_mng, + struct nbl_flow_ht_mng *pp_ht1_mng, + u16 ht0_hash, u16 ht1_hash, struct nbl_common_info *common) +{ + return nbl_pp_ht0_ht1_search(pp_ht0_mng, ht0_hash, pp_ht1_mng, ht1_hash, common); +} + +static int nbl_flow_find_ht_avail_table(struct nbl_flow_ht_mng *pp_ht0_mng, + struct nbl_flow_ht_mng *pp_ht1_mng, + u16 ht0_hash, u16 ht1_hash) +{ + struct nbl_flow_ht_tbl *pp_ht0_node = NULL; + struct nbl_flow_ht_tbl *pp_ht1_node = NULL; + + pp_ht0_node = pp_ht0_mng->hash_map[ht0_hash]; + pp_ht1_node = pp_ht1_mng->hash_map[ht1_hash]; + + if (!pp_ht0_node && !pp_ht1_node) { + return 0; + } else if (pp_ht0_node && !pp_ht1_node) { + if (pp_ht0_node->ref_cnt >= NBL_HASH_CFT_AVL) + return 1; + else + return 0; + } else if (!pp_ht0_node && pp_ht1_node) { + if (pp_ht1_node->ref_cnt >= NBL_HASH_CFT_AVL) + return 0; + else + return 1; + } else { + if ((pp_ht0_node->ref_cnt <= NBL_HASH_CFT_AVL || + (pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht0_node->ref_cnt < NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL))) + return 0; + else if (((pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt <= NBL_HASH_CFT_AVL) || + (pp_ht0_node->ref_cnt == NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt < NBL_HASH_CFT_MAX))) + return 1; + else + return -1; + } +} + +int nbl_flow_insert_pp_ht(struct nbl_flow_ht_mng *pp_ht_mng, + u16 hash, u16 hash_other, u32 key_index) +{ + struct nbl_flow_ht_tbl *node; + int i; + + node = pp_ht_mng->hash_map[hash]; + if (!node) { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOSPC; + pp_ht_mng->hash_map[hash] = node; + } + + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (node->key[i].vid == 0) { + node->key[i].vid = 1; + node->key[i].ht_other_index = hash_other; + node->key[i].kt_index = key_index; + 
node->ref_cnt++; + break; + } + } + + return i; +} + +static void nbl_flow_add_ht(struct nbl_ht_item *ht_item, struct nbl_flow_fem_entry *flow, + u32 key_index, struct nbl_flow_ht_mng *pp_ht_mng, u8 ht_table) +{ + u16 ht_hash; + u16 ht_other_hash; + + ht_hash = ht_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash; + ht_other_hash = ht_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash; + + ht_item->hash_bucket = nbl_flow_insert_pp_ht(pp_ht_mng, ht_hash, ht_other_hash, key_index); + if (ht_item->hash_bucket < 0) + return; + + ht_item->ht_table = ht_table; + ht_item->key_index = key_index; + ht_item->ht0_hash = flow->ht0_hash; + ht_item->ht1_hash = flow->ht1_hash; + + flow->hash_bucket = ht_item->hash_bucket; + flow->hash_table = ht_item->ht_table; +} + +static void nbl_flow_del_ht(struct nbl_ht_item *ht_item, struct nbl_flow_fem_entry *flow, + struct nbl_flow_ht_mng *pp_ht_mng) +{ + struct nbl_flow_ht_tbl *pp_ht_node = NULL; + u16 ht_hash; + u16 ht_other_hash; + int i; + + ht_hash = ht_item->ht_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash; + ht_other_hash = ht_item->ht_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash; + + pp_ht_node = pp_ht_mng->hash_map[ht_hash]; + if (!pp_ht_node) + return; + + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht_node->key[i].vid == 1 && + pp_ht_node->key[i].ht_other_index == ht_other_hash) { + memset(&pp_ht_node->key[i], 0, sizeof(pp_ht_node->key[i])); + pp_ht_node->ref_cnt--; + break; + } + } + + if (!pp_ht_node->ref_cnt) { + kfree(pp_ht_node); + pp_ht_mng->hash_map[ht_hash] = NULL; + } +} + +static int nbl_flow_send_2hw(struct nbl_resource_mgt *res_mgt, struct nbl_ht_item ht_item, + struct nbl_kt_item kt_item, u8 key_type) +{ + struct nbl_phy_ops *phy_ops; + u16 hash, hash_other; + int ret = 0; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + ret = phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key, + ht_item.key_index, key_type); + if (ret) + goto set_kt_fail; + + hash = ht_item.ht_table == NBL_HT0 ? ht_item.ht0_hash : ht_item.ht1_hash; + hash_other = ht_item.ht_table == NBL_HT0 ? ht_item.ht1_hash : ht_item.ht0_hash; + ret = phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, hash_other, ht_item.ht_table, + ht_item.hash_bucket, ht_item.key_index, 1); + if (ret) + goto set_ht_fail; + + ret = phy_ops->search_key(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + kt_item.kt_data.hash_key, key_type); + if (ret) + goto search_fail; + + return 0; + +search_fail: + ret = phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, 0, ht_item.ht_table, + ht_item.hash_bucket, 0, 0); +set_ht_fail: + memset(kt_item.kt_data.hash_key, 0, sizeof(kt_item.kt_data.hash_key)); + phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key, + ht_item.key_index, key_type); +set_kt_fail: + return ret; +} + +static int nbl_flow_del_2hw(struct nbl_resource_mgt *res_mgt, struct nbl_ht_item ht_item, + struct nbl_kt_item kt_item, u8 key_type) +{ + struct nbl_phy_ops *phy_ops; + u16 hash; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + memset(kt_item.kt_data.hash_key, 0, sizeof(kt_item.kt_data.hash_key)); + phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key, + ht_item.key_index, key_type); + + hash = ht_item.ht_table == NBL_HT0 ? 
ht_item.ht0_hash : ht_item.ht1_hash; + phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, 0, ht_item.ht_table, + ht_item.hash_bucket, 0, 0); + + return 0; +} + +static void nbl_flow_cfg_tcam(struct nbl_tcam_item *tcam_item, struct nbl_ht_item *ht_item, + struct nbl_kt_item *kt_item, u32 action0, u32 action1) +{ + tcam_item->key_mode = NBL_KT_HALF_MODE; + tcam_item->pp_type = NBL_PT_PP0; + tcam_item->tcam_action[0] = action0; + tcam_item->tcam_action[1] = action1; + memcpy(&tcam_item->ht_item, ht_item, sizeof(struct nbl_ht_item)); + memcpy(&tcam_item->kt_item, kt_item, sizeof(struct nbl_kt_item)); +} + +static int nbl_flow_add_tcam(struct nbl_resource_mgt *res_mgt, struct nbl_tcam_item tcam_item) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->add_tcam(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_item.tcam_index, + tcam_item.kt_item.kt_data.hash_key, tcam_item.tcam_action, + tcam_item.key_mode, NBL_PT_PP0); +} + +static void nbl_flow_del_tcam(struct nbl_resource_mgt *res_mgt, struct nbl_tcam_item tcam_item) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->del_tcam(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_item.tcam_index, + tcam_item.key_mode, NBL_PT_PP0); +} + +static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_param param, + s32 type, struct nbl_flow_fem_entry *flow) +{ + struct nbl_flow_mgt *flow_mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_common_info *common; + struct nbl_mt_input mt_input; + struct nbl_ht_item ht_item; + struct nbl_kt_item kt_item; + struct nbl_tcam_item tcam_item; + struct nbl_flow_ht_mng *pp_ht_mng = NULL; + u32 action0, action1; + int ht_table; + int ret = 0; + + memset(&mt_input, 0, sizeof(mt_input)); + memset(&ht_item, 0, sizeof(ht_item)); + memset(&kt_item, 0, sizeof(kt_item)); + memset(&tcam_item, 0, sizeof(tcam_item)); + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + flow->flow_type = param.type; + flow->type = type; + flow->flow_id = 0xFFFF; + + ret = nbl_flow_alloc_flow_id(flow_mgt, flow); + if (ret) + return ret; + + ret = cfg_ops[type].cfg_action(param, &action0, &action1); + if (ret) + return ret; + + ret = cfg_ops[type].cfg_key(&kt_item.kt_data, param, NBL_COMMON_TO_ETH_MODE(common)); + if (ret) + return ret; + + nbl_flow_set_mt_input(&mt_input, &kt_item.kt_data, param.type, flow->flow_id); + nbl_flow_key_hash(flow, &mt_input); + + if (nbl_flow_check_ht_conflict(&flow_mgt->pp0_ht0_mng, &flow_mgt->pp0_ht1_mng, + flow->ht0_hash, flow->ht1_hash, common)) + flow->tcam_flag = true; + + ht_table = nbl_flow_find_ht_avail_table(&flow_mgt->pp0_ht0_mng, + &flow_mgt->pp0_ht1_mng, + flow->ht0_hash, flow->ht1_hash); + if (ht_table < 0) + flow->tcam_flag = true; + + if (!flow->tcam_flag) { + pp_ht_mng = ht_table == NBL_HT0 ? 
&flow_mgt->pp0_ht0_mng : &flow_mgt->pp0_ht1_mng; + nbl_flow_add_ht(&ht_item, flow, mt_input.tbl_id, pp_ht_mng, ht_table); + + cfg_ops[type].cfg_kt_action(&kt_item.kt_data, action0, action1); + ret = nbl_flow_send_2hw(res_mgt, ht_item, kt_item, param.type); + } else { + ret = nbl_flow_alloc_tcam_id(flow_mgt, &tcam_item); + if (ret) + goto out; + + nbl_flow_cfg_tcam(&tcam_item, &ht_item, &kt_item, action0, action1); + flow->tcam_index = tcam_item.tcam_index; + + ret = nbl_flow_add_tcam(res_mgt, tcam_item); + } + +out: + if (ret) { + if (flow->tcam_flag) + nbl_flow_free_tcam_id(flow_mgt, &tcam_item); + else + nbl_flow_del_ht(&ht_item, flow, pp_ht_mng); + + nbl_flow_free_flow_id(flow_mgt, flow); + } + + return ret; +} + +static void nbl_flow_del_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_fem_entry *flow) +{ + struct nbl_flow_mgt *flow_mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_ht_item ht_item; + struct nbl_kt_item kt_item; + struct nbl_tcam_item tcam_item; + struct nbl_flow_ht_mng *pp_ht_mng = NULL; + + if (flow->flow_id == 0xFFFF) + return; + + memset(&ht_item, 0, sizeof(ht_item)); + memset(&kt_item, 0, sizeof(kt_item)); + memset(&tcam_item, 0, sizeof(tcam_item)); + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (!flow->tcam_flag) { + ht_item.ht_table = flow->hash_table; + ht_item.ht0_hash = flow->ht0_hash; + ht_item.ht1_hash = flow->ht1_hash; + ht_item.hash_bucket = flow->hash_bucket; + + pp_ht_mng = flow->hash_table == NBL_HT0 ? &flow_mgt->pp0_ht0_mng + : &flow_mgt->pp0_ht1_mng; + + nbl_flow_del_ht(&ht_item, flow, pp_ht_mng); + nbl_flow_del_2hw(res_mgt, ht_item, kt_item, flow->flow_type); + } else { + tcam_item.tcam_index = flow->tcam_index; + nbl_flow_del_tcam(res_mgt, tcam_item); + nbl_flow_free_tcam_id(flow_mgt, &tcam_item); + } + + nbl_flow_free_flow_id(flow_mgt, flow); +} + +static int nbl_flow_add_mcc_node(struct nbl_flow_multi_group *multi_group, + struct nbl_resource_mgt *res_mgt, int eth, u16 vsi_id, u16 mcc_id) +{ + struct nbl_flow_mcc_node *mcc_node = NULL; + struct nbl_phy_ops *phy_ops; + u16 prev_mcc_id, mcc_action; + int ret = 0; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + mcc_node = kzalloc(sizeof(*mcc_node), GFP_KERNEL); + if (!mcc_node) + return -ENOMEM; + + mcc_action = eth >= 0 ? 
nbl_flow_cfg_action_set_dport_mcc_eth((u8)eth) + : nbl_flow_cfg_action_set_dport_mcc_vsi(vsi_id); + mcc_node->mcc_id = mcc_id; + list_add_tail(&mcc_node->node, &multi_group->mcc_list); + + if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list)) + prev_mcc_id = NBL_MCC_ID_INVALID; + else + prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id; + + ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id, prev_mcc_id, mcc_action); + if (ret) { + list_del(&mcc_node->node); + kfree(mcc_node); + return -EFAULT; + } + + return 0; +} + +static void nbl_flow_del_mcc_node(struct nbl_flow_multi_group *multi_group, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_mcc_node *mcc_node) +{ + struct nbl_phy_ops *phy_ops; + u16 prev_mcc_id, next_mcc_id; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (list_entry_is_head(mcc_node, &multi_group->mcc_list, node)) + return; + + if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list)) + prev_mcc_id = NBL_MCC_ID_INVALID; + else + prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id; + + if (nbl_list_is_last(&mcc_node->node, &multi_group->mcc_list)) + next_mcc_id = NBL_MCC_ID_INVALID; + else + next_mcc_id = list_next_entry(mcc_node, node)->mcc_id; + + phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, + prev_mcc_id, next_mcc_id); + + list_del(&mcc_node->node); + kfree(mcc_node); +} + +static void nbl_flow_macvlan_node_del_action_func(void *priv, void *x_key, void *y_key, + void *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_macvlan_node_data *rule_data = (struct nbl_flow_macvlan_node_data *)data; + int i; + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); +} + +static int nbl_flow_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_common_info *common; + struct nbl_flow_macvlan_node_data *rule_data; + void *mac_hash_tbl; + struct nbl_flow_param param = {0}; + int i; + int ret; + u16 eth_id; + u16 node_num; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + + eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; + node_num = nbl_common_get_hash_xy_node_num(mac_hash_tbl); + if (node_num >= flow_mgt->unicast_mac_threshold) + return -ENOSPC; + + if (nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan)) + return -EEXIST; + + rule_data = kzalloc(sizeof(*rule_data), GFP_KERNEL); + if (!rule_data) + return -ENOMEM; + + param.mac = mac; + param.vid = vlan; + param.eth = eth_id; + param.vsi = vsi; + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) { + if (nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i])) + break; + } + if (i != NBL_FLOW_MACVLAN_MAX) { + while (--i + 1) + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); + goto rule_err; + } + + rule_data->vsi = vsi; + ret = nbl_common_alloc_hash_xy_node(mac_hash_tbl, mac, &vlan, rule_data); + if (ret) + goto node_err; + + kfree(rule_data); + + return 0; + +node_err: + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); +rule_err: + kfree(rule_data); + return -EFAULT; +} + +static void nbl_flow_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_macvlan_node_data *rule_data; + void *mac_hash_tbl; + int 
i; + u16 eth_id; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; + + rule_data = nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan); + if (!rule_data) + return; + + if (rule_data->vsi != vsi) + return; + + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + nbl_flow_del_flow(res_mgt, &rule_data->entry[i]); + + nbl_common_free_hash_xy_node(mac_hash_tbl, mac, &vlan); +} + +static int nbl_flow_add_lag(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_flow_lacp_rule *rule; + struct nbl_flow_param param = {0}; + + list_for_each_entry(rule, &flow_mgt->lacp_list, node) + if (rule->vsi == vsi) + return 0; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + param.vsi = vsi; + param.ether_type = ETH_P_SLOW; + + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_LLDP_LACP_UP, &rule->entry)) { + nbl_err(common, NBL_DEBUG_FLOW, "Fail to add lag flow for vsi %d", vsi); + kfree(rule); + return -EFAULT; + } + + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->lacp_list); + + return 0; +} + +static void nbl_flow_del_lag(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_lacp_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->lacp_list, node) + if (rule->vsi == vsi) + break; + + if (list_entry_is_head(rule, &flow_mgt->lacp_list, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->entry); + + list_del(&rule->node); + kfree(rule); +} + +static int nbl_flow_add_lldp(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_flow_lldp_rule *rule; + struct nbl_flow_param param = {0}; + + list_for_each_entry(rule, &flow_mgt->lldp_list, node) + if (rule->vsi == vsi) + return 0; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + param.vsi = vsi; + param.ether_type = ETH_P_LLDP; + + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_LLDP_LACP_UP, &rule->entry)) { + nbl_err(common, NBL_DEBUG_FLOW, "Fail to add lldp flow for vsi %d", vsi); + kfree(rule); + return -EFAULT; + } + + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->lldp_list); + + return 0; +} + +static void nbl_flow_del_lldp(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_lldp_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->lldp_list, node) + if (rule->vsi == vsi) + break; + + if (list_entry_is_head(rule, &flow_mgt->lldp_list, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->entry); + + list_del(&rule->node); + kfree(rule); +} + +static int nbl_flow_add_multi_rule(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_multi_group *multi_group; + struct nbl_flow_mcc_index_key index_key = {0}; + 
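+	/*
+	 * Fetch this VSI's mcc_id from the index table and chain an MCC node
+	 * for it onto this ethernet port's multi_flow group.
+	 */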
u16 mcc_id; + u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + + NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi); + mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); + + multi_group = &flow_mgt->multi_flow[eth]; + + return nbl_flow_add_mcc_node(multi_group, res_mgt, -1, vsi, mcc_id); +} + +static void nbl_flow_del_multi_rule(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_multi_group *multi_group; + struct nbl_flow_mcc_node *mcc_node; + struct nbl_flow_mcc_index_key index_key = {0}; + u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + u16 mcc_id; + + NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi); + mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); + nbl_common_free_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); + + multi_group = &flow_mgt->multi_flow[eth]; + + list_for_each_entry(mcc_node, &multi_group->mcc_list, node) + if (mcc_node->mcc_id == mcc_id) { + nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); + return; + } +} + +static int nbl_flow_add_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_multi_group *multi_group; + struct nbl_flow_mcc_index_key index_key = {0}; + struct nbl_flow_param param = {0}; + int i, ret; + + NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_ETH, eth); + param.mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, sizeof(index_key)); + param.eth = eth; + + multi_group = &flow_mgt->multi_flow[eth]; + for (i = NBL_FLOW_MACVLAN_MAX; i < NBL_FLOW_TYPE_MAX; i++) { + ret = nbl_flow_add_flow(res_mgt, param, i, + &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]); + if (ret) + goto add_macvlan_fail; + } + + ret = nbl_flow_add_mcc_node(multi_group, res_mgt, eth, -1, param.mcc_id); + if (ret) + goto add_mcc_fail; + + multi_group->ether_id = eth; + multi_group->mcc_id = param.mcc_id; + + return 0; + +add_mcc_fail: +add_macvlan_fail: + while (--i >= NBL_FLOW_MACVLAN_MAX) + nbl_flow_del_flow(res_mgt, &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]); + return ret; +} + +static void nbl_flow_del_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_multi_group *multi_group = &flow_mgt->multi_flow[eth]; + struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe; + int i; + + if (!multi_group->mcc_id) + return; + + for (i = NBL_FLOW_MACVLAN_MAX; i < NBL_FLOW_TYPE_MAX; i++) + nbl_flow_del_flow(res_mgt, &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]); + + list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_list, node) + nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); + + memset(multi_group, 0, sizeof(*multi_group)); + INIT_LIST_HEAD(&multi_group->mcc_list); +} + +static void nbl_flow_remove_multi_group(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) + nbl_flow_del_multi_group(res_mgt, i); +} + +static int nbl_flow_setup_multi_group(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i, ret = 0; + + for_each_set_bit(i, 
eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + ret = nbl_flow_add_multi_group(res_mgt, i); + if (ret) + goto fail; + } + + return 0; + +fail: + nbl_flow_remove_multi_group(res_mgt); + return ret; +} + +static int nbl_flow_macvlan_node_vsi_match_func(void *condition, void *x_key, void *y_key, + void *data) +{ + u16 vsi = *(u16 *)condition; + struct nbl_flow_macvlan_node_data *rule_data = (struct nbl_flow_macvlan_node_data *)data; + + return rule_data->vsi == vsi ? 0 : -1; +} + +static void nbl_flow_clear_flow(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + void *mac_hash_tbl; + struct nbl_hash_xy_tbl_scan_key scan_key; + u8 eth_id; + + eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); + mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id]; + + NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_DELETE, NBL_HASH_TBL_ALL_SCAN, + false, NULL, NULL, &vsi_id, + &nbl_flow_macvlan_node_vsi_match_func, res_mgt, + &nbl_flow_macvlan_node_del_action_func); + nbl_common_scan_hash_xy_node(mac_hash_tbl, &scan_key); + + nbl_flow_del_multi_rule(res_mgt, vsi_id); +} + +char templete_name[NBL_FLOW_TYPE_MAX][16] = { + "up_tnl", + "up", + "down", + "l2_mc_up", + "l2_mc_down", + "l3_mc_up", + "l3_mc_down" +}; + +static void nbl_flow_id_dump(struct seq_file *m, struct nbl_flow_fem_entry *entry, char *title) +{ + seq_printf(m, "%s: flow_id %u, ht0 0x%x, ht1 0x%x, table: %u, bucket: %u\n", title, + entry->flow_id, entry->ht0_hash, entry->ht1_hash, + entry->hash_table, entry->hash_bucket); +} + +static void nbl_flow_macvlan_node_show_action_func(void *priv, void *x_key, void *y_key, + void *data) +{ + struct seq_file *m = (struct seq_file *)priv; + u8 *mac = (u8 *)x_key; + u16 vlan = *(u16 *)y_key; + struct nbl_flow_macvlan_node_data *rule_data = (struct nbl_flow_macvlan_node_data *)data; + int i; + + seq_printf(m, "\nvsi %d, vlan %d MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", + rule_data->vsi, vlan, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) + nbl_flow_id_dump(m, &rule_data->entry[i], templete_name[i]); +} + +static void nbl_flow_dump_flow(void *priv, struct seq_file *m) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_flow_multi_group *multi_group; + struct nbl_flow_lldp_rule *lldp_rule; + struct nbl_flow_lacp_rule *lacp_rule; + struct nbl_hash_xy_tbl_scan_key scan_key; + int i, j; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + multi_group = &flow_mgt->multi_flow[i]; + seq_printf(m, "\nether_id %d, mcc_id %d, status %u\n" + !i, + multi_group->ether_id, multi_group->mcc_id, multi_group->network_status); + for (j = NBL_FLOW_MACVLAN_MAX; j < NBL_FLOW_TYPE_MAX; j++) + nbl_flow_id_dump(m, &multi_group->entry[j - NBL_FLOW_MACVLAN_MAX], + templete_name[j]); + } + + NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN, + false, NULL, NULL, NULL, NULL, m, + &nbl_flow_macvlan_node_show_action_func); + for (i = 0; i < NBL_MAX_ETHERNET; i++) + nbl_common_scan_hash_xy_node(flow_mgt->mac_hash_tbl[i], &scan_key); + + seq_puts(m, "\n"); + + list_for_each_entry(lldp_rule, &flow_mgt->lldp_list, node) + seq_printf(m, "LLDP rule: vsi %d\n", lldp_rule->vsi); + + seq_puts(m, "\n"); + list_for_each_entry(lacp_rule, &flow_mgt->lacp_list, node) + 
seq_printf(m, "LACP rule: vsi %d\n", lacp_rule->vsi); +} + +/* NBL_FLOW_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_FLOW_OPS_TBL \ +do { \ + NBL_FLOW_SET_OPS(add_macvlan, nbl_flow_add_macvlan); \ + NBL_FLOW_SET_OPS(del_macvlan, nbl_flow_del_macvlan); \ + NBL_FLOW_SET_OPS(add_lag_flow, nbl_flow_add_lag); \ + NBL_FLOW_SET_OPS(del_lag_flow, nbl_flow_del_lag); \ + NBL_FLOW_SET_OPS(add_lldp_flow, nbl_flow_add_lldp); \ + NBL_FLOW_SET_OPS(del_lldp_flow, nbl_flow_del_lldp); \ + NBL_FLOW_SET_OPS(add_multi_rule, nbl_flow_add_multi_rule); \ + NBL_FLOW_SET_OPS(del_multi_rule, nbl_flow_del_multi_rule); \ + NBL_FLOW_SET_OPS(setup_multi_group, nbl_flow_setup_multi_group); \ + NBL_FLOW_SET_OPS(remove_multi_group, nbl_flow_remove_multi_group); \ + NBL_FLOW_SET_OPS(clear_flow, nbl_flow_clear_flow); \ + NBL_FLOW_SET_OPS(dump_flow, nbl_flow_dump_flow); \ +} while (0) + +static void nbl_flow_remove_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + int i; + struct nbl_hash_xy_tbl_del_key del_key; + + nbl_common_remove_index_table(flow_mgt->mcc_tbl_priv); + + NBL_HASH_XY_TBL_DEL_KEY_INIT(&del_key, res_mgt, &nbl_flow_macvlan_node_del_action_func); + for (i = 0; i < NBL_MAX_ETHERNET; i++) + nbl_common_remove_hash_xy_table(flow_mgt->mac_hash_tbl[i], &del_key); + + devm_kfree(dev, flow_mgt); + NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = NULL; +} + +static int nbl_flow_setup_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt) +{ + struct nbl_index_tbl_key mcc_tbl_key; + struct nbl_hash_xy_tbl_key macvlan_tbl_key; + struct nbl_flow_mgt *flow_mgt; + struct nbl_eth_info *eth_info; + int i; + + flow_mgt = devm_kzalloc(dev, sizeof(struct nbl_flow_mgt), GFP_KERNEL); + if (!flow_mgt) + return -ENOMEM; + + NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = flow_mgt; + + NBL_INDEX_TBL_KEY_INIT(&mcc_tbl_key, dev, NBL_FLOW_MCC_INDEX_START, + NBL_FLOW_MCC_INDEX_SIZE, sizeof(struct nbl_flow_mcc_index_key)); + flow_mgt->mcc_tbl_priv = nbl_common_init_index_table(&mcc_tbl_key); + if (!flow_mgt->mcc_tbl_priv) + goto alloc_mcc_tbl_failed; + + NBL_HASH_XY_TBL_KEY_INIT(&macvlan_tbl_key, dev, ETH_ALEN, sizeof(u16), + sizeof(struct nbl_flow_macvlan_node_data), + NBL_MACVLAN_TBL_BUCKET_SIZE, NBL_MACVLAN_X_AXIS_BUCKET_SIZE, + NBL_MACVLAN_Y_AXIS_BUCKET_SIZE, false); + for (i = 0; i < NBL_MAX_ETHERNET; i++) { + (flow_mgt)->mac_hash_tbl[i] = nbl_common_init_hash_xy_table(&macvlan_tbl_key); + if (!flow_mgt->mac_hash_tbl[i]) + goto alloc_machash_fail; + } + + for (i = 0; i < NBL_MAX_ETHERNET; i++) + INIT_LIST_HEAD(&flow_mgt->multi_flow[i].mcc_list); + + INIT_LIST_HEAD(&flow_mgt->lldp_list); + INIT_LIST_HEAD(&flow_mgt->lacp_list); + + eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + flow_mgt->unicast_mac_threshold = NBL_TOTAL_MACVLAN_NUM / eth_info->eth_num; + + return 0; + +alloc_machash_fail: +alloc_mcc_tbl_failed: + nbl_flow_remove_mgt(dev, res_mgt); + return -1; +} + +int nbl_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_phy_ops *phy_ops; + struct device *dev; + int ret = 0; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + ret = nbl_flow_setup_mgt(dev, res_mgt); + if (ret) + goto setup_mgt_fail; + + ret = phy_ops->init_fem(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (ret) + goto init_fem_fail; + + return 0; + +init_fem_fail: + nbl_flow_remove_mgt(dev, res_mgt); +setup_mgt_fail: + return -1; +} + +void nbl_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt) +{ + struct 
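
The NBL_FLOW_OPS_TBL / NBL_FLOW_SET_OPS pair above is a classic X-macro: the table names every op exactly once, and each expansion site redefines NBL_FLOW_SET_OPS to either install or clear the callback. A self-contained sketch of the same technique, using a hypothetical two-entry ops struct rather than the driver's real nbl_resource_ops:

#include <stddef.h>
#include <stdio.h>

struct demo_ops {
	int  (*add)(int);
	void (*del)(int);
};

static int  demo_add(int x) { printf("add %d\n", x); return 0; }
static void demo_del(int x) { printf("del %d\n", x); }

/* Name every op exactly once; each expansion site decides what to do with it. */
#define DEMO_OPS_TBL			\
do {					\
	DEMO_SET_OPS(add, demo_add);	\
	DEMO_SET_OPS(del, demo_del);	\
} while (0)

static void demo_setup_ops(struct demo_ops *ops)
{
#define DEMO_SET_OPS(name, func) do { ops->name = func; } while (0)
	DEMO_OPS_TBL;
#undef DEMO_SET_OPS
}

static void demo_remove_ops(struct demo_ops *ops)
{
#define DEMO_SET_OPS(name, func) do { ops->name = NULL; } while (0)
	DEMO_OPS_TBL;
#undef DEMO_SET_OPS
}

int main(void)
{
	struct demo_ops ops = {0};

	demo_setup_ops(&ops);
	ops.add(1);
	ops.del(1);
	demo_remove_ops(&ops);
	return 0;
}

Adding a new op then only requires one new line in the table; the setup and remove paths pick it up automatically.
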
device *dev; + struct nbl_flow_mgt *flow_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + if (!flow_mgt) + return; + + nbl_flow_remove_mgt(dev, res_mgt); +} + +int nbl_flow_setup_ops_leonis(struct nbl_resource_ops *res_ops) +{ +#define NBL_FLOW_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_FLOW_OPS_TBL; +#undef NBL_FLOW_SET_OPS + + return 0; +} + +void nbl_flow_remove_ops_leonis(struct nbl_resource_ops *res_ops) +{ +#define NBL_FLOW_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_FLOW_OPS_TBL; +#undef NBL_FLOW_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h new file mode 100644 index 000000000000..68c8ce7a0fef --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ +#ifndef _NBL_FLOW_LEONIS_H_ +#define _NBL_FLOW_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_resource.h" + +#define NBL_EM_PHY_KT_OFFSET (0x1F000) + +#define NBL_TOTAL_MACVLAN_NUM 2048 +#define NBL_MAX_ACTION_NUM 16 + +#define NBL_SPORT_ETH_OFFSET 8 +#define NBL_MCC_NUM_PER_SWITCH 256 + +#define NBL_FLOW_MCC_INDEX_SIZE 1024 +#define NBL_FLOW_MCC_INDEX_START (7 * 1024) + +#define NBL_MACVLAN_TBL_BUCKET_SIZE 64 +#define NBL_MACVLAN_X_AXIS_BUCKET_SIZE 64 +#define NBL_MACVLAN_Y_AXIS_BUCKET_SIZE 16 + +enum nbl_flow_mcc_index_type { + NBL_MCC_INDEX_ETH, + NBL_MCC_INDEX_VSI, + NBL_MCC_INDEX_BOND, +}; + +struct nbl_flow_mcc_index_key { + enum nbl_flow_mcc_index_type type; + union { + u8 eth_id; + u16 vsi_id; + u32 data; + }; +}; + +#define NBL_FLOW_MCC_INDEX_KEY_INIT(key, key_type_arg, value_arg) \ +do { \ + typeof(key) __key = key; \ + typeof(key_type_arg) __type = key_type_arg; \ + typeof(value_arg) __value = value_arg; \ + __key->type = __type; \ + if (__type == NBL_MCC_INDEX_ETH) \ + __key->eth_id = __value; \ + else if (__type == NBL_MCC_INDEX_VSI || __type == NBL_MCC_INDEX_BOND) \ + __key->vsi_id = __value; \ +} while (0) + +#pragma pack(1) + +#define NBL_DUPPKT_PTYPE_NA 135 +#define NBL_DUPPKT_PTYPE_NS 136 + +struct nbl_flow_macvlan_node_data { + struct nbl_flow_fem_entry entry[NBL_FLOW_MACVLAN_MAX]; + u16 vsi; +}; + +union nbl_l2_phy_up_data_u { + struct nbl_l2_phy_up_data { + u32 act0:22; + u64 rsv1:62; + u32 padding:4; + u32 sport:4; + u32 svlan_id:16; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_UP_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_UP_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_up_data)]; +}; + +union nbl_l2_phy_lldp_lacp_data_u { + struct nbl_l2_phy_lldp_lacp_data { + u32 act0:22; + u32 rsv1:2; + u8 padding[14]; + u32 sport:4; + u32 ether_type:16; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_LLDP_LACP_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_lldp_lacp_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_LLDP_LACP_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_lldp_lacp_data)]; +}; + +union nbl_l2_phy_down_data_u { + struct nbl_l2_phy_down_data { + u32 act0:22; + u32 rsv2:10; + u64 rsv1:52; + u32 padding:6; + u32 sport:2; + u32 svlan_id:16; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_DOWN_DATA_TAB_WIDTH (sizeof(struct 
nbl_l2_phy_down_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_DOWN_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_data)]; +}; + +union nbl_l2_phy_up_multi_data_u { + struct nbl_l2_phy_up_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:20; + u64 rsv1:36; + u32 padding:4; + u32 sport:4; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_multi_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_up_multi_data)]; +}; + +union nbl_l2_phy_down_multi_data_u { + struct nbl_l2_phy_down_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:20; + u64 rsv1:36; + u32 padding:6; + u32 sport:2; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_multi_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_multi_data)]; +}; + +union nbl_l3_phy_up_multi_data_u { + struct nbl_l3_phy_up_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:20; + u64 rsv1:60; + u32 padding:12; + u32 sport:4; + u64 dst_mac:16; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_up_multi_data) \ + / sizeof(u32)) + u32 data[NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_phy_up_multi_data)]; +}; + +union nbl_l3_phy_down_multi_data_u { + struct nbl_l3_phy_down_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv3:20; + u64 rsv2; + u64 rsv1:4; + u32 padding:6; + u32 sport:2; + u64 dst_mac:16; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_down_multi_data) \ + / sizeof(u32)) + u32 data[NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_phy_down_multi_data)]; +}; + +union nbl_common_data_u { + struct nbl_common_data { + u32 rsv[10]; + } __packed info; +#define NBL_COMMON_DATA_TAB_WIDTH (sizeof(struct nbl_common_data) \ + / sizeof(u32)) + u32 data[NBL_COMMON_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_common_data)]; +}; + +#pragma pack() + +struct nbl_flow_param { + u8 *mac; + u8 type; + u8 eth; + u16 ether_type; + u16 vid; + u16 vsi; + u16 mcc_id; + u32 index; + u32 *data; + u32 priv_data; + bool for_pmd; +}; + +struct nbl_mt_input { + u8 key[NBL_KT_BYTE_LEN]; + u8 at_num; + u8 kt_left_num; + u32 tbl_id; + u16 depth; + u16 power; +}; + +struct nbl_ht_item { + u16 ht0_hash; + u16 ht1_hash; + u16 hash_bucket; + u32 key_index; + u8 ht_table; +}; + +struct nbl_kt_item { + union nbl_common_data_u kt_data; +}; + +struct nbl_tcam_item { + struct nbl_ht_item ht_item; + struct nbl_kt_item kt_item; + u32 tcam_action[NBL_MAX_ACTION_NUM]; + bool tcam_flag; + u8 key_mode; + u8 pp_type; + u32 *pp_tcam_count; + u16 tcam_index; +}; + +struct nbl_tcam_ad_item { + u32 action[NBL_MAX_ACTION_NUM]; +}; + +struct nbl_flow_rule_cfg_ops { + int (*cfg_action)(struct nbl_flow_param param, u32 *action0, u32 *action1); + int (*cfg_key)(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode); + void (*cfg_kt_action)(union nbl_common_data_u *data, u32 action0, u32 action1); +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c new file mode 100644 index 000000000000..1e9a038ff4d7 --- /dev/null +++ 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c @@ -0,0 +1,2965 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_phy_leonis.h" +#include "nbl_hw/nbl_p4_actions.h" + +static int nbl_send_kt_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_kt_acc_ctrl_u *kt_ctrl, + u8 *data, struct nbl_common_info *common) +{ + union nbl_fem_kt_acc_ack_u kt_ack = {.info = {0}}; + u32 times = 3; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_ACC_DATA, data, NBL_KT_PHY_L2_DW_LEN); + nbl_debug(common, NBL_DEBUG_FLOW, "Set kt = %08x-%08x-%08x-%08x-%08x", + ((u32 *)data)[0], ((u32 *)data)[1], ((u32 *)data)[2], + ((u32 *)data)[3], ((u32 *)data)[4]); + + kt_ctrl->info.rw = NBL_ACC_MODE_WRITE; + nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_ACC_CTRL, + kt_ctrl->data, NBL_FEM_KT_ACC_CTRL_TBL_WIDTH); + + times = 3; + do { + nbl_hw_read_regs(phy_mgt, NBL_FEM_KT_ACC_ACK, kt_ack.data, + NBL_FEM_KT_ACC_ACK_TBL_WIDTH); + if (!kt_ack.info.done) { + times--; + usleep_range(100, 200); + } else { + break; + } + } while (times); + + if (!times) { + nbl_err(common, NBL_DEBUG_FLOW, "Config kt flowtale failed"); + return -EIO; + } + + return 0; +} + +static int nbl_send_ht_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_ht_acc_ctrl_u *ht_ctrl, + u8 *data, struct nbl_common_info *common) +{ + union nbl_fem_ht_acc_ack_u ht_ack = {.info = {0}}; + u32 times = 3; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_ACC_DATA, data, NBL_FEM_HT_ACC_DATA_TBL_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Set ht data = %x", *(u32 *)data); + + ht_ctrl->info.rw = NBL_ACC_MODE_WRITE; + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_ACC_CTRL, + ht_ctrl->data, NBL_FEM_HT_ACC_CTRL_TBL_WIDTH); + + times = 3; + do { + nbl_hw_read_regs(phy_mgt, NBL_FEM_HT_ACC_ACK, ht_ack.data, + NBL_FEM_HT_ACC_ACK_TBL_WIDTH); + if (!ht_ack.info.done) { + times--; + usleep_range(100, 200); + } else { + break; + } + } while (times); + + if (!times) { + nbl_err(common, NBL_DEBUG_FLOW, "Config ht flowtale failed"); + return -EIO; + } + + return 0; +} + +static void nbl_check_kt_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_kt_acc_ctrl_u *kt_ctrl, + struct nbl_common_info *common) +{ + union nbl_fem_kt_acc_ack_u ack = {.info = {0}}; + u32 data[10] = {0}; + + kt_ctrl->info.rw = NBL_ACC_MODE_READ; + kt_ctrl->info.access_size = NBL_ACC_SIZE_320B; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_ACC_CTRL, kt_ctrl->data, + NBL_FEM_KT_ACC_CTRL_TBL_WIDTH); + + nbl_hw_read_regs(phy_mgt, NBL_FEM_KT_ACC_ACK, ack.data, NBL_FEM_KT_ACC_ACK_TBL_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Check kt done:%u status:%u.", + ack.info.done, ack.info.status); + if (ack.info.done) { + nbl_hw_read_regs(phy_mgt, NBL_FEM_KT_ACC_DATA, (u8 *)data, NBL_KT_PHY_L2_DW_LEN); + nbl_debug(common, NBL_DEBUG_FLOW, "Check kt data:0x%x-%x-%x-%x-%x-%x-%x-%x-%x-%x.", + data[9], data[8], data[7], data[6], data[5], + data[4], data[3], data[2], data[1], data[0]); + } +} + +static void nbl_check_ht_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_ht_acc_ctrl_u *ht_ctrl, + struct nbl_common_info *common) +{ + union nbl_fem_ht_acc_ack_u ack = {.info = {0}}; + u32 data[4] = {0}; + + ht_ctrl->info.rw = NBL_ACC_MODE_READ; + ht_ctrl->info.access_size = NBL_ACC_SIZE_128B; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_ACC_CTRL, ht_ctrl->data, + NBL_FEM_HT_ACC_CTRL_TBL_WIDTH); + + nbl_hw_read_regs(phy_mgt, NBL_FEM_HT_ACC_ACK, ack.data, NBL_FEM_HT_ACC_ACK_TBL_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Check ht done:%u status:%u.", + ack.info.done, 
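
nbl_send_kt_data() and nbl_send_ht_data() above share one shape: write the payload, kick the control register, then poll the ack register a bounded number of times with a short sleep between attempts, and fail with -EIO if the done bit never appears. A userspace model of that bounded polling loop (mocked register read, illustrative names, not the driver's helpers):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define EIO 5

/* Mock of the ack register: reports done on the third read. */
static bool read_ack_done(void)
{
	static int reads;

	return ++reads >= 3;
}

/* Poll the ack up to 'times' attempts, sleeping ~100-200us between tries. */
static int wait_for_ack(int times)
{
	do {
		if (read_ack_done())
			return 0;
		times--;
		usleep(150);
	} while (times);

	return -EIO;
}

int main(void)
{
	int ret = wait_for_ack(3);

	printf("ack wait: %s\n", ret ? "timed out" : "done");
	return ret ? 1 : 0;
}
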
ack.info.status); + if (ack.info.done) { + nbl_hw_read_regs(phy_mgt, NBL_FEM_HT_ACC_DATA, + (u8 *)data, NBL_FEM_HT_ACC_DATA_TBL_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Check ht data:0x%x-%x-%x-%x.", + data[0], data[1], data[2], data[3]); + } +} + +static void nbl_phy_fem_set_bank(struct nbl_phy_mgt *phy_mgt) +{ + u32 bank_sel = 0; + + /* HT bank sel */ + bank_sel = HT_PORT0_BANK_SEL | HT_PORT1_BANK_SEL << NBL_8BIT + | HT_PORT2_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_BANK_SEL_BITMAP, (u8 *)&bank_sel, sizeof(bank_sel)); + + /* KT bank sel */ + bank_sel = KT_PORT0_BANK_SEL | KT_PORT1_BANK_SEL << NBL_8BIT + | KT_PORT2_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_BANK_SEL_BITMAP, (u8 *)&bank_sel, sizeof(bank_sel)); + + /* AT bank sel */ + bank_sel = AT_PORT0_BANK_SEL | AT_PORT1_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP, (u8 *)&bank_sel, sizeof(bank_sel)); + bank_sel = AT_PORT2_BANK_SEL; + nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP2, (u8 *)&bank_sel, sizeof(bank_sel)); +} + +static void nbl_phy_fem_clear_tcam_ad(struct nbl_phy_mgt *phy_mgt) +{ + union fem_em_tcam_table_u tcam_table; + union fem_em_ad_table_u ad_table = {.info = {0}}; + int i; + int j; + + memset(&tcam_table, 0, sizeof(tcam_table)); + + for (i = 0; i < NBL_PT_LEN; i++) { + for (j = 0; j < NBL_TCAM_TABLE_LEN; j++) { + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(i, j), + tcam_table.hash_key, sizeof(tcam_table)); + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_AD_TABLE_REG(i, j), + ad_table.hash_key, sizeof(ad_table)); + nbl_hw_rd32(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(i, 1)); + } + } +} + +static int nbl_phy_fem_em0_pt_phy_l2_init(struct nbl_phy_mgt *phy_mgt, int pt_idx) +{ + union nbl_fem_profile_tbl_u em0_pt_tbl = {.info = {0}}; + + em0_pt_tbl.info.pt_vld = 1; + em0_pt_tbl.info.pt_hash_sel0 = 0; + em0_pt_tbl.info.pt_hash_sel1 = 3; + + switch (pt_idx) { + case NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_12; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UP_UNICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_12; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_DOWN_UNICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_4; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UP_MULTICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_68; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_DOWN_MULTICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_60; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_UP_MULTICAST_L3: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + 
em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_36; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_DOWN_MULTICAST_L3: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_28; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_DPRBAC_IPV4: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_DPRBAC_IPV6: + em0_pt_tbl.info.pt_key_size = 1; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_64 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_128; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UL4S_IPV4: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_32; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UL4S_IPV6: + em0_pt_tbl.info.pt_key_size = 1; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_112; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + default: + return -EOPNOTSUPP; + } + + nbl_hw_write_regs(phy_mgt, NBL_FEM0_PROFILE_TABLE(pt_idx), em0_pt_tbl.data, + NBL_FEM_PROFILE_TBL_WIDTH); + return 0; +} + +static __maybe_unused int nbl_phy_fem_em0_pt_init(struct nbl_phy_mgt *phy_mgt) +{ + int i, ret = 0; + + for (i = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; i <= NBL_EM0_PT_PHY_UL4S_IPV6; i++) { + ret = nbl_phy_fem_em0_pt_phy_l2_init(phy_mgt, i); + if (ret) + return ret; + } + + return 0; +} + +static int nbl_phy_set_ht(void *priv, u16 hash, u16 hash_other, u8 ht_table, + u8 bucket, u32 key_index, u8 valid) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common; + union nbl_fem_ht_acc_data_u ht = {.info = {0}}; + union nbl_fem_ht_acc_ctrl_u ht_ctrl = {.info = {0}}; + + common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + + ht.info.vld = valid; + ht.info.hash = hash_other; + ht.info.kt_index = key_index; + + ht_ctrl.info.ht_id = ht_table == NBL_HT0 ? NBL_ACC_HT0 : NBL_ACC_HT1; + ht_ctrl.info.entry_id = hash; + ht_ctrl.info.bucket_id = bucket; + ht_ctrl.info.port = NBL_PT_PP0; + ht_ctrl.info.access_size = NBL_ACC_SIZE_32B; + ht_ctrl.info.start = 1; + + if (nbl_send_ht_data(phy_mgt, &ht_ctrl, ht.data, common)) + return -EIO; + + nbl_check_ht_data(phy_mgt, &ht_ctrl, common); + return 0; +} + +static int nbl_phy_set_kt(void *priv, u8 *key, u32 key_index, u8 key_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common; + union nbl_fem_kt_acc_ctrl_u kt_ctrl = {.info = {0}}; + + common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + + kt_ctrl.info.addr = key_index; + kt_ctrl.info.access_size = key_type == NBL_KT_HALF_MODE ? 
NBL_ACC_SIZE_160B + : NBL_ACC_SIZE_320B; + kt_ctrl.info.start = 1; + + if (nbl_send_kt_data(phy_mgt, &kt_ctrl, key, common)) + return -EIO; + + nbl_check_kt_data(phy_mgt, &kt_ctrl, common); + return 0; +} + +static int nbl_phy_search_key(void *priv, u8 *key, u8 key_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common; + union nbl_search_ctrl_u s_ctrl = {.info = {0}}; + union nbl_search_ack_u s_ack = {.info = {0}}; + u8 key_data[NBL_KT_BYTE_LEN] = {0}; + u8 search_key[NBL_FEM_SEARCH_KEY_LEN] = {0}; + u8 data[NBL_FEM_SEARCH_KEY_LEN] = {0}; + u8 times = 3; + + common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + + if (key_type == NBL_KT_HALF_MODE) + memcpy(key_data, key, NBL_KT_BYTE_HALF_LEN); + else + memcpy(key_data, key, NBL_KT_BYTE_LEN); + + key_data[0] &= KT_MASK_LEN32_ACTION_INFO; + key_data[1] &= KT_MASK_LEN12_ACTION_INFO; + if (key_type == NBL_KT_HALF_MODE) + memcpy(&search_key[20], key_data, NBL_KT_BYTE_HALF_LEN); + else + memcpy(search_key, key_data, NBL_KT_BYTE_LEN); + + nbl_debug(common, NBL_DEBUG_FLOW, "Search key:0x%x-%x-%x-%x-%x-%x-%x-%x-%x-%x", + ((u32 *)search_key)[9], ((u32 *)search_key)[8], + ((u32 *)search_key)[7], ((u32 *)search_key)[6], + ((u32 *)search_key)[5], ((u32 *)search_key)[4], + ((u32 *)search_key)[3], ((u32 *)search_key)[2], + ((u32 *)search_key)[1], ((u32 *)search_key)[0]); + nbl_hw_write_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_DATA, search_key, NBL_FEM_SEARCH_KEY_LEN); + + s_ctrl.info.start = 1; + nbl_hw_write_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_CTRL, (u8 *)&s_ctrl, + NBL_SEARCH_CTRL_WIDTH); + + do { + nbl_hw_read_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_ACK, + s_ack.data, NBL_SEARCH_ACK_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Search key ack:done:%u status:%u.", + s_ack.info.done, s_ack.info.status); + + if (!s_ack.info.done) { + times--; + usleep_range(100, 200); + } else { + nbl_hw_read_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_DATA, + data, NBL_FEM_SEARCH_KEY_LEN); + nbl_debug(common, NBL_DEBUG_FLOW, + "Search key data:0x%x-%x-%x-%x-%x-%x-%x-%x-%x-%x-%x.", + ((u32 *)data)[10], ((u32 *)data)[9], + ((u32 *)data)[8], ((u32 *)data)[7], + ((u32 *)data)[6], ((u32 *)data)[5], + ((u32 *)data)[4], ((u32 *)data)[3], + ((u32 *)data)[2], ((u32 *)data)[1], + ((u32 *)data)[0]); + break; + } + } while (times); + + if (!times) { + nbl_err(common, NBL_DEBUG_PHY, "Search ht/kt failed."); + return -EAGAIN; + } + + return 0; +} + +static int nbl_phy_add_tcam(void *priv, u32 index, u8 *key, u32 *action, u8 key_type, u8 pp_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union fem_em_tcam_table_u tcam_table; + union fem_em_tcam_table_u tcam_table_second; + union fem_em_ad_table_u ad_table; + + memset(&tcam_table, 0, sizeof(tcam_table)); + memset(&tcam_table_second, 0, sizeof(tcam_table_second)); + memset(&ad_table, 0, sizeof(ad_table)); + + memcpy(tcam_table.info.key, key, NBL_KT_BYTE_HALF_LEN); + tcam_table.info.key_vld = 1; + + if (key_type == NBL_KT_FULL_MODE) { + tcam_table.info.key_size = 1; + memcpy(tcam_table_second.info.key, &key[5], NBL_KT_BYTE_HALF_LEN); + tcam_table_second.info.key_vld = 1; + tcam_table_second.info.key_size = 1; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index + 1), + tcam_table_second.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + } + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index), + tcam_table.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + + ad_table.info.action0 = action[0]; + ad_table.info.action1 = action[1]; + ad_table.info.action2 = action[2]; + 
ad_table.info.action3 = action[3]; + ad_table.info.action4 = action[4]; + ad_table.info.action5 = action[5]; + ad_table.info.action6 = action[6]; + ad_table.info.action7 = action[7]; + ad_table.info.action8 = action[8]; + ad_table.info.action9 = action[9]; + ad_table.info.action10 = action[10]; + ad_table.info.action11 = action[11]; + ad_table.info.action12 = action[12]; + ad_table.info.action13 = action[13]; + ad_table.info.action14 = action[14]; + ad_table.info.action15 = action[15]; + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_AD_TABLE_REG(pp_type, index), + ad_table.hash_key, NBL_FLOW_AD_TOTAL_LEN); + + return 0; +} + +static void nbl_phy_del_tcam(void *priv, u32 index, u8 key_type, u8 pp_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union fem_em_tcam_table_u tcam_table; + union fem_em_tcam_table_u tcam_table_second; + union fem_em_ad_table_u ad_table; + + memset(&tcam_table, 0, sizeof(tcam_table)); + memset(&tcam_table_second, 0, sizeof(tcam_table_second)); + memset(&ad_table, 0, sizeof(ad_table)); + if (key_type == NBL_KT_FULL_MODE) + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index + 1), + tcam_table_second.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index), + tcam_table.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_AD_TABLE_REG(pp_type, index), + ad_table.hash_key, NBL_FLOW_AD_TOTAL_LEN); +} + +static int nbl_phy_add_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mcc_tbl node = {0}; + + node.vld = 1; + node.next_pntr = 0; + node.tail = 1; + node.stateid_filter = 1; + node.flowid_filter = 1; + node.dport_act = action; + + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); + if (prev_mcc_id != NBL_MCC_ID_INVALID) { + nbl_hw_read_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + node.next_pntr = mcc_id; + node.tail = 0; + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + } + + return 0; +} + +static void nbl_phy_del_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mcc_tbl node = {0}; + + if (prev_mcc_id != NBL_MCC_ID_INVALID) { + nbl_hw_read_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + + if (next_mcc_id != NBL_MCC_ID_INVALID) { + node.next_pntr = next_mcc_id; + } else { + node.next_pntr = 0; + node.tail = 1; + } + + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + } + + memset(&node, 0, sizeof(node)); + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); +} + +static int nbl_phy_init_fem(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fem_ht_size_table_u ht_size = {.info = {0}}; + u32 fem_start = NBL_FEM_INIT_START_KERN; + int ret = 0; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_INIT_START, (u8 *)&fem_start, sizeof(fem_start)); + + nbl_phy_fem_set_bank(phy_mgt); + + ht_size.info.pp0_size = HT_PORT0_BTM; + ht_size.info.pp1_size = HT_PORT1_BTM; + ht_size.info.pp2_size = HT_PORT2_BTM; + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_SIZE_REG, ht_size.data, NBL_FEM_HT_SIZE_TBL_WIDTH); + + nbl_phy_fem_clear_tcam_ad(phy_mgt); + + /*ret = nbl_phy_fem_em0_pt_init(phy_mgt);*/ + return ret; +} + +static void 
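
nbl_phy_add_mcc()/nbl_phy_del_mcc() above maintain a singly linked chain inside the MCC leaf-node table: each node carries a next_pntr and a tail flag, appending updates the previous node to point at the new one, and deleting relinks the previous node to the next (or re-marks it as the tail). A small in-memory model of that relink logic, with the table as a plain array and NBL_MCC_ID_INVALID modelled as 0xffff (the filter bits and dport action encoding are left out):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MCC_ID_INVALID 0xffff
#define MCC_TBL_SIZE   16

struct mcc_node {
	bool     vld;
	bool     tail;
	uint16_t next_pntr;
	uint16_t dport_act;
};

static struct mcc_node tbl[MCC_TBL_SIZE];

static void mcc_add(uint16_t id, uint16_t prev, uint16_t action)
{
	tbl[id] = (struct mcc_node){ .vld = true, .tail = true, .dport_act = action };

	if (prev != MCC_ID_INVALID) {		/* append: prev now points at the new node */
		tbl[prev].next_pntr = id;
		tbl[prev].tail = false;
	}
}

static void mcc_del(uint16_t id, uint16_t prev, uint16_t next)
{
	if (prev != MCC_ID_INVALID) {
		if (next != MCC_ID_INVALID) {	/* unlink from the middle of the chain */
			tbl[prev].next_pntr = next;
		} else {			/* removed node was the tail */
			tbl[prev].next_pntr = 0;
			tbl[prev].tail = true;
		}
	}
	tbl[id] = (struct mcc_node){0};
}

int main(void)
{
	mcc_add(1, MCC_ID_INVALID, 100);
	mcc_add(2, 1, 101);
	mcc_add(3, 2, 102);
	mcc_del(2, 1, 3);			/* node 1 now points straight at node 3 */
	printf("node1 -> %u, tail=%d\n", tbl[1].next_pntr, tbl[1].tail);
	return 0;
}
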
nbl_configure_dped_checksum(struct nbl_phy_mgt *phy_mgt) +{ + struct dped_l4_ck_cmd_40 l4_ck_cmd_40; + + /* DPED dped_l4_ck_cmd_40 for sctp */ + nbl_hw_read_regs(phy_mgt, NBL_DPED_L4_CK_CMD_40_ADDR, + (u8 *)&l4_ck_cmd_40, sizeof(l4_ck_cmd_40)); + l4_ck_cmd_40.en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_40_ADDR, + (u8 *)&l4_ck_cmd_40, sizeof(l4_ck_cmd_40)); +} + +static int nbl_dped_init(struct nbl_phy_mgt *phy_mgt) +{ + nbl_hw_wr32(phy_mgt, NBL_DPED_VLAN_OFFSET, 0xC); + nbl_hw_wr32(phy_mgt, NBL_DPED_DSCP_OFFSET_0, 0x8); + nbl_hw_wr32(phy_mgt, NBL_DPED_DSCP_OFFSET_1, 0x4); + + // dped checksum offload + nbl_configure_dped_checksum(phy_mgt); + + return 0; +} + +static int nbl_uped_init(struct nbl_phy_mgt *phy_mgt) +{ + struct ped_hw_edit_profile hw_edit; + + nbl_hw_read_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u8 *)&hw_edit, sizeof(hw_edit)); + hw_edit.l3_len = 0; + nbl_hw_write_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u8 *)&hw_edit, sizeof(hw_edit)); + + nbl_hw_read_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u8 *)&hw_edit, sizeof(hw_edit)); + hw_edit.l3_len = 1; + nbl_hw_write_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u8 *)&hw_edit, sizeof(hw_edit)); + + return 0; +} + +static void nbl_shaping_eth_init(struct nbl_phy_mgt *phy_mgt, u8 eth_id, u8 speed) +{ + struct nbl_shaping_dport dport = {0}; + struct nbl_shaping_dvn_dport dvn_dport = {0}; + struct nbl_shaping_rdma_dport rdma_dport = {0}; + u32 rate, half_rate; + + if (speed == NBL_FW_PORT_SPEED_100G) { + rate = NBL_SHAPING_DPORT_100G_RATE; + half_rate = NBL_SHAPING_DPORT_HALF_100G_RATE; + } else { + rate = NBL_SHAPING_DPORT_25G_RATE; + half_rate = NBL_SHAPING_DPORT_HALF_25G_RATE; + } + + dport.cir = rate; + dport.pir = rate; + dport.depth = max(dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + dport.cbs = dport.depth; + dport.pbs = dport.depth; + dport.valid = 1; + + dvn_dport.cir = half_rate; + dvn_dport.pir = rate; + dvn_dport.depth = dport.depth; + dvn_dport.cbs = dvn_dport.depth; + dvn_dport.pbs = dvn_dport.depth; + dvn_dport.valid = 1; + + rdma_dport.cir = half_rate; + rdma_dport.pir = rate; + rdma_dport.depth = dport.depth; + rdma_dport.cbs = rdma_dport.depth; + rdma_dport.pbs = rdma_dport.depth; + rdma_dport.valid = 1; + + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u8 *)&dport, sizeof(dport)); + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id), + (u8 *)&dvn_dport, sizeof(dvn_dport)); + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id), + (u8 *)&rdma_dport, sizeof(rdma_dport)); +} + +static int nbl_shaping_init(struct nbl_phy_mgt *phy_mgt, u8 speed) +{ + struct dsch_psha_en psha_en = {0}; + int i; + + for (i = 0; i < NBL_MAX_ETHERNET; i++) + nbl_shaping_eth_init(phy_mgt, i, speed); + + psha_en.en = 0xF; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR, (u8 *)&psha_en, sizeof(psha_en)); + + return 0; +} + +static int nbl_dsch_qid_max_init(struct nbl_phy_mgt *phy_mgt) +{ + struct dsch_vn_quanta quanta = {0}; + + quanta.h_qua = NBL_HOST_QUANTA; + quanta.e_qua = NBL_ECPU_QUANTA; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_QUANTA_ADDR, + (u8 *)&quanta, sizeof(quanta)); + nbl_hw_wr32(phy_mgt, NBL_DSCH_HOST_QID_MAX, NBL_MAX_QUEUE_ID); + + nbl_hw_wr32(phy_mgt, NBL_DVN_ECPU_QUEUE_NUM, 0); + nbl_hw_wr32(phy_mgt, NBL_UVN_ECPU_QUEUE_NUM, 0); + + return 0; +} + +static int nbl_ustore_init(struct nbl_phy_mgt *phy_mgt, u8 eth_num) +{ + struct ustore_pkt_len pkt_len; + struct nbl_ustore_port_drop_th drop_th; + int i; + + nbl_hw_read_regs(phy_mgt, NBL_USTORE_PKT_LEN_ADDR, (u8 
*)&pkt_len, sizeof(pkt_len)); + /* min arp packet length 42 (14 + 28) */ + pkt_len.min = 42; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_PKT_LEN_ADDR, (u8 *)&pkt_len, sizeof(pkt_len)); + + drop_th.en = 1; + if (eth_num == 1) + drop_th.disc_th = NBL_USTORE_SIGNLE_ETH_DROP_TH; + else if (eth_num == 2) + drop_th.disc_th = NBL_USTORE_DUAL_ETH_DROP_TH; + else + drop_th.disc_th = NBL_USTORE_QUAD_ETH_DROP_TH; + + for (i = 0; i < 4; i++) + nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_DROP_TH_REG_ARR(i), + (u8 *)&drop_th, sizeof(drop_th)); + + return 0; +} + +static int nbl_dstore_init(struct nbl_phy_mgt *phy_mgt, u8 speed) +{ + struct dstore_d_dport_fc_th fc_th; + struct dstore_port_drop_th drop_th; + struct dstore_disc_bp_th bp_th; + int i; + + for (i = 0; i < 6; i++) { + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i), + (u8 *)&drop_th, sizeof(drop_th)); + drop_th.en = 0; + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i), + (u8 *)&drop_th, sizeof(drop_th)); + } + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_DISC_BP_TH, + (u8 *)&bp_th, sizeof(bp_th)); + bp_th.en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_DISC_BP_TH, + (u8 *)&bp_th, sizeof(bp_th)); + + for (i = 0; i < 4; i++) { + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i), + (u8 *)&fc_th, sizeof(fc_th)); + if (speed == NBL_FW_PORT_SPEED_100G) { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G; + } else { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH; + } + + fc_th.fc_en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i), + (u8 *)&fc_th, sizeof(fc_th)); + } + + return 0; +} + +static void nbl_dvn_descreq_num_cfg(struct nbl_phy_mgt *phy_mgt, u32 descreq_num) +{ + struct nbl_dvn_descreq_num_cfg descreq_num_cfg = { 0 }; + u32 packet_ring_prefect_num = descreq_num & 0xffff; + u32 split_ring_prefect_num = (descreq_num >> 16) & 0xffff; + + packet_ring_prefect_num = packet_ring_prefect_num > 32 ? 32 : packet_ring_prefect_num; + packet_ring_prefect_num = packet_ring_prefect_num < 8 ? 8 : packet_ring_prefect_num; + descreq_num_cfg.packed_l1_num = (packet_ring_prefect_num - 8) / 4; + + split_ring_prefect_num = split_ring_prefect_num > 16 ? 16 : split_ring_prefect_num; + split_ring_prefect_num = split_ring_prefect_num < 8 ? 8 : split_ring_prefect_num; + descreq_num_cfg.avring_cfg_num = split_ring_prefect_num > 8 ? 
1 : 0; + + nbl_hw_write_regs(phy_mgt, NBL_DVN_DESCREQ_NUM_CFG, + (u8 *)&descreq_num_cfg, sizeof(descreq_num_cfg)); +} + +static int nbl_dvn_init(struct nbl_phy_mgt *phy_mgt, u8 speed) +{ + struct nbl_dvn_desc_wr_merge_timeout timeout = {0}; + struct nbl_dvn_dif_req_rd_ro_flag ro_flag = {0}; + + timeout.cfg_cycle = DEFAULT_DVN_DESC_WR_MERGE_TIMEOUT_MAX; + nbl_hw_write_regs(phy_mgt, NBL_DVN_DESC_WR_MERGE_TIMEOUT, + (u8 *)&timeout, sizeof(timeout)); + + ro_flag.rd_desc_ro_en = 1; + ro_flag.rd_data_ro_en = 1; + ro_flag.rd_avring_ro_en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DVN_DIF_REQ_RD_RO_FLAG, + (u8 *)&ro_flag, sizeof(ro_flag)); + + if (speed == NBL_FW_PORT_SPEED_100G) + nbl_dvn_descreq_num_cfg(phy_mgt, DEFAULT_DVN_100G_DESCREQ_NUMCFG); + else + nbl_dvn_descreq_num_cfg(phy_mgt, DEFAULT_DVN_DESCREQ_NUMCFG); + + return 0; +} + +static int nbl_uvn_init(struct nbl_phy_mgt *phy_mgt) +{ + struct uvn_queue_err_mask mask = {0}; + struct uvn_dif_req_ro_flag flag = {0}; + u32 timeout = 119760; /* 200us 200000/1.67 */ + + nbl_hw_wr32(phy_mgt, NBL_UVN_DESC_RD_WAIT, timeout); + + flag.avail_rd = 1; + flag.desc_rd = 1; + flag.pkt_wr = 1; + flag.desc_wr = 0; + nbl_hw_write_regs(phy_mgt, NBL_UVN_DIF_REQ_RO_FLAG, (u8 *)&flag, sizeof(flag)); + + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_MASK, (u8 *)&mask, sizeof(mask)); + mask.dif_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_ERR_MASK, (u8 *)&mask, sizeof(mask)); + + return 0; +} + +static int nbl_dp_init(struct nbl_phy_mgt *phy_mgt, u8 speed, u8 eth_num) +{ + nbl_dped_init(phy_mgt); + nbl_uped_init(phy_mgt); + nbl_shaping_init(phy_mgt, speed); + nbl_dsch_qid_max_init(phy_mgt); + nbl_ustore_init(phy_mgt, eth_num); + nbl_dstore_init(phy_mgt, speed); + nbl_dvn_init(phy_mgt, speed); + nbl_uvn_init(phy_mgt); + + return 0; +} + +static struct nbl_epro_action_filter_tbl epro_action_filter_tbl_def[NBL_FWD_TYPE_MAX] = { + [NBL_FWD_TYPE_NORMAL] = { + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MIRRIDX)}, + [NBL_FWD_TYPE_CPU_ASSIGNED] = { + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MIRRIDX) + }, + [NBL_FWD_TYPE_UPCALL] = {0}, + [NBL_FWD_TYPE_SRC_MIRROR] = { + BIT(NBL_MD_ACTION_FLOWID0) | BIT(NBL_MD_ACTION_FLOWID1) | + BIT(NBL_MD_ACTION_RSSIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_VNI0) | + BIT(NBL_MD_ACTION_VNI1) | BIT(NBL_MD_ACTION_PRBAC_IDX) | + BIT(NBL_MD_ACTION_L4S_IDX) | BIT(NBL_MD_ACTION_DP_HASH0) | + BIT(NBL_MD_ACTION_DP_HASH1) | BIT(NBL_MD_ACTION_MDF_PRI) | + ((u64)0xffffffff << 32)}, + [NBL_FWD_TYPE_OTHER_MIRROR] = { + BIT(NBL_MD_ACTION_FLOWID0) | BIT(NBL_MD_ACTION_FLOWID1) | + BIT(NBL_MD_ACTION_RSSIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_VNI0) | + BIT(NBL_MD_ACTION_VNI1) | BIT(NBL_MD_ACTION_PRBAC_IDX) | + BIT(NBL_MD_ACTION_L4S_IDX) | BIT(NBL_MD_ACTION_DP_HASH0) | + BIT(NBL_MD_ACTION_DP_HASH1) | BIT(NBL_MD_ACTION_MDF_PRI)}, + [NBL_FWD_TYPE_MNG] = {0}, + [NBL_FWD_TYPE_GLB_LB] = {0}, + [NBL_FWD_TYPE_DROP] = {0}, +}; + +static void nbl_epro_action_filter_cfg(struct nbl_phy_mgt *phy_mgt, u32 fwd_type, + struct nbl_epro_action_filter_tbl *cfg) +{ + if (fwd_type >= NBL_FWD_TYPE_MAX) { + pr_err("fwd_type %u exceed the max num %u.", fwd_type, NBL_FWD_TYPE_MAX); + return; + } + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_ACTION_FILTER_TABLE(fwd_type), + (u8 *)cfg, sizeof(*cfg)); +} + +static int nbl_epro_init(struct nbl_phy_mgt *phy_mgt) +{ + u32 fwd_type = 0; + + for (fwd_type = 0; fwd_type < 
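
nbl_dvn_descreq_num_cfg() above unpacks one u32: the low 16 bits are the packed-ring prefetch count (clamped to 8..32 and stored as (n - 8) / 4) and the high 16 bits are the split-ring prefetch count (clamped to 8..16 and stored as a single "more than 8" flag). A standalone model of that encoding; the sample input is made up and is not one of the DEFAULT_DVN_*_DESCREQ_NUMCFG values:

#include <stdint.h>
#include <stdio.h>

struct descreq_fields {
	uint32_t packed_l1_num;   /* (clamped packed-ring prefetch - 8) / 4 */
	uint32_t avring_cfg_num;  /* 1 if the clamped split-ring prefetch > 8 */
};

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static struct descreq_fields encode_descreq(uint32_t descreq_num)
{
	uint32_t packed = clamp_u32(descreq_num & 0xffff, 8, 32);
	uint32_t split  = clamp_u32((descreq_num >> 16) & 0xffff, 8, 16);

	return (struct descreq_fields){
		.packed_l1_num  = (packed - 8) / 4,
		.avring_cfg_num = split > 8 ? 1 : 0,
	};
}

int main(void)
{
	/* e.g. packed prefetch 32, split prefetch 16 -> fields 6 and 1 */
	struct descreq_fields f = encode_descreq((16u << 16) | 32u);

	printf("packed_l1_num=%u avring_cfg_num=%u\n", f.packed_l1_num, f.avring_cfg_num);
	return 0;
}
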
NBL_FWD_TYPE_MAX; fwd_type++) + nbl_epro_action_filter_cfg(phy_mgt, fwd_type, + &epro_action_filter_tbl_def[fwd_type]); + + return 0; +} + +static int nbl_ppe_init(struct nbl_phy_mgt *phy_mgt) +{ + nbl_epro_init(phy_mgt); + + return 0; +} + +static int nbl_host_padpt_init(struct nbl_phy_mgt *phy_mgt) +{ + /* padpt flow control register */ + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP, 0x10400); + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_PD_DN, 0x10080); + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_PH_DN, 0x10010); + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN, 0x10010); + + return 0; +} + +/* set padpt debug reg to cap for aged stop */ +static void nbl_host_pcap_init(struct nbl_phy_mgt *phy_mgt) +{ + int addr; + + /* tx */ + nbl_hw_wr32(phy_mgt, 0x15a4204, 0x4); + nbl_hw_wr32(phy_mgt, 0x15a4208, 0x10); + + for (addr = 0x15a4300; addr <= 0x15a4338; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a433c, 0xdf000000); + + for (addr = 0x15a4340; addr <= 0x15a437c; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + + /* rx */ + nbl_hw_wr32(phy_mgt, 0x15a4804, 0x4); + nbl_hw_wr32(phy_mgt, 0x15a4808, 0x20); + + for (addr = 0x15a4940; addr <= 0x15a4978; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a497c, 0x0a000000); + + for (addr = 0x15a4900; addr <= 0x15a4938; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a493c, 0xbe000000); + + nbl_hw_wr32(phy_mgt, 0x15a420c, 0x1); + nbl_hw_wr32(phy_mgt, 0x15a480c, 0x1); + nbl_hw_wr32(phy_mgt, 0x15a420c, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a480c, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a4200, 0x1); + nbl_hw_wr32(phy_mgt, 0x15a4800, 0x1); +} + +static int nbl_intf_init(struct nbl_phy_mgt *phy_mgt) +{ + nbl_host_padpt_init(phy_mgt); + nbl_host_pcap_init(phy_mgt); + + return 0; +} + +static int nbl_phy_init_chip_module(void *priv, u8 eth_speed, u8 eth_num) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY, "phy_chip_init"); + + nbl_dp_init(phy_mgt, eth_speed, eth_num); + nbl_ppe_init(phy_mgt); + nbl_intf_init(phy_mgt); + + phy_mgt->version = nbl_hw_rd32(phy_mgt, 0x1300904); + + return 0; +} + +static int nbl_phy_init_qid_map_table(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_virtio_qid_map_table info = {0}, info2 = {0}; + struct device *dev = NBL_PHY_MGT_TO_DEV(phy_mgt); + u16 i, j, k; + + memset(&info, 0, sizeof(info)); + info.local_qid = 0x1FF; + info.notify_addr_l = 0x7FFFFF; + info.notify_addr_h = 0xFFFFFFFF; + info.global_qid = 0xFFF; + info.ctrlq_flag = 0X1; + info.rsv1 = 0; + info.rsv2 = 0; + + for (k = 0; k < 2; k++) { /* 0 is primary table , 1 is standby table */ + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + j = 0; + do { + nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_QID_MAP_REG_ARR(k, i), + (u8 *)&info, sizeof(info)); + nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_QID_MAP_REG_ARR(k, i), + (u8 *)&info2, sizeof(info2)); + if (likely(!memcmp(&info, &info2, sizeof(info)))) + break; + j++; + } while (j < NBL_REG_WRITE_MAX_TRY_TIMES); + + if (j == NBL_REG_WRITE_MAX_TRY_TIMES) + dev_err(dev, "Write to qid map table entry %hu failed\n", i); + } + } + + return 0; +} + +static int nbl_phy_set_qid_map_table(void *priv, void *data, int qid_map_select) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_qid_map_param *param = (struct nbl_qid_map_param *)data; + 
struct nbl_virtio_qid_map_table info = {0}, info_data = {0}; + struct nbl_queue_table_select select = {0}; + u64 reg; + int i, j; + + for (i = 0; i < param->len; i++) { + j = 0; + + info.local_qid = param->qid_map[i].local_qid; + info.notify_addr_l = param->qid_map[i].notify_addr_l; + info.notify_addr_h = param->qid_map[i].notify_addr_h; + info.global_qid = param->qid_map[i].global_qid; + info.ctrlq_flag = param->qid_map[i].ctrlq_flag; + + do { + reg = NBL_PCOMPLETER_QID_MAP_REG_ARR(qid_map_select, param->start + i); + nbl_hw_write_regs(phy_mgt, reg, (u8 *)(&info), sizeof(info)); + nbl_hw_read_regs(phy_mgt, reg, (u8 *)(&info_data), sizeof(info_data)); + if (likely(!memcmp(&info, &info_data, sizeof(info)))) + break; + j++; + } while (j < NBL_REG_WRITE_MAX_TRY_TIMES); + + if (j == NBL_REG_WRITE_MAX_TRY_TIMES) + nbl_err(common, NBL_DEBUG_QUEUE, "Write to qid map table entry %d failed\n", + param->start + i); + } + + select.select = qid_map_select; + nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG, + (u8 *)&select, sizeof(select)); + + return 0; +} + +static int nbl_phy_set_qid_map_ready(void *priv, bool ready) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_table_ready queue_table_ready = {0}; + + queue_table_ready.ready = ready; + nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_QUEUE_TABLE_READY_REG, + (u8 *)&queue_table_ready, sizeof(queue_table_ready)); + + return 0; +} + +static int nbl_phy_cfg_ipro_queue_tbl(void *priv, u16 queue_id, u16 vsi_id, u8 enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_queue_tbl ipro_queue_tbl = {0}; + + ipro_queue_tbl.vsi_en = enable; + ipro_queue_tbl.vsi_id = vsi_id; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_QUEUE_TBL(queue_id), + (u8 *)&ipro_queue_tbl, sizeof(ipro_queue_tbl)); + + return 0; +} + +static int nbl_phy_cfg_ipro_dn_sport_tbl(void *priv, u16 vsi_id, u16 dst_eth_id, + u16 bmode, bool binit) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + + if (binit) { + dpsport.entry_vld = 1; + dpsport.phy_flow = 1; + dpsport.set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; + dpsport.set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; + dpsport.set_dport.dport.down.lag_vld = 0; + dpsport.set_dport.dport.down.eth_vld = 1; + dpsport.set_dport.dport.down.eth_id = dst_eth_id; + dpsport.vlan_layer_num_1 = 3; + dpsport.set_dport_en = 1; + } else { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + } + + if (bmode == BRIDGE_MODE_VEPA) + dpsport.set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; + else + dpsport.set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_NONE; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + return 0; +} + +static int nbl_phy_set_vnet_queue_info(void *priv, struct nbl_vnet_queue_info_param *param, + u16 queue_id) +{ + struct nbl_phy_mgt_leonis *phy_mgt_leonis = (struct nbl_phy_mgt_leonis *)priv; + struct nbl_phy_mgt *phy_mgt = &phy_mgt_leonis->phy_mgt; + struct nbl_host_vnet_qinfo host_vnet_qinfo = {0}; + + host_vnet_qinfo.function_id = param->function_id; + host_vnet_qinfo.device_id = param->device_id; + host_vnet_qinfo.bus_id = param->bus_id; + host_vnet_qinfo.valid = param->valid; + host_vnet_qinfo.msix_idx = param->msix_idx; + host_vnet_qinfo.msix_idx_valid = param->msix_idx_valid; +#ifndef NBL_DISABLE_RO + if 
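
Both nbl_phy_init_qid_map_table() and nbl_phy_set_qid_map_table() above apply the same defensive pattern to the qid map: write the entry, read it back, and retry up to NBL_REG_WRITE_MAX_TRY_TIMES until the readback matches, logging an error if it never does. A compact model of that verify-after-write loop over a mock register (names and the retry count of 3 are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_TRY_TIMES 3

struct qid_map_entry {
	uint32_t local_qid;
	uint32_t global_qid;
};

static struct qid_map_entry mock_reg;	/* stands in for the hardware table entry */

static void reg_write(const struct qid_map_entry *e) { mock_reg = *e; }
static void reg_read(struct qid_map_entry *e)        { *e = mock_reg; }

static bool write_verified(const struct qid_map_entry *e)
{
	struct qid_map_entry check;
	int try;

	for (try = 0; try < MAX_TRY_TIMES; try++) {
		reg_write(e);
		reg_read(&check);
		if (!memcmp(e, &check, sizeof(check)))
			return true;	/* readback matches what was written */
	}
	return false;
}

int main(void)
{
	struct qid_map_entry e = { .local_qid = 0x1ff, .global_qid = 0xfff };

	printf("qid map write %s\n", write_verified(&e) ? "ok" : "failed");
	return 0;
}
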
(phy_mgt_leonis->ro_enable) { + host_vnet_qinfo.ido_en = 1; + host_vnet_qinfo.rlo_en = 1; + } +#endif + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + + return 0; +} + +static int nbl_phy_clear_vnet_queue_info(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_host_vnet_qinfo host_vnet_qinfo = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + return 0; +} + +static int nbl_phy_cfg_vnet_qinfo_log(void *priv, u16 queue_id, bool vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_host_vnet_qinfo host_vnet_qinfo = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + host_vnet_qinfo.log_en = vld; + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + + return 0; +} + +static int nbl_phy_reset_dvn_cfg(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_dvn_queue_reset queue_reset = {0}; + struct nbl_dvn_queue_reset_done queue_reset_done = {0}; + int i = 0; + + queue_reset.dvn_queue_index = queue_id; + queue_reset.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DVN_QUEUE_RESET_REG, + (u8 *)&queue_reset, sizeof(queue_reset)); + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + while (!queue_reset_done.flag) { + i++; + if (!(i % 10)) { + nbl_err(common, NBL_DEBUG_QUEUE, "Wait too long for tx queue reset to be done"); + break; + } + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + } + + nbl_debug(common, NBL_DEBUG_QUEUE, "dvn:%u cfg reset succedd, wait %d 5ns\n", queue_id, i); + return 0; +} + +static int nbl_phy_reset_uvn_cfg(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_uvn_queue_reset queue_reset = {0}; + struct nbl_uvn_queue_reset_done queue_reset_done = {0}; + int i = 0; + + queue_reset.index = queue_id; + queue_reset.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_RESET_REG, + (u8 *)&queue_reset, sizeof(queue_reset)); + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + while (!queue_reset_done.flag) { + i++; + if (!(i % 10)) { + nbl_err(common, NBL_DEBUG_QUEUE, "Wait too long for rx queue reset to be done"); + break; + } + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + } + + nbl_debug(common, NBL_DEBUG_QUEUE, "uvn:%u cfg reset succedd, wait %d 5ns\n", queue_id, i); + return 0; +} + +static int nbl_phy_restore_dvn_context(void *priv, u16 queue_id, u16 split, u16 last_avail_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct dvn_queue_context cxt = {0}; + + cxt.dvn_ring_wrap_counter = last_avail_index >> 15; + if (split) + cxt.dvn_avail_ring_read = last_avail_index; + else + cxt.dvn_l1_ring_read = last_avail_index & 0x7FFF; + + nbl_hw_write_regs(phy_mgt, 
NBL_DVN_QUEUE_CXT_TABLE_ARR(queue_id), (u8 *)&cxt, sizeof(cxt)); + nbl_info(common, NBL_DEBUG_QUEUE, "config tx ring: %u, last avail idx: %u\n", + queue_id, last_avail_index); + + return 0; +} + +static int nbl_phy_restore_uvn_context(void *priv, u16 queue_id, u16 split, u16 last_avail_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct uvn_queue_cxt cxt = {0}; + + cxt.wrap_count = last_avail_index >> 15; + if (split) + cxt.queue_head = last_avail_index; + else + cxt.queue_head = last_avail_index & 0x7FFF; + + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_CXT_TABLE_ARR(queue_id), (u8 *)&cxt, sizeof(cxt)); + nbl_info(common, NBL_DEBUG_QUEUE, "config rx ring: %u, last avail idx: %u\n", + queue_id, last_avail_index); + + return 0; +} + +static int nbl_phy_get_tx_queue_cfg(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct dvn_queue_table info = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + queue_cfg->desc = info.dvn_queue_baddr; + queue_cfg->avail = info.dvn_avail_baddr; + queue_cfg->used = info.dvn_used_baddr; + queue_cfg->size = info.dvn_queue_size; + queue_cfg->split = info.dvn_queue_type; + queue_cfg->extend_header = info.dvn_extend_header_en; + + return 0; +} + +static int nbl_phy_get_rx_queue_cfg(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct uvn_queue_table info = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + queue_cfg->desc = info.queue_baddr; + queue_cfg->avail = info.avail_baddr; + queue_cfg->used = info.used_baddr; + queue_cfg->size = info.queue_size_mask_pow; + queue_cfg->split = info.queue_type; + queue_cfg->extend_header = info.extend_header_en; + queue_cfg->half_offload_en = info.half_offload_en; + queue_cfg->rxcsum = info.guest_csum_en; + + return 0; +} + +static int nbl_phy_cfg_tx_queue(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct dvn_queue_table info = {0}; + + info.dvn_queue_baddr = queue_cfg->desc; + if (!queue_cfg->split && !queue_cfg->extend_header) + queue_cfg->avail = queue_cfg->avail | 3; + info.dvn_avail_baddr = queue_cfg->avail; + info.dvn_used_baddr = queue_cfg->used; + info.dvn_queue_size = ilog2(queue_cfg->size); + info.dvn_queue_type = queue_cfg->split; + info.dvn_queue_en = 1; + info.dvn_extend_header_en = queue_cfg->extend_header; + + nbl_hw_write_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + return 0; +} + +static int nbl_phy_cfg_rx_queue(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct uvn_queue_table info = {0}; + + info.queue_baddr = queue_cfg->desc; + info.avail_baddr = queue_cfg->avail; + info.used_baddr = queue_cfg->used; + info.queue_size_mask_pow = ilog2(queue_cfg->size); + info.queue_type = queue_cfg->split; + info.extend_header_en = queue_cfg->extend_header; + info.half_offload_en = queue_cfg->half_offload_en; + info.guest_csum_en = queue_cfg->rxcsum; + 
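
For packed rings, nbl_phy_restore_dvn_context() and nbl_phy_restore_uvn_context() above split last_avail_index the same way: bit 15 is the ring wrap counter and bits 14:0 are the ring position, while split rings take the 16-bit value as-is. A two-line decode of that layout (the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t last_avail_index = 0x8005;		/* arbitrary sample */
	uint16_t wrap = last_avail_index >> 15;		/* bit 15: wrap counter */
	uint16_t idx  = last_avail_index & 0x7fff;	/* bits 14:0: ring position */

	printf("wrap=%u idx=%u\n", wrap, idx);
	return 0;
}
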
info.queue_enable = 1; + + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + return 0; +} + +static bool nbl_phy_check_q2tc(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_q2tc_cfg_tbl info; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + return info.vld; +} + +static int nbl_phy_cfg_q2tc_netid(void *priv, u16 queue_id, u16 netid, u16 vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_q2tc_cfg_tbl info; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + info.tcid = (info.tcid & 0x7) | (netid << 3); + info.vld = vld; + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_cfg_q2tc_tcid(void *priv, u16 queue_id, u16 tcid) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_q2tc_cfg_tbl info; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + info.tcid = (info.tcid & 0xFFF8) | tcid; + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_set_tc_wgt(void *priv, u16 func_id, u8 *weight, u16 num_tc) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union dsch_vn_tc_wgt_cfg_tbl_u wgt_cfg = {.info = {0}}; + int i; + + for (i = 0; i < num_tc; i++) + wgt_cfg.data[i] = weight[i]; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_TC_WGT_CFG_TABLE_REG_ARR(func_id), + wgt_cfg.data, sizeof(wgt_cfg)); + + return 0; +} + +static void nbl_phy_active_shaping(void *priv, u16 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net shaping_net = {0}; + struct dsch_vn_sha2net_map_tbl sha2net = {0}; + struct dsch_vn_net2sha_map_tbl net2sha = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + if (!shaping_net.depth) + return; + + sha2net.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); + + shaping_net.valid = 1; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + net2sha.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); +} + +static void nbl_phy_deactive_shaping(void *priv, u16 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net shaping_net = {0}; + struct dsch_vn_sha2net_map_tbl sha2net = {0}; + struct dsch_vn_net2sha_map_tbl net2sha = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + shaping_net.valid = 0; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); +} + +static int nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net shaping_net = {0}; + struct dsch_vn_sha2net_map_tbl sha2net = {0}; + struct dsch_vn_net2sha_map_tbl 
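
nbl_phy_cfg_q2tc_netid() and nbl_phy_cfg_q2tc_tcid() above update one 16-bit tcid field in two halves: bits 2:0 hold the TC id and the bits from 3 upward hold the net id, so each setter masks its own part and preserves the other. A minimal model of that read-modify-write (field widths follow the masks used in the code; values are samples):

#include <stdint.h>
#include <stdio.h>

static uint16_t set_netid(uint16_t field, uint16_t netid)
{
	return (field & 0x7) | (uint16_t)(netid << 3);	/* keep tcid bits, replace netid */
}

static uint16_t set_tcid(uint16_t field, uint16_t tcid)
{
	return (field & 0xfff8) | (tcid & 0x7);		/* keep netid bits, replace tcid */
}

int main(void)
{
	uint16_t field = 0;

	field = set_netid(field, 42);
	field = set_tcid(field, 5);
	printf("field=0x%04x netid=%u tcid=%u\n", field, field >> 3, field & 0x7);
	return 0;
}
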
net2sha = {0}; + + if (vld) { + sha2net.vld = active; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); + } else { + net2sha.vld = vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); + } + + /* cfg shaping cir/pir */ + if (vld) { + shaping_net.valid = active; + /* total_tx_rate unit Mb/s */ + /* cir 1 default represents 1Mbps */ + shaping_net.cir = total_tx_rate; + /* pir equal cir */ + shaping_net.pir = shaping_net.cir; + shaping_net.depth = max(shaping_net.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + shaping_net.cbs = shaping_net.depth; + shaping_net.pbs = shaping_net.depth; + } + + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + if (!vld) { + sha2net.vld = vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); + } else { + net2sha.vld = active; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); + } + + return 0; +} + +static int nbl_phy_cfg_dsch_net_to_group(void *priv, u16 func_id, u16 group_id, u16 vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_n2g_cfg_tbl info = {0}; + + info.grpid = group_id; + info.vld = vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_N2G_CFG_TABLE_REG_ARR(func_id), + (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_num, u16 *queue_list) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_epro_rss_ret_tbl rss_ret = {0}; + u32 table_id, table_end, group_count, odd_num, queue_id = 0; + + group_count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << size_type; + if (group_count > 256) { + nbl_err(common, NBL_DEBUG_QUEUE, + "Rss group entry size type %u exceed the max value %u", + size_type, NBL_EPRO_RSS_ENTRY_SIZE_256); + return -EINVAL; + } + + if (q_num > group_count) { + nbl_err(common, NBL_DEBUG_QUEUE, + "q_num %u exceed the rss group count %u\n", q_num, group_count); + return -EINVAL; + } + if (index >= NBL_EPRO_RSS_RET_TBL_DEPTH || + (index + group_count) > NBL_EPRO_RSS_RET_TBL_DEPTH) { + nbl_err(common, NBL_DEBUG_QUEUE, + "index %u exceed the max table entry %u, entry size: %u\n", + index, NBL_EPRO_RSS_RET_TBL_DEPTH, group_count); + return -EINVAL; + } + + table_id = index / 2; + table_end = (index + group_count) / 2; + odd_num = index % 2; + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + + if (odd_num) { + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + table_id++; + } + + queue_id = queue_id % q_num; + for (; table_id < table_end; table_id++) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + + if (odd_num) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } 
+ + return 0; +} + +static struct nbl_epro_rss_key epro_rss_key_def = { + .key0 = 0x6d5a6d5a6d5a6d5a, + .key1 = 0x6d5a6d5a6d5a6d5a, + .key2 = 0x6d5a6d5a6d5a6d5a, + .key3 = 0x6d5a6d5a6d5a6d5a, + .key4 = 0x6d5a6d5a6d5a6d5a, +}; + +static int nbl_phy_init_epro_rss_key(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_KEY_REG, + (u8 *)&epro_rss_key_def, sizeof(epro_rss_key_def)); + + return 0; +} + +static void nbl_phy_read_epro_rss_key(void *priv, u8 *rss_key) +{ + nbl_hw_read_regs(priv, NBL_EPRO_RSS_KEY_REG, + rss_key, sizeof(struct nbl_epro_rss_key)); +} + +static void nbl_phy_read_rss_indir(void *priv, u16 vsi_id, u32 *rss_indir, + u16 rss_ret_base, u16 rss_entry_size) +{ + struct nbl_epro_rss_ret_tbl rss_ret = {0}; + int i = 0; + u32 table_id, table_end, group_count, odd_num; + + group_count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size; + table_id = rss_ret_base / 2; + table_end = (rss_ret_base + group_count) / 2; + odd_num = rss_ret_base % 2; + + if (odd_num) { + nbl_hw_read_regs(priv, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + rss_indir[i++] = rss_ret.dqueue1; + } + + for (; table_id < table_end; table_id++) { + nbl_hw_read_regs(priv, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + rss_indir[i++] = rss_ret.dqueue0; + rss_indir[i++] = rss_ret.dqueue1; + } + + if (odd_num) { + nbl_hw_read_regs(priv, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + rss_indir[i++] = rss_ret.dqueue0; + } +} + +static void nbl_phy_get_rss_alg_sel(void *priv, u8 eth_id, u8 *alg_sel) +{ + struct nbl_epro_ept_tbl ept_tbl = {0}; + + nbl_hw_read_regs(priv, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + + if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_TOEPLITZ_HASH) + *alg_sel = ETH_RSS_HASH_TOP; + else if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_CRC32) + *alg_sel = ETH_RSS_HASH_CRC32; +} + +static int nbl_phy_init_epro_vpt_tbl(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; + + epro_vpt_tbl.vld = 1; + epro_vpt_tbl.fwd = NBL_EPRO_FWD_TYPE_DROP; + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_TOEPLITZ_HASH; + epro_vpt_tbl.rss_key_type_ipv4 = NBL_EPRO_RSS_KEY_TYPE_IPV4_L4; + epro_vpt_tbl.rss_key_type_ipv6 = NBL_EPRO_RSS_KEY_TYPE_IPV6_L4; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), + (u8 *)&epro_vpt_tbl, + sizeof(struct nbl_epro_vpt_tbl)); + + return 0; +} + +static int nbl_phy_set_epro_rss_default(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_TOEPLITZ_HASH; + epro_vpt_tbl.rss_key_type_ipv4 = NBL_EPRO_RSS_KEY_TYPE_IPV4_L4; + epro_vpt_tbl.rss_key_type_ipv6 = NBL_EPRO_RSS_KEY_TYPE_IPV6_L4; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), + (u8 *)&epro_vpt_tbl, + sizeof(struct nbl_epro_vpt_tbl)); + return 0; +} + +static int nbl_phy_set_epro_rss_pt(void *priv, u16 vsi_id, u16 rss_ret_base, u16 rss_entry_size) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_rss_pt_tbl epro_rss_pt_tbl = {0}; + struct nbl_epro_vpt_tbl epro_vpt_tbl; + + epro_rss_pt_tbl.vld = 1; + epro_rss_pt_tbl.entry_size = rss_entry_size; + epro_rss_pt_tbl.offset0_vld = 1; + epro_rss_pt_tbl.offset0 = 
rss_ret_base; + epro_rss_pt_tbl.offset1_vld = 0; + epro_rss_pt_tbl.offset1 = 0; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_PT_TABLE(vsi_id), (u8 *)&epro_rss_pt_tbl, + sizeof(epro_rss_pt_tbl)); + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + epro_vpt_tbl.fwd = NBL_EPRO_FWD_TYPE_NORMAL; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + return 0; +} + +static int nbl_phy_clear_epro_rss_pt(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_rss_pt_tbl epro_rss_pt_tbl = {0}; + struct nbl_epro_vpt_tbl epro_vpt_tbl; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_PT_TABLE(vsi_id), (u8 *)&epro_rss_pt_tbl, + sizeof(epro_rss_pt_tbl)); + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + epro_vpt_tbl.fwd = NBL_EPRO_FWD_TYPE_DROP; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + return 0; +} + +static int nbl_phy_disable_dvn(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dvn_queue_table info = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + info.dvn_queue_en = 0; + nbl_hw_write_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_disable_uvn(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct uvn_queue_table info = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + return 0; +} + +static bool nbl_phy_is_txq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id) +{ + struct dsch_vn_tc_q_list_tbl tc_q_list = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_TC_Q_LIST_TABLE_REG_ARR(queue_id), + (u8 *)&tc_q_list, sizeof(tc_q_list)); + if (!tc_q_list.regi && !tc_q_list.fly && !tc_q_list.vld) + return true; + + return false; +} + +static bool nbl_phy_is_rxq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id) +{ + struct uvn_desc_cxt cache_ctx = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_UVN_DESC_CXT_TABLE_ARR(queue_id), + (u8 *)&cache_ctx, sizeof(cache_ctx)); + if (cache_ctx.cache_pref_num_prev == cache_ctx.cache_pref_num_post) + return true; + + return false; +} + +static int nbl_phy_lso_dsch_drain(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + int i = 0; + + do { + if (nbl_phy_is_txq_drain_out(phy_mgt, queue_id)) + break; + + usleep_range(10, 20); + } while (++i < NBL_DRAIN_WAIT_TIMES); + + if (i >= NBL_DRAIN_WAIT_TIMES) { + nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u lso dsch drain\n", queue_id); + return -1; + } + + return 0; +} + +static int nbl_phy_rsc_cache_drain(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + int i = 0; + + do { + if (nbl_phy_is_rxq_drain_out(phy_mgt, queue_id)) + break; + + usleep_range(10, 20); + } while (++i < NBL_DRAIN_WAIT_TIMES); + + if (i >= NBL_DRAIN_WAIT_TIMES) { + nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u rsc cache drain timeout\n", + queue_id); + return -1; + } + + return 0; +} + +static u16 nbl_phy_save_dvn_ctx(void *priv, u16 queue_id, u16 split) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct 
nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct dvn_queue_context dvn_ctx = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_CXT_TABLE_ARR(queue_id), + (u8 *)&dvn_ctx, sizeof(dvn_ctx)); + + nbl_debug(common, NBL_DEBUG_QUEUE, "DVNQ save ctx: %d packed: %08x %08x split: %08x\n", + queue_id, dvn_ctx.dvn_ring_wrap_counter, dvn_ctx.dvn_l1_ring_read, + dvn_ctx.dvn_avail_ring_idx); + + if (split) + return (dvn_ctx.dvn_avail_ring_idx); + else + return (dvn_ctx.dvn_l1_ring_read & 0x7FFF) | (dvn_ctx.dvn_ring_wrap_counter << 15); +} + +static u16 nbl_phy_save_uvn_ctx(void *priv, u16 queue_id, u16 split, u16 queue_size) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct uvn_queue_cxt queue_cxt = {0}; + struct uvn_desc_cxt desc_cxt = {0}; + u16 cache_diff, queue_head, wrap_count; + + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_CXT_TABLE_ARR(queue_id), + (u8 *)&queue_cxt, sizeof(queue_cxt)); + nbl_hw_read_regs(phy_mgt, NBL_UVN_DESC_CXT_TABLE_ARR(queue_id), + (u8 *)&desc_cxt, sizeof(desc_cxt)); + + nbl_debug(common, NBL_DEBUG_QUEUE, + "UVN save ctx: %d cache_tail: %08x cache_head %08x queue_head: %08x\n", + queue_id, desc_cxt.cache_tail, desc_cxt.cache_head, queue_cxt.queue_head); + + cache_diff = (desc_cxt.cache_tail - desc_cxt.cache_head + 64) & (0x3F); + queue_head = (queue_cxt.queue_head - cache_diff + 65536) & (0xFFFF); + if (queue_size) + wrap_count = !((queue_head / queue_size) & 0x1); + else + return 0xffff; + + nbl_debug(common, NBL_DEBUG_QUEUE, "UVN save ctx: %d packed: %08x %08x split: %08x\n", + queue_id, wrap_count, queue_head, queue_head); + + if (split) + return (queue_head); + else + return (queue_head & 0x7FFF) | (wrap_count << 15); +} + +static void nbl_phy_get_rx_queue_err_stats(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats) +{ + queue_err_stats->uvn_stat_pkt_drop = + nbl_hw_rd32(priv, NBL_UVN_STATIS_PKT_DROP(queue_id)); +} + +static void nbl_phy_get_tx_queue_err_stats(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats) +{ + struct nbl_dvn_stat_cnt dvn_stat_cnt; + + nbl_hw_read_regs(priv, NBL_DVN_STAT_CNT(queue_id), + (u8 *)&dvn_stat_cnt, sizeof(dvn_stat_cnt)); + queue_err_stats->dvn_pkt_drop_cnt = dvn_stat_cnt.dvn_pkt_drop_cnt; +} + +static void nbl_phy_setup_queue_switch(void *priv, u16 eth_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_upsport_tbl upsport = {0}; + struct nbl_epro_ept_tbl ept_tbl = {0}; + struct dsch_vn_g2p_cfg_tbl info = {0}; + + upsport.phy_flow = 1; + upsport.entry_vld = 1; + upsport.set_dport_en = 1; + upsport.set_dport_pri = 0; + upsport.vlan_layer_num_0 = 3; + upsport.vlan_layer_num_1 = 3; + /* default we close promisc */ + upsport.set_dport.data = 0xFFF; + + ept_tbl.vld = 1; + ept_tbl.fwd = 1; + + info.vld = 1; + info.port = (eth_id << 1); + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_G2P_CFG_TABLE_REG_ARR(eth_id), + (u8 *)&info, sizeof(info)); +} + +static void nbl_phy_init_pfc(void *priv, u8 ether_ports) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_cos_map cos_map = {0}; + struct nbl_upa_pri_sel_conf sel_conf = {0}; + struct nbl_upa_pri_conf conf_table = {0}; + struct nbl_dqm_rxmac_tx_port_bp_en_cfg dqm_port_bp_en = {0}; + struct 
nbl_dqm_rxmac_tx_cos_bp_en_cfg dqm_cos_bp_en = {0}; + struct nbl_uqm_rx_cos_bp_en_cfg uqm_rx_cos_bp_en = {0}; + struct nbl_uqm_tx_cos_bp_en_cfg uqm_tx_cos_bp_en = {0}; + struct nbl_ustore_port_fc_th ustore_port_fc_th = {0}; + struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0}; + struct nbl_epro_port_pri_mdf_en_cfg pri_mdf_en_cfg = {0}; + int i, j; + + /* DQM */ + /* set default bp_mode: port */ + /* TX bp: dqm send received ETH RX Pause to DSCH */ + /* dqm rxmac_tx_port_bp_en */ + dqm_port_bp_en.eth0 = 1; + dqm_port_bp_en.eth1 = 1; + dqm_port_bp_en.eth2 = 1; + dqm_port_bp_en.eth3 = 1; + nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_PORT_BP_EN, + (u8 *)(&dqm_port_bp_en), sizeof(dqm_port_bp_en)); + + /* TX bp: dqm donot send received ETH RX PFC to DSCH */ + /* dqm rxmac_tx_cos_bp_en */ + dqm_cos_bp_en.eth0 = 0; + dqm_cos_bp_en.eth1 = 0; + dqm_cos_bp_en.eth2 = 0; + dqm_cos_bp_en.eth3 = 0; + nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_COS_BP_EN, + (u8 *)(&dqm_cos_bp_en), sizeof(dqm_cos_bp_en)); + + /* UQM */ + /* RX bp: uqm receive loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h port bp */ + /* uqm rx_port_bp_en_cfg is ok */ + /* RX bp: uqm receive loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h port bp */ + /* uqm tx_port_bp_en_cfg is ok */ + + /* RX bp: uqm receive loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h cos bp */ + /* uqm rx_cos_bp_en */ + uqm_rx_cos_bp_en.vld_l = 0xFFFFFFFF; + uqm_rx_cos_bp_en.vld_h = 0xFFFF; + nbl_hw_write_regs(phy_mgt, NBL_UQM_RX_COS_BP_EN, (u8 *)(&uqm_rx_cos_bp_en), + sizeof(uqm_rx_cos_bp_en)); + + /* RX bp: uqm send received loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h cos bp to USTORE */ + /* uqm tx_cos_bp_en */ + uqm_tx_cos_bp_en.vld_l = 0xFFFFFFFF; + uqm_tx_cos_bp_en.vld_l = 0xFF; + nbl_hw_write_regs(phy_mgt, NBL_UQM_TX_COS_BP_EN, (u8 *)(&uqm_tx_cos_bp_en), + sizeof(uqm_tx_cos_bp_en)); + + /* TX bp: DSCH dp0-3 response to DQM dp0-3 pfc/port bp */ + /* dsch_dpt_pfc_map_vnh default value is ok */ + /* TX bp: DSCH response to DQM cos bp, pkt_cos -> sch_cos map table */ + /* dsch vn_host_dpx_prixx_p2s_map_cfg is ok */ + + /* downstream: enable modify packet pri */ + /* epro port_pri_mdf_en */ + pri_mdf_en_cfg.eth0 = 1; + pri_mdf_en_cfg.eth1 = 1; + pri_mdf_en_cfg.eth2 = 1; + pri_mdf_en_cfg.eth3 = 1; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg), + sizeof(pri_mdf_en_cfg)); + + for (i = 0; i < ether_ports; i++) { + /* set default bp_mode: port */ + /* RX bp: USTORE port bp th, enable send pause frame */ + /* ustore port_fc_th */ + ustore_port_fc_th.xoff_th = 0x190; + ustore_port_fc_th.xon_th = 0x190; + ustore_port_fc_th.fc_set = 0; + ustore_port_fc_th.fc_en = 1; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_FC_TH_REG_ARR(i), + (u8 *)(&ustore_port_fc_th), sizeof(ustore_port_fc_th)); + + for (j = 0; j < 8; j++) { + /* RX bp: ustore cos bp th, disable send pfc frame */ + /* ustore cos_fc_th */ + ustore_cos_fc_th.xoff_th = 0x64; + ustore_cos_fc_th.xon_th = 0x64; + ustore_cos_fc_th.fc_set = 0; + ustore_cos_fc_th.fc_en = 0; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(i * 8 + j), + (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th)); + + /* downstream: sch_cos->pkt_cos or sch_cos->dscp */ + /* epro sch_cos_map */ + cos_map.pkt_cos = j; + cos_map.dscp = j << 3; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_SCH_COS_MAP_TABLE(i, j), + (u8 *)(&cos_map), sizeof(cos_map)); + } + } + + /* upstream: pkt dscp/802.1p -> sch_cos */ + for (i = 0; i < ether_ports; i++) { + /* upstream: when pfc_mode is 802.1p, vlan pri -> sch_cos map table */ + /* upa pri_conf_table */ + 
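+ /* identity mapping: VLAN PCP n is carried to sch_cos n */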
conf_table.pri0 = 0; + conf_table.pri1 = 1; + conf_table.pri2 = 2; + conf_table.pri3 = 3; + conf_table.pri4 = 4; + conf_table.pri5 = 5; + conf_table.pri6 = 6; + conf_table.pri7 = 7; + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_CONF_TABLE(i * 8), + (u8 *)(&conf_table), sizeof(conf_table)); + + /* upstream: set default pfc_mode is 802.1p, use outer vlan */ + /* upa pri_sel_conf */ + sel_conf.pri_sel = (1 << 4 | 1 << 3); + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_SEL_CONF_TABLE(i), + (u8 *)(&sel_conf), sizeof(sel_conf)); + } +} + +static void nbl_phy_enable_mailbox_irq(void *priv, u16 func_id, bool enable_msix, + u16 global_vector_id) +{ + struct nbl_mailbox_qinfo_map_table mb_qinfo_map = { 0 }; + + nbl_hw_read_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id), + (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map)); + + if (enable_msix) { + mb_qinfo_map.msix_idx = global_vector_id; + mb_qinfo_map.msix_idx_vaild = 1; + } else { + mb_qinfo_map.msix_idx = 0; + mb_qinfo_map.msix_idx_vaild = 0; + } + + nbl_hw_write_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id), + (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map)); +} + +static void nbl_abnormal_intr_init(struct nbl_phy_mgt *phy_mgt) +{ + struct nbl_fem_int_mask fem_mask = {0}; + struct nbl_epro_int_mask epro_mask = {0}; + u32 top_ctrl_mask = 0xFFFFFFFF; + + /* Mask and clear fem cfg_err */ + nbl_hw_read_regs(phy_mgt, NBL_FEM_INT_MASK, (u8 *)&fem_mask, sizeof(fem_mask)); + fem_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_FEM_INT_MASK, (u8 *)&fem_mask, sizeof(fem_mask)); + + memset(&fem_mask, 0, sizeof(fem_mask)); + fem_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_FEM_INT_STATUS, (u8 *)&fem_mask, sizeof(fem_mask)); + + nbl_hw_read_regs(phy_mgt, NBL_FEM_INT_MASK, (u8 *)&fem_mask, sizeof(fem_mask)); + + /* Mask and clear epro cfg_err */ + nbl_hw_read_regs(phy_mgt, NBL_EPRO_INT_MASK, (u8 *)&epro_mask, sizeof(epro_mask)); + epro_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_INT_MASK, (u8 *)&epro_mask, sizeof(epro_mask)); + + memset(&epro_mask, 0, sizeof(epro_mask)); + epro_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_INT_STATUS, (u8 *)&epro_mask, sizeof(epro_mask)); + + /* Mask and clear all top_tcrl abnormal intrs. 
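+ * Writing all-ones to NBL_TOP_CTRL_INT_MASK masks every source, and the
+ * same value written to NBL_TOP_CTRL_INT_STATUS clears any bits that are
+ * already pending.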
+ * TODO: might not need this + */ + nbl_hw_write_regs(phy_mgt, NBL_TOP_CTRL_INT_MASK, + (u8 *)&top_ctrl_mask, sizeof(top_ctrl_mask)); + + nbl_hw_write_regs(phy_mgt, NBL_TOP_CTRL_INT_STATUS, + (u8 *)&top_ctrl_mask, sizeof(top_ctrl_mask)); +} + +static void nbl_phy_enable_abnormal_irq(void *priv, bool enable_msix, + u16 global_vector_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_abnormal_msix_vector abnormal_msix_vetcor = { 0 }; + u32 abnormal_timeout = 0x927C0; /* 600000, 1ms */ + + if (enable_msix) { + abnormal_msix_vetcor.idx = global_vector_id; + abnormal_msix_vetcor.vld = 1; + } + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_ABNORMAL_TIMEOUT, + (u8 *)&abnormal_timeout, sizeof(abnormal_timeout)); + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_ABNORMAL_MSIX_VEC, + (u8 *)&abnormal_msix_vetcor, sizeof(abnormal_msix_vetcor)); + + nbl_abnormal_intr_init(phy_mgt); +} + +static void nbl_phy_enable_msix_irq(void *priv, u16 global_vector_id) +{ + struct nbl_msix_notify msix_notify = { 0 }; + + msix_notify.glb_msix_idx = global_vector_id; + + nbl_hw_write_regs(priv, NBL_PCOMPLETER_MSIX_NOTIRY_OFFSET, + (u8 *)&msix_notify, sizeof(msix_notify)); +} + +static u8 *nbl_phy_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_msix_notify msix_notify = { 0 }; + + msix_notify.glb_msix_idx = global_vector_id; + memcpy(irq_data, &msix_notify, sizeof(msix_notify)); + + return (phy_mgt->hw_addr + NBL_PCOMPLETER_MSIX_NOTIRY_OFFSET); +} + +static void nbl_phy_configure_msix_map(void *priv, u16 func_id, bool valid, + dma_addr_t dma_addr, u8 bus, u8 devid, u8 function) +{ + struct nbl_function_msix_map function_msix_map = { 0 }; + + if (valid) { + function_msix_map.msix_map_base_addr = dma_addr; + /* use af's bdf, because dma memmory is alloc by af */ + function_msix_map.function = function; + function_msix_map.devid = devid; + function_msix_map.bus = bus; + function_msix_map.valid = 1; + } + + nbl_hw_write_regs(priv, NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(func_id), + (u8 *)&function_msix_map, sizeof(function_msix_map)); +} + +static void nbl_phy_configure_msix_info(void *priv, u16 func_id, bool valid, u16 interrupt_id, + u8 bus, u8 devid, u8 function, bool msix_mask_en) +{ + struct nbl_pcompleter_host_msix_fid_table host_msix_fid_table = { 0 }; + struct nbl_host_msix_info msix_info = { 0 }; + + if (valid) { + host_msix_fid_table.vld = 1; + host_msix_fid_table.fid = func_id; + + msix_info.intrl_pnum = 0; + msix_info.intrl_rate = 0; + msix_info.function = function; + msix_info.devid = devid; + msix_info.bus = bus; + msix_info.valid = 1; + if (msix_mask_en) + msix_info.msix_mask_en = 1; + } + + nbl_hw_write_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); + nbl_hw_write_regs(priv, NBL_PCOMPLETER_HOST_MSIX_FID_TABLE(interrupt_id), + (u8 *)&host_msix_fid_table, sizeof(host_msix_fid_table)); +} + +static void nbl_phy_update_mailbox_queue_tail_ptr(void *priv, u16 tail_ptr, u8 txrx) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + /* local_qid 0 and 1 denote rx and tx queue respectively */ + u32 local_qid = txrx; + u32 value = ((u32)tail_ptr << 16) | local_qid; + + /* wmb for doorbell */ + wmb(); + writel(value, phy_mgt->mailbox_bar_hw_addr + NBL_MAILBOX_NOTIFY_ADDR); +} + +static void nbl_phy_config_mailbox_rxq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + 
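+ /* Assert queue_rst on its own first so any stale ring state is dropped,
+  * then program the DMA base address and ring size and re-enable the
+  * queue with a second write.
+  */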
qinfo_cfg_rx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); + + qinfo_cfg_rx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_rx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_rx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_rx_table.queue_rst = 0; + qinfo_cfg_rx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + +static void nbl_phy_config_mailbox_txq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + qinfo_cfg_tx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); + + qinfo_cfg_tx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_tx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_tx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_tx_table.queue_rst = 0; + qinfo_cfg_tx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static void nbl_phy_stop_mailbox_rxq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + +static void nbl_phy_stop_mailbox_txq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static u16 nbl_phy_get_mailbox_rx_tail_ptr(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_dbg_tbl cfg_dbg_tbl = { 0 }; + + nbl_hw_read_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_DBG_TABLE_ADDR, + (u8 *)&cfg_dbg_tbl, sizeof(cfg_dbg_tbl)); + return cfg_dbg_tbl.rx_tail_ptr; +} + +static bool nbl_phy_check_mailbox_dma_err(void *priv, bool tx) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tbl = { 0 }; + u64 addr; + + if (tx) + addr = NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR; + else + addr = NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR; + + nbl_hw_read_mbx_regs(priv, addr, (u8 *)&qinfo_cfg_tbl, sizeof(qinfo_cfg_tbl)); + return !!qinfo_cfg_tbl.dif_err; +} + +static u32 nbl_phy_get_host_pf_mask(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 data; + + nbl_hw_read_regs(phy_mgt, NBL_PCIE_HOST_K_PF_MASK_REG, (u8 *)&data, sizeof(data)); + return data; +} + +static u32 nbl_phy_get_host_pf_fid(void *priv, u8 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 data; + + nbl_hw_read_regs(phy_mgt, NBL_PCIE_HOST_K_PF_FID(func_id), (u8 *)&data, sizeof(data)); + return data; +} + +static void nbl_phy_cfg_mailbox_qinfo(void *priv, u16 func_id, u16 bus, u16 devid, u16 function) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mailbox_qinfo_map_table mb_qinfo_map; + + memset(&mb_qinfo_map, 0, sizeof(mb_qinfo_map)); + mb_qinfo_map.function = function; + mb_qinfo_map.devid = devid; + mb_qinfo_map.bus = bus; + mb_qinfo_map.msix_idx_vaild = 0; + nbl_hw_write_regs(phy_mgt, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id), + (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map)); +} + +static void nbl_phy_update_tail_ptr(void *priv, struct nbl_notify_param *param) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u8 __iomem 
*notify_addr = phy_mgt->hw_addr; + u32 local_qid = param->notify_qid; + u32 tail_ptr = param->tail_ptr; + + writel((((u32)tail_ptr << 16) | (u32)local_qid), notify_addr); +} + +static u8 *nbl_phy_get_tail_ptr(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return phy_mgt->hw_addr; +} + +static void nbl_phy_set_promisc_mode(void *priv, u16 vsi_id, u16 eth_id, u16 mode) +{ + struct nbl_ipro_upsport_tbl upsport; + + nbl_hw_read_regs(priv, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); + if (mode) { + upsport.set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + upsport.set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + upsport.set_dport.dport.up.port_id = vsi_id; + upsport.set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_NONE; + } else { + upsport.set_dport.data = 0xFFF; + } + nbl_hw_write_regs(priv, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); +} + +static void nbl_phy_get_coalesce(void *priv, u16 interrupt_id, u16 *pnum, u16 *rate) +{ + struct nbl_host_msix_info msix_info = { 0 }; + + nbl_hw_read_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); + + *pnum = msix_info.intrl_pnum; + *rate = msix_info.intrl_rate; +} + +static void nbl_phy_set_coalesce(void *priv, u16 interrupt_id, u16 pnum, u16 rate) +{ + struct nbl_host_msix_info msix_info = { 0 }; + + nbl_hw_read_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); + + msix_info.intrl_pnum = pnum; + msix_info.intrl_rate = rate; + nbl_hw_write_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); +} + +static int nbl_phy_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + u8 reverse_mac[ETH_ALEN]; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + nbl_convert_mac(mac, reverse_mac); + dpsport.smac_low = reverse_mac[0] | reverse_mac[1] << 8; + memcpy(&dpsport.smac_high, &reverse_mac[2], sizeof(u32)); + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + return 0; +} + +static int nbl_phy_set_spoof_check_enable(void *priv, u16 vsi_id, u8 enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + dpsport.addr_check_en = enable; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + return 0; +} + +static void nbl_phy_config_adminq_rxq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + qinfo_cfg_rx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); + + qinfo_cfg_rx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_rx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_rx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_rx_table.queue_rst = 0; + qinfo_cfg_rx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + 
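The mailbox and adminq helpers above and below share one two-step sequence: write the qinfo table with only queue_rst set, then write it again with the DMA base address, the size field, reset cleared and queue_en set. A minimal self-contained sketch of that pattern follows; the qinfo_cfg layout loosely mirrors nbl_mailbox_qinfo_cfg_table, while write_qinfo(), config_queue() and the sample addresses are hypothetical stand-ins, and treating size_bwid as a log2-encoded ring depth is an assumption based on the 4-bit queue_size_bwind field.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for nbl_mailbox_qinfo_cfg_table (no bitfields). */
struct qinfo_cfg {
	uint32_t base_addr_l;
	uint32_t base_addr_h;
	uint32_t size_bwind;	/* assumed: log2 of the ring depth */
	uint32_t queue_rst;
	uint32_t queue_en;
};

/* Stand-in for the register accessor: just show what would be programmed. */
static void write_qinfo(const char *name, const struct qinfo_cfg *cfg)
{
	printf("%s: base=0x%08" PRIx32 "%08" PRIx32 " size_bwind=%" PRIu32
	       " rst=%" PRIu32 " en=%" PRIu32 "\n", name,
	       cfg->base_addr_h, cfg->base_addr_l, cfg->size_bwind,
	       cfg->queue_rst, cfg->queue_en);
}

static void config_queue(const char *name, uint64_t dma_addr, int size_bwid)
{
	struct qinfo_cfg cfg;

	/* 1st write: only queue_rst set, flushing any previous queue state. */
	memset(&cfg, 0, sizeof(cfg));
	cfg.queue_rst = 1;
	write_qinfo(name, &cfg);

	/* 2nd write: program base/size, clear reset and enable the queue. */
	cfg.base_addr_l = (uint32_t)(dma_addr & 0xFFFFFFFF);
	cfg.base_addr_h = (uint32_t)(dma_addr >> 32);
	cfg.size_bwind = (uint32_t)size_bwid;
	cfg.queue_rst = 0;
	cfg.queue_en = 1;
	write_qinfo(name, &cfg);
}

int main(void)
{
	/* Hypothetical DMA addresses and a 64-entry (2^6) ring. */
	config_queue("adminq-rx", 0x123456789000ULL, 6);
	config_queue("adminq-tx", 0x12345678a000ULL, 6);
	return 0;
}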
+static void nbl_phy_config_adminq_txq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + qinfo_cfg_tx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); + + qinfo_cfg_tx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_tx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_tx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_tx_table.queue_rst = 0; + qinfo_cfg_tx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static void nbl_phy_stop_adminq_rxq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + +static void nbl_phy_stop_adminq_txq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static void nbl_phy_cfg_adminq_qinfo(void *priv, u16 bus, u16 devid, u16 function) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_adminq_qinfo_map_table adminq_qinfo_map = {0}; + + memset(&adminq_qinfo_map, 0, sizeof(adminq_qinfo_map)); + adminq_qinfo_map.function = function; + adminq_qinfo_map.devid = devid; + adminq_qinfo_map.bus = bus; + + nbl_hw_write_mbx_regs(phy_mgt, NBL_ADMINQ_MSIX_MAP_TABLE_ADDR, + (u8 *)&adminq_qinfo_map, sizeof(adminq_qinfo_map)); +} + +static void nbl_phy_enable_adminq_irq(void *priv, bool enable_msix, u16 global_vector_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_adminq_qinfo_map_table adminq_qinfo_map = { 0 }; + + adminq_qinfo_map.bus = common->bus; + adminq_qinfo_map.devid = common->devid; + adminq_qinfo_map.function = NBL_COMMON_TO_PCI_FUNC_ID(common); + + if (enable_msix) { + adminq_qinfo_map.msix_idx = global_vector_id; + adminq_qinfo_map.msix_idx_vaild = 1; + } else { + adminq_qinfo_map.msix_idx = 0; + adminq_qinfo_map.msix_idx_vaild = 0; + } + + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_MSIX_MAP_TABLE_ADDR, + (u8 *)&adminq_qinfo_map, sizeof(adminq_qinfo_map)); +} + +static void nbl_phy_update_adminq_queue_tail_ptr(void *priv, u16 tail_ptr, u8 txrx) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + /* local_qid 0 and 1 denote rx and tx queue respectively */ + u32 local_qid = txrx; + u32 value = ((u32)tail_ptr << 16) | local_qid; + + /* wmb for doorbell */ + wmb(); + writel(value, phy_mgt->mailbox_bar_hw_addr + NBL_ADMINQ_NOTIFY_ADDR); +} + +static u16 nbl_phy_get_adminq_rx_tail_ptr(void *priv) +{ + struct nbl_adminq_qinfo_cfg_dbg_tbl cfg_dbg_tbl = { 0 }; + + nbl_hw_read_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_DBG_TABLE_ADDR, + (u8 *)&cfg_dbg_tbl, sizeof(cfg_dbg_tbl)); + return cfg_dbg_tbl.rx_tail_ptr; +} + +static bool nbl_phy_check_adminq_dma_err(void *priv, bool tx) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tbl = { 0 }; + u64 addr; + + if (tx) + addr = NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR; + else + addr = NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR; + + nbl_hw_read_mbx_regs(priv, addr, (u8 *)&qinfo_cfg_tbl, sizeof(qinfo_cfg_tbl)); + + if (!qinfo_cfg_tbl.rsv1 && !qinfo_cfg_tbl.rsv2 && qinfo_cfg_tbl.dif_err) + return true; + + return 
false; +} + +static u8 __iomem *nbl_phy_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + if (size) + *size = (size_t)phy_mgt->hw_size; + return phy_mgt->hw_addr; +} + +static unsigned long nbl_phy_get_fw_ping(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + unsigned long ping; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_HEARTBEAT_PING, (u8 *)&ping, sizeof(ping)); + + return ping; +} + +static void nbl_phy_set_fw_ping(void *priv, unsigned long ping) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_write_mbx_regs(phy_mgt, NBL_FW_HEARTBEAT_PING, (u8 *)&ping, sizeof(ping)); +} + +static unsigned long nbl_phy_get_fw_pong(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + unsigned long pong; + + nbl_hw_read_regs(phy_mgt, NBL_FW_HEARTBEAT_PONG, (u8 *)&pong, sizeof(pong)); + + return pong; +} + +static void nbl_phy_set_fw_pong(void *priv, unsigned long pong) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_write_regs(phy_mgt, NBL_FW_HEARTBEAT_PONG, (u8 *)&pong, sizeof(pong)); +} + +static const u32 nbl_phy_reg_dump_list[] = { + NBL_TOP_CTRL_VERSION_INFO, + NBL_TOP_CTRL_VERSION_DATE, +}; + +static void nbl_phy_get_reg_dump(void *priv, u32 *data, u32 len) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + int i; + + for (i = 0; i < ARRAY_SIZE(nbl_phy_reg_dump_list) && i < len; i++) + nbl_hw_read_regs(phy_mgt, nbl_phy_reg_dump_list[i], + (u8 *)&data[i], sizeof(data[i])); +} + +static int nbl_phy_get_reg_dump_len(void *priv) +{ + return ARRAY_SIZE(nbl_phy_reg_dump_list) * sizeof(u32); +} + +static u32 nbl_phy_get_chip_temperature(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_TVSENSOR0); +} + +static int nbl_phy_process_abnormal_queue(struct nbl_phy_mgt *phy_mgt, u16 queue_id, int type, + struct nbl_abnormal_details *detail) +{ + struct nbl_ipro_queue_tbl ipro_queue_tbl = {0}; + struct nbl_host_vnet_qinfo host_vnet_qinfo = {0}; + u32 qinfo_id = type == NBL_ABNORMAL_EVENT_DVN ? 
NBL_PAIR_ID_GET_TX(queue_id) : + NBL_PAIR_ID_GET_RX(queue_id); + + if (type >= NBL_ABNORMAL_EVENT_MAX) + return -EINVAL; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_QUEUE_TBL(queue_id), + (u8 *)&ipro_queue_tbl, sizeof(ipro_queue_tbl)); + + detail->abnormal = true; + detail->qid = queue_id; + detail->vsi_id = ipro_queue_tbl.vsi_id; + + nbl_hw_read_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + host_vnet_qinfo.valid = 1; + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + + return 0; +} + +static int nbl_phy_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnomal_info) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct device *dev = NBL_PHY_MGT_TO_DEV(phy_mgt); + struct dvn_desc_dif_err_info desc_dif_err_info = {0}; + struct dvn_pkt_dif_err_info pkt_dif_err_info = {0}; + struct dvn_err_queue_id_get err_queue_id_get = {0}; + struct uvn_queue_err_info queue_err_info = {0}; + struct nbl_abnormal_details *detail; + u32 int_status = 0, rdma_other_abn = 0, tlp_out_drop_cnt = 0; + u32 desc_dif_err_cnt = 0, pkt_dif_err_cnt = 0; + u32 queue_err_cnt; + int ret = 0; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_INT_STATUS, (u8 *)&int_status, sizeof(u32)); + if (int_status) { + if (int_status & BIT(NBL_DVN_INT_DESC_DIF_ERR)) { + nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_CNT, + (u8 *)&desc_dif_err_cnt, sizeof(u32)); + nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_INFO, + (u8 *)&desc_dif_err_info, + sizeof(struct dvn_desc_dif_err_info)); + dev_info(dev, "dvn int_status:0x%x, desc_dif_mf_cnt:%d, queue_id:%d\n", + int_status, desc_dif_err_cnt, desc_dif_err_info.queue_id); + detail = &abnomal_info->details[NBL_ABNORMAL_EVENT_DVN]; + nbl_phy_process_abnormal_queue(phy_mgt, desc_dif_err_info.queue_id, + NBL_ABNORMAL_EVENT_DVN, detail); + + ret |= BIT(NBL_ABNORMAL_EVENT_DVN); + } + + if (int_status & BIT(NBL_DVN_INT_PKT_DIF_ERR)) { + nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_CNT, + (u8 *)&pkt_dif_err_cnt, sizeof(u32)); + nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_INFO, + (u8 *)&pkt_dif_err_info, + sizeof(struct dvn_pkt_dif_err_info)); + dev_info(dev, "dvn int_status:0x%x, pkt_dif_mf_cnt:%d, queue_id:%d\n", + int_status, pkt_dif_err_cnt, pkt_dif_err_info.queue_id); + } + + /* clear dvn abnormal irq */ + nbl_hw_write_regs(phy_mgt, NBL_DVN_INT_STATUS, + (u8 *)&int_status, sizeof(int_status)); + + /* enable new queue error irq */ + err_queue_id_get.desc_flag = 1; + err_queue_id_get.pkt_flag = 1; + nbl_hw_write_regs(phy_mgt, NBL_DVN_ERR_QUEUE_ID_GET, + (u8 *)&err_queue_id_get, sizeof(err_queue_id_get)); + } + + int_status = 0; + nbl_hw_read_regs(phy_mgt, NBL_UVN_INT_STATUS, (u8 *)&int_status, sizeof(u32)); + if (int_status) { + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_CNT, + (u8 *)&queue_err_cnt, sizeof(u32)); + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_INFO, + (u8 *)&queue_err_info, sizeof(struct uvn_queue_err_info)); + dev_info(dev, "uvn int_status:%x queue_err_cnt: 0x%x qid 0x%x\n", + int_status, queue_err_cnt, queue_err_info.queue_id); + + if (int_status & BIT(NBL_UVN_INT_QUEUE_ERR)) { + detail = &abnomal_info->details[NBL_ABNORMAL_EVENT_UVN]; + nbl_phy_process_abnormal_queue(phy_mgt, queue_err_info.queue_id, + NBL_ABNORMAL_EVENT_UVN, detail); + + ret |= BIT(NBL_ABNORMAL_EVENT_UVN); + } + + /* clear uvn abnormal irq */ + nbl_hw_write_regs(phy_mgt, NBL_UVN_INT_STATUS, + (u8 *)&int_status, sizeof(int_status)); + } + + int_status = 
0; + nbl_hw_read_regs(phy_mgt, NBL_DSCH_INT_STATUS, (u8 *)&int_status, sizeof(u32)); + nbl_hw_read_regs(phy_mgt, NBL_DSCH_RDMA_OTHER_ABN, (u8 *)&rdma_other_abn, sizeof(u32)); + if (int_status && (int_status != NBL_DSCH_RDMA_OTHER_ABN_BIT || + rdma_other_abn != NBL_DSCH_RDMA_DPQM_DB_LOST)) { + dev_info(dev, "dsch int_status:%x\n", int_status); + + /* clear dsch abnormal irq */ + nbl_hw_write_regs(phy_mgt, NBL_DSCH_INT_STATUS, + (u8 *)&int_status, sizeof(int_status)); + } + + int_status = 0; + nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS, (u8 *)&int_status, sizeof(u32)); + if (int_status) { + nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_TLP_OUT_DROP_CNT, + (u8 *)&tlp_out_drop_cnt, sizeof(u32)); + dev_info(dev, "pcomleter int_status:0x%x tlp_out_drop_cnt 0x%x\n", + int_status, tlp_out_drop_cnt); + + /* clear pcomleter abnormal irq */ + nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS, + (u8 *)&int_status, sizeof(int_status)); + } + + return ret; +} + +static u32 nbl_phy_get_uvn_desc_entry_stats(void *priv) +{ + return nbl_hw_rd32(priv, NBL_UVN_DESC_RD_ENTRY); +} + +static void nbl_phy_set_uvn_desc_wr_timeout(void *priv, u16 timeout) +{ + struct uvn_desc_wr_timeout wr_timeout = {0}; + + wr_timeout.num = timeout; + nbl_hw_write_regs(priv, NBL_UVN_DESC_WR_TIMEOUT, (u8 *)&wr_timeout, sizeof(wr_timeout)); +} + +static void nbl_phy_get_board_info(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}}; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3)); + board_info->eth_num = dw3.info.port_num; + board_info->eth_speed = dw3.info.port_speed; +} + +static u32 nbl_phy_get_fw_eth_num(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}}; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3)); + return dw3.info.port_num; +} + +static u32 nbl_phy_get_fw_eth_map(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fw_board_cfg_dw6 dw6 = {.info = {0}}; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW6_OFFSET, (u8 *)&dw6, sizeof(dw6)); + return dw6.info.eth_bitmap; +} + +static struct nbl_phy_ops phy_ops = { + .init_chip_module = nbl_phy_init_chip_module, + .init_qid_map_table = nbl_phy_init_qid_map_table, + .set_qid_map_table = nbl_phy_set_qid_map_table, + .set_qid_map_ready = nbl_phy_set_qid_map_ready, + .cfg_ipro_queue_tbl = nbl_phy_cfg_ipro_queue_tbl, + .cfg_ipro_dn_sport_tbl = nbl_phy_cfg_ipro_dn_sport_tbl, + .set_vnet_queue_info = nbl_phy_set_vnet_queue_info, + .clear_vnet_queue_info = nbl_phy_clear_vnet_queue_info, + .cfg_vnet_qinfo_log = nbl_phy_cfg_vnet_qinfo_log, + .reset_dvn_cfg = nbl_phy_reset_dvn_cfg, + .reset_uvn_cfg = nbl_phy_reset_uvn_cfg, + .restore_dvn_context = nbl_phy_restore_dvn_context, + .restore_uvn_context = nbl_phy_restore_uvn_context, + .get_tx_queue_cfg = nbl_phy_get_tx_queue_cfg, + .get_rx_queue_cfg = nbl_phy_get_rx_queue_cfg, + .cfg_tx_queue = nbl_phy_cfg_tx_queue, + .cfg_rx_queue = nbl_phy_cfg_rx_queue, + .check_q2tc = nbl_phy_check_q2tc, + .cfg_q2tc_netid = nbl_phy_cfg_q2tc_netid, + .cfg_q2tc_tcid = nbl_phy_cfg_q2tc_tcid, + .set_tc_wgt = nbl_phy_set_tc_wgt, + .active_shaping = nbl_phy_active_shaping, + .deactive_shaping = nbl_phy_deactive_shaping, + .set_shaping = nbl_phy_set_shaping, + .cfg_dsch_net_to_group = nbl_phy_cfg_dsch_net_to_group, + .init_epro_rss_key = nbl_phy_init_epro_rss_key, + 
.read_rss_key = nbl_phy_read_epro_rss_key, + .read_rss_indir = nbl_phy_read_rss_indir, + .get_rss_alg_sel = nbl_phy_get_rss_alg_sel, + .init_epro_vpt_tbl = nbl_phy_init_epro_vpt_tbl, + .set_epro_rss_default = nbl_phy_set_epro_rss_default, + .cfg_epro_rss_ret = nbl_phy_cfg_epro_rss_ret, + .set_epro_rss_pt = nbl_phy_set_epro_rss_pt, + .clear_epro_rss_pt = nbl_phy_clear_epro_rss_pt, + .set_promisc_mode = nbl_phy_set_promisc_mode, + .disable_dvn = nbl_phy_disable_dvn, + .disable_uvn = nbl_phy_disable_uvn, + .lso_dsch_drain = nbl_phy_lso_dsch_drain, + .rsc_cache_drain = nbl_phy_rsc_cache_drain, + .save_dvn_ctx = nbl_phy_save_dvn_ctx, + .save_uvn_ctx = nbl_phy_save_uvn_ctx, + .get_rx_queue_err_stats = nbl_phy_get_rx_queue_err_stats, + .get_tx_queue_err_stats = nbl_phy_get_tx_queue_err_stats, + .setup_queue_switch = nbl_phy_setup_queue_switch, + .init_pfc = nbl_phy_init_pfc, + .get_chip_temperature = nbl_phy_get_chip_temperature, + + .configure_msix_map = nbl_phy_configure_msix_map, + .configure_msix_info = nbl_phy_configure_msix_info, + .get_coalesce = nbl_phy_get_coalesce, + .set_coalesce = nbl_phy_set_coalesce, + + .set_ht = nbl_phy_set_ht, + .set_kt = nbl_phy_set_kt, + .search_key = nbl_phy_search_key, + .add_tcam = nbl_phy_add_tcam, + .del_tcam = nbl_phy_del_tcam, + .add_mcc = nbl_phy_add_mcc, + .del_mcc = nbl_phy_del_mcc, + .init_fem = nbl_phy_init_fem, + + .update_mailbox_queue_tail_ptr = nbl_phy_update_mailbox_queue_tail_ptr, + .config_mailbox_rxq = nbl_phy_config_mailbox_rxq, + .config_mailbox_txq = nbl_phy_config_mailbox_txq, + .stop_mailbox_rxq = nbl_phy_stop_mailbox_rxq, + .stop_mailbox_txq = nbl_phy_stop_mailbox_txq, + .get_mailbox_rx_tail_ptr = nbl_phy_get_mailbox_rx_tail_ptr, + .check_mailbox_dma_err = nbl_phy_check_mailbox_dma_err, + .get_host_pf_mask = nbl_phy_get_host_pf_mask, + .get_host_pf_fid = nbl_phy_get_host_pf_fid, + .cfg_mailbox_qinfo = nbl_phy_cfg_mailbox_qinfo, + .enable_mailbox_irq = nbl_phy_enable_mailbox_irq, + .enable_abnormal_irq = nbl_phy_enable_abnormal_irq, + .enable_msix_irq = nbl_phy_enable_msix_irq, + .get_msix_irq_enable_info = nbl_phy_get_msix_irq_enable_info, + + .config_adminq_rxq = nbl_phy_config_adminq_rxq, + .config_adminq_txq = nbl_phy_config_adminq_txq, + .stop_adminq_rxq = nbl_phy_stop_adminq_rxq, + .stop_adminq_txq = nbl_phy_stop_adminq_txq, + .cfg_adminq_qinfo = nbl_phy_cfg_adminq_qinfo, + .enable_adminq_irq = nbl_phy_enable_adminq_irq, + .update_adminq_queue_tail_ptr = nbl_phy_update_adminq_queue_tail_ptr, + .get_adminq_rx_tail_ptr = nbl_phy_get_adminq_rx_tail_ptr, + .check_adminq_dma_err = nbl_phy_check_adminq_dma_err, + + .update_tail_ptr = nbl_phy_update_tail_ptr, + .get_tail_ptr = nbl_phy_get_tail_ptr, + .set_spoof_check_addr = nbl_phy_set_spoof_check_addr, + .set_spoof_check_enable = nbl_phy_set_spoof_check_enable, + + .get_hw_addr = nbl_phy_get_hw_addr, + + .get_fw_ping = nbl_phy_get_fw_ping, + .set_fw_ping = nbl_phy_set_fw_ping, + .get_fw_pong = nbl_phy_get_fw_pong, + .set_fw_pong = nbl_phy_set_fw_pong, + + .get_reg_dump = nbl_phy_get_reg_dump, + .get_reg_dump_len = nbl_phy_get_reg_dump_len, + .process_abnormal_event = nbl_phy_process_abnormal_event, + .get_uvn_desc_entry_stats = nbl_phy_get_uvn_desc_entry_stats, + .set_uvn_desc_wr_timeout = nbl_phy_set_uvn_desc_wr_timeout, + + .get_fw_eth_num = nbl_phy_get_fw_eth_num, + .get_fw_eth_map = nbl_phy_get_fw_eth_map, + .get_board_info = nbl_phy_get_board_info, +}; + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_phy_setup_phy_mgt(struct 
nbl_common_info *common, + struct nbl_phy_mgt_leonis **phy_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + *phy_mgt_leonis = devm_kzalloc(dev, sizeof(struct nbl_phy_mgt_leonis), GFP_KERNEL); + if (!*phy_mgt_leonis) + return -ENOMEM; + + NBL_PHY_MGT_TO_COMMON(&(*phy_mgt_leonis)->phy_mgt) = common; + + return 0; +} + +static void nbl_phy_remove_phy_mgt(struct nbl_common_info *common, + struct nbl_phy_mgt_leonis **phy_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, *phy_mgt_leonis); + *phy_mgt_leonis = NULL; +} + +static int nbl_phy_setup_ops(struct nbl_common_info *common, struct nbl_phy_ops_tbl **phy_ops_tbl, + struct nbl_phy_mgt_leonis *phy_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + *phy_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_phy_ops_tbl), GFP_KERNEL); + if (!*phy_ops_tbl) + return -ENOMEM; + + NBL_PHY_OPS_TBL_TO_OPS(*phy_ops_tbl) = &phy_ops; + NBL_PHY_OPS_TBL_TO_PRIV(*phy_ops_tbl) = phy_mgt_leonis; + + return 0; +} + +static void nbl_phy_remove_ops(struct nbl_common_info *common, struct nbl_phy_ops_tbl **phy_ops_tbl) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, *phy_ops_tbl); + *phy_ops_tbl = NULL; +} + +static void nbl_phy_disable_rx_err_report(struct pci_dev *pdev) +{ +#define NBL_RX_ERR_BIT 0 +#define NBL_BAD_TLP_BIT 6 +#define NBL_BAD_DLLP_BIT 7 + u8 mask = 0; + + if (!pdev->aer_cap) + return; + + pci_read_config_byte(pdev, pdev->aer_cap + PCI_ERR_COR_MASK, &mask); + mask |= BIT(NBL_RX_ERR_BIT) | BIT(NBL_BAD_TLP_BIT) | BIT(NBL_BAD_DLLP_BIT); + pci_write_config_byte(pdev, pdev->aer_cap + PCI_ERR_COR_MASK, mask); +} + +int nbl_phy_init_leonis(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_common_info *common; + struct pci_dev *pdev; + struct nbl_phy_mgt_leonis **phy_mgt_leonis; + struct nbl_phy_mgt *phy_mgt; + struct nbl_phy_ops_tbl **phy_ops_tbl; + int bar_mask; + int ret = 0; + + common = NBL_ADAPTER_TO_COMMON(adapter); + phy_mgt_leonis = (struct nbl_phy_mgt_leonis **)&NBL_ADAPTER_TO_PHY_MGT(adapter); + phy_ops_tbl = &NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + pdev = NBL_COMMON_TO_PDEV(common); + + ret = nbl_phy_setup_phy_mgt(common, phy_mgt_leonis); + if (ret) + goto setup_mgt_fail; + + phy_mgt = &(*phy_mgt_leonis)->phy_mgt; + bar_mask = BIT(NBL_MEMORY_BAR) | BIT(NBL_MAILBOX_BAR); + ret = pci_request_selected_regions(pdev, bar_mask, NBL_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "Request memory bar and mailbox bar failed, err = %d\n", ret); + goto request_bar_region_fail; + } + + if (param->caps.has_ctrl || param->caps.has_factory_ctrl) { + phy_mgt->hw_addr = ioremap(pci_resource_start(pdev, NBL_MEMORY_BAR), + pci_resource_len(pdev, NBL_MEMORY_BAR) - + NBL_RDMA_NOTIFY_OFF); + if (!phy_mgt->hw_addr) { + dev_err(&pdev->dev, "Memory bar ioremap failed\n"); + ret = -EIO; + goto ioremap_err; + } + phy_mgt->hw_size = pci_resource_len(pdev, NBL_MEMORY_BAR) - NBL_RDMA_NOTIFY_OFF; + } else { + phy_mgt->hw_addr = ioremap(pci_resource_start(pdev, NBL_MEMORY_BAR), + NBL_RDMA_NOTIFY_OFF); + if (!phy_mgt->hw_addr) { + dev_err(&pdev->dev, "Memory bar ioremap failed\n"); + ret = -EIO; + goto ioremap_err; + } + phy_mgt->hw_size = NBL_RDMA_NOTIFY_OFF; + } + + phy_mgt->notify_offset = 0; + phy_mgt->mailbox_bar_hw_addr = pci_ioremap_bar(pdev, NBL_MAILBOX_BAR); + if (!phy_mgt->mailbox_bar_hw_addr) { + dev_err(&pdev->dev, "Mailbox bar ioremap failed\n"); + ret = -EIO; + goto mailbox_ioremap_err; 
+ } + + spin_lock_init(&phy_mgt->reg_lock); + phy_mgt->should_lock = true; + + ret = nbl_phy_setup_ops(common, phy_ops_tbl, *phy_mgt_leonis); + if (ret) + goto setup_ops_fail; + + nbl_phy_disable_rx_err_report(pdev); + + (*phy_mgt_leonis)->ro_enable = pcie_relaxed_ordering_enabled(pdev); + + return 0; + +setup_ops_fail: + iounmap(phy_mgt->mailbox_bar_hw_addr); +mailbox_ioremap_err: + iounmap(phy_mgt->hw_addr); +ioremap_err: + pci_release_selected_regions(pdev, bar_mask); +request_bar_region_fail: + nbl_phy_remove_phy_mgt(common, phy_mgt_leonis); +setup_mgt_fail: + return ret; +} + +void nbl_phy_remove_leonis(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_common_info *common; + struct nbl_phy_mgt_leonis **phy_mgt_leonis; + struct nbl_phy_ops_tbl **phy_ops_tbl; + struct pci_dev *pdev; + u8 __iomem *hw_addr; + u8 __iomem *mailbox_bar_hw_addr; + int bar_mask = BIT(NBL_MEMORY_BAR) | BIT(NBL_MAILBOX_BAR); + + common = NBL_ADAPTER_TO_COMMON(adapter); + phy_mgt_leonis = (struct nbl_phy_mgt_leonis **)&NBL_ADAPTER_TO_PHY_MGT(adapter); + phy_ops_tbl = &NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + pdev = NBL_COMMON_TO_PDEV(common); + + hw_addr = (*phy_mgt_leonis)->phy_mgt.hw_addr; + mailbox_bar_hw_addr = (*phy_mgt_leonis)->phy_mgt.mailbox_bar_hw_addr; + + iounmap(mailbox_bar_hw_addr); + iounmap(hw_addr); + pci_release_selected_regions(pdev, bar_mask); + nbl_phy_remove_phy_mgt(common, phy_mgt_leonis); + + nbl_phy_remove_ops(common, phy_ops_tbl); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h new file mode 100644 index 000000000000..ad5c19ed1450 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h @@ -0,0 +1,2182 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_PHY_LEONIS_H_ +#define _NBL_PHY_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_phy.h" + +#define NBL_NOTIFY_DELAY_MIN_TIME_FOR_REGS 200 /* 200us for palladium,3us for s2c */ +#define NBL_NOTIFY_DELAY_MAX_TIME_FOR_REGS 300 /* 300us for palladium,5us for s2c */ + +#define NBL_DRAIN_WAIT_TIMES (30000) + +/* ---------- FEM ---------- */ +#define NBL_FEM_INT_STATUS (NBL_PPE_FEM_BASE + 0x00000000) +#define NBL_FEM_INT_MASK (NBL_PPE_FEM_BASE + 0x00000004) +#define NBL_FEM_INIT_START (NBL_PPE_FEM_BASE + 0x00000180) +#define NBL_FEM_KT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000348) +#define NBL_FEM_INSERT_SEARCH0_CTRL (NBL_PPE_FEM_BASE + 0x00000500) +#define NBL_FEM_INSERT_SEARCH0_ACK (NBL_PPE_FEM_BASE + 0x00000504) +#define NBL_FEM_INSERT_SEARCH0_DATA (NBL_PPE_FEM_BASE + 0x00000508) +#define KT_MASK_LEN32_ACTION_INFO (0x0) +#define KT_MASK_LEN12_ACTION_INFO (0xFFFFF000) +#define NBL_FEM_SEARCH_KEY_LEN 44 + +#define HT_PORT0_BANK_SEL (0b01000000) +#define HT_PORT1_BANK_SEL (0b00110000) +#define HT_PORT2_BANK_SEL (0b00000111) +#define KT_PORT0_BANK_SEL (0b11000000) +#define KT_PORT1_BANK_SEL (0b00110000) +#define KT_PORT2_BANK_SEL (0b00001111) +#define AT_PORT0_BANK_SEL (0b000000000000) +#define AT_PORT1_BANK_SEL (0b111000000000) +#define AT_PORT2_BANK_SEL (0b000111111111) +#define HT_PORT0_BTM 1 +#define HT_PORT1_BTM 3 +#define HT_PORT2_BTM 16 +#define NBL_1BIT 1 +#define NBL_8BIT 8 +#define NBL_16BIT 16 + +#define NBL_FEM_HT_BANK_SEL_BITMAP (NBL_PPE_FEM_BASE + 0x00000200) +#define NBL_FEM_KT_BANK_SEL_BITMAP (NBL_PPE_FEM_BASE + 0x00000204) +#define NBL_FEM_AT_BANK_SEL_BITMAP (NBL_PPE_FEM_BASE + 0x00000208) +#define NBL_FEM_AT_BANK_SEL_BITMAP2 (NBL_PPE_FEM_BASE + 0x0000020C) + +#define NBL_EM_PT_MASK_LEN_0 (0xFFFFFFFF) +#define NBL_EM_PT_MASK_LEN_64 (0x0000FFFF) +#define NBL_EM_PT_MASK_LEN_96 (0x000000FF) +#define NBL_EM_PT_MASK1_LEN_0 (0xFFFFFFFF) +#define NBL_EM_PT_MASK1_LEN_4 (0x7FFFFFFF) +#define NBL_EM_PT_MASK1_LEN_12 (0x1FFFFFFF) +#define NBL_EM_PT_MASK1_LEN_20 (0x07FFFFFF) +#define NBL_EM_PT_MASK1_LEN_28 (0x01FFFFFF) +#define NBL_EM_PT_MASK1_LEN_32 (0x00FFFFFF) +#define NBL_EM_PT_MASK1_LEN_76 (0x00001FFF) +#define NBL_EM_PT_MASK1_LEN_112 (0x0000000F) +#define NBL_EM_PT_MASK1_LEN_116 (0x00000007) +#define NBL_EM_PT_MASK1_LEN_124 (0x00000001) +#define NBL_EM_PT_MASK1_LEN_128 (0x0) +#define NBL_EM_PT_MASK2_LEN_28 (0x000007FF) +#define NBL_EM_PT_MASK2_LEN_36 (0x000001FF) +#define NBL_EM_PT_MASK2_LEN_44 (0x0000007F) +#define NBL_EM_PT_MASK2_LEN_52 (0x0000001F) +#define NBL_EM_PT_MASK2_LEN_60 (0x00000007) +#define NBL_EM_PT_MASK2_LEN_68 (0x00000001) +#define NBL_EM_PT_MASK2_LEN_72 (0x00000010) +#define NBL_EM_PT_MASK2_SEC_72 (0x00000000) + +#define NBL_KT_PHY_L2_DW_LEN 40 + +#define NBL_ACL_VSI_PF_UPCALL 3 +#define NBL_ACL_ETH_PF_UPCALL 2 +#define NBL_ACL_INDIRECT_ACCESS_WRITE (0) +#define NBL_ACL_INDIRECT_ACCESS_READ (1) +#define NBL_ETH_BASE_IDX 8 +#define NBL_VSI_BASE_IDX 0 +#define NBL_PF_MAX_NUM 4 +#define NBL_ACL_TCAM_UPCALL_IDX 15 + +#define NBL_GET_PF_ETH_ID(idx) ((idx) + NBL_ETH_BASE_IDX) +#define NBL_GET_PF_VSI_ID(idx) ((idx) * 256) +#define NBL_ACL_GET_ACTION_DATA(act_buf, act_data) (act_data = (act_buf) & 0x3fffff) +#define NBL_ACL_FLUSH_FLOW_BTM 0x7fff +#define NBL_ACL_FLUSH_UPCALL_BTM 0x8000 + +#define NBL_ACL_TCAM_DATA_X(t) (NBL_PPE_ACL_BASE + 0x00000904 + ((t) * 8)) +#define NBL_ACL_TCAM_DATA_Y(t) (NBL_PPE_ACL_BASE + 0x00000990 + ((t) * 8)) + +/* ---------- MCC ---------- */ +#define NBL_MCC_MODULE (0x00B44000) +#define 
NBL_MCC_LEAF_NODE_TABLE(i) \ + (NBL_MCC_MODULE + 0x00010000 + (i) * sizeof(struct nbl_mcc_tbl)) + +union nbl_acl_tcam_upcall_data_u { + struct { + u64 rsv1:26; + u64 vsi_id:8; + u64 sw_id:2; + u64 vsi_pt_id:4; + u64 vsi_rsv_h:24; + }; + struct { + u64 rsv2:32; + u64 eth_id:4; + u64 eth_pt_id:4; + u64 eth_rsv_h:24; + }; + u8 data[8]; + u64 tcam_data; +}; + +#pragma pack(1) + +struct nbl_fem_int_mask { + u32 rsv0:2; + u32 fifo_ovf_err:1; + u32 fifo_udf_err:1; + u32 cif_err:1; + u32 rsv1:1; + u32 cfg_err:1; + u32 data_ucor_err:1; + u32 bank_cflt_err:1; + u32 rsv2:23; +}; + +union nbl_fem_ht_acc_ctrl_u { + struct nbl_fem_ht_acc_ctrl { + u32 bucket_id:2; /* used for choose entry's hash-bucket */ + u32 entry_id:14; /* used for choose hash-bucket's entry */ + u32 ht_id:1; /* 0:HT0, 1:HT1 */ +#define NBL_ACC_HT0 (0) +#define NBL_ACC_HT1 (1) + u32 port:2; /* 0:pp0 1:pp1 2:pp2 */ + u32 rsv:10; + u32 access_size:1; /* 0:32bit 1:128bit,read support 128 */ +#define NBL_ACC_SIZE_32B (0) +#define NBL_ACC_SIZE_128B (1) + u32 rw:1; /* 1:read 0:write */ +#define NBL_ACC_MODE_READ (1) +#define NBL_ACC_MODE_WRITE (0) + u32 start:1; /* enable indirect access */ + } info; +#define NBL_FEM_HT_ACC_CTRL_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_ctrl)) + u8 data[NBL_FEM_HT_ACC_CTRL_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_ACC_CTRL (NBL_PPE_FEM_BASE + 0x00000300) + +union nbl_fem_ht_acc_data_u { + struct nbl_fem_ht_acc_data { + u32 kt_index:17; + u32 hash:14; + u32 vld:1; + } info; +#define NBL_FEM_HT_ACC_DATA_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_data)) + u8 data[NBL_FEM_HT_ACC_DATA_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000308) + +union nbl_fem_ht_acc_ack_u { + struct nbl_fem_ht_acc_ack { + u32 done:1; /* indirect access is finished */ + u32 status:1; /* indirect access is error */ + u32 rsv:30; + } info; +#define NBL_FEM_HT_ACC_ACK_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_ack)) + u8 data[NBL_FEM_HT_ACC_ACK_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_ACC_ACK (NBL_PPE_FEM_BASE + 0x00000304) + +union nbl_fem_kt_acc_ctrl_u { + struct nbl_fem_kt_acc_ctrl { + u32 addr:17; /* kt-index */ + u32 rsv:12; + u32 access_size:1; +#define NBL_ACC_SIZE_160B (0) +#define NBL_ACC_SIZE_320B (1) + u32 rw:1; /* 1:read 0:write */ + u32 start:1; /* enable ,indirect access */ + } info; +#define NBL_FEM_KT_ACC_CTRL_TBL_WIDTH (sizeof(struct nbl_fem_kt_acc_ctrl)) + u8 data[NBL_FEM_KT_ACC_CTRL_TBL_WIDTH]; +}; + +#define NBL_FEM_KT_ACC_CTRL (NBL_PPE_FEM_BASE + 0x00000340) + +union nbl_fem_kt_acc_ack_u { + struct nbl_fem_kt_acc_ack { + u32 done:1; /* indirect access is finished */ + u32 status:1; /* indirect access is error */ + u32 rsv:30; + } info; +#define NBL_FEM_KT_ACC_ACK_TBL_WIDTH (sizeof(struct nbl_fem_kt_acc_ack)) + u8 data[NBL_FEM_KT_ACC_ACK_TBL_WIDTH]; +}; + +#define NBL_FEM_KT_ACC_ACK (NBL_PPE_FEM_BASE + 0x00000344) + +union nbl_search_ctrl_u { + struct nbl_search_ctrl { + u32 rsv:31; + u32 start:1; + } info; +#define NBL_SEARCH_CTRL_WIDTH (sizeof(struct nbl_search_ctrl)) + u8 data[NBL_SEARCH_CTRL_WIDTH]; +}; + +union nbl_search_ack_u { + struct nbl_search_ack { + u32 done:1; + u32 status:1; + u32 rsv:30; + } info; +#define NBL_SEARCH_ACK_WIDTH (sizeof(struct nbl_search_ack)) + u8 data[NBL_SEARCH_ACK_WIDTH]; +}; + +#define NBL_FEM_EM0_TCAM_TABLE_ADDR (0xa0b000) +#define NBL_FEM_EM_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM_TCAM_TABLE_WIDTH (256) + +union fem_em_tcam_table_u { + struct fem_em_tcam_table { + u32 key[5]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* 
[161] Default:0x0 RW */ + u32 rsv:30; /* [191:162] Default:0x0 RO */ + u32 rsv1[2]; /* [255:192] Default:0x0 RO */ + } info; + u32 data[NBL_FEM_EM_TCAM_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_tcam_table)]; +}; + +#define NBL_FEM_EM_TCAM_TABLE_REG(r, t) (NBL_FEM_EM0_TCAM_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_TCAM_TABLE_WIDTH / 8) * (t)) + +#define NBL_FEM_EM0_AD_TABLE_ADDR (0xa08000) +#define NBL_FEM_EM_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM_AD_TABLE_WIDTH (512) + +union fem_em_ad_table_u { + struct fem_em_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv[5]; /* [511:352] Default:0x0 RO */ + } info; + u32 data[NBL_FEM_EM_AD_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_ad_table)]; +}; + +#define NBL_FEM_EM_AD_TABLE_REG(r, t) (NBL_FEM_EM0_AD_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_AD_TABLE_WIDTH / 8) * (t)) + +#define NBL_FLOW_TCAM_TOTAL_LEN 32 +#define NBL_FLOW_AD_TOTAL_LEN 64 + +struct nbl_mcc_tbl { + u32 dport_act:16; + u32 dqueue_act:11; + u32 dqueue_en:1; + u32 dqueue_rsv:4; + u32 stateid_act:11; + u32 stateid_filter:1; + u32 flowid_filter:1; + u32 stateid_rsv:3; + u32 next_pntr:13; + u32 tail:1; + u32 vld:1; + u32 rsv:1; +}; + +union nbl_fem_ht_size_table_u { + struct nbl_fem_ht_size_table { + u32 pp0_size:5; + u32 rsv0:3; + u32 pp1_size:5; + u32 rsv1:3; + u32 pp2_size:5; + u32 rsv2:11; + } info; +#define NBL_FEM_HT_SIZE_TBL_WIDTH (sizeof(struct nbl_fem_ht_size_table)) + u8 data[NBL_FEM_HT_SIZE_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_SIZE_REG (NBL_PPE_FEM_BASE + 0x0000011c) + +union nbl_fem_profile_tbl_u { + struct fem_profile_tbl { + u32 pt_cmd:1; + u32 pt_key_size:1; + u32 pt_mask_bmap0:30; + u32 pt_mask_bmap1; + u32 pt_mask_bmap2:18; + u32 pt_hash_sel0:2; + u32 pt_hash_sel1:2; + u32 pt_action0:16; + u32 pt_action0_id:6; + u32 fwd_queue:16; + u32 pt_action1_id:6; + u32 pt_action2:22; + u32 pt_action3:22; + u32 pt_action4:22; + u32 pt_action5:22; + u32 pt_action6:22; + u32 pt_action7:22; + u32 pt_act_num:4; + u32 pt_vld:1; + u32 rsv0:21; + u32 rsv1[7]; + } info; +#define NBL_FEM_PROFILE_TBL_WIDTH (sizeof(struct fem_profile_tbl)) + u8 data[NBL_FEM_PROFILE_TBL_WIDTH]; +}; + +#define NBL_FEM0_PROFILE_TABLE(t) (NBL_PPE_FEM_BASE + 0x00001000 + \ + (NBL_FEM_PROFILE_TBL_WIDTH) * (t)) + +/* ---------- REG BASE ADDR ---------- */ +#define NBL_LB_PCIEX16_TOP_BASE (0x01500000) +/* PPE modules base addr */ +#define NBL_PPE_FEM_BASE (0x00a04000) +#define NBL_PPE_IPRO_BASE (0x00b04000) +#define NBL_PPE_PP0_BASE (0x00b14000) +#define NBL_PPE_PP1_BASE (0x00b24000) +#define NBL_PPE_PP2_BASE (0x00b34000) +#define NBL_PPE_MCC_BASE (0x00b44000) +#define NBL_PPE_ACL_BASE (0x00b64000) +#define NBL_PPE_CAP_BASE (0x00e64000) +#define NBL_PPE_EPRO_BASE (0x00e74000) +#define NBL_PPE_DPRBAC_BASE (0x00904000) +#define NBL_PPE_UPRBAC_BASE 
(0x0000C000) +/* Interface modules base addr */ +#define NBL_INTF_HOST_PCOMPLETER_BASE (0x00f08000) +#define NBL_INTF_HOST_PADPT_BASE (0x00f4c000) +#define NBL_INTF_HOST_CTRLQ_BASE (0x00f8c000) +#define NBL_INTF_HOST_VDPA_NET_BASE (0x00f98000) +#define NBL_INTF_HOST_CMDQ_BASE (0x00fa0000) +#define NBL_INTF_HOST_MAILBOX_BASE (0x00fb0000) +#define NBL_INTF_HOST_PCIE_BASE (0X01504000) +#define NBL_INTF_HOST_PCAP_BASE (0X015a4000) +/* DP modules base addr */ +#define NBL_DP_URMUX_BASE (0x00008000) +#define NBL_DP_UPRBAC_BASE (0x0000C000) +#define NBL_DP_UPA_BASE (0x0008C000) +#define NBL_DP_USTORE_BASE (0x00104000) +#define NBL_DP_UPMEM_BASE (0x00108000) +#define NBL_DP_UBM_BASE (0x0010c000) +#define NBL_DP_UQM_BASE (0x00114000) +#define NBL_DP_USTAT_BASE (0x0011c000) +#define NBL_DP_UPED_BASE (0x0015c000) +#define NBL_DP_UCAR_BASE (0x00e84000) +#define NBL_DP_UL4S_BASE (0x00204000) +#define NBL_DP_UVN_BASE (0x00244000) +#define NBL_DP_DSCH_BASE (0x00404000) +#define NBL_DP_SHAPING_BASE (0x00504000) +#define NBL_DP_DVN_BASE (0x00514000) +#define NBL_DP_DL4S_BASE (0x00614000) +#define NBL_DP_DRMUX_BASE (0x00654000) +#define NBL_DP_DSTORE_BASE (0x00704000) +#define NBL_DP_DPMEM_BASE (0x00708000) +#define NBL_DP_DBM_BASE (0x0070c000) +#define NBL_DP_DQM_BASE (0x00714000) +#define NBL_DP_DSTAT_BASE (0x0071c000) +#define NBL_DP_DPED_BASE (0x0075c000) +#define NBL_DP_DPA_BASE (0x0085c000) +#define NBL_DP_DPRBAC_BASE (0x00904000) +#define NBL_DP_DDMUX_BASE (0x00984000) +#define NBL_DP_LB_DDP_BUF_BASE (0x00000000) +#define NBL_DP_LB_DDP_OUT_BASE (0x00000000) +#define NBL_DP_LB_DDP_DIST_BASE (0x00000000) +#define NBL_DP_LB_DDP_IN_BASE (0x00000000) +#define NBL_DP_LB_UDP_BUF_BASE (0x00000000) +#define NBL_DP_LB_UDP_OUT_BASE (0x00000000) +#define NBL_DP_LB_UDP_DIST_BASE (0x00000000) +#define NBL_DP_LB_UDP_IN_BASE (0x00000000) +#define NBL_DP_DL4S_BASE (0x00614000) +#define NBL_DP_UL4S_BASE (0x00204000) + +/* -------- LB -------- */ +#define NBL_LB_PF_CONFIGSPACE_SELECT_OFFSET (0x81100000) +#define NBL_LB_PF_CONFIGSPACE_SELECT_STRIDE (0x00100000) +#define NBL_LB_PF_CONFIGSPACE_BASE_ADDR (NBL_LB_PCIEX16_TOP_BASE + 0x00024000) +#define NBL_LB_PCIEX16_TOP_AHB (NBL_LB_PCIEX16_TOP_BASE + 0x00000020) + +/* -------- MAILBOX BAR2 ----- */ +#define NBL_MAILBOX_NOTIFY_ADDR (0x00000000) +#define NBL_MAILBOX_BAR_REG (0x00000000) +#define NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR (0x10) +#define NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR (0x20) +#define NBL_MAILBOX_QINFO_CFG_DBG_TABLE_ADDR (0x30) + +/* -------- ADMINQ BAR2 ----- */ +#define NBL_ADMINQ_NOTIFY_ADDR (0x40) +#define NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR (0x50) +#define NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR (0x60) +#define NBL_ADMINQ_QINFO_CFG_DBG_TABLE_ADDR (0x78) +#define NBL_ADMINQ_MSIX_MAP_TABLE_ADDR (0x80) + +/* -------- MAILBOX -------- */ + +/* mailbox BAR qinfo_cfg_dbg_table */ +struct nbl_mailbox_qinfo_cfg_dbg_tbl { + u16 rx_drop; + u16 rx_get; + u16 tx_drop; + u16 tx_out; + u16 rx_hd_ptr; + u16 tx_hd_ptr; + u16 rx_tail_ptr; + u16 tx_tail_ptr; +}; + +/* mailbox BAR qinfo_cfg_table */ +struct nbl_mailbox_qinfo_cfg_table { + u32 queue_base_addr_l; + u32 queue_base_addr_h; + u32 queue_size_bwind:4; + u32 rsv1:28; + u32 queue_rst:1; + u32 queue_en:1; + u32 dif_err:1; + u32 ptr_err:1; + u32 rsv2:28; +}; + +/* -------- ADMINQ -------- */ + +struct nbl_adminq_qinfo_map_table { + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 msix_idx:13; + u32 msix_idx_vaild:1; + u32 rsv:2; +}; + +/* adminq BAR qinfo_cfg_dbg_table */ +struct nbl_adminq_qinfo_cfg_dbg_tbl { + u16 
rx_hd_ptr; + u16 tx_hd_ptr; + u16 rx_tail_ptr; + u16 tx_tail_ptr; +}; + +/* -------- MAILBOX BAR0 ----- */ +/* mailbox qinfo_map_table */ +#define NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id) \ + (NBL_INTF_HOST_MAILBOX_BASE + 0x00001000 + \ + (func_id) * sizeof(struct nbl_mailbox_qinfo_map_table)) + +/* MAILBOX qinfo_map_table */ +struct nbl_mailbox_qinfo_map_table { + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 msix_idx:13; + u32 msix_idx_vaild:1; + u32 rsv:2; +}; + +/* -------- HOST_PCIE -------- */ +#define NBL_PCIE_HOST_K_PF_MASK_REG (NBL_INTF_HOST_PCIE_BASE + 0x00001004) +#define NBL_PCIE_HOST_K_PF_FID(pf_id) \ + (NBL_INTF_HOST_PCIE_BASE + 0x0000106C + 4 * (pf_id)) + +/* -------- HOST_PADPT -------- */ +#define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN (NBL_INTF_HOST_PADPT_BASE + 0x00000160) +#define NBL_HOST_PADPT_HOST_CFG_FC_PH_DN (NBL_INTF_HOST_PADPT_BASE + 0x00000164) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN (NBL_INTF_HOST_PADPT_BASE + 0x0000016C) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP (NBL_INTF_HOST_PADPT_BASE + 0x00000170) +/* host_padpt host_msix_info */ +#define NBL_PADPT_ABNORMAL_MSIX_VEC (NBL_INTF_HOST_PADPT_BASE + 0x00000200) +#define NBL_PADPT_ABNORMAL_TIMEOUT (NBL_INTF_HOST_PADPT_BASE + 0x00000204) +#define NBL_PADPT_HOST_MSIX_INFO_REG_ARR(vector_id) \ + (NBL_INTF_HOST_PADPT_BASE + 0x00010000 + (vector_id) * sizeof(struct nbl_host_msix_info)) +/* host_padpt host_vnet_qinfo */ +#define NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id) \ + (NBL_INTF_HOST_PADPT_BASE + 0x00008000 + (queue_id) * sizeof(struct nbl_host_vnet_qinfo)) + +struct nbl_host_msix_info { + u32 intrl_pnum:16; + u32 intrl_rate:16; + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 valid:1; + u32 msix_mask_en:1; + u32 rsv:14; +}; + +struct nbl_abnormal_msix_vector { + u32 idx:16; + u32 vld:1; + u32 rsv:15; +}; + +/* host_padpt host_vnet_qinfo */ +struct nbl_host_vnet_qinfo { + u32 function_id:3; + u32 device_id:5; + u32 bus_id:8; + u32 msix_idx:13; + u32 msix_idx_valid:1; + u32 log_en:1; + u32 valid:1; + u32 tph_en:1; + u32 ido_en:1; + u32 rlo_en:1; + u32 rsv0:29; +}; + +struct nbl_msix_notify { + u32 glb_msix_idx:13; + u32 rsv1:3; + u32 mask:1; + u32 rsv2:15; +}; + +/* -------- HOST_PCOMPLETER -------- */ +/* pcompleter_host pcompleter_host_virtio_qid_map_table */ +#define NBL_PCOMPLETER_QID_MAP_REG_ARR(select, i) \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00010000 + \ + (select) * NBL_QID_MAP_TABLE_ENTRIES * sizeof(struct nbl_virtio_qid_map_table) + \ + (i) * sizeof(struct nbl_virtio_qid_map_table)) +#define NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(i) \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00004000 + (i) * sizeof(struct nbl_function_msix_map)) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE(i) \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x0003a000 + \ + (i) * sizeof(struct nbl_pcompleter_host_msix_fid_table)) +#define NBL_PCOMPLETER_INT_STATUS (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00000000) +#define NBL_PCOMPLETER_TLP_OUT_DROP_CNT (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00002430) + +/* pcompleter_host pcompleter_host_virtio_table_ready */ +#define NBL_PCOMPLETER_QUEUE_TABLE_READY_REG \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x0000110C) +/* pcompleter_host pcompleter_host_virtio_table_select */ +#define NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00001110) + +#define NBL_PCOMPLETER_MSIX_NOTIRY_OFFSET (0x1020) + +#define NBL_REG_WRITE_MAX_TRY_TIMES 2 + +/* pcompleter_host virtio_qid_map_table */ +struct nbl_virtio_qid_map_table { + u32 local_qid:9; + u32 notify_addr_l:23; + u32 notify_addr_h; + u32 
global_qid:12; + u32 ctrlq_flag:1; + u32 rsv1:19; + u32 rsv2; +}; + +struct nbl_pcompleter_host_msix_fid_table { + u32 fid:10; + u32 vld:1; + u32 rsv:21; +}; + +struct nbl_function_msix_map { + u64 msix_map_base_addr; + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 valid:1; + u32 rsv0:15; + u32 rsv1; +}; + +struct nbl_queue_table_select { + u32 select:1; + u32 rsv:31; +}; + +struct nbl_queue_table_ready { + u32 ready:1; + u32 rsv:31; +}; + +/* IPRO ipro_queue_tbl */ +struct nbl_ipro_queue_tbl { + u32 vsi_id:10; + u32 vsi_en:1; + u32 rsv:21; +}; + +/* -------- HOST_PCAP -------- */ +#define NBL_HOST_PCAP_TX_CAP_EN (NBL_INTF_HOST_PCAP_BASE + 0x00000200) +#define NBL_HOST_PCAP_TX_CAP_STORE (NBL_INTF_HOST_PCAP_BASE + 0x00000204) +#define NBL_HOST_PCAP_TX_CAP_STALL (NBL_INTF_HOST_PCAP_BASE + 0x00000208) +#define NBL_HOST_PCAP_RX_CAP_EN (NBL_INTF_HOST_PCAP_BASE + 0x00000800) +#define NBL_HOST_PCAP_RX_CAP_STORE (NBL_INTF_HOST_PCAP_BASE + 0x00000804) +#define NBL_HOST_PCAP_RX_CAP_STALL (NBL_INTF_HOST_PCAP_BASE + 0x00000808) + +/* ---------- DPED ---------- */ +#define NBL_DPED_VLAN_OFFSET (NBL_DP_DPED_BASE + 0x000003F4) +#define NBL_DPED_DSCP_OFFSET_0 (NBL_DP_DPED_BASE + 0x000003F8) +#define NBL_DPED_DSCP_OFFSET_1 (NBL_DP_DPED_BASE + 0x000003FC) + +/* DPED dped_hw_edt_prof */ +#define NBL_DPED_HW_EDT_PROF_TABLE(i) \ + (NBL_DP_DPED_BASE + 0x00001000 + (i) * sizeof(struct ped_hw_edit_profile)) +/* DPED dped_l4_ck_cmd_40 */ + +/* DPED hw_edt_prof/ UPED hw_edt_prof */ +struct ped_hw_edit_profile { + u32 l4_len:2; +#define NBL_PED_L4_LEN_MDY_CMD_0 (0) +#define NBL_PED_L4_LEN_MDY_CMD_1 (1) +#define NBL_PED_L4_LEN_MDY_DISABLE (2) + u32 l3_len:2; +#define NBL_PED_L3_LEN_MDY_CMD_0 (0) +#define NBL_PED_L3_LEN_MDY_CMD_1 (1) +#define NBL_PED_L3_LEN_MDY_DISABLE (2) + u32 l4_ck:3; +#define NBL_PED_L4_CKSUM_CMD_0 (0) +#define NBL_PED_L4_CKSUM_CMD_1 (1) +#define NBL_PED_L4_CKSUM_CMD_2 (2) +#define NBL_PED_L4_CKSUM_CMD_3 (3) +#define NBL_PED_L4_CKSUM_CMD_4 (4) +#define NBL_PED_L4_CKSUM_CMD_5 (5) +#define NBL_PED_L4_CKSUM_CMD_6 (6) +#define NBL_PED_L4_CKSUM_DISABLE (7) + u32 l3_ck:1; +#define NBL_PED_L3_CKSUM_ENABLE (1) +#define NBL_PED_L3_CKSUM_DISABLE (0) + u32 l4_ck_zero_free:1; +#define NBL_PED_L4_CKSUM_ZERO_FREE_ENABLE (1) +#define NBL_PED_L4_CKSUM_ZERO_FREE_DISABLE (0) + u32 rsv:23; +}; + +struct nbl_ped_hw_edit_profile_cfg { + u32 table_id; + struct ped_hw_edit_profile edit_prf; +}; + +/* ---------- UPED ---------- */ +/* UPED uped_hw_edt_prof */ +#define NBL_UPED_HW_EDT_PROF_TABLE(i) \ + (NBL_DP_UPED_BASE + 0x00001000 + (i) * sizeof(struct ped_hw_edit_profile)) + +/* --------- SHAPING --------- */ +#define NBL_SHAPING_NET_TIMMING_ADD_ADDR (NBL_DP_SHAPING_BASE + 0x00000300) +#define NBL_SHAPING_NET(i) \ + (NBL_DP_SHAPING_BASE + 0x00001800 + (i) * sizeof(struct nbl_shaping_net)) + +/* cir 1, bandwidth 1kB/s in protol environment */ +/* cir 1, bandwidth 1Mb/s */ +#define NBL_LR_LEONIS_SYS_CLK 15000.0 /* 0105tag Khz */ +#define NBL_LR_LEONIS_NET_SHAPING_CYCLE_MAX 25 +#define NBL_LR_LEONIS_NET_SHAPING_DPETH 600 +#define NBL_LR_LEONIS_NET_BUCKET_DEPTH 9600 + +#define NBL_SHAPING_DPORT_25G_RATE 0x601E +#define NBL_SHAPING_DPORT_HALF_25G_RATE 0x300F + +#define NBL_SHAPING_DPORT_100G_RATE 0x1A400 +#define NBL_SHAPING_DPORT_HALF_100G_RATE 0xD200 + +#define NBL_DSTORE_DROP_XOFF_TH 0xC8 +#define NBL_DSTORE_DROP_XON_TH 0x64 + +#define NBL_DSTORE_DROP_XOFF_TH_100G 0x1F4 +#define NBL_DSTORE_DROP_XON_TH_100G 0x12C + +#define NBL_DSTORE_DROP_XOFF_TH_BOND_MAIN 0x180 +#define NBL_DSTORE_DROP_XON_TH_BOND_MAIN 0x180 + 
+#define NBL_DSTORE_DROP_XOFF_TH_BOND_OTHER 0x64 +#define NBL_DSTORE_DROP_XON_TH_BOND_OTHER 0x64 + +#define NBL_DSTORE_DROP_XOFF_TH_100G_BOND_MAIN 0x2D5 +#define NBL_DSTORE_DROP_XON_TH_100G_BOND_MAIN 0x2BC + +#define NBL_DSTORE_DROP_XOFF_TH_100G_BOND_OTHER 0x145 +#define NBL_DSTORE_DROP_XON_TH_100G_BOND_OTHER 0x12C + +#define NBL_DSTORE_DISC_BP_TH (NBL_DP_DSTORE_BASE + 0x00000630) + +struct dstore_disc_bp_th { + u32 xoff_th:10; + u32 rsv1:6; + u32 xon_th:10; + u32 rsv:5; + u32 en:1; +}; + +struct nbl_shaping_net_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x8 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 depth:12; /* [27:16] Default:0x258 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ +}; + +/* DSCH dsch_vn_sha2net_map_tbl */ +struct dsch_vn_sha2net_map_tbl { + u32 vld:1; + u32 reserve:31; +}; + +/* DSCH dsch_vn_net2sha_map_tbl */ +struct dsch_vn_net2sha_map_tbl { + u32 vld:1; + u32 reserve:31; +}; + +struct dsch_psha_en { + u32 en:4; + u32 rsv:28; +}; + +/* SHAPING shaping_net */ +struct nbl_shaping_net { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +struct nbl_shaping_dport { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +struct nbl_shaping_dvn_dport { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +struct nbl_shaping_rdma_dport { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +/* ---------- DSCH ---------- */ +/* DSCH vn_host_qid_max */ +#define NBL_DSCH_NOTIFY_BITMAP_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00003000 + (i) * BYTES_PER_DWORD) +#define NBL_DSCH_FLY_BITMAP_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00004000 + (i) * BYTES_PER_DWORD) +#define NBL_DSCH_PORT_MAP_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00005000 + (i) * sizeof(struct nbl_port_map)) +/* DSCH dsch_vn_q2tc_cfg_tbl */ +#define NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00010000 + (i) * sizeof(struct dsch_vn_q2tc_cfg_tbl)) +/* DSCH dsch_vn_n2g_cfg_tbl */ +#define NBL_DSCH_VN_N2G_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00060000 + (i) * sizeof(struct dsch_vn_n2g_cfg_tbl)) +/* DSCH dsch_vn_g2p_cfg_tbl */ +#define NBL_DSCH_VN_G2P_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00064000 + (i) * sizeof(struct dsch_vn_g2p_cfg_tbl)) +/* DSCH dsch_vn_tc_wgt_cfg_tbl */ +#define NBL_DSCH_VN_TC_WGT_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00068000 + (i) * sizeof(union dsch_vn_tc_wgt_cfg_tbl_u)) +/* DSCH dsch_vn_sha2net_map_tbl */ +#define NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00070000 + (i) * sizeof(struct dsch_vn_sha2net_map_tbl)) +/* DSCH dsch_vn_net2sha_map_tbl */ +#define NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00074000 + (i) * sizeof(struct dsch_vn_net2sha_map_tbl)) +/* DSCH dsch_vn_tc_q_list_tbl */ +#define NBL_DSCH_VN_TC_Q_LIST_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00040000 + (i) * sizeof(struct dsch_vn_tc_q_list_tbl)) +/* DSCH dsch maxqid */ +#define NBL_DSCH_HOST_QID_MAX (NBL_DP_DSCH_BASE + 0x00000118) +#define NBL_DSCH_VN_QUANTA_ADDR (NBL_DP_DSCH_BASE + 0x00000134) +#define NBL_DSCH_INT_STATUS (NBL_DP_DSCH_BASE + 0x00000000) +#define NBL_DSCH_RDMA_OTHER_ABN (NBL_DP_DSCH_BASE + 0x00000080) +#define NBL_DSCH_RDMA_OTHER_ABN_BIT (0x4000) +#define NBL_DSCH_RDMA_DPQM_DB_LOST (2) + +#define NBL_MAX_QUEUE_ID (0x7ff) +#define NBL_HOST_QUANTA (0x8000) +#define NBL_ECPU_QUANTA (0x1000) + +/* DSCH 
dsch_vn_q2tc_cfg_tbl */ +struct dsch_vn_q2tc_cfg_tbl { + u32 tcid:13; + u32 rsv:18; + u32 vld:1; +}; + +/* DSCH dsch_vn_n2g_cfg_tbl */ +struct dsch_vn_n2g_cfg_tbl { + u32 grpid:8; + u32 rsv:23; + u32 vld:1; +}; + +/* DSCH dsch_vn_tc_qlist_tbl */ +struct dsch_vn_tc_q_list_tbl { + u32 nxt:11; + u32 reserve:18; + u32 regi:1; + u32 fly:1; + u32 vld:1; +}; + +/* DSCH dsch_vn_g2p_cfg_tbl */ +struct dsch_vn_g2p_cfg_tbl { + u32 port:3; + u32 rsv:28; + u32 vld:1; +}; + +/* DSCH dsch_vn_tc_wgt_cfg_tbl */ +union dsch_vn_tc_wgt_cfg_tbl_u { + struct dsch_vn_tc_wgt_cfg_tbl { + u8 tc0_wgt; + u8 tc1_wgt; + u8 tc2_wgt; + u8 tc3_wgt; + u8 tc4_wgt; + u8 tc5_wgt; + u8 tc6_wgt; + u8 tc7_wgt; + } info; +#define NBL_DSCH_VN_TC_WGT_CFG_TBL_WIDTH (sizeof(struct dsch_vn_tc_wgt_cfg_tbl)) + u8 data[NBL_DSCH_VN_TC_WGT_CFG_TBL_WIDTH]; +}; + +struct dsch_vn_quanta { + u32 h_qua:16; + u32 e_qua:16; +}; + +/* ---------- DVN ---------- */ + +struct nbl_dvn_stat_cnt { + u32 dvn_desc_fwd_cnt:16; + u32 rsv0:16; + u32 dvn_desc_drop_cnt:16; + u32 rsv1:16; + u32 dvn_pkt_fwd_cnt:16; + u32 rsv2:16; + u32 dvn_pkt_drop_cnt:16; + u32 rsv3:16; + u32 rsv4[4]; +}; + +/* DVN dvn_queue_table */ +#define NBL_DVN_QUEUE_TABLE_ARR(i) \ + (NBL_DP_DVN_BASE + 0x00020000 + (i) * sizeof(struct dvn_queue_table)) +#define NBL_DVN_QUEUE_CXT_TABLE_ARR(i) \ + (NBL_DP_DVN_BASE + 0x00030000 + (i) * sizeof(struct dvn_queue_context)) +#define NBL_DVN_STAT_CNT(i) (NBL_DP_DVN_BASE + 0x00040000 + (i) * sizeof(struct nbl_dvn_stat_cnt)) +/* DVN dvn_queue_reset */ +#define NBL_DVN_QUEUE_RESET_REG (NBL_DP_DVN_BASE + 0x00000400) +/* DVN dvn_queue_reset_done */ +#define NBL_DVN_QUEUE_RESET_DONE_REG (NBL_DP_DVN_BASE + 0x00000404) +#define NBL_DVN_ECPU_QUEUE_NUM (NBL_DP_DVN_BASE + 0x0000041C) +#define NBL_DVN_DESCREQ_NUM_CFG (NBL_DP_DVN_BASE + 0x00000430) +#define NBL_DVN_DESC_WR_MERGE_TIMEOUT (NBL_DP_DVN_BASE + 0x00000480) +#define NBL_DVN_DIF_REQ_RD_RO_FLAG (NBL_DP_DVN_BASE + 0x0000045C) +#define NBL_DVN_INT_STATUS (NBL_DP_DVN_BASE + 0x00000000) +#define NBL_DVN_DESC_DIF_ERR_CNT (NBL_DP_DVN_BASE + 0x0000003C) +#define NBL_DVN_DESC_DIF_ERR_INFO (NBL_DP_DVN_BASE + 0x00000038) +#define NBL_DVN_PKT_DIF_ERR_INFO (NBL_DP_DVN_BASE + 0x00000030) +#define NBL_DVN_PKT_DIF_ERR_CNT (NBL_DP_DVN_BASE + 0x00000034) +#define NBL_DVN_ERR_QUEUE_ID_GET (NBL_DP_DVN_BASE + 0x0000040C) +#define NBL_DVN_BACK_PRESSURE_MASK (NBL_DP_DVN_BASE + 0x00000464) + +#define DEFAULT_DVN_DESCREQ_NUMCFG (0x00080014) +#define DEFAULT_DVN_100G_DESCREQ_NUMCFG (0x00080020) + +#define NBL_DVN_INT_PKT_DIF_ERR (4) +#define DEFAULT_DVN_DESC_WR_MERGE_TIMEOUT_MAX (0x3FF) + +#define NBL_DVN_INT_DESC_DIF_ERR (5) + +struct nbl_dvn_descreq_num_cfg { + u32 avring_cfg_num:1; /* spilit ring descreq_num 0:8,1:16 */ + u32 rsv0:3; + u32 packed_l1_num:3; /* packet ring descreq_num 0:8,1:12,2:16;3:20,4:24,5:26;6:32,7:32 */ + u32 rsv1:25; +}; + +struct nbl_dvn_desc_wr_merge_timeout { + u32 cfg_cycle:10; + u32 rsv:22; +}; + +struct nbl_dvn_dif_req_rd_ro_flag { + u32 rd_desc_ro_en:1; + u32 rd_data_ro_en:1; + u32 rd_avring_ro_en:1; + u32 rsv:29; +}; + +/* DVN dvn_queue_table */ +struct dvn_queue_table { + u64 dvn_used_baddr; + u64 dvn_avail_baddr; + u64 dvn_queue_baddr; + u32 dvn_queue_size:4; + u32 dvn_queue_type:1; + u32 dvn_queue_en:1; + u32 dvn_extend_header_en:1; + u32 dvn_interleave_seg_disable:1; + u32 dvn_seg_disable:1; + u32 rsv0:23; + u32 rsv1:32; +}; + +/* DVN dvn_queue_context */ +struct dvn_queue_context { + u32 dvn_descrd_num:3; + u32 dvn_firstdescid:16; + u32 dvn_firstdesc:16; + u32 dvn_indirect_len:6; + u64 
dvn_indirect_addr:64; + u32 dvn_indirect_next:5; + u32 dvn_l1_ring_read:16; + u32 dvn_avail_ring_read:16; + u32 dvn_ring_wrap_counter:1; + u32 dvn_lso_id:10; + u32 dvn_avail_ring_idx:16; + u32 dvn_used_ring_idx:16; + u32 dvn_indirect_left:1; + u32 dvn_desc_left:1; + u32 dvn_lso_flag:1; + u32 dvn_descrd_disable:1; + u32 dvn_queue_err:1; + u32 dvn_lso_drop:1; + u32 dvn_protected_bit:1; + u64 reserve; +}; + +/* DVN dvn_queue_reset */ +struct nbl_dvn_queue_reset { + u32 dvn_queue_index:11; + u32 vld:1; + u32 rsv:20; +}; + +/* DVN dvn_queue_reset_done */ +struct nbl_dvn_queue_reset_done { + u32 flag:1; + u32 rsv:31; +}; + +/* DVN dvn_desc_dif_err_info */ +struct dvn_desc_dif_err_info { + u32 queue_id:11; + u32 rsv:21; +}; + +struct dvn_pkt_dif_err_info { + u32 queue_id:11; + u32 rsv:21; +}; + +struct dvn_err_queue_id_get { + u32 pkt_flag:1; + u32 desc_flag:1; + u32 rsv:30; +}; + +struct dvn_back_pressure_mask { + u32 l4s_flag:1; + u32 dsch_flag:1; + u32 dstore_port0_flag:1; + u32 dstore_port1_flag:1; + u32 dstore_port2_flag:1; + u32 dstore_port3_flag:1; + u32 rsv:26; +}; + +/* ---------- UVN ---------- */ +/* UVN uvn_queue_table */ +#define NBL_UVN_QUEUE_TABLE_ARR(i) \ + (NBL_DP_UVN_BASE + 0x00010000 + (i) * sizeof(struct uvn_queue_table)) +/* UVN uvn_queue_cxt */ +#define NBL_UVN_QUEUE_CXT_TABLE_ARR(i) \ + (NBL_DP_UVN_BASE + 0x00020000 + (i) * sizeof(struct uvn_queue_cxt)) +/* UVN uvn_desc_cxt */ +#define NBL_UVN_DESC_CXT_TABLE_ARR(i) \ + (NBL_DP_UVN_BASE + 0x00028000 + (i) * sizeof(struct uvn_desc_cxt)) +/* UVN uvn_queue_reset */ +#define NBL_UVN_QUEUE_RESET_REG (NBL_DP_UVN_BASE + 0x00000200) +/* UVN uvn_queue_reset_done */ +#define NBL_UVN_QUEUE_RESET_DONE_REG (NBL_DP_UVN_BASE + 0x00000408) +#define NBL_UVN_STATIS_PKT_DROP(i) (NBL_DP_UVN_BASE + 0x00038000 + (i) * sizeof(u32)) +#define NBL_UVN_INT_STATUS (NBL_DP_UVN_BASE + 0x00000000) +#define NBL_UVN_QUEUE_ERR_INFO (NBL_DP_UVN_BASE + 0x00000034) +#define NBL_UVN_QUEUE_ERR_CNT (NBL_DP_UVN_BASE + 0x00000038) +#define NBL_UVN_DESC_RD_WAIT (NBL_DP_UVN_BASE + 0x0000020C) +#define NBL_UVN_QUEUE_ERR_MASK (NBL_DP_UVN_BASE + 0x00000224) +#define NBL_UVN_ECPU_QUEUE_NUM (NBL_DP_UVN_BASE + 0x0000023C) +#define NBL_UVN_DESC_WR_TIMEOUT (NBL_DP_UVN_BASE + 0x00000214) +#define NBL_UVN_DESC_RD_ENTRY (NBL_DP_UVN_BASE + 0x000012D0) +#define NBL_UVN_DIF_REQ_RO_FLAG (NBL_DP_UVN_BASE + 0x00000250) +#define NBL_UVN_DESC_WR_TIMEOUT_4US (0x960) + +#define NBL_UVN_INT_QUEUE_ERR (5) + +struct uvn_dif_req_ro_flag { + u32 avail_rd:1; + u32 desc_rd:1; + u32 pkt_wr:1; + u32 desc_wr:1; + u32 rsv:28; +}; + +/* UVN uvn_queue_table */ +struct uvn_queue_table { + u64 used_baddr; + u64 avail_baddr; + u64 queue_baddr; + u32 queue_size_mask_pow:4; + u32 queue_type:1; + u32 queue_enable:1; + u32 extend_header_en:1; + u32 guest_csum_en:1; + u32 half_offload_en:1; + u32 rsv0:23; + u32 rsv1:32; +}; + +/* uvn uvn_queue_cxt */ +struct uvn_queue_cxt { + u32 queue_head:16; + u32 wrap_count:1; + u32 queue_err:1; + u32 prefetch_null_cnt:2; + u32 ntf_finish:1; + u32 spnd_flag:1; + u32 reserve0:10; + u32 avail_idx:16; + u32 avail_idx_spnd_flag:1; + u32 reserve1:15; + u32 reserve2[2]; +}; + +/* uvn uvn_queue_reset */ +struct nbl_uvn_queue_reset { + u32 index:11; + u32 rsv0:5; + u32 vld:1; + u32 rsv1:15; +}; + +/* uvn uvn_queue_reset_done */ +struct nbl_uvn_queue_reset_done { + u32 flag:1; + u32 rsv:31; +}; + +/* uvn uvn_desc_cxt */ +struct uvn_desc_cxt { + u32 cache_head:9; + u32 reserve0:7; + u32 cache_tail:9; + u32 reserve1:7; + u32 cache_pref_num_prev:9; + u32 reserve2:7; + u32 
cache_pref_num_post:9; + u32 reserve3:7; + u32 cache_head_byte:30; + u32 reserve4:2; + u32 cache_tail_byte:30; + u32 reserve5:2; +}; + +struct uvn_desc_wr_timeout { + u32 num:15; + u32 mask:1; + u32 rsv:16; +}; + +struct uvn_queue_err_info { + u32 queue_id:11; + u32 type:5; + u32 rsv:16; +}; + +struct uvn_queue_err_mask { + u32 rsv0:1; + u32 buffer_len_err:1; + u32 next_err:1; + u32 indirect_err:1; + u32 split_err:1; + u32 dif_err:1; + u32 rsv1:26; +}; + +/* -------- USTORE -------- */ +#define NBL_USTORE_PKT_LEN_ADDR (NBL_DP_USTORE_BASE + 0x00000108) +#define NBL_USTORE_PORT_FC_TH_REG_ARR(port_id) \ + (NBL_DP_USTORE_BASE + 0x00000134 + (port_id) * sizeof(struct nbl_ustore_port_fc_th)) + +#define NBL_USTORE_COS_FC_TH_REG_ARR(cos_id) \ + (NBL_DP_USTORE_BASE + 0x00000200 + (cos_id) * sizeof(struct nbl_ustore_cos_fc_th)) + +#define NBL_USTORE_PORT_DROP_TH_REG_ARR(port_id) \ + (NBL_DP_USTORE_BASE + 0x00000150 + (port_id) * sizeof(struct nbl_ustore_port_drop_th)) + +#define NBL_USTORE_SIGNLE_ETH_DROP_TH 0xC80 +#define NBL_USTORE_DUAL_ETH_DROP_TH 0x640 +#define NBL_USTORE_QUAD_ETH_DROP_TH 0x320 + +/* USTORE pkt_len */ +struct ustore_pkt_len { + u32 min:7; + u32 rsv:8; + u32 min_chk_en:1; + u32 max:14; + u32 rsv2:1; + u32 max_chk_len:1; +}; + +/* USTORE port_fc_th */ +struct nbl_ustore_port_fc_th { + u32 xoff_th:12; + u32 rsv1:4; + u32 xon_th:12; + u32 rsv2:2; + u32 fc_set:1; + u32 fc_en:1; +}; + +/* USTORE cos_fc_th */ +struct nbl_ustore_cos_fc_th { + u32 xoff_th:12; + u32 rsv1:4; + u32 xon_th:12; + u32 rsv2:2; + u32 fc_set:1; + u32 fc_en:1; +}; + +/* USTORE port_drop_th */ +struct nbl_ustore_port_drop_th { + u32 disc_th:12; + u32 rsv:19; + u32 en:1; +}; + +/* ---------- UL4S ---------- */ +#define NBL_UL4S_SCH_PAD_ADDR (NBL_DP_UL4S_BASE + 0x000006c4) + +/* UL4S UL4S_sch_pad */ +struct UL4S_sch_pad { + u32 en:1; + u32 clr:1; + u32 rsv:30; +}; + +/* ---------- IPRO ---------- */ +/* ipro module related macros */ +#define NBL_IPRO_MODULE (0xB04000) +/* ipro queue tbl */ +#define NBL_IPRO_QUEUE_TBL(i) \ + (NBL_IPRO_MODULE + 0x00004000 + (i) * sizeof(struct nbl_ipro_queue_tbl)) +#define NBL_IPRO_UP_SPORT_TABLE(i) \ + (NBL_IPRO_MODULE + 0x00007000 + (i) * sizeof(struct nbl_ipro_upsport_tbl)) +#define NBL_IPRO_DN_SRC_PORT_TABLE(i) \ + (NBL_PPE_IPRO_BASE + 0x00008000 + (i) * sizeof(struct nbl_ipro_dn_src_port_tbl)) + +enum nbl_fwd_type_e { + NBL_FWD_TYPE_NORMAL = 0, + NBL_FWD_TYPE_CPU_ASSIGNED = 1, + NBL_FWD_TYPE_UPCALL = 2, + NBL_FWD_TYPE_SRC_MIRROR = 3, + NBL_FWD_TYPE_OTHER_MIRROR = 4, + NBL_FWD_TYPE_MNG = 5, + NBL_FWD_TYPE_GLB_LB = 6, + NBL_FWD_TYPE_DROP = 7, + NBL_FWD_TYPE_MAX = 8, +}; + +/* IPRO dn_src_port_tbl */ +struct nbl_ipro_dn_src_port_tbl { + u32 entry_vld:1; + u32 mirror_en:1; + u32 mirror_pr:2; + u32 mirror_id:4; + u32 vlan_layer_num_1:2; + u32 phy_flow:1; + u32 not_used_0:4; + u32 addr_check_en:1; + u32 smac_low:16; + u32 smac_high; + u32 dqueue:11; + u32 dqueue_en:1; + u32 dqueue_pri:2; + u32 set_dport_pri:2; + union nbl_action_data set_dport; + u32 set_dport_en:1; + u32 proc_done:1; + u32 not_used_1:6; + u32 rsv:24; +}; + +/* IPRO up sport tab */ +struct nbl_ipro_upsport_tbl { + u32 entry_vld:1; + u32 vlan_layer_num_0:2; + u32 vlan_layer_num_1:2; + u32 lag_vld:1; + u32 lag_id:2; + u32 phy_flow:1; + u32 mirror_en:1; + u32 mirror_pr:2; + u32 mirror_id:4; + u32 dqueue_pri:2; + u32 set_dport_pri:2; + u32 dqueue:11; + u32 dqueue_en:1; + union nbl_action_data set_dport; + u32 set_dport_en:1; + u32 proc_done:1; + u32 car_en:1; + u32 car_pr:2; + u32 car_id:10; + u32 rsv:1; +}; + +/* 
---------- EPRO ---------- */ +#define NBL_EPRO_INT_STATUS (NBL_PPE_EPRO_BASE + 0x00000000) +#define NBL_EPRO_INT_MASK (NBL_PPE_EPRO_BASE + 0x00000004) +#define NBL_EPRO_RSS_KEY_REG (NBL_PPE_EPRO_BASE + 0x00000400) +#define NBL_EPRO_MIRROR_ACT_PRI_REG (NBL_PPE_EPRO_BASE + 0x00000234) +#define NBL_EPRO_ACTION_FILTER_TABLE(i) (NBL_PPE_EPRO_BASE + 0x00001900 + \ + sizeof(struct nbl_epro_action_filter_tbl) * (i)) +/* epro epro_ept table */ +#define NBL_EPRO_EPT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00001800 + (i) * sizeof(struct nbl_epro_ept_tbl)) +/* epro epro_vpt table */ +#define NBL_EPRO_VPT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00004000 + (i) * sizeof(struct nbl_epro_vpt_tbl)) +/* epro epro_rss_pt table */ +#define NBL_EPRO_RSS_PT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00002000 + (i) * sizeof(struct nbl_epro_rss_pt_tbl)) +/* epro epro_rss_ret table */ +#define NBL_EPRO_RSS_RET_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00008000 + (i) * sizeof(struct nbl_epro_rss_ret_tbl)) +/* epro epro_sch_cos_map table */ +#define NBL_EPRO_SCH_COS_MAP_TABLE(i, j) \ + (NBL_PPE_EPRO_BASE + 0x00000640 + ((i) * 0x20) + (j) * sizeof(struct nbl_epro_cos_map)) +/* epro epro_port_pri_mdf_en */ +#define NBL_EPRO_PORT_PRI_MDF_EN (NBL_PPE_EPRO_BASE + 0x000006E0) +/* epro epro_act_sel_en */ +#define NBL_EPRO_ACT_SEL_EN_REG \ + (NBL_PPE_EPRO_BASE + 0x00000214) +/* epro epro_kgen_ft table */ +#define NBL_EPRO_KGEN_FT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00001980 + (i) * sizeof(struct nbl_epro_kgen_ft_tbl)) + +struct nbl_epro_int_mask { + u32 fatal_err:1; + u32 fifo_uflw_err:1; + u32 fifo_dflw_err:1; + u32 cif_err:1; + u32 input_err:1; + u32 cfg_err:1; + u32 data_ucor_err:1; + u32 bank_cor_err:1; + u32 rsv2:24; +}; + +struct nbl_epro_rss_key { + u64 key0; + u64 key1; + u64 key2; + u64 key3; + u64 key4; +}; + +struct nbl_epro_mirror_act_pri { + u32 car_idx_pri:2; + u32 dqueue_pri:2; + u32 dport_pri:2; + u32 rsv:26; +}; + +/* EPRO epro_rss_ret table */ +struct nbl_epro_rss_ret_tbl { + u32 dqueue0:11; + u32 vld0:1; + u32 rsv0:4; + u32 dqueue1:11; + u32 vld1:1; + u32 rsv1:4; +}; + +/* EPRO epro_rss_pt table */ +struct nbl_epro_rss_pt_tbl { + u32 entry_size:3; +#define NBL_EPRO_RSS_ENTRY_SIZE_16 (0) +#define NBL_EPRO_RSS_ENTRY_SIZE_32 (1) +#define NBL_EPRO_RSS_ENTRY_SIZE_64 (2) +#define NBL_EPRO_RSS_ENTRY_SIZE_128 (3) +#define NBL_EPRO_RSS_ENTRY_SIZE_256 (4) + u32 offset1:14; + u32 offset1_vld:1; + u32 offset0:14; + u32 offset0_vld:1; + u32 vld:1; + u32 rsv:30; +}; + +/*EPRO sch cos map*/ +struct nbl_epro_cos_map { + u32 pkt_cos:3; + u32 dscp:6; + u32 rsv:23; +}; + +/* EPRO epro_port_pri_mdf_en */ +struct nbl_epro_port_pri_mdf_en_cfg { + u32 eth0:1; + u32 eth1:1; + u32 eth2:1; + u32 eth3:1; + u32 loop:1; + u32 rsv:27; +}; + +enum nbl_md_action_id_e { + NBL_MD_ACTION_NONE = 0, + NBL_MD_ACTION_CLEAR_FLAG = 1, + NBL_MD_ACTION_SET_FLAG = NBL_MD_ACTION_CLEAR_FLAG, + NBL_MD_ACTION_SET_FWD = NBL_MD_ACTION_CLEAR_FLAG, + NBL_MD_ACTION_FLOWID0 = 2, + NBL_MD_ACTION_FLOWID1 = 3, + NBL_MD_ACTION_RSSIDX = 4, + NBL_MD_ACTION_PORT_CARIDX = 5, + NBL_MD_ACTION_FLOW_CARIDX = 6, + NBL_MD_ACTION_TABLE_INDEX = 7, + NBL_MD_ACTION_MIRRIDX = 8, + NBL_MD_ACTION_DPORT = 9, + NBL_MD_ACTION_SET_DPORT = NBL_MD_ACTION_DPORT, + NBL_MD_ACTION_DQUEUE = 10, + NBL_MD_ACTION_MCIDX = 13, + NBL_MD_ACTION_VNI0 = 14, + NBL_MD_ACTION_VNI1 = 15, + NBL_MD_ACTION_STAT_IDX = 16, + NBL_MD_ACTION_PRBAC_IDX = 17, + NBL_MD_ACTION_L4S_IDX = NBL_MD_ACTION_PRBAC_IDX, + NBL_MD_ACTION_DP_HASH0 = 19, + NBL_MD_ACTION_DP_HASH1 = 20, + NBL_MD_ACTION_MDF_PRI = 21, + + NBL_MD_ACTION_MDF_V4_SIP = 
32, + NBL_MD_ACTION_MDF_V4_DIP = 33, + NBL_MD_ACTION_MDF_V6_SIP = 34, + NBL_MD_ACTION_MDF_V6_DIP = 35, + NBL_MD_ACTION_MDF_DPORT = 36, + NBL_MD_ACTION_MDF_SPORT = 37, + NBL_MD_ACTION_MDF_DMAC = 38, + NBL_MD_ACTION_MDF_SMAC = 39, + NBL_MD_ACTION_MDF_V4_DSCP_ECN = 40, + NBL_MD_ACTION_MDF_V6_DSCP_ECN = 41, + NBL_MD_ACTION_MDF_V4_TTL = 42, + NBL_MD_ACTION_MDF_V6_HOPLIMIT = 43, + NBL_MD_ACTION_DEL_O_VLAN = 44, + NBL_MD_ACTION_DEL_I_VLAN = 45, + NBL_MD_ACTION_MDF_O_VLAN = 46, + NBL_MD_ACTION_MDF_I_VLAN = 47, + NBL_MD_ACTION_ADD_O_VLAN = 48, + NBL_MD_ACTION_ADD_I_VLAN = 49, + NBL_MD_ACTION_ENCAP_TNL = 50, + NBL_MD_ACTION_DECAP_TNL = 51, + NBL_MD_ACTION_MDF_TNL_SPORT = 52, +}; + +/* EPRO action filter table */ +struct nbl_epro_action_filter_tbl { + u64 filter_mask; +}; + +#define NBL_EPRO_LAG_MAX (4) +#define NBL_EPRO_EPT_LAG_OFFSET (4) + +/* EPRO epr_ept table */ +struct nbl_epro_ept_tbl { + u32 cvlan:16; + u32 svlan:16; + u32 fwd:1; +#define NBL_EPRO_FWD_TYPE_DROP (0) +#define NBL_EPRO_FWD_TYPE_NORMAL (1) + u32 mirror_en:1; + u32 mirror_id:4; + u32 pop_i_vlan:1; + u32 pop_o_vlan:1; + u32 push_i_vlan:1; + u32 push_o_vlan:1; + u32 replace_i_vlan:1; + u32 replace_o_vlan:1; + u32 lag_alg_sel:2; +#define NBL_EPRO_LAG_ALG_L2_HASH (0) +#define NBL_EPRO_LAG_ALG_L23_HASH (1) +#define NBL_EPRO_LAG_ALG_LINUX_L34_HASH (2) +#define NBL_EPRO_LAG_ALG_DPDK_L34_HASH (3) + u32 lag_port_btm:4; + u32 lag_l2_protect_en:1; + u32 pfc_sch_cos_default:3; + u32 pfc_mode:1; + u32 vld:1; + u32 rsv:8; +}; + +/* EPRO epro_vpt table */ +struct nbl_epro_vpt_tbl { + u32 cvlan:16; + u32 svlan:16; + u32 fwd:1; +#define NBL_EPRO_FWD_TYPE_DROP (0) +#define NBL_EPRO_FWD_TYPE_NORMAL (1) + u32 mirror_en:1; + u32 mirror_id:4; + u32 car_en:1; + u32 car_id:10; + u32 pop_i_vlan:1; + u32 pop_o_vlan:1; + u32 push_i_vlan:1; + u32 push_o_vlan:1; + u32 replace_i_vlan:1; + u32 replace_o_vlan:1; + u32 rss_alg_sel:1; +#define NBL_EPRO_RSS_ALG_TOEPLITZ_HASH (0) +#define NBL_EPRO_RSS_ALG_CRC32 (1) + u32 rss_key_type_ipv4:1; +#define NBL_EPRO_RSS_KEY_TYPE_IPV4_L3 (0) +#define NBL_EPRO_RSS_KEY_TYPE_IPV4_L4 (1) + u32 rss_key_type_ipv6:1; +#define NBL_EPRO_RSS_KEY_TYPE_IPV6_L3 (0) +#define NBL_EPRO_RSS_KEY_TYPE_IPV6_L4 (1) + u32 vld:1; + u32 rsv:5; +}; + +/* UPA upa_pri_sel_conf */ +#define NBL_UPA_PRI_SEL_CONF_TABLE(id) (NBL_DP_UPA_BASE + 0x00000230 + \ + ((id) * sizeof(struct nbl_upa_pri_sel_conf))) +#define NBL_UPA_PRI_CONF_TABLE(id) (NBL_DP_UPA_BASE + 0x00002000 + \ + ((id) * sizeof(struct nbl_upa_pri_conf))) + +/* UPA pri_sel_conf */ +struct nbl_upa_pri_sel_conf { + u32 pri_sel:5; + u32 pri_default:3; + u32 pri_disen:1; + u32 rsv:23; +}; + +/* UPA pri_conf_table */ +struct nbl_upa_pri_conf { + u32 pri0:4; + u32 pri1:4; + u32 pri2:4; + u32 pri3:4; + u32 pri4:4; + u32 pri5:4; + u32 pri6:4; + u32 pri7:4; +}; + +#define NBL_DQM_RXMAC_TX_PORT_BP_EN (NBL_DP_DQM_BASE + 0x00000660) +#define NBL_DQM_RXMAC_TX_COS_BP_EN (NBL_DP_DQM_BASE + 0x00000664) +#define NBL_DQM_RXMAC_RX_PORT_BP_EN (NBL_DP_DQM_BASE + 0x00000670) +#define NBL_DQM_RX_PORT_BP_EN (NBL_DP_DQM_BASE + 0x00000610) +#define NBL_DQM_RX_COS_BP_EN (NBL_DP_DQM_BASE + 0x00000614) + +/* DQM rxmac_tx_port_bp_en */ +struct nbl_dqm_rxmac_tx_port_bp_en_cfg { + u32 eth0:1; + u32 eth1:1; + u32 eth2:1; + u32 eth3:1; + u32 rsv:28; +}; + +/* DQM rxmac_tx_cos_bp_en */ +struct nbl_dqm_rxmac_tx_cos_bp_en_cfg { + u32 eth0:8; + u32 eth1:8; + u32 eth2:8; + u32 eth3:8; +}; + +#define NBL_UQM_RX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000614) +#define NBL_UQM_TX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000604) + +/* UQM rx_cos_bp_en 
*/ +struct nbl_uqm_rx_cos_bp_en_cfg { + u32 vld_l; + u32 vld_h:16; +}; + +/* UQM rx_port_bp_en */ +struct nbl_uqm_rx_port_bp_en_cfg { + u32 l4s_h:1; + u32 l4s_e:1; + u32 rdma_h:1; + u32 rdma_e:1; + u32 emp:1; + u32 loopback:1; + u32 rsv:26; +}; + +/* UQM tx_cos_bp_en */ +struct nbl_uqm_tx_cos_bp_en_cfg { + u32 vld_l; + u32 vld_h:8; +}; + +/* UQM tx_port_bp_en */ +struct nbl_uqm_tx_port_bp_en_cfg { + u32 l4s_h:1; + u32 l4s_e:1; + u32 rdma_h:1; + u32 rdma_e:1; + u32 emp:1; + u32 rsv:27; +}; + +/* dl4s */ +#define NBL_DL4S_KEY_SALT(_i) (NBL_DP_DL4S_BASE + 0x00010000 + (_i) * 64) +/* UL4S */ +#define NBL_UL4S_SYNC_TRIG (NBL_DP_UL4S_BASE + 0x00000700) +#define NBL_UL4S_SYNC_SID (NBL_DP_UL4S_BASE + 0x00000704) +#define NBL_UL4S_SYNC_TCP_SN (NBL_DP_UL4S_BASE + 0x00000710) +#define NBL_UL4S_SYNC_REC_NUM (NBL_DP_UL4S_BASE + 0x00000714) +#define NBL_UL4S_KEY_SALT(_i) (NBL_DP_UL4S_BASE + 0x00010000 + (_i) * 64) + +struct nbl_ktls_keymat { + u8 key[32]; + u8 salt[4]; + u32 mode:2; + u32 ena:1; + u32 rsv:29; +}; + +union nbl_ktls_sync_trig { + u32 data; + struct { + u32 rsv1 : 1; + u32 trig : 1; + u32 init_sync : 1; + u32 rsv2 : 29; + }; +}; + +/* dprbac */ +#define NBL_DPRBAC_INT_STATUS (NBL_PPE_DPRBAC_BASE + 0x00000000) +#define NBL_DPRBAC_LIFETIME_INFO (NBL_PPE_DPRBAC_BASE + 0x00000014) +#define NBL_DPRBAC_ENABLE (NBL_PPE_DPRBAC_BASE + 0x00000114) +#define NBL_DPRBAC_NAT (NBL_PPE_DPRBAC_BASE + 0x0000012C) +#define NBL_DPRBAC_SAD_LIFEDIFF (NBL_PPE_DPRBAC_BASE + 0x00000204) +#define NBL_DPRBAC_LIFETIME_DIFF (NBL_PPE_DPRBAC_BASE + 0x00000208) +#define NBL_DPRBAC_DBG_CNT_EN (NBL_PPE_DPRBAC_BASE + 0x00000680) + +#define NBL_DPRBAC_SAD_IV(_i) (NBL_PPE_DPRBAC_BASE + 0x000010000 + (_i) * 8) +#define NBL_DPRBAC_SAD_ESN(_i) (NBL_PPE_DPRBAC_BASE + 0x000020000 + (_i) * 16) +#define NBL_DPRBAC_SAD_LIFETIME(_i) (NBL_PPE_DPRBAC_BASE + 0x000030000 + (_i) * 16) +#define NBL_DPRBAC_SAD_CRYPTO_INFO(_i) (NBL_PPE_DPRBAC_BASE + 0x000040000 + (_i) * 64) +#define NBL_DPRBAC_SAD_ENCAP_INFO(_i) (NBL_PPE_DPRBAC_BASE + 0x000060000 + (_i) * 64) + +union nbl_dprbac_enable { + u32 data; + struct { + u32 prbac : 1; + u32 mf_fwd : 1; + u32 ipv4_nat_csm : 1; + u32 ipv6_nat_csm : 1; + u32 rsv : 28; + }; +}; + +union nbl_dprbac_clk_gate { + u32 data; + struct { + u32 clk_en : 1; + u32 rsv : 31; + }; +}; + +union nbl_dprbac_init_start { + u32 data; + struct { + u32 start : 1; + u32 rsv : 31; + }; +}; + +union nbl_dprbac_nat { + u32 data; + struct { + u32 rsv : 16; + u32 sport : 16; + }; +}; + +union nbl_dprbac_dbg_cnt_en { + u32 data; + struct { + u32 total : 1; + u32 in_right_bypass : 1; + u32 in_drop_bypass : 1; + u32 in_drop_prbac : 1; + u32 out_drop_prbac : 1; + u32 out_right_prbac : 1; + u32 rsv : 26; + }; +}; + +struct nbl_dprbac_sad_iv { + u64 iv; +}; + +struct nbl_dprbac_sad_esn { + u32 sn; + u32 esn; + u32 wrap_en : 1; + u32 enable : 1; + u32 rsv1 : 30; + u32 rsv2; +}; + +struct nbl_dprbac_sad_lifetime { + u32 diff; + u32 cnt; + u32 flag : 1; + u32 unit : 1; + u32 enable : 1; + u32 rsv1 : 29; + u32 rsv2; +}; + +struct nbl_dprbac_sad_crypto_info { + u32 key[8]; + u32 salt; + u32 crypto_type : 3; + u32 tunnel_mode : 1; + u32 icv_len : 2; + u32 rsv1 : 26; + u32 rsv2[6]; +}; + +struct nbl_dprbac_sad_encap_info { + u32 dip_addr[4]; + u32 sip_addr[4]; + u32 spi; + u32 dport : 16; + u32 nat_flag : 1; + u32 rsv1 : 15; + u32 rsv2[6]; +}; + +/* uprbac */ +#define NBL_UPRBAC_INT_STATUS (NBL_PPE_UPRBAC_BASE + 0x00000000) +#define NBL_UPRBAC_LIFETIME_INFO (NBL_PPE_UPRBAC_BASE + 0x00000014) +#define NBL_UPRBAC_ENABLE (NBL_PPE_UPRBAC_BASE + 
0x00000114) +#define NBL_UPRBAC_NAT (NBL_PPE_UPRBAC_BASE + 0x0000012C) +#define NBL_UPRBAC_SAD_LIFEDIFF (NBL_PPE_UPRBAC_BASE + 0x00000204) +#define NBL_UPRBAC_LIFETIME_DIFF (NBL_PPE_UPRBAC_BASE + 0x00000208) +#define NBL_UPRBAC_DBG_CNT_EN (NBL_PPE_UPRBAC_BASE + 0x00000680) +#define LEONIS_UPRBAC_EM_PROFILE (NBL_PPE_UPRBAC_BASE + 0x00002000) + +#define NBL_UPRBAC_SAD_BOTTOM(_i) (NBL_PPE_UPRBAC_BASE + 0x000020000 + (_i) * 16) +#define NBL_UPRBAC_SAD_LIFETIME(_i) (NBL_PPE_UPRBAC_BASE + 0x000030000 + (_i) * 16) +#define NBL_UPRBAC_SAD_CRYPTO_INFO(_i) (NBL_PPE_UPRBAC_BASE + 0x000040000 + (_i) * 64) +#define NBL_UPRBAC_SAD_SLIDE_WINDOW(_i) (NBL_PPE_UPRBAC_BASE + 0x000060000 + (_i) * 64) + +#define NBL_UPRBAC_EM_TCAM(_i) (NBL_PPE_UPRBAC_BASE + 0x00002800 + (_i) * 16) +#define NBL_UPRBAC_EM_AD(_i) (NBL_PPE_UPRBAC_BASE + 0x00003000 + (_i) * 4) +#define NBL_UPRBAC_HT(_i, _j) (NBL_PPE_UPRBAC_BASE + 0x00004000 + \ + (_i) * 0x00004000 + (_j) * 16) +#define NBL_UPRBAC_KT(_i) (NBL_PPE_UPRBAC_BASE + 0x00010000 + (_i) * 32) + +union nbl_uprbac_enable { + u32 data; + struct { + u32 prbac : 1; + u32 padding_check : 1; + u32 pa_am : 1; + u32 dm_am : 1; + u32 icv_err : 1; + u32 pad_err : 1; + u32 ipv6_nat_csm0 : 1; + u32 rsv : 25; + }; +}; + +union nbl_uprbac_clk_gate { + u32 data; + struct { + u32 clk_en : 1; + u32 rsv : 31; + }; +}; + +union nbl_uprbac_init_start { + u32 data; + struct { + u32 start : 1; + u32 rsv : 31; + }; +}; + +union nbl_uprbac_nat { + u32 data; + struct { + u32 enable : 1; + u32 rsv : 15; + u32 dport : 16; + }; +}; + +union nbl_uprbac_dbg_cnt_en { + u32 data; + struct { + u32 drop_prbac : 1; + u32 right_prbac : 1; + u32 replay : 1; + u32 right_misc : 1; + u32 error_misc : 1; + u32 xoff_drop : 1; + u32 intf_cell : 1; + u32 sad_miss : 1; + u32 rsv : 24; + }; +}; + +struct nbl_uprbac_em_profile { + u32 pp_cmd_type : 1; + u32 key_size : 1; + u32 mask_btm0 : 20; + u32 mask_btm1 : 20; + u32 hash_sel0 : 2; + u32 hash_sel1 : 2; + u32 action0 : 1; + u32 act_num : 4; + u32 vld : 1; + u32 rsv : 12; +}; + +struct nbl_uprbac_sad_bottom { + u32 sn; + u32 esn; + u32 overlap : 1; + u32 enable : 1; + u32 rsv1 : 30; + u32 rsv2; +}; + +struct nbl_uprbac_sad_lifetime { + u32 diff; + u32 cnt; + u32 flag : 1; + u32 unit : 1; + u32 enable : 1; + u32 rsv1 : 29; + u32 rsv2; +}; + +struct nbl_uprbac_sad_crypto_info { + u32 key[8]; + u32 salt; + u32 crypto_type : 3; + u32 tunnel_mode : 1; + u32 icv_len : 2; + u32 rsv1 : 26; + u32 rsv2[6]; +}; + +struct nbl_uprbac_sad_slide_window { + u32 bitmap[8]; + u32 option : 2; + u32 enable : 1; + u32 rsv1 : 29; + u32 rsv2[7]; +}; + +struct nbl_uprbac_em_tcam { + u32 key_dat0; + u32 key_dat1; + u32 key_dat2 : 16; + u32 key_vld : 1; + u32 key_size : 1; + u32 rsv1 : 14; + u32 rsv2; +}; + +union nbl_uprbac_em_ad { + u32 data; + struct { + u32 sad_index : 11; + u32 rsv : 21; + }; +}; + +union nbl_uprbac_ht { + u8 data[16]; + struct { + u32 kt_index0 : 11; + u32 ht_other_index0 : 9; + u32 vld0 : 1; + + u32 kt_index1 : 11; + u32 ht_other_index1 : 9; + u32 vld1 : 1; + + u32 kt_index2 : 11; + u32 ht_other_index2 : 9; + u32 vld2 : 1; + + u32 kt_index3 : 11; + u32 ht_other_index3 : 9; + u32 vld3 : 1; + + u32 rsv1 : 12; + u32 rsv2; + }; +}; + +struct nbl_uprbac_kt { + u32 key[5]; + u32 sad_index : 11; + u32 rsv1 : 21; + u32 rsv[2]; +}; + +union nbl_ipsec_lifetime_diff { + u32 data[2]; + struct { + u32 sad_index : 11; + u32 rsv1 : 5; + u32 msb_value : 1; + u32 flag_value : 1; + u32 rsv2 : 2; + u32 msb_wen : 1; + u32 flag_wen : 1; + u32 rsv3 : 10; + u32 lifetime_diff; + }; +}; + +#pragma 
pack() + +/* ---------- TOP ---------- */ +/* lb_top_ctrl_crg_cfg crg_cfg */ +#define NBL_TOP_CTRL_MODULE (0x01300000) +#define NBL_TOP_CTRL_INT_STATUS (NBL_TOP_CTRL_MODULE + 0X0000) +#define NBL_TOP_CTRL_INT_MASK (NBL_TOP_CTRL_MODULE + 0X0004) +#define NBL_TOP_CTRL_TVSENSOR0 (NBL_TOP_CTRL_MODULE + 0X0254) +#define NBL_TOP_CTRL_SOFT_DEF0 (NBL_TOP_CTRL_MODULE + 0x0430) +#define NBL_TOP_CTRL_SOFT_DEF1 (NBL_TOP_CTRL_MODULE + 0x0434) +#define NBL_TOP_CTRL_SOFT_DEF2 (NBL_TOP_CTRL_MODULE + 0x0438) +#define NBL_TOP_CTRL_SOFT_DEF3 (NBL_TOP_CTRL_MODULE + 0x043c) +#define NBL_TOP_CTRL_SOFT_DEF4 (NBL_TOP_CTRL_MODULE + 0x0440) +#define NBL_TOP_CTRL_SOFT_DEF5 (NBL_TOP_CTRL_MODULE + 0x0444) +#define NBL_TOP_CTRL_VERSION_INFO (NBL_TOP_CTRL_MODULE + 0X0900) +#define NBL_TOP_CTRL_VERSION_DATE (NBL_TOP_CTRL_MODULE + 0X0904) + +#define NBL_FW_HEARTBEAT_PONG NBL_TOP_CTRL_SOFT_DEF1 + +#define NBL_PP_NUM (3) +#define NBL_PP_TYPE_0 (0) +#define NBL_PP_TYPE_1 (1) +#define NBL_PP_TYPE_2 (2) +#define NBL_ACT_DATA_BITS (16) + +#define NBL_CMDQ_DIF_MODE_VALUE (2) +#define NBL_CMDQ_DELAY_200US (200) +#define NBL_CMDQ_DELAY_300US (300) +#define NBL_CMDQ_RESET_MAX_WAIT (30) +#define NBL_CMD_NOTIFY_ADDR (0x00001000) +#define NBL_ACL_RD_RETRY (50000) +#define NBL_ACL_RD_WAIT_100US (100) +#define NBL_ACL_RD_WAIT_200US (200) +#define NBL_ACL_CPU_WRITE (0) +#define NBL_ACL_CPU_READ (1) + +/* the capacity of storing acl-items in all tcams */ +#define NBL_ACL_ITEM_CAP (1536) +#define NBL_ACL_KEY_WIDTH (120) +#define NBL_ACL_ITEM6_CAP (512) +#define NBL_ACL_KEY6_WIDTH (240) +#define NBL_ACL_TCAM_DEPTH (512) +#define NBL_ACL_S1_PROFILE_ID (0) +#define NBL_ACL_S2_PROFILE_ID (1) +#define NBL_ACL_TCAM_CNT (16) +#define NBL_ACL_TCAM_HALF (8) +#define NBL_ACL_TCAM_DEPTH (512) +#define NBL_ACL_TCAM_BITS (40) +#define NBL_ACL_HALF_TCAMS_BITS (320) +#define NBL_ACL_HALF_TCAMS_BYTES (40) +#define NBL_ACL_ALL_TCAMS_BITS (640) +#define NBL_ACL_ALL_TCAMS_BYTES (80) +#define NBL_ACL_ACT_RAM_CNT (4) + +#define NBL_FEM_TCAM_MAX_NUM (64) + +#define RTE_ETHER_TYPE_VLAN 0x8100 +#define RTE_ETHER_TYPE_QINQ 0x88A8 +#define RTE_ETHER_TYPE_QINQ1 0x9100 +#define RTE_ETHER_TYPE_QINQ2 0x9200 +#define NBL_BYTES_IN_REG (4) +#define NBL_CMDQ_HI_DWORD(x) ((u32)(((x) >> 32) & 0xFFFFFFFF)) +#define NBL_CMDQ_LO_DWORD(x) ((u32)(x) & 0xFFFFFFFF) +#define NBL_FEM_INIT_START_KERN (0xFE) +#define NBL_FEM_INIT_START_VALUE (0x7E) +#define NBL_PED_VSI_TYPE_ETH_BASE (1027) +#define NBL_DPED_VLAN_TYPE_PORT_NUM (1031) +#define NBL_CHAN_REG_MAX_LEN (32) +#define NBL_EPRO_RSS_KEY_32 (0x6d5a6d5a) + +#define NBL_SHAPING_GRP_TIMMING_ADD_ADDR (0x504400) +#define NBL_SHAPING_GRP_ADDR (0x504800) +#define NBL_SHAPING_GRP_DWLEN (4) +#define NBL_SHAPING_GRP_REG(r) (NBL_SHAPING_GRP_ADDR + \ + (NBL_SHAPING_GRP_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_ADDR (0x47c000) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_REG(r) (NBL_DSCH_VN_SHA2GRP_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_SHA2GRP_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_ADDR (0x480000) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_GRP2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_GRP2SHA_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_SHAPING_DPORT_TIMMING_ADD_ADDR (0x504504) +#define NBL_SHAPING_DPORT_ADDR (0x504700) +#define NBL_SHAPING_DPORT_DWLEN (4) +#define NBL_SHAPING_DPORT_REG(r) (NBL_SHAPING_DPORT_ADDR + \ + (NBL_SHAPING_DPORT_DWLEN * 4) * (r)) +#define NBL_SHAPING_DVN_DPORT_ADDR (0x504750) +#define 
NBL_SHAPING_DVN_DPORT_DWLEN (4) +#define NBL_SHAPING_DVN_DPORT_REG(r) (NBL_SHAPING_DVN_DPORT_ADDR + \ + (NBL_SHAPING_DVN_DPORT_DWLEN * 4) * (r)) +#define NBL_SHAPING_RDMA_DPORT_ADDR (0x5047a0) +#define NBL_SHAPING_RDMA_DPORT_DWLEN (4) +#define NBL_SHAPING_RDMA_DPORT_REG(r) (NBL_SHAPING_RDMA_DPORT_ADDR + \ + (NBL_SHAPING_RDMA_DPORT_DWLEN * 4) * (r)) +#define NBL_DSCH_PSHA_EN_ADDR (0x404314) +#define NBL_SHAPING_NET_ADDR (0x505800) +#define NBL_SHAPING_NET_DWLEN (4) +#define NBL_SHAPING_NET_REG(r) (NBL_SHAPING_NET_ADDR + \ + (NBL_SHAPING_NET_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_ADDR (0x474000) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_REG(r) (NBL_DSCH_VN_SHA2NET_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR (0x478000) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN * 4) * (r)) + +/* Mailbox bar phy register offset begin */ +#define NBL_FW_HEARTBEAT_PING 0x84 +#define NBL_FW_BOARD_CONFIG 0x200 +#define NBL_FW_BOARD_DW3_OFFSET (NBL_FW_BOARD_CONFIG + 12) +#define NBL_FW_BOARD_DW6_OFFSET (NBL_FW_BOARD_CONFIG + 24) + +/* Mailbox bar phy register offset end */ + +enum nbl_ethdev_repr_flag { + NBL_ETHDEV_VIRTIO_REP = 0, + NBL_ETHDEV_ETH_REP, + NBL_ETHDEV_PF_REP, + NBL_ETHDEV_INVALID_REP, +}; + +enum nbl_ped_vlan_type_e { + INNER_VLAN_TYPE, + OUTER_VLAN_TYPE, +}; + +enum nbl_eth_rep_id { + ETH_NET_REP_ID_0 = 2048, + ETH_NET_REP_ID_1, + ETH_NET_REP_ID_2, + ETH_NET_REP_ID_3, + ETH_NET_REP_ID_MAX +}; + +enum nbl_ped_vlan_tpid_e { + PED_VLAN_TYPE_8100 = 0, + PED_VLAN_TYPE_88A8 = 1, + PED_VLAN_TYPE_9100 = 2, + PED_VLAN_TYPE_9200 = 3, + PED_VLAN_TYPE_NUM = 4, +}; + +enum nbl_error_code_e { + NBL_ERROR_CODE_NONE = 0, + NBL_ERROR_CODE_VLAN = 1, + NBL_ERROR_CODE_L3_HEAD_LEN = 2, + NBL_ERROR_CODE_L3_PLD_LEN = 3, + NBL_ERROR_CODE_L3_CHKSUM = 4, + NBL_ERROR_CODE_L4_CHKSUM = 5, + NBL_ERROR_CODE_TTL_HOPLIMT = 6, + NBL_ERROR_CODE_ESP_AUTH_FAIL = 7, + NBL_ERROR_CODE_ESP_BAD_FAIL = 8, + NBL_ERROR_CODE_PA_RECG_FAIL = 9, + NBL_ERROR_CODE_DN_SMAC = 10, + NBL_ERROR_CODE_TOTAL_NUM = 16, +}; + +enum nbl_epro_act_pri_e { + EPRO_ACT_MIRRORIDX_PRI = 3, + EPRO_ACT_CARIDX_PRI = 3, + EPRO_ACT_DQUEUE_PRI = 3, + EPRO_ACT_DPORT_PRI = 3, + EPRO_ACT_POP_IVLAN_PRI = 3, + EPRO_ACT_POP_OVLAN_PRI = 3, + EPRO_ACT_REPLACE_IVLAN_PRI = 3, + EPRO_ACT_REPLACE_OVLAN_PRI = 3, + EPRO_ACT_PUSH_IVLAN_PRI = 3, + EPRO_ACT_PUSH_OVLAN_PRI = 3, + EPRO_ACT_OUTER_SPORT_MDF_PRI = 3, + EPRO_ACT_PRI_MDF_PRI = 3, + EPRO_ACT_DP_HASH0_PRI = 3, + EPRO_ACT_DP_HASH1_PRI = 3, +}; + +enum nbl_epro_mirror_act_pri_e { + EPRO_MIRROR_ACT_CARIDX_PRI = 3, + EPRO_MIRROR_ACT_DQUEUE_PRI = 3, + EPRO_MIRROR_ACT_DPORT_PRI = 3, +}; + +union nbl_ped_port_vlan_type_u { + struct ped_port_vlan_type { + u32 o_vlan_sel:2; + u32 i_vlan_sel:2; + u32 rsv:28; + } __packed info; +#define NBL_PED_PORT_VLAN_TYPE_TABLE_WIDTH (sizeof(struct ped_port_vlan_type) \ + / sizeof(u32)) + u32 data[NBL_PED_PORT_VLAN_TYPE_TABLE_WIDTH]; +}; + +#define NBL_ACL_ACTION_RAM_TBL(r, i) (NBL_ACL_BASE + 0x00002000 + 0x2000 * (r) + \ + (NBL_ACL_ACTION_RAM0_DWLEN * 4 * (i))) +#define NBL_DPED_MIR_CMD_0_TABLE(t) (NBL_DPED_MIR_CMD_00_ADDR + \ + (NBL_DPED_MIR_CMD_00_DWLEN * 2 * (t))) +#define NBL_SET_DPORT(upcall_flag, nxtstg_sel, port_type, port_id) \ + ((upcall_flag) << 14 | (nxtstg_sel) << 12 | (port_type) << 10 | (port_id)) + +#define MAX_RSS_LEN (100) +#define 
NBL_RSS_FUNC_TYPE "rss_func_type=" +enum rss_func_type { + NBL_SYM_TOEPLITZ_INT = 0, + NBL_XOR_INT, + NBL_INVALID_FUNC_TYPE +}; + +#define NBL_XOR "xor" +#define NBL_SYM_TOEPLITZ "sym_toeplitz" +#define NBL_RSS_KEY_TYPE "rss_key_type" + +enum rss_field_type { + NBL_KEY_IPV4_L3_INT = 0, + NBL_KEY_IPV4_L4_INT, + NBL_KEY_IPV6_L3_INT, + NBL_KEY_IPV6_L4_INT, + NBL_KEY_AUTO, +}; + +#define NBL_KEY_IPV4_L3 "ipv4" +#define NBL_KEY_IPV4_L4 "ipv4_l4" +#define NBL_KEY_IPV6_L3 "ipv6" +#define NBL_KEY_IPV6_L4 "ipv6_l4" + +#define RSS_SPLIT_STR_NUM 2 +#define NBL_KEY_IP4_L4_RSS_BIT 1 +#define NBL_KEY_IP6_L4_RSS_BIT 2 + +#define NBL_DPED_L4_CK_CMD_40_ADDR (0x75c338) +#define NBL_DPED_L4_CK_CMD_40_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_40_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_40_DWLEN (1) +struct dped_l4_ck_cmd_40 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x8 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x1 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ +}; + +#define NBL_DSTORE_D_DPORT_FC_TH_ADDR (0x704600) +#define NBL_DSTORE_D_DPORT_FC_TH_DEPTH (5) +#define NBL_DSTORE_D_DPORT_FC_TH_WIDTH (32) +#define NBL_DSTORE_D_DPORT_FC_TH_DWLEN (1) + +struct dstore_d_dport_fc_th { + u32 xoff_th:11; /* [10:0] Default:200 RW */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 xon_th:11; /* [26:16] Default:100 RW */ + u32 rsv:3; /* [29:27] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x0 RW */ +}; + +#define NBL_DSTORE_D_DPORT_FC_TH_REG(r) (NBL_DSTORE_D_DPORT_FC_TH_ADDR + \ + (NBL_DSTORE_D_DPORT_FC_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_PORT_DROP_TH_ADDR (0x704150) +#define NBL_DSTORE_PORT_DROP_TH_DEPTH (6) +#define NBL_DSTORE_PORT_DROP_TH_WIDTH (32) +#define NBL_DSTORE_PORT_DROP_TH_DWLEN (1) + +struct dstore_port_drop_th { + u32 disc_th:10; /* [9:0] Default:800 RW */ + u32 rsv:21; /* [30:10] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ +}; + +#define NBL_DSTORE_PORT_DROP_TH_REG(r) (NBL_DSTORE_PORT_DROP_TH_ADDR + \ + (NBL_DSTORE_PORT_DROP_TH_DWLEN * 4) * (r)) + +union nbl_fw_board_cfg_dw3 { + struct board_cfg_dw3 { + u32 port_typpe:1; + u32 port_num:7; + u32 port_speed:2; + u32 rsv:22; + } __packed info; + u32 data; +}; + +union nbl_fw_board_cfg_dw6 { + struct board_cfg_dw6 { + u8 lane_bitmap; + u8 eth_bitmap; + u16 rsv; + } __packed info; + u32 data; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c new file mode 100644 index 000000000000..92ad9254e568 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c @@ -0,0 +1,1212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_queue_leonis.h" + +static struct nbl_queue_vsi_info * +nbl_res_queue_get_vsi_info(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + u16 func_id; + int i; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + + for (i = 0; i < NBL_VSI_MAX; i++) + if (queue_info->vsi_info[i].vsi_id == vsi_id) + return &queue_info->vsi_info[i]; + + return NULL; +} + +static int nbl_res_queue_get_net_id(u16 func_id, u16 vsi_type) +{ + switch (vsi_type) { + case NBL_VSI_DATA: + return func_id; + case NBL_VSI_USER: + return func_id + NBL_SPECIFIC_VSI_NET_ID_OFFSET; + case NBL_VSI_CTRL: + return func_id + NBL_SPECIFIC_VSI_NET_ID_OFFSET; + default: + return func_id; + } +} + +static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 func_id, + u16 num_queues) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + u16 *txrx_queues, *queues_context; + u16 queue_index; + int i, ret = 0; + + nbl_info(common, NBL_DEBUG_QUEUE, + "Setup qid map, func_id:%d, num_queues:%d", func_id, num_queues); + + txrx_queues = kcalloc(num_queues, sizeof(txrx_queues[0]), GFP_ATOMIC); + if (!txrx_queues) { + ret = -ENOMEM; + goto alloc_txrx_queues_fail; + } + + queues_context = kcalloc(num_queues * 2, sizeof(txrx_queues[0]), GFP_ATOMIC); + if (!queues_context) { + ret = -ENOMEM; + goto alloc_queue_contex_fail; + } + + queue_info->num_txrx_queues = num_queues; + queue_info->txrx_queues = txrx_queues; + queue_info->queues_context = queues_context; + + for (i = 0; i < num_queues; i++) { + queue_index = find_first_zero_bit(queue_mgt->txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); + if (queue_index == NBL_MAX_TXRX_QUEUE) { + ret = -ENOSPC; + goto get_txrx_queue_fail; + } + txrx_queues[i] = queue_index; + set_bit(queue_index, queue_mgt->txrx_queue_bitmap); + } + + return 0; + +get_txrx_queue_fail: + while (--i + 1) { + queue_index = txrx_queues[i]; + clear_bit(queue_index, queue_mgt->txrx_queue_bitmap); + } + queue_info->num_txrx_queues = 0; + queue_info->txrx_queues = NULL; +alloc_queue_contex_fail: + kfree(txrx_queues); +alloc_txrx_queues_fail: + return ret; +} + +static void nbl_res_queue_remove_queue_info(struct nbl_resource_mgt *res_mgt, u16 func_id) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + u16 i; + + for (i = 0; i < queue_info->num_txrx_queues; i++) + clear_bit(queue_info->txrx_queues[i], queue_mgt->txrx_queue_bitmap); + + kfree(queue_info->txrx_queues); + kfree(queue_info->queues_context); + queue_info->txrx_queues = NULL; + queue_info->queues_context = NULL; + + queue_info->num_txrx_queues = 0; +} + +static inline u64 nbl_res_queue_qid_map_key(struct nbl_qid_map_table qid_map) +{ + u64 notify_addr_l = qid_map.notify_addr_l; + u64 notify_addr_h = qid_map.notify_addr_h; + + return (notify_addr_h << NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN) | notify_addr_l; +} + +static void nbl_res_queue_set_qid_map_table(struct nbl_resource_mgt *res_mgt, u16 tail) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_qid_map_param param; + int i; + + param.qid_map = kcalloc(tail, 
sizeof(param.qid_map[0]), GFP_ATOMIC); + if (!param.qid_map) + return; + + for (i = 0; i < tail; i++) + param.qid_map[i] = queue_mgt->qid_map_table[i]; + + param.start = 0; + param.len = tail; + + phy_ops->set_qid_map_table(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &param, + queue_mgt->qid_map_select); + queue_mgt->qid_map_select = !queue_mgt->qid_map_select; + + if (!queue_mgt->qid_map_ready) { + phy_ops->set_qid_map_ready(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), true); + queue_mgt->qid_map_ready = true; + } + + kfree(param.qid_map); +} + +int nbl_res_queue_setup_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id, + u64 notify_addr) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_qid_map_table qid_map; + u64 key; + u16 *txrx_queues = queue_info->txrx_queues; + u16 qid_map_entries = queue_info->num_txrx_queues, qid_map_base, tail; + int i; + + /* Get base location */ + queue_info->notify_addr = notify_addr; + key = notify_addr >> NBL_QID_MAP_NOTIFY_ADDR_SHIFT; + + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + WARN_ON(key == nbl_res_queue_qid_map_key(queue_mgt->qid_map_table[i])); + if (key < nbl_res_queue_qid_map_key(queue_mgt->qid_map_table[i])) { + qid_map_base = i; + break; + } + } + if (i == NBL_QID_MAP_TABLE_ENTRIES) { + nbl_err(common, NBL_DEBUG_QUEUE, "No valid qid map key for func %d", func_id); + return -ENOSPC; + } + + /* Calc tail: we will set the qid_map from 0 to tail. + * We have to make sure that this range (0, tail) can cover all the changes, which requires us to + * consider both tables. Therefore, it is necessary to store each table's tail, and + * always use the larger one between this table's tail and the added tail. 
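+ * In code terms this is what the block below computes: tail = max(queue_mgt->total_qid_map_entries, + * queue_mgt->qid_map_tail[queue_mgt->qid_map_select]), taken after total_qid_map_entries has been + * increased by the number of entries being added.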
+ * + * The reason can be illustrated in the following example: + * Step 1: del some entries, which happens on table 1, and each table could be + * Table 0: 0 - 31 used + * Table 1: 0 - 15 used + * SW : queue_mgt->total_qid_map_entries = 16 + * Step 2: add 2 entries, which happens on table 0, if we use 16 + 2 as the tail, then + * Table 0: 0 - 17 correctly added, 18 - 31 garbage data + * Table 1: 0 - 15 used + * SW : queue_mgt->total_qid_map_entries = 18 + * And this is definitely wrong, it should use 32, table 0's original tail + */ + queue_mgt->total_qid_map_entries += qid_map_entries; + tail = max(queue_mgt->total_qid_map_entries, + queue_mgt->qid_map_tail[queue_mgt->qid_map_select]); + queue_mgt->qid_map_tail[queue_mgt->qid_map_select] = queue_mgt->total_qid_map_entries; + + /* Update qid map */ + for (i = NBL_QID_MAP_TABLE_ENTRIES - qid_map_entries; i > qid_map_base; i--) + queue_mgt->qid_map_table[i - 1 + qid_map_entries] = queue_mgt->qid_map_table[i - 1]; + + for (i = 0; i < queue_info->num_txrx_queues; i++) { + qid_map.local_qid = 2 * i + 1; + qid_map.notify_addr_l = key; + qid_map.notify_addr_h = key >> NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN; + qid_map.global_qid = txrx_queues[i]; + qid_map.ctrlq_flag = 0; + queue_mgt->qid_map_table[qid_map_base + i] = qid_map; + } + + nbl_res_queue_set_qid_map_table(res_mgt, tail); + + return 0; +} + +void nbl_res_queue_remove_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_qid_map_table qid_map; + u64 key; + u16 qid_map_entries = queue_info->num_txrx_queues, qid_map_base, tail; + int i; + + /* Get base location */ + key = queue_info->notify_addr >> NBL_QID_MAP_NOTIFY_ADDR_SHIFT; + + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + if (key == nbl_res_queue_qid_map_key(queue_mgt->qid_map_table[i])) { + qid_map_base = i; + break; + } + } + if (i == NBL_QID_MAP_TABLE_ENTRIES) { + nbl_err(common, NBL_DEBUG_QUEUE, "No valid qid map key for func %d", func_id); + return; + } + + /* Calc tail: we will set the qid_map from 0 to tail. + * We have to make sure that this range (0, tail) can cover all the changes, which requires us to + * consider both tables. Therefore, it is necessary to store each table's tail, and + * always use the larger one between this table's tail and the driver-stored tail. 
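+ * In code terms this is what the block below computes: tail = max(queue_mgt->total_qid_map_entries, + * queue_mgt->qid_map_tail[queue_mgt->qid_map_select]), taken before total_qid_map_entries is + * decreased by the number of entries being removed.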
+ * + * The reason can be illustrated in the following example: + * Step 1: del some entries, which happens on table 1, and each table could be + * Table 0: 0 - 31 used + * Table 1: 0 - 15 used + * SW : queue_mgt->total_qid_map_entries = 16 + * Step 2: del 2 entries, which happens on table 0, if we use 16 as the tail, then + * Table 0: 0 - 13 correct, 14 - 31 garbage data + * Table 1: 0 - 15 used + * SW : queue_mgt->total_qid_map_entries = 14 + * And this is definitely wrong, it should use 32, table 0's original tail + */ + tail = max(queue_mgt->total_qid_map_entries, + queue_mgt->qid_map_tail[queue_mgt->qid_map_select]); + queue_mgt->total_qid_map_entries -= qid_map_entries; + queue_mgt->qid_map_tail[queue_mgt->qid_map_select] = queue_mgt->total_qid_map_entries; + + /* Update qid map */ + memset(&qid_map, U8_MAX, sizeof(qid_map)); + + for (i = qid_map_base; i < NBL_QID_MAP_TABLE_ENTRIES - qid_map_entries; i++) + queue_mgt->qid_map_table[i] = queue_mgt->qid_map_table[i + qid_map_entries]; + for (; i < NBL_QID_MAP_TABLE_ENTRIES; i++) + queue_mgt->qid_map_table[i] = qid_map; + + nbl_res_queue_set_qid_map_table(res_mgt, tail); +} + +static int nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 count, u16 *result) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + u16 index, i, j, k; + int success = 1; + int ret = -EFAULT; + + for (i = 0; i < NBL_EPRO_RSS_RET_TBL_DEPTH;) { + index = find_next_zero_bit(queue_mgt->rss_ret_bitmap, + NBL_EPRO_RSS_RET_TBL_DEPTH, i); + if (index == NBL_EPRO_RSS_RET_TBL_DEPTH) { + nbl_err(common, NBL_DEBUG_QUEUE, "There is no available rss ret left"); + break; + } + + success = 1; + for (j = index + 1; j < (index + count); j++) { + if (j >= NBL_EPRO_RSS_RET_TBL_DEPTH) { + success = 0; + break; + } + + if (test_bit(j, queue_mgt->rss_ret_bitmap)) { + success = 0; + break; + } + } + if (success) { + for (k = index; k < (index + count); k++) + set_bit(k, queue_mgt->rss_ret_bitmap); + *result = index; + ret = 0; + break; + } + i = j; + } + + return ret; +} + +static int nbl_res_queue_setup_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_info *queue_info = NULL; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id; + int ret = 0, i; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + /*config ipro queue tbl*/ + for (i = vsi_info->queue_offset; + i < vsi_info->queue_offset + vsi_info->queue_num && i < queue_info->num_txrx_queues; + i++) { + ret = phy_ops->cfg_ipro_queue_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], vsi_id, 1); + if (ret) { + while (--i + 1) + phy_ops->cfg_ipro_queue_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], 0, 0); + return ret; + } + } + + return 0; +} + +static void nbl_res_queue_remove_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_info *queue_info = NULL; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id; + int i; + + 
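The tail calculation commented in nbl_res_queue_setup_qid_map_table_leonis() and nbl_res_queue_remove_qid_map_table_leonis() above is easier to see in isolation: the hardware keeps two qid-map tables that are written alternately, and every rewrite of the currently selected table must reach at least as far as that table's previous tail, not just the current software entry count. Below is a minimal user-space sketch of that rule; the struct and helpers are hypothetical and only model the max()-of-tails bookkeeping, not the real register writes.

	#include <stdio.h>

	/* Hypothetical stand-in for the driver's ping-pong table bookkeeping. */
	struct qid_map_state {
		unsigned int total_entries;	/* software entry count after add/del */
		unsigned int tail[2];		/* tail last written to each HW table */
		unsigned int select;		/* HW table that will be written next */
	};

	static unsigned int umax(unsigned int a, unsigned int b)
	{
		return a > b ? a : b;
	}

	/* Add n entries, then compute how far the selected table must be rewritten. */
	static unsigned int add_entries(struct qid_map_state *s, unsigned int n)
	{
		unsigned int len;

		s->total_entries += n;
		len = umax(s->total_entries, s->tail[s->select]);
		s->tail[s->select] = s->total_entries;
		s->select = !s->select;
		return len;
	}

	/* Delete n entries; the rewrite must still cover the table's old tail. */
	static unsigned int del_entries(struct qid_map_state *s, unsigned int n)
	{
		unsigned int len = umax(s->total_entries, s->tail[s->select]);

		s->total_entries -= n;
		s->tail[s->select] = s->total_entries;
		s->select = !s->select;
		return len;
	}

	int main(void)
	{
		/* Both tables hold 32 entries; the next write goes to table 1. */
		struct qid_map_state s = { .total_entries = 32, .tail = { 32, 32 }, .select = 1 };

		printf("del 16 -> rewrite %u entries\n", del_entries(&s, 16));	/* 32 */
		printf("add  2 -> rewrite %u entries\n", add_entries(&s, 2));	/* 32, not 18 */
		return 0;
	}

Run against the worked example in the comment, both steps rewrite 32 entries, which is exactly the "use table 0's original tail" conclusion stated above.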
func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + + /*config ipro queue tbl*/ + for (i = vsi_info->queue_offset; + i < vsi_info->queue_offset + vsi_info->queue_num && i < queue_info->num_txrx_queues; + i++) + phy_ops->cfg_ipro_queue_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], 0, 0); +} + +static int nbl_res_queue_setup_rss(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 rss_entry_size, count; + int ret = 0; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + rss_entry_size = (vsi_info->queue_num + NBL_EPRO_RSS_ENTRY_SIZE_UNIT - 1) + / NBL_EPRO_RSS_ENTRY_SIZE_UNIT; + rss_entry_size = ilog2(roundup_pow_of_two(rss_entry_size)); + count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size; + + ret = nbl_res_queue_get_rss_ret_base(res_mgt, count, &vsi_info->rss_ret_base); + if (ret) + return -ENOSPC; + + vsi_info->rss_entry_size = rss_entry_size; + vsi_info->rss_vld = true; + + return 0; +} + +static void nbl_res_queue_remove_rss(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 rss_ret_base, rss_entry_size, count; + int i; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + + if (!vsi_info->rss_vld) + return; + + rss_ret_base = vsi_info->rss_ret_base; + rss_entry_size = vsi_info->rss_entry_size; + count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size; + + for (i = rss_ret_base; i < (rss_ret_base + count); i++) + clear_bit(i, queue_mgt->rss_ret_bitmap); + + vsi_info->rss_vld = false; +} + +static void nbl_res_queue_setup_queue_cfg(struct nbl_queue_mgt *queue_mgt, + struct nbl_queue_cfg_param *cfg_param, + struct nbl_txrx_queue_param *queue_param, + bool is_tx, u16 func_id) +{ + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + + cfg_param->desc = queue_param->dma; + cfg_param->size = queue_param->desc_num; + cfg_param->global_vector = queue_param->global_vector_id; + cfg_param->global_queue_id = queue_info->txrx_queues[queue_param->local_queue_id]; + + cfg_param->avail = queue_param->avail; + cfg_param->used = queue_param->used; + cfg_param->extend_header = queue_param->extend_header; + cfg_param->split = queue_param->split; + cfg_param->last_avail_idx = queue_param->cxt; + + cfg_param->intr_en = queue_param->intr_en; + cfg_param->intr_mask = queue_param->intr_mask; + + cfg_param->tx = is_tx; + cfg_param->rxcsum = queue_param->rxcsum; + cfg_param->half_offload_en = queue_param->half_offload_en; +} + +static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_cfg_param *queue_cfg, u16 func_id) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_vnet_queue_info_param param = {0}; + u16 global_queue_id = queue_cfg->global_queue_id; + u8 bus, dev, func; + + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func); + queue_info->split = queue_cfg->split; + queue_info->queue_size = queue_cfg->size; + + param.function_id = func; + param.device_id = dev; + param.bus_id = 
bus; + param.valid = 1; + + if (queue_cfg->intr_en) { + param.msix_idx = queue_cfg->global_vector; + param.msix_idx_valid = 1; + } + + if (queue_cfg->tx) { + phy_ops->set_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &param, + NBL_PAIR_ID_GET_TX(global_queue_id)); + phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + if (!queue_cfg->extend_header) + phy_ops->restore_dvn_context(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_cfg->split, + queue_cfg->last_avail_idx); + phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_cfg, global_queue_id); + + } else { + phy_ops->set_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &param, + NBL_PAIR_ID_GET_RX(global_queue_id)); + phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id); + if (!queue_cfg->extend_header) + phy_ops->restore_uvn_context(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_cfg->split, + queue_cfg->last_avail_idx); + phy_ops->cfg_rx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_cfg, + global_queue_id); + } +} + +static void nbl_res_queue_remove_all_hw_dq(struct nbl_resource_mgt *res_mgt, u16 func_id, + struct nbl_queue_vsi_info *vsi_info) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 start = vsi_info->queue_offset, end = vsi_info->queue_offset + vsi_info->queue_num; + u16 global_queue; + int i; + + for (i = start; i < end; i++) { + global_queue = queue_info->txrx_queues[i]; + + phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + } + + for (i = start; i < end; i++) { + global_queue = queue_info->txrx_queues[i]; + + phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + } + + for (i = start; i < end; i++) { + global_queue = queue_info->txrx_queues[i]; + queue_info->queues_context[NBL_PAIR_ID_GET_RX(i)] = + phy_ops->save_uvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue, queue_info->split, + queue_info->queue_size); + queue_info->queues_context[NBL_PAIR_ID_GET_TX(i)] = + phy_ops->save_dvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue, queue_info->split); + } + + for (i = start; i < end; i++) { + global_queue = queue_info->txrx_queues[i]; + phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + } + + for (i = start; i < end; i++) { + global_queue = queue_info->txrx_queues[i]; + phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_RX(global_queue)); + phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_TX(global_queue)); + } +} + +int nbl_res_queue_init_qid_map_table(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_mgt *queue_mgt, + struct nbl_phy_ops *phy_ops) +{ + struct nbl_qid_map_table invalid_qid_map; + u16 i; + + queue_mgt->qid_map_ready = 0; + queue_mgt->qid_map_select = NBL_MASTER_QID_MAP_TABLE; + + memset(&invalid_qid_map, 0, sizeof(invalid_qid_map)); + invalid_qid_map.local_qid = 0x1FF; + invalid_qid_map.notify_addr_l = 0x7FFFFF; + invalid_qid_map.notify_addr_h = 0xFFFFFFFF; + invalid_qid_map.global_qid = 0xFFF; + invalid_qid_map.ctrlq_flag = 0X1; + + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) + queue_mgt->qid_map_table[i] = 
invalid_qid_map; + + phy_ops->init_qid_map_table(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + return 0; +} + +static int nbl_res_queue_init_epro_rss_key(struct nbl_resource_mgt *res_mgt, + struct nbl_phy_ops *phy_ops) +{ + int ret = 0; + + ret = phy_ops->init_epro_rss_key(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + return ret; +} + +static int nbl_res_queue_init_epro_vpt_table(struct nbl_resource_mgt *res_mgt, u16 func_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + int pfid, vfid; + u16 vsi_id, vf_vsi_id; + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + + if (sriov_info->bdf != 0) { + /* init pf vsi */ + phy_ops->init_epro_vpt_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + + for (vfid = 0; vfid < sriov_info->num_vfs; vfid++) { + vf_vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + if (vf_vsi_id == 0xFFFF) + continue; + + phy_ops->init_epro_vpt_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vf_vsi_id); + } + } + + return 0; +} + +static int nbl_res_vsi_init_ipro_dn_sport_tbl(struct nbl_resource_mgt *res_mgt, + u16 func_id, u16 bmode, bool binit) + +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + int pfid, vfid; + u16 eth_id, vsi_id, vf_vsi_id; + int i; + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + + if (sriov_info->bdf != 0) { + eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); + + for (i = 0; i < NBL_VSI_MAX; i++) + phy_ops->cfg_ipro_dn_sport_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_id + i, eth_id, bmode, binit); + + for (vfid = 0; vfid < sriov_info->num_vfs; vfid++) { + vf_vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + if (vf_vsi_id == 0xFFFF) + continue; + + phy_ops->cfg_ipro_dn_sport_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vf_vsi_id, eth_id, bmode, binit); + } + } + + return 0; +} + +static int nbl_res_vsi_set_bridge_mode(void *priv, u16 func_id, u16 bmode) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_vsi_init_ipro_dn_sport_tbl(res_mgt, func_id, bmode, false); +} + +static int nbl_res_queue_init_rss(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_mgt *queue_mgt, + struct nbl_phy_ops *phy_ops) +{ + return nbl_res_queue_init_epro_rss_key(res_mgt, phy_ops); +} + +static int nbl_res_queue_alloc_txrx_queues(void *priv, u16 vsi_id, u16 queue_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u64 notify_addr; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + int ret = 0; + + notify_addr = nbl_res_get_func_bar_base_addr(res_mgt, func_id); + + ret = nbl_res_queue_setup_queue_info(res_mgt, func_id, queue_num); + if (ret) + goto setup_queue_info_fail; + + ret = nbl_res_queue_setup_qid_map_table_leonis(res_mgt, func_id, notify_addr); + if (ret) + goto setup_qid_map_fail; + + return 0; + +setup_qid_map_fail: + nbl_res_queue_remove_queue_info(res_mgt, func_id); +setup_queue_info_fail: + return ret; +} + +static void nbl_res_queue_free_txrx_queues(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + nbl_res_queue_remove_qid_map_table_leonis(res_mgt, func_id); + 
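nbl_res_queue_setup_rss() above sizes a VSI's RSS indirection region by rounding the queue count up to whole NBL_EPRO_RSS_ENTRY_SIZE_UNIT blocks and then up to a power of two of such blocks; the same value is later reported by get_rxfh_indir_size. A small user-space sketch of that arithmetic follows; the unit value here is a made-up example, not the hardware constant.

	#include <stdio.h>

	#define EXAMPLE_RSS_ENTRY_SIZE_UNIT 16U	/* hypothetical block size */

	/* Smallest order with (1 << order) >= v, like ilog2(roundup_pow_of_two(v)). */
	static unsigned int ceil_log2(unsigned int v)
	{
		unsigned int order = 0;

		while ((1U << order) < v)
			order++;
		return order;
	}

	static unsigned int rss_indir_entries(unsigned int queue_num)
	{
		unsigned int blocks = (queue_num + EXAMPLE_RSS_ENTRY_SIZE_UNIT - 1) /
				      EXAMPLE_RSS_ENTRY_SIZE_UNIT;

		return EXAMPLE_RSS_ENTRY_SIZE_UNIT << ceil_log2(blocks);
	}

	int main(void)
	{
		unsigned int qps[] = { 1, 8, 16, 17, 40 };
		unsigned int i;

		for (i = 0; i < sizeof(qps) / sizeof(qps[0]); i++)
			printf("%u queues -> %u indirection entries\n",
			       qps[i], rss_indir_entries(qps[i]));
		return 0;
	}

With a unit of 16 this yields 16, 16, 16, 32 and 64 entries, which is why nbl_res_queue_get_rss_ret_base() searches for a contiguous, power-of-two-sized run in rss_ret_bitmap.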
nbl_res_queue_remove_queue_info(res_mgt, func_id); +} + +static int nbl_res_queue_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_cfg_param cfg_param = {0}; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, param->vsi_id); + + nbl_res_queue_setup_queue_cfg(NBL_RES_MGT_TO_QUEUE_MGT(res_mgt), + &cfg_param, param, is_tx, func_id); + + nbl_res_queue_setup_hw_dq(res_mgt, &cfg_param, func_id); + return 0; +} + +static void nbl_res_queue_remove_all_queues(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_vsi_info *vsi_info = NULL; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + + nbl_res_queue_remove_all_hw_dq(res_mgt, func_id, vsi_info); +} + +static int nbl_res_queue_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = NULL; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + vsi_info = &queue_info->vsi_info[vsi_index]; + + memset(vsi_info, 0, sizeof(*vsi_info)); + + vsi_info->vld = 1; + vsi_info->vsi_index = vsi_index; + vsi_info->vsi_id = vsi_id; + vsi_info->queue_offset = queue_offset; + vsi_info->queue_num = queue_num; + + return 0; +} + +static int nbl_res_queue_cfg_dsch(void *priv, u16 vsi_id, bool vld) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info; + u16 group_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); /* group_id is same with eth_id */ + int i, ret = 0; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + vsi_info->net_id = nbl_res_queue_get_net_id(func_id, vsi_info->vsi_index); + + if (!vld) + phy_ops->deactive_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id); + + for (i = vsi_info->queue_offset; i < vsi_info->queue_num + vsi_info->queue_offset; i++) { + phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], vsi_info->net_id, vld); + } + + ret = phy_ops->cfg_dsch_net_to_group(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_info->net_id, group_id, vld); + if (ret) + return ret; + + if (vld) + phy_ops->active_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id); + + return 0; +} + +static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + if 
(real_qps == vsi_info->curr_qps) + return 0; + + if (real_qps) + phy_ops->cfg_epro_rss_ret(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_info->rss_ret_base, + vsi_info->rss_entry_size, real_qps, + queue_info->txrx_queues + vsi_info->queue_offset); + + if (!vsi_info->curr_qps) + phy_ops->set_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + vsi_info->rss_ret_base, vsi_info->rss_entry_size); + + vsi_info->curr_qps = real_qps; + vsi_info->curr_qps_static = real_qps; + return 0; +} + +static void nbl_res_queue_remove_cqs(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_vsi_info *vsi_info = NULL; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + + phy_ops->clear_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + + vsi_info->curr_qps = 0; +} + +static int nbl_res_queue_init_switch(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) + phy_ops->setup_queue_switch(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), i); + + return 0; +} + +static int nbl_res_queue_init(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt; + struct nbl_phy_ops *phy_ops; + int i, ret = 0; + + if (!res_mgt) + return -EINVAL; + + queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + ret = nbl_res_queue_init_qid_map_table(res_mgt, queue_mgt, phy_ops); + if (ret) + goto init_queue_fail; + + ret = nbl_res_queue_init_rss(res_mgt, queue_mgt, phy_ops); + if (ret) + goto init_queue_fail; + + ret = nbl_res_queue_init_switch(res_mgt); + if (ret) + goto init_queue_fail; + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + nbl_res_queue_init_epro_vpt_table(res_mgt, i); + nbl_res_vsi_init_ipro_dn_sport_tbl(res_mgt, i, BRIDGE_MODE_VEB, true); + } + phy_ops->init_pfc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), NBL_MAX_ETHERNET); + + return 0; + +init_queue_fail: + return ret; +} + +static int nbl_res_queue_get_queue_err_stats(void *priv, u16 func_id, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, + bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops; + u16 global_queue_id; + + if (queue_id >= queue_info->num_txrx_queues) + return -EINVAL; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + global_queue_id = queue_info->txrx_queues[queue_id]; + + if (is_tx) + phy_ops->get_tx_queue_err_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_err_stats); + else + phy_ops->get_rx_queue_err_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_queue_id, queue_err_stats); + + return 0; +} + +static void nbl_res_queue_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_vsi_info *vsi_info = NULL; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + + *rxfh_indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size; +} + +static void nbl_res_queue_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir) +{ + struct nbl_resource_mgt 
*res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_vsi_info *vsi_info = NULL; + int i, j; + u32 rxfh_indir_size; + u16 queue_num; + + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return; + + queue_num = vsi_info->curr_qps_static ? vsi_info->curr_qps_static : vsi_info->queue_num; + rxfh_indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size; + + for (i = 0, j = 0; i < rxfh_indir_size; i++) { + indir[i] = j; + j++; + if (j == queue_num) + j = 0; + } +} + +static void nbl_res_queue_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size) +{ + *rxfh_rss_key_size = NBL_EPRO_RSS_SK_SIZE; +} + +static void nbl_res_rss_key_reverse_order(u8 *key) +{ + u8 temp; + int i; + + for (i = 0; i < (NBL_EPRO_RSS_PER_KEY_SIZE / 2); i++) { + temp = key[i]; + key[i] = key[NBL_EPRO_RSS_PER_KEY_SIZE - 1 - i]; + key[NBL_EPRO_RSS_PER_KEY_SIZE - 1 - i] = temp; + } +} + +static void nbl_res_queue_get_rss_key(void *priv, u8 *rss_key) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int i; + + phy_ops->read_rss_key(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), rss_key); + + for (i = 0; i < NBL_EPRO_RSS_KEY_NUM; i++) + nbl_res_rss_key_reverse_order(rss_key + i * NBL_EPRO_RSS_PER_KEY_SIZE); +} + +static void nbl_res_queue_get_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->get_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, alg_sel); +} + +static void nbl_res_queue_clear_queues(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + + nbl_res_queue_remove_rss(priv, vsi_id); + nbl_res_queue_remove_q2vsi(priv, vsi_id); + if (!queue_info->num_txrx_queues) + return; + + nbl_res_queue_remove_cqs(res_mgt, vsi_id); + nbl_res_queue_cfg_dsch(res_mgt, vsi_id, false); + nbl_res_queue_remove_all_queues(res_mgt, vsi_id); + nbl_res_queue_free_txrx_queues(res_mgt, vsi_id); +} + +/* for pmd driver */ +static u16 nbl_res_queue_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + + if (!queue_info->num_txrx_queues) + return 0xffff; + + return queue_info->txrx_queues[local_qid]; +} + +static u16 nbl_get_adapt_desc_gother_level(u16 last_level, u64 rates) +{ + switch (last_level) { + case NBL_ADAPT_DESC_GOTHER_LEVEL0: + if (rates > NBL_ADAPT_DESC_GOTHER_LEVEL1_TH) + return NBL_ADAPT_DESC_GOTHER_LEVEL1; + else + return NBL_ADAPT_DESC_GOTHER_LEVEL0; + case NBL_ADAPT_DESC_GOTHER_LEVEL1: + if (rates > NBL_ADAPT_DESC_GOTHER_LEVEL1_DOWNGRADE_TH) + return NBL_ADAPT_DESC_GOTHER_LEVEL1; + else + return NBL_ADAPT_DESC_GOTHER_LEVEL0; + default: + return NBL_ADAPT_DESC_GOTHER_LEVEL0; + } +} + +static u16 nbl_get_adapt_desc_gother_timeout(u16 level) +{ + switch (level) { + case NBL_ADAPT_DESC_GOTHER_LEVEL0: + return NBL_ADAPT_DESC_GOTHER_LEVEL0_TIMEOUT; + case NBL_ADAPT_DESC_GOTHER_LEVEL1: + return NBL_ADAPT_DESC_GOTHER_LEVEL1_TIMEOUT; + default: + 
return NBL_ADAPT_DESC_GOTHER_LEVEL0_TIMEOUT; + } +} + +static void nbl_res_queue_adapt_desc_gother(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_adapt_desc_gother *adapt_desc_gother = &queue_mgt->adapt_desc_gother; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u32 last_uvn_desc_rd_entry = adapt_desc_gother->uvn_desc_rd_entry; + u64 last_get_stats_jiffies = adapt_desc_gother->get_desc_stats_jiffies; + u64 time_diff; + u32 uvn_desc_rd_entry; + u32 rx_rate; + u16 level, last_level, timeout; + + last_level = adapt_desc_gother->level; + time_diff = jiffies - last_get_stats_jiffies; + uvn_desc_rd_entry = phy_ops->get_uvn_desc_entry_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + rx_rate = (uvn_desc_rd_entry - last_uvn_desc_rd_entry) / time_diff * HZ; + adapt_desc_gother->get_desc_stats_jiffies = jiffies; + adapt_desc_gother->uvn_desc_rd_entry = uvn_desc_rd_entry; + + level = nbl_get_adapt_desc_gother_level(last_level, rx_rate); + if (level != last_level) { + timeout = nbl_get_adapt_desc_gother_timeout(level); + phy_ops->set_uvn_desc_wr_timeout(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), timeout); + adapt_desc_gother->level = level; + } +} + +static void nbl_res_flr_clear_queues(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id + NBL_MAX_PF; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + + if (nbl_res_vf_is_active(priv, func_id)) + nbl_res_queue_clear_queues(priv, vsi_id); +} + +static int nbl_res_queue_restore_tx_queue(struct nbl_resource_mgt *res_mgt, u16 vsi_id, + u16 local_queue_id, dma_addr_t dma) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_info *queue_info; + struct nbl_queue_cfg_param queue_cfg = {0}; + u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + queue_info = &queue_mgt->queue_info[func_id]; + global_queue = queue_info->txrx_queues[local_queue_id]; + + phy_ops->get_tx_queue_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); + /* Rectify size, in register it is log2(size) */ + queue_cfg.size = queue_info->queue_size; + /* DMA addr is realloced, updated it */ + queue_cfg.desc = dma; + + phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + + phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + + phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); + + return 0; +} + +static int nbl_res_queue_restore_rx_queue(struct nbl_resource_mgt *res_mgt, u16 vsi_id, + u16 local_queue_id, dma_addr_t dma) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_info *queue_info; + struct nbl_queue_cfg_param queue_cfg = {0}; + u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + queue_info = &queue_mgt->queue_info[func_id]; + global_queue = queue_info->txrx_queues[local_queue_id]; + + phy_ops->get_rx_queue_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); + /* Rectify size, in register it is log2(size) */ + queue_cfg.size = queue_info->queue_size; + /* DMA addr is realloced, updated it */ + queue_cfg.desc = dma; + + 
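nbl_res_queue_adapt_desc_gother() above estimates the receive descriptor rate from the UVN counter delta scaled by jiffies/HZ, then moves between the two gathering levels with hysteresis: it upgrades only above NBL_ADAPT_DESC_GOTHER_LEVEL1_TH and downgrades only below the lower NBL_ADAPT_DESC_GOTHER_LEVEL1_DOWNGRADE_TH, so the descriptor write-back timeout is not toggled by small fluctuations. A standalone sketch of just the hysteresis, using the threshold values from nbl_queue_leonis.h and made-up sample rates:

	#include <stdio.h>

	#define LEVEL0			0
	#define LEVEL1			1
	#define LEVEL1_TH		1000000ULL	/* upgrade threshold */
	#define LEVEL1_DOWNGRADE_TH	700000ULL	/* downgrade threshold */

	/* Same shape as nbl_get_adapt_desc_gother_level(): hysteresis between levels. */
	static unsigned int next_level(unsigned int last_level, unsigned long long rate)
	{
		if (last_level == LEVEL1)
			return rate > LEVEL1_DOWNGRADE_TH ? LEVEL1 : LEVEL0;
		return rate > LEVEL1_TH ? LEVEL1 : LEVEL0;
	}

	int main(void)
	{
		/* descriptors read per second over a few sampling periods */
		unsigned long long rates[] = { 500000, 1200000, 800000, 600000 };
		unsigned int level = LEVEL0;
		unsigned int i;

		for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
			level = next_level(level, rates[i]);
			printf("rate %llu -> level %u\n", rates[i], level);
		}
		return 0;
	}

The 800000 sample stays at level 1 even though it is below the upgrade threshold, which is the hysteresis the two thresholds provide.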
phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + + phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue); + + phy_ops->cfg_rx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue); + + return 0; +} + +static int nbl_res_queue_restore_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + switch (type) { + case NBL_TX: + return nbl_res_queue_restore_tx_queue(res_mgt, vsi_id, local_queue_id, dma); + case NBL_RX: + return nbl_res_queue_restore_rx_queue(res_mgt, vsi_id, local_queue_id, dma); + default: + break; + } + + return -EINVAL; +} + +static u16 nbl_res_queue_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + int i; + + queue_info = &queue_mgt->queue_info[func_id]; + + if (queue_info->txrx_queues) + for (i = 0; i < queue_info->num_txrx_queues; i++) + if (global_queue_id == queue_info->txrx_queues[i]) + return i; + + return U16_MAX; +} + +/* NBL_QUEUE_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_QUEUE_OPS_TBL \ +do { \ + NBL_QUEUE_SET_OPS(alloc_txrx_queues, nbl_res_queue_alloc_txrx_queues); \ + NBL_QUEUE_SET_OPS(free_txrx_queues, nbl_res_queue_free_txrx_queues); \ + NBL_QUEUE_SET_OPS(register_vsi2q, nbl_res_queue_register_vsi2q); \ + NBL_QUEUE_SET_OPS(setup_q2vsi, nbl_res_queue_setup_q2vsi); \ + NBL_QUEUE_SET_OPS(remove_q2vsi, nbl_res_queue_remove_q2vsi); \ + NBL_QUEUE_SET_OPS(setup_rss, nbl_res_queue_setup_rss); \ + NBL_QUEUE_SET_OPS(remove_rss, nbl_res_queue_remove_rss); \ + NBL_QUEUE_SET_OPS(setup_queue, nbl_res_queue_setup_queue); \ + NBL_QUEUE_SET_OPS(remove_all_queues, nbl_res_queue_remove_all_queues); \ + NBL_QUEUE_SET_OPS(cfg_dsch, nbl_res_queue_cfg_dsch); \ + NBL_QUEUE_SET_OPS(setup_cqs, nbl_res_queue_setup_cqs); \ + NBL_QUEUE_SET_OPS(remove_cqs, nbl_res_queue_remove_cqs); \ + NBL_QUEUE_SET_OPS(queue_init, nbl_res_queue_init); \ + NBL_QUEUE_SET_OPS(get_queue_err_stats, nbl_res_queue_get_queue_err_stats); \ + NBL_QUEUE_SET_OPS(get_rxfh_indir_size, nbl_res_queue_get_rxfh_indir_size); \ + NBL_QUEUE_SET_OPS(get_rxfh_indir, nbl_res_queue_get_rxfh_indir); \ + NBL_QUEUE_SET_OPS(get_rxfh_rss_key_size, nbl_res_queue_get_rxfh_rss_key_size); \ + NBL_QUEUE_SET_OPS(get_rxfh_rss_key, nbl_res_queue_get_rss_key); \ + NBL_QUEUE_SET_OPS(get_rss_alg_sel, nbl_res_queue_get_rss_alg_sel); \ + NBL_QUEUE_SET_OPS(clear_queues, nbl_res_queue_clear_queues); \ + NBL_QUEUE_SET_OPS(get_vsi_global_queue_id, nbl_res_queue_get_vsi_global_qid); \ + NBL_QUEUE_SET_OPS(adapt_desc_gother, nbl_res_queue_adapt_desc_gother); \ + NBL_QUEUE_SET_OPS(flr_clear_queues, nbl_res_flr_clear_queues); \ + NBL_QUEUE_SET_OPS(restore_hw_queue, nbl_res_queue_restore_hw_queue); \ + NBL_QUEUE_SET_OPS(get_local_queue_id, nbl_res_queue_get_local_queue_id); \ + NBL_QUEUE_SET_OPS(set_bridge_mode, nbl_res_vsi_set_bridge_mode); \ +} while (0) + +int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *res_ops) +{ +#define NBL_QUEUE_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_QUEUE_OPS_TBL; +#undef NBL_QUEUE_SET_OPS + + return 0; +} + +void nbl_queue_remove_ops_leonis(struct 
nbl_resource_ops *res_ops) +{ +#define NBL_QUEUE_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_QUEUE_OPS_TBL; +#undef NBL_QUEUE_SET_OPS +} + +void nbl_queue_mgt_init_leonis(struct nbl_queue_mgt *queue_mgt) +{ + queue_mgt->qid_map_select = NBL_MASTER_QID_MAP_TABLE; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h new file mode 100644 index 000000000000..72fad47a3d87 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_QUEUE_LEONIS_H_ +#define _NBL_QUEUE_LEONIS_H_ + +#include "nbl_resource.h" + +#define NBL_QID_MAP_NOTIFY_ADDR_SHIFT (9) +#define NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN (23) + +#define NBL_ADAPT_DESC_GOTHER_LEVEL1_TH (1000000) /* 1000k */ +#define NBL_ADAPT_DESC_GOTHER_LEVEL1_DOWNGRADE_TH (700000) /* 700k */ +#define NBL_ADAPT_DESC_GOTHER_LEVEL0 (0) +#define NBL_ADAPT_DESC_GOTHER_LEVEL1 (1) + +#define NBL_ADAPT_DESC_GOTHER_LEVEL0_TIMEOUT (0x12c) +#define NBL_ADAPT_DESC_GOTHER_LEVEL1_TIMEOUT (0x960) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c new file mode 100644 index 000000000000..1550e3888591 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_resource_leonis.h" + +MODULE_VERSION(NBL_LEONIS_DRIVER_VERSION); + +static void nbl_res_setup_common_ops(struct nbl_resource_mgt *res_mgt) +{ +} + +static int nbl_res_pf_to_eth_id(struct nbl_resource_mgt *res_mgt, u16 pf_id) +{ + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + if (pf_id >= NBL_MAX_PF) + return 0; + + return eth_info->eth_id[pf_id]; +} + +static u32 nbl_res_get_pfvf_queue_num(struct nbl_resource_mgt *res_mgt, int pfid, int vfid) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; + u16 func_id = nbl_res_pfvfid_to_func_id(res_mgt, pfid, vfid); + u32 queue_num = 0; + + if (vfid >= 0) { + if (num_info->net_max_qp_num[func_id] != 0) + queue_num = num_info->net_max_qp_num[func_id]; + else + queue_num = num_info->vf_def_max_net_qp_num; + } else { + if (num_info->net_max_qp_num[func_id] != 0) + queue_num = num_info->net_max_qp_num[func_id]; + else + queue_num = num_info->pf_def_max_net_qp_num; + } + + if (queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) { + nbl_warn(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE, + "Invalid queue num %u for func %d, use default", queue_num, func_id); + queue_num = vfid >= 0 ? 
NBL_DEFAULT_VF_HW_QUEUE_NUM : NBL_DEFAULT_PF_HW_QUEUE_NUM; + } + + return queue_num; +} + +static void nbl_res_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + if (num_info->net_max_qp_num[func_id] != 0) + *queue_num = num_info->net_max_qp_num[func_id]; + else + *queue_num = num_info->pf_def_max_net_qp_num; + + *queue_size = NBL_DEFAULT_DESC_NUM; + + if (*queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) { + nbl_warn(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE, + "Invalid user queue num %d for func %d, use default", *queue_num, func_id); + *queue_num = NBL_DEFAULT_PF_HW_QUEUE_NUM; + } +} + +static int nbl_res_get_queue_num(struct nbl_resource_mgt *res_mgt, + u16 func_id, u16 *tx_queue_num, u16 *rx_queue_num) +{ + int pfid, vfid; + + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + + *tx_queue_num = nbl_res_get_pfvf_queue_num(res_mgt, pfid, vfid); + *rx_queue_num = nbl_res_get_pfvf_queue_num(res_mgt, pfid, vfid); + + return 0; +} + +static int nbl_res_save_vf_bar_info(struct nbl_resource_mgt *res_mgt, + u16 func_id, struct nbl_register_net_param *register_param) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + u64 pf_bar_start; + u16 pf_bdf; + u64 vf_bar_start; + u64 vf_bar_size; + u16 total_vfs; + u16 offset; + u16 stride; + + pf_bar_start = register_param->pf_bar_start; + if (pf_bar_start) { + sriov_info->pf_bar_start = pf_bar_start; + dev_info(dev, "sriov_info, pf_bar_start:%llx\n", sriov_info->pf_bar_start); + } + + pf_bdf = register_param->pf_bdf; + vf_bar_start = register_param->vf_bar_start; + vf_bar_size = register_param->vf_bar_size; + total_vfs = register_param->total_vfs; + offset = register_param->offset; + stride = register_param->stride; + + if (total_vfs) { + if (pf_bdf != sriov_info->bdf) { + dev_err(dev, "PF bdf donot equal, af record = %u, real pf bdf: %u\n", + sriov_info->bdf, pf_bdf); + return -EIO; + } + sriov_info->offset = offset; + sriov_info->stride = stride; + sriov_info->vf_bar_start = vf_bar_start; + sriov_info->vf_bar_len = vf_bar_size / total_vfs; + + dev_info(dev, "sriov_info, bdf:%x:%x.%x, num_vfs:%d\n", + PCI_BUS_NUM(pf_bdf), PCI_SLOT(pf_bdf & 0xff), + PCI_FUNC(pf_bdf & 0xff), sriov_info->num_vfs); + dev_info(dev, "start_vf_func_id:%d, offset:%d, stride:%d\n", + sriov_info->start_vf_func_id, offset, stride); + } + + return 0; +} + +static int nbl_res_prepare_vf_chan(struct nbl_resource_mgt *res_mgt, + u16 func_id, struct nbl_register_net_param *register_param) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + u16 pf_bdf; + u16 total_vfs; + u16 offset; + u16 stride; + u8 pf_bus; + u8 pf_devfn; + u16 vf_id; + u8 bus; + u8 devfn; + u8 devid; + u8 function; + u16 vf_func_id; + + pf_bdf = register_param->pf_bdf; + total_vfs = register_param->total_vfs; + offset = register_param->offset; + stride = register_param->stride; + + if (total_vfs) { + if (pf_bdf != sriov_info->bdf) { + dev_err(dev, "PF bdf donot equal, af record = %u, real pf bdf: %u\n", + sriov_info->bdf, pf_bdf); + return -EIO; + } + + /* 
Configure mailbox qinfo_map_table for the pf's all vf, + * so vf's mailbox is ready, vf can use mailbox. + */ + pf_bus = PCI_BUS_NUM(sriov_info->bdf); + pf_devfn = sriov_info->bdf & 0xff; + for (vf_id = 0; vf_id < sriov_info->num_vfs; vf_id++) { + vf_func_id = sriov_info->start_vf_func_id + vf_id; + + bus = pf_bus + ((pf_devfn + offset + stride * vf_id) >> 8); + devfn = (pf_devfn + offset + stride * vf_id) & 0xff; + devid = PCI_SLOT(devfn); + function = PCI_FUNC(devfn); + + phy_ops->cfg_mailbox_qinfo(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vf_func_id, bus, devid, function); + } + } + + return 0; +} + +static int nbl_res_update_active_vf_num(struct nbl_resource_mgt *res_mgt, u16 func_id, + bool add_flag) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_resource_info *resource_info = res_mgt->resource_info; + struct nbl_sriov_info *sriov_info = res_mgt->resource_info->sriov_info; + int pfid = 0; + int vfid = 0; + int ret; + + ret = nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "convert func id to pfvfid failed\n"); + return ret; + } + + if (vfid == U32_MAX) + return 0; + + if (add_flag) { + if (!test_bit(func_id, resource_info->func_bitmap)) { + sriov_info[pfid].active_vf_num++; + set_bit(func_id, resource_info->func_bitmap); + } + } else if (sriov_info[pfid].active_vf_num) { + if (test_bit(func_id, resource_info->func_bitmap)) { + sriov_info[pfid].active_vf_num--; + clear_bit(func_id, resource_info->func_bitmap); + } + } + + return 0; +} + +static int nbl_res_register_net(void *priv, u16 func_id, + struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + netdev_features_t csumo_features = 0; + netdev_features_t tso_features = 0; + u16 tx_queue_num, rx_queue_num; + u8 mac[ETH_ALEN] = {0}; + int ret = 0; + + csumo_features = NBL_FEATURE(NETIF_F_RXCSUM) | + NBL_FEATURE(NETIF_F_IP_CSUM) | + NBL_FEATURE(NETIF_F_IPV6_CSUM); + tso_features = NBL_FEATURE(NETIF_F_TSO) | + NBL_FEATURE(NETIF_F_TSO6) | + NBL_FEATURE(NETIF_F_GSO_UDP_L4); + + register_result->hw_features |= csumo_features | + tso_features | + NBL_FEATURE(NETIF_F_SG) | + NBL_FEATURE(NETIF_F_HW_TC); + register_result->features |= register_result->hw_features | + NBL_FEATURE(NETIF_F_HW_TC) | + NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER) | + NBL_FEATURE(NETIF_F_HW_VLAN_STAG_FILTER); + + register_result->max_mtu = NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD; + + if (func_id < NBL_MAX_PF) + nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, func_id)); + memcpy(register_result->mac, mac, ETH_ALEN); + + nbl_res_get_queue_num(res_mgt, func_id, &tx_queue_num, &rx_queue_num); + register_result->tx_queue_num = tx_queue_num; + register_result->rx_queue_num = rx_queue_num; + register_result->queue_size = NBL_DEFAULT_DESC_NUM; + + ret = nbl_res_update_active_vf_num(res_mgt, func_id, 1); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "change active vf num failed with ret: %d\n", + ret); + goto update_active_vf_fail; + } + + if (func_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) + return 0; + + ret = nbl_res_save_vf_bar_info(res_mgt, func_id, register_param); + if (ret) + goto save_vf_bar_info_fail; + + ret = nbl_res_prepare_vf_chan(res_mgt, func_id, register_param); + if (ret) + goto prepare_vf_chan_fail; + + nbl_res_open_sfp(res_mgt, nbl_res_pf_to_eth_id(res_mgt, func_id)); + 
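nbl_res_prepare_vf_chan() above derives each VF's bus and devfn from the PF's BDF plus the First VF Offset and VF Stride passed in at registration, which is the standard SR-IOV routing-ID arithmetic. A standalone sketch with hypothetical PF/offset/stride values:

	#include <stdio.h>

	/* Same helpers as the kernel's PCI_SLOT()/PCI_FUNC(). */
	#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)
	#define FUNC(devfn)	((devfn) & 0x07)

	int main(void)
	{
		/* Hypothetical PF at 03:00.0 with First VF Offset 4 and VF Stride 1. */
		unsigned int pf_bus = 0x03, pf_devfn = 0x00;
		unsigned int offset = 4, stride = 1;
		unsigned int vf_id;

		for (vf_id = 0; vf_id < 4; vf_id++) {
			unsigned int routing = pf_devfn + offset + stride * vf_id;
			unsigned int bus = pf_bus + (routing >> 8);
			unsigned int devfn = routing & 0xff;

			printf("vf %u -> %02x:%02x.%x\n", vf_id, bus, SLOT(devfn), FUNC(devfn));
		}
		return 0;
	}

This is why the driver can program each VF's mailbox qinfo entry before the VF driver ever probes: the VF's bus/device/function is fully determined by the PF BDF and the SR-IOV capability fields.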
+ return ret; + +prepare_vf_chan_fail: +save_vf_bar_info_fail: +update_active_vf_fail: + return -EIO; +} + +static int nbl_res_unregister_net(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_update_active_vf_num(res_mgt, func_id, 0); +} + +static u16 nbl_res_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_func_id_to_vsi_id(res_mgt, func_id, type); +} + +static void nbl_res_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u16 pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + *eth_mode = eth_info->eth_num; + if (pf_id < eth_info->eth_num) + *eth_id = eth_info->eth_id[pf_id]; + /* if pf_id > eth_num, use eth_id 0 */ + else + *eth_id = eth_info->eth_id[0]; +} + +static u8 __iomem *nbl_res_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_hw_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), size); +} + +static u64 nbl_res_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + return nbl_res_get_func_bar_base_addr(res_mgt, func_id); +} + +static u16 nbl_res_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); +} + +static void nbl_res_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + nbl_res_func_id_to_bdf(res_mgt, func_id, bus, dev, function); +} + +static u32 nbl_res_check_active_vf(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_sriov_info *sriov_info = res_mgt->resource_info->sriov_info; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int pfid = 0; + int vfid = 0; + int ret; + + ret = nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "convert func id to pfvfid failed\n"); + return ret; + } + + return sriov_info[pfid].active_vf_num; +} + +static void nbl_res_get_base_mac_addr(void *priv, u8 *mac) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, 0)); +} + +static u32 nbl_res_get_chip_temperature(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_chip_temperature(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static u32 nbl_res_get_chip_temperature_max(void *priv) +{ + return NBL_LEONIS_TEMP_MAX; +} + +static u32 nbl_res_get_chip_temperature_crit(void *priv) +{ + return NBL_LEONIS_TEMP_CRIT; +} + +static void nbl_res_get_reg_dump(void *priv, u32 *data, u32 len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + 
phy_ops->get_reg_dump(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, len); +} + +static int nbl_res_get_reg_dump_len(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_reg_dump_len(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_res_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnomal_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->process_abnormal_event(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), abnomal_info); +} + +static int nbl_res_get_driver_info(void *priv, struct nbl_driver_info *driver_info) +{ + strscpy(driver_info->driver_version, NBL_LEONIS_DRIVER_VERSION, + sizeof(driver_info->driver_version)); + return 1; +} + +static int nbl_res_get_p4_info(void *priv, char *verify_code) +{ + /* We actually only care about the snic-v3r1 part, won't check m181xx */ + strscpy(verify_code, "snic_v3r1_m181xx", NBL_P4_NAME_LEN); + + return NBL_P4_DEFAULT; +} + +static int nbl_res_get_p4_used(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + return resource_info->p4_used; +} + +static int nbl_res_set_p4_used(void *priv, int p4_type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + resource_info->p4_used = p4_type; + + return 0; +} + +static void nbl_res_get_board_info(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + memcpy(board_info, &res_mgt->resource_info->board_info, sizeof(*board_info)); +} + +static u16 nbl_res_get_vf_base_vsi_id(void *priv, u16 pf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id, 0, NBL_VSI_DATA); +} + +static void nbl_res_flr_clear_net(void *priv, u16 vf_id) +{ + u16 func_id = vf_id + NBL_MAX_PF; + + if (nbl_res_vf_is_active(priv, func_id)) + nbl_res_unregister_net(priv, func_id); +} + +static int nbl_res_get_board_id(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + return NBL_COMMON_TO_BOARD_ID(common); +} + +static struct nbl_resource_ops res_ops = { + .register_net = nbl_res_register_net, + .unregister_net = nbl_res_unregister_net, + .check_active_vf = nbl_res_check_active_vf, + .get_base_mac_addr = nbl_res_get_base_mac_addr, + .get_vsi_id = nbl_res_get_vsi_id, + .get_eth_id = nbl_res_get_eth_id, + .get_user_queue_info = nbl_res_get_user_queue_info, + .get_hw_addr = nbl_res_get_hw_addr, + .get_real_hw_addr = nbl_res_get_real_hw_addr, + .get_function_id = nbl_res_get_function_id, + .get_real_bdf = nbl_res_get_real_bdf, + .get_product_flex_cap = nbl_res_get_flex_capability, + .get_product_fix_cap = nbl_res_get_fix_capability, + .get_chip_temperature = nbl_res_get_chip_temperature, + .get_chip_temperature_max = nbl_res_get_chip_temperature_max, + .get_chip_temperature_crit = nbl_res_get_chip_temperature_crit, + .get_driver_info = nbl_res_get_driver_info, + .get_board_info = nbl_res_get_board_info, + .flr_clear_net = nbl_res_flr_clear_net, + + .get_reg_dump = nbl_res_get_reg_dump, + .get_reg_dump_len = nbl_res_get_reg_dump_len, + 
.process_abnormal_event = nbl_res_process_abnormal_event, + + .get_p4_info = nbl_res_get_p4_info, + .get_p4_used = nbl_res_get_p4_used, + .set_p4_used = nbl_res_set_p4_used, + .get_vf_base_vsi_id = nbl_res_get_vf_base_vsi_id, + + .get_board_id = nbl_res_get_board_id, +}; + +static struct nbl_res_product_ops product_ops = { + .queue_mgt_init = nbl_queue_mgt_init_leonis, + .setup_qid_map_table = nbl_res_queue_setup_qid_map_table_leonis, + .remove_qid_map_table = nbl_res_queue_remove_qid_map_table_leonis, + .init_qid_map_table = nbl_res_queue_init_qid_map_table, +}; + +static bool is_ops_inited; +static int nbl_res_setup_res_mgt(struct nbl_common_info *common, + struct nbl_resource_mgt_leonis **res_mgt_leonis) +{ + struct device *dev; + struct nbl_resource_info *resource_info; + + dev = NBL_COMMON_TO_DEV(common); + *res_mgt_leonis = devm_kzalloc(dev, sizeof(struct nbl_resource_mgt_leonis), GFP_KERNEL); + if (!*res_mgt_leonis) + return -ENOMEM; + NBL_RES_MGT_TO_COMMON(&(*res_mgt_leonis)->res_mgt) = common; + + resource_info = devm_kzalloc(dev, sizeof(struct nbl_resource_info), GFP_KERNEL); + if (!resource_info) + return -ENOMEM; + NBL_RES_MGT_TO_RES_INFO(&(*res_mgt_leonis)->res_mgt) = resource_info; + + return 0; +} + +static void nbl_res_remove_res_mgt(struct nbl_common_info *common, + struct nbl_resource_mgt_leonis **res_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, NBL_RES_MGT_TO_RES_INFO(&(*res_mgt_leonis)->res_mgt)); + devm_kfree(dev, *res_mgt_leonis); + *res_mgt_leonis = NULL; +} + +static void nbl_res_remove_ops(struct device *dev, struct nbl_resource_ops_tbl **res_ops_tbl) +{ + devm_kfree(dev, *res_ops_tbl); + *res_ops_tbl = NULL; +} + +static int nbl_res_setup_ops(struct device *dev, struct nbl_resource_ops_tbl **res_ops_tbl, + struct nbl_resource_mgt_leonis *res_mgt_leonis) +{ + int ret = 0; + + *res_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_resource_ops_tbl), GFP_KERNEL); + if (!*res_ops_tbl) + return -ENOMEM; + + if (!is_ops_inited) { + ret = nbl_flow_setup_ops_leonis(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_queue_setup_ops_leonis(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_txrx_setup_ops(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_intr_setup_ops(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_vsi_setup_ops(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_adminq_setup_ops(&res_ops); + if (ret) + goto setup_fail; + + is_ops_inited = true; + } + + NBL_RES_OPS_TBL_TO_OPS(*res_ops_tbl) = &res_ops; + NBL_RES_OPS_TBL_TO_PRIV(*res_ops_tbl) = res_mgt_leonis; + + return 0; + +setup_fail: + nbl_res_remove_ops(dev, res_ops_tbl); + return -EAGAIN; +} + +static int nbl_res_ctrl_dev_setup_eth_info(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_eth_info *eth_info; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u32 eth_num = 0; + u32 eth_bitmap, eth_id; + int i; + + eth_info = devm_kzalloc(dev, sizeof(struct nbl_eth_info), GFP_KERNEL); + if (!eth_info) + return -ENOMEM; + + NBL_RES_MGT_TO_ETH_INFO(res_mgt) = eth_info; + + eth_info->eth_num = (u8)phy_ops->get_fw_eth_num(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + eth_bitmap = phy_ops->get_fw_eth_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + /* for 2 eth port board, the eth_id is 0, 2 */ + for (i = 0; i < NBL_MAX_ETHERNET; i++) { + if ((1 << i) & eth_bitmap) { + set_bit(i, eth_info->eth_bitmap); + eth_info->eth_id[eth_num] = i; + eth_info->logic_eth_id[i] = eth_num; + eth_num++; + } + } + + 
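The loop above in nbl_res_ctrl_dev_setup_eth_info() converts the firmware's port bitmap into two lookup arrays: eth_id[] maps a logical port index to its physical lane, and logic_eth_id[] maps back. A user-space sketch with a hypothetical 2-port bitmap of 0x5 (lanes 0 and 2, matching the in-code comment):

	#include <stdio.h>

	#define MAX_ETH 4

	int main(void)
	{
		unsigned int eth_bitmap = 0x5;	/* hypothetical: lanes 0 and 2 populated */
		unsigned int eth_id[MAX_ETH] = {0}, logic_eth_id[MAX_ETH] = {0};
		unsigned int eth_num = 0, i;

		for (i = 0; i < MAX_ETH; i++) {
			if (eth_bitmap & (1U << i)) {
				eth_id[eth_num] = i;		/* logical -> physical lane */
				logic_eth_id[i] = eth_num;	/* physical lane -> logical */
				eth_num++;
			}
		}

		for (i = 0; i < eth_num; i++)
			printf("logical port %u -> physical lane %u\n", i, eth_id[i]);
		return 0;
	}

For the 0x5 bitmap this prints logical port 0 on lane 0 and logical port 1 on lane 2, which is the "eth_id is 0, 2" case the comment describes.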
for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + /* if pf_id <= eth_num, the pf relate corresponding eth_id*/ + if (i < eth_num) { + eth_id = eth_info->eth_id[i]; + eth_info->pf_bitmap[eth_id] |= BIT(i); + } + /* if pf_id > eth_num, the pf relate eth 0*/ + else + eth_info->pf_bitmap[0] |= BIT(i); + } + + return 0; +} + +static int nbl_res_ctrl_dev_sriov_info_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_sriov_info *sriov_info; + u32 vf_fid, vf_startid, vf_endid; + u16 func_id; + u16 function; + + sriov_info = devm_kcalloc(dev, NBL_RES_MGT_TO_PF_NUM(res_mgt), + sizeof(struct nbl_sriov_info), GFP_KERNEL); + if (!sriov_info) + return -ENOMEM; + + NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) = sriov_info; + + for (func_id = 0; func_id < NBL_RES_MGT_TO_PF_NUM(res_mgt); func_id++) { + sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + function = NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id; + + sriov_info->bdf = PCI_DEVID(common->bus, + PCI_DEVFN(common->devid, function)); + vf_fid = phy_ops->get_host_pf_fid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + func_id); + vf_startid = vf_fid & 0xFFFF; + vf_endid = (vf_fid >> 16) & 0xFFFF; + sriov_info->start_vf_func_id = vf_startid + NBL_MAX_PF_LEONIS; + sriov_info->num_vfs = vf_endid - vf_startid; + } + + return 0; +} + +static void nbl_res_ctrl_dev_sriov_info_remove(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_sriov_info **sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + if (!(*sriov_info)) + return; + + devm_kfree(dev, *sriov_info); + *sriov_info = NULL; +} + +static int nbl_res_ctrl_dev_vsi_info_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_vsi_info *vsi_info; + struct nbl_sriov_info *sriov_info; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i; + + vsi_info = devm_kcalloc(dev, NBL_RES_MGT_TO_PF_NUM(res_mgt), + sizeof(struct nbl_vsi_info), GFP_KERNEL); + if (!vsi_info) + return -ENOMEM; + + NBL_RES_MGT_TO_VSI_INFO(res_mgt) = vsi_info; + /** + * 1 two port(2pf) + * pf0,pf1(NBL_VSI_SERV_PF_DATA_TYPE) vsi is 0,512 + * pf0,pf1(NBL_VSI_SERV_PF_CTLR_TYPE) vsi is 1,513 + * pf0,pf1(NBL_VSI_SERV_PF_USER_TYPE) vsi is 2,514 + * pf0.vf0-pf0.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 3-258 + * pf1.vf0-pf1.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 515-770 + * pf2-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 259-264(if exist) + * 2 four port(4pf) + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_DATA_TYPE) vsi is 0,256,512,768 + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_CTLR_TYPE) vsi is 1,257,513,769 + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_USER_TYPE) vsi is 2,258,514,770 + * pf0.vf0-pf0.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 3-130 + * pf1.vf0-pf1.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 259-386 + * pf2.vf0-pf2.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 515-642 + * pf3.vf0-pf3.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 771-898 + * pf4-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 387-390(if exist) + */ + + vsi_info->num = eth_info->eth_num; + for (i = 0; i < vsi_info->num; i++) { + vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].base_id = i + * NBL_VSI_ID_GAP(vsi_info->num); + vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].num = 1; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].base_id = + 
vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].base_id + + vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].num; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].num = 1; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].base_id = + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].base_id + + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].num; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].num = 1; + vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].base_id = + vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].base_id + + vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].num; + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + i; + vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].num = sriov_info->num_vfs; + } + + /* pf_id >= eth_num, it belong pf0's switch */ + vsi_info->serv_info[0][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id = + vsi_info->serv_info[0][NBL_VSI_SERV_VF_DATA_TYPE].base_id + + vsi_info->serv_info[0][NBL_VSI_SERV_VF_DATA_TYPE].num; + vsi_info->serv_info[0][NBL_VSI_SERV_PF_EXTRA_TYPE].num = + NBL_RES_MGT_TO_PF_NUM(res_mgt) - vsi_info->num; + + return 0; +} + +static void nbl_res_ctrl_dev_remove_vsi_info(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_vsi_info **vsi_info = &NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + if (!(*vsi_info)) + return; + + devm_kfree(dev, *vsi_info); + *vsi_info = NULL; +} + +static int nbl_res_ring_num_info_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_net_ring_num_info *num_info = &resource_info->net_ring_num_info; + + num_info->pf_def_max_net_qp_num = NBL_DEFAULT_PF_HW_QUEUE_NUM; + num_info->vf_def_max_net_qp_num = NBL_DEFAULT_VF_HW_QUEUE_NUM; + + return 0; +} + +static int nbl_res_check_fw_working(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + unsigned long fw_pong_current = 0; + unsigned long seconds_current = 0; + unsigned long sleep_us = USEC_PER_MSEC; + u64 timeout_us = 100 * USEC_PER_MSEC; + ktime_t timeout; + + seconds_current = (unsigned long)ktime_get_real_seconds(); + phy_ops->set_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current - 1); + phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current); + + timeout = ktime_add_us(ktime_get(), timeout_us); + might_sleep_if((sleep_us) != 0); + + for (;;) { + fw_pong_current = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (fw_pong_current == seconds_current) + break; + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { + fw_pong_current = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + break; + } + if (sleep_us) + usleep_range((sleep_us >> 2) + 1, sleep_us); + } + + if (fw_pong_current == seconds_current) + return 0; + else + return -ETIMEDOUT; +} + +static int nbl_res_init_pf_num(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u32 pf_mask; + u32 pf_num = 0; + int i; + + pf_mask = phy_ops->get_host_pf_mask(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + for (i = 0; i < NBL_MAX_PF_LEONIS; i++) { + if (!(pf_mask & (1 << i))) + pf_num++; + else + break; + } + + NBL_RES_MGT_TO_PF_NUM(res_mgt) = pf_num; + + if (!pf_num) + return -1; + + return 0; +} + +static void nbl_res_init_board_info(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->get_board_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + &res_mgt->resource_info->board_info); +} + +static void 
nbl_res_stop(struct nbl_resource_mgt_leonis *res_mgt_leonis) +{ + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + + nbl_queue_mgt_stop(res_mgt); + nbl_txrx_mgt_stop(res_mgt); + nbl_intr_mgt_stop(res_mgt); + nbl_adminq_mgt_stop(res_mgt); + nbl_vsi_mgt_stop(res_mgt); + nbl_flow_mgt_stop_leonis(res_mgt); + nbl_res_ctrl_dev_remove_vsi_info(res_mgt); + nbl_res_ctrl_dev_sriov_info_remove(res_mgt); +} + +static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, + struct nbl_func_caps caps) +{ + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + + if (caps.has_ctrl) { + ret = nbl_res_check_fw_working(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "fw is not working"); + return ret; + } + + nbl_res_init_board_info(res_mgt); + + ret = nbl_res_init_pf_num(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "pf number is illegal"); + return ret; + } + + ret = nbl_res_ctrl_dev_sriov_info_init(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Failed to init sr_iov info"); + return ret; + } + + ret = nbl_res_ctrl_dev_setup_eth_info(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_res_ctrl_dev_vsi_info_init(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_res_ring_num_info_init(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_flow_mgt_start_leonis(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_queue_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_vsi_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_adminq_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_intr_mgt_start(res_mgt); + if (ret) + goto start_fail; + + nbl_res_set_flex_capability(res_mgt, NBL_SECURITY_ACCEL_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_DUMP_FLOW_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_HB_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_RESET_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_ADMINDQ_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_RESTOOL_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_ADAPT_DESC_GOTHER); + nbl_res_set_fix_capability(res_mgt, NBL_PROCESS_FLR_CAP); + } + + if (caps.has_net) { + ret = nbl_txrx_mgt_start(res_mgt); + if (ret) + goto start_fail; + } + + nbl_res_set_fix_capability(res_mgt, NBL_HWMON_TEMP_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_MAILBOX_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_ITR_DYNAMIC); + nbl_res_set_fix_capability(res_mgt, NBL_P4_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_KEEP_ALIVE); + + return 0; + +start_fail: + nbl_res_stop(res_mgt_leonis); + return ret; +} + +int nbl_res_init_leonis(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_resource_mgt_leonis **res_mgt_leonis; + struct nbl_resource_ops_tbl **res_ops_tbl; + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + int ret = 0; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + res_mgt_leonis = (struct nbl_resource_mgt_leonis **)&NBL_ADAPTER_TO_RES_MGT(adapter); + res_ops_tbl = &NBL_ADAPTER_TO_RES_OPS_TBL(adapter); + phy_ops_tbl = NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + ret = nbl_res_setup_res_mgt(common, res_mgt_leonis); + if (ret) + goto setup_mgt_fail; + + 
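/* mgt allocated; next hook up the generic helpers and the channel/phy ops tables, then start the resource layer */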
nbl_res_setup_common_ops(&(*res_mgt_leonis)->res_mgt); + NBL_RES_MGT_TO_CHAN_OPS_TBL(&(*res_mgt_leonis)->res_mgt) = chan_ops_tbl; + NBL_RES_MGT_TO_PHY_OPS_TBL(&(*res_mgt_leonis)->res_mgt) = phy_ops_tbl; + + NBL_RES_MGT_TO_PROD_OPS(&(*res_mgt_leonis)->res_mgt) = &product_ops; + + ret = nbl_res_start(*res_mgt_leonis, param->caps); + if (ret) + goto start_fail; + + ret = nbl_res_setup_ops(dev, res_ops_tbl, *res_mgt_leonis); + if (ret) + goto setup_ops_fail; + + return 0; + +setup_ops_fail: + nbl_res_stop(*res_mgt_leonis); +start_fail: + nbl_res_remove_res_mgt(common, res_mgt_leonis); +setup_mgt_fail: + return ret; +} + +void nbl_res_remove_leonis(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_resource_mgt_leonis **res_mgt; + struct nbl_resource_ops_tbl **res_ops_tbl; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + res_mgt = (struct nbl_resource_mgt_leonis **)&NBL_ADAPTER_TO_RES_MGT(adapter); + res_ops_tbl = &NBL_ADAPTER_TO_RES_OPS_TBL(adapter); + + nbl_res_remove_ops(dev, res_ops_tbl); + nbl_res_stop(*res_mgt); + nbl_res_remove_res_mgt(common, res_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h new file mode 100644 index 000000000000..ba1320dcb972 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_RESOURCE_LEONIS_H_ +#define _NBL_RESOURCE_LEONIS_H_ + +#include "nbl_resource.h" + +#define NBL_MAX_PF_LEONIS 8 +/* product NO(ASIC SNIC as 3)-V NO.R NO.B NO.SP NO */ +#define NBL_LEONIS_DRIVER_VERSION "3-3.1.120" + +int nbl_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt); +void nbl_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt); +int nbl_flow_setup_ops_leonis(struct nbl_resource_ops *resource_ops); +void nbl_flow_remove_ops_leonis(struct nbl_resource_ops *resource_ops); +int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *resource_ops); +void nbl_queue_remove_ops_leonis(struct nbl_resource_ops *resource_ops); + +void nbl_queue_mgt_init_leonis(struct nbl_queue_mgt *queue_mgt); +int nbl_res_queue_setup_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id, + u64 notify_addr); +void nbl_res_queue_remove_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id); +int nbl_res_queue_init_qid_map_table(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_mgt *queue_mgt, struct nbl_phy_ops *phy_ops); + +void nbl_intr_mgt_init_leonis(struct nbl_resource_mgt *res_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c new file mode 100644 index 000000000000..6445fc548383 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_interrupt.h" + +static int nbl_res_intr_destroy_msix_map(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev, *dma_dev; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + struct nbl_msix_map_table *msix_map_table; + u16 *interrupts; + u16 intr_num; + u16 i; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + dev = NBL_RES_MGT_TO_DEV(res_mgt); + dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + + /* use ctrl dev bdf */ + phy_ops->configure_msix_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, false, + 0, 0, 0, 0); + + intr_num = intr_mgt->func_intr_res[func_id].num_interrupts; + interrupts = intr_mgt->func_intr_res[func_id].interrupts; + + WARN_ON(!interrupts); + for (i = 0; i < intr_num; i++) { + if (interrupts[i] >= NBL_MAX_OTHER_INTERRUPT) + clear_bit(interrupts[i] - NBL_MAX_OTHER_INTERRUPT, + intr_mgt->interrupt_net_bitmap); + else + clear_bit(interrupts[i], intr_mgt->interrupt_others_bitmap); + + phy_ops->configure_msix_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, false, + interrupts[i], 0, 0, 0, false); + } + + kfree(interrupts); + intr_mgt->func_intr_res[func_id].interrupts = NULL; + intr_mgt->func_intr_res[func_id].num_interrupts = 0; + + msix_map_table = &intr_mgt->func_intr_res[func_id].msix_map_table; + dma_free_coherent(dma_dev, msix_map_table->size, msix_map_table->base_addr, + msix_map_table->dma); + msix_map_table->size = 0; + msix_map_table->base_addr = NULL; + msix_map_table->dma = 0; + + return ret; +} + +static int nbl_res_intr_configure_msix_map(void *priv, u16 func_id, u16 num_net_msix, + u16 num_others_msix, bool net_msix_mask_en) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev, *dma_dev; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + struct nbl_common_info *common; + struct nbl_msix_map_table *msix_map_table; + struct nbl_msix_map *msix_map_entries; + u16 *interrupts; + u16 requested; + u16 intr_index; + u16 i; + u8 bus, devid, function; + bool msix_mask_en; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + dev = NBL_RES_MGT_TO_DEV(res_mgt); + dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (intr_mgt->func_intr_res[func_id].interrupts) + nbl_res_intr_destroy_msix_map(priv, func_id); + + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &devid, &function); + + msix_map_table = &intr_mgt->func_intr_res[func_id].msix_map_table; + WARN_ON(msix_map_table->base_addr); + msix_map_table->size = sizeof(struct nbl_msix_map) * NBL_MSIX_MAP_TABLE_MAX_ENTRIES; + msix_map_table->base_addr = dma_alloc_coherent(dma_dev, msix_map_table->size, + &msix_map_table->dma, + GFP_ATOMIC | __GFP_ZERO); + if (!msix_map_table->base_addr) { + pr_err("Allocate DMA memory for function msix map table failed\n"); + msix_map_table->size = 0; + return -ENOMEM; + } + + requested = num_net_msix + num_others_msix; + interrupts = kcalloc(requested, sizeof(interrupts[0]), GFP_ATOMIC); + if (!interrupts) { + pr_err("Allocate function interrupts array failed\n"); + ret = -ENOMEM; + goto alloc_interrupts_err; + } + + intr_mgt->func_intr_res[func_id].interrupts = interrupts; + intr_mgt->func_intr_res[func_id].num_interrupts = requested; + + for (i = 0; i < num_net_msix; i++) { + intr_index = 
find_first_zero_bit(intr_mgt->interrupt_net_bitmap, + NBL_MAX_NET_INTERRUPT); + if (intr_index == NBL_MAX_NET_INTERRUPT) { + pr_err("There is no available interrupt left\n"); + ret = -EAGAIN; + goto get_interrupt_err; + } + interrupts[i] = intr_index + NBL_MAX_OTHER_INTERRUPT; + set_bit(intr_index, intr_mgt->interrupt_net_bitmap); + } + + for (i = num_net_msix; i < requested; i++) { + intr_index = find_first_zero_bit(intr_mgt->interrupt_others_bitmap, + NBL_MAX_OTHER_INTERRUPT); + if (intr_index == NBL_MAX_OTHER_INTERRUPT) { + pr_err("There is no available interrupt left\n"); + ret = -EAGAIN; + goto get_interrupt_err; + } + interrupts[i] = intr_index; + set_bit(intr_index, intr_mgt->interrupt_others_bitmap); + } + + msix_map_entries = msix_map_table->base_addr; + for (i = 0; i < requested; i++) { + msix_map_entries[i].global_msix_index = interrupts[i]; + msix_map_entries[i].valid = 1; + + if (i < num_net_msix && net_msix_mask_en) + msix_mask_en = 1; + else + msix_mask_en = 0; + phy_ops->configure_msix_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, true, + interrupts[i], bus, devid, function, msix_mask_en); + if (i < num_net_msix) + phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + interrupts[i], 0, 0); + } + + /* use ctrl dev bdf */ + phy_ops->configure_msix_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, true, + msix_map_table->dma, common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common)); + + return 0; + +get_interrupt_err: + while (i--) { + intr_index = interrupts[i]; + if (intr_index >= NBL_MAX_OTHER_INTERRUPT) + clear_bit(intr_index - NBL_MAX_OTHER_INTERRUPT, + intr_mgt->interrupt_net_bitmap); + else + clear_bit(intr_index, intr_mgt->interrupt_others_bitmap); + } + kfree(interrupts); + intr_mgt->func_intr_res[func_id].num_interrupts = 0; + intr_mgt->func_intr_res[func_id].interrupts = NULL; + +alloc_interrupts_err: + dma_free_coherent(dma_dev, msix_map_table->size, msix_map_table->base_addr, + msix_map_table->dma); + msix_map_table->size = 0; + msix_map_table->base_addr = NULL; + msix_map_table->dma = 0; + + return ret; +} + +static int nbl_res_intr_enable_mailbox_irq(void *priv, u16 func_id, u16 vector_id, bool enable_msix) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + u16 global_vector_id; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id]; + phy_ops->enable_mailbox_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, enable_msix, + global_vector_id); + + return 0; +} + +static int nbl_res_intr_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + u16 global_vector_id; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + global_vector_id = intr_mgt->func_intr_res[0].interrupts[vector_id]; + phy_ops->enable_abnormal_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), enable_msix, + global_vector_id); + return 0; +} + +static int nbl_res_intr_enable_msix_irq(void *priv, u16 global_vector_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->enable_msix_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_vector_id); + return 0; +} + +static u8 
*nbl_res_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_msix_irq_enable_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_vector_id, + irq_data); +} + +static u16 nbl_res_intr_get_global_vector(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + return intr_mgt->func_intr_res[func_id].interrupts[local_vector_id]; +} + +static u16 nbl_res_intr_get_msix_entry_id(void *priv, u16 vsi_id, u16 local_vector_id) +{ + return local_vector_id; +} + +static void nbl_res_intr_get_coalesce(void *priv, u16 func_id, u16 vector_id, + struct ethtool_coalesce *ec) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 global_vector_id; + u16 pnum = 0; + u16 rate = 0; + + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id]; + phy_ops->get_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_vector_id, &pnum, &rate); + /* tx and rx using the same interrupt */ + ec->tx_coalesce_usecs = rate; + ec->tx_max_coalesced_frames = pnum; + ec->rx_coalesce_usecs = rate; + ec->rx_max_coalesced_frames = pnum; +} + +static void nbl_res_intr_set_coalesce(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 pnum, u16 rate) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 global_vector_id; + int i; + + for (i = 0; i < num_net_msix; i++) { + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id + i]; + phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_vector_id, pnum, rate); + } +} + +static int nbl_res_intr_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + u16 global_vector_id; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + global_vector_id = intr_mgt->func_intr_res[0].interrupts[vector_id]; + phy_ops->enable_adminq_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), enable_msix, + global_vector_id); + return 0; +} + +static int nbl_res_intr_get_mbx_irq_num(void *priv) +{ + return 1; +} + +static int nbl_res_intr_get_adminq_irq_num(void *priv) +{ + return 1; +} + +static int nbl_res_intr_get_abnormal_irq_num(void *priv) +{ + return 1; +} + +static u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level) +{ + switch (last_level) { + case NBL_INTR_SUPPRESS_LEVEL0: + if (rates > NBL_INTR_SUPPRESS_LEVEL1_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL1; + else + return NBL_INTR_SUPPRESS_LEVEL0; + case NBL_INTR_SUPPRESS_LEVEL1: + if (rates > NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL1; + else + return NBL_INTR_SUPPRESS_LEVEL0; + default: + return NBL_INTR_SUPPRESS_LEVEL0; + } +} + +static void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level) +{ + struct 
nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 global_vector_id; + u16 pnum, rate; + int i; + + switch (level) { + case NBL_INTR_SUPPRESS_LEVEL1: + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + pnum = NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL1_100G_RATE; + } else { + pnum = NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL1_25G_RATE; + } + break; + default: + pnum = NBL_INTR_SUPPRESS_LEVEL0_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL0_RATE; + break; + } + for (i = 0; i < num_net_msix; i++) { + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id + i]; + phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_vector_id, pnum, rate); + } +} + +static void nbl_res_flr_clear_interrupt(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id + NBL_MAX_PF; + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + if (intr_mgt->func_intr_res[func_id].interrupts) + nbl_res_intr_destroy_msix_map(priv, func_id); +} + +static void nbl_res_intr_unmask(struct nbl_resource_mgt *res_mgt, u16 interrupts_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->enable_msix_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), interrupts_id); +} + +static void nbl_res_unmask_all_interrupts(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + int i, j; + + for (i = 0; i < NBL_MAX_PF; i++) { + if (intr_mgt->func_intr_res[i].interrupts) { + for (j = 0; j < intr_mgt->func_intr_res[i].num_interrupts; j++) + nbl_res_intr_unmask(res_mgt, + intr_mgt->func_intr_res[i].interrupts[j]); + } + } +} + +/* NBL_INTR_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. 
+ */ +#define NBL_INTR_OPS_TBL \ +do { \ + NBL_INTR_SET_OPS(configure_msix_map, nbl_res_intr_configure_msix_map); \ + NBL_INTR_SET_OPS(destroy_msix_map, nbl_res_intr_destroy_msix_map); \ + NBL_INTR_SET_OPS(enable_mailbox_irq, nbl_res_intr_enable_mailbox_irq); \ + NBL_INTR_SET_OPS(enable_abnormal_irq, nbl_res_intr_enable_abnormal_irq); \ + NBL_INTR_SET_OPS(enable_adminq_irq, nbl_res_intr_enable_adminq_irq); \ + NBL_INTR_SET_OPS(enable_msix_irq, nbl_res_intr_enable_msix_irq); \ + NBL_INTR_SET_OPS(get_msix_irq_enable_info, nbl_res_get_msix_irq_enable_info); \ + NBL_INTR_SET_OPS(get_global_vector, nbl_res_intr_get_global_vector); \ + NBL_INTR_SET_OPS(get_msix_entry_id, nbl_res_intr_get_msix_entry_id); \ + NBL_INTR_SET_OPS(get_coalesce, nbl_res_intr_get_coalesce); \ + NBL_INTR_SET_OPS(set_coalesce, nbl_res_intr_set_coalesce); \ + NBL_INTR_SET_OPS(get_mbx_irq_num, nbl_res_intr_get_mbx_irq_num); \ + NBL_INTR_SET_OPS(get_adminq_irq_num, nbl_res_intr_get_adminq_irq_num); \ + NBL_INTR_SET_OPS(get_abnormal_irq_num, nbl_res_intr_get_abnormal_irq_num); \ + NBL_INTR_SET_OPS(get_intr_suppress_level, nbl_res_intr_get_suppress_level); \ + NBL_INTR_SET_OPS(set_intr_suppress_level, nbl_res_intr_set_intr_suppress_level);\ + NBL_INTR_SET_OPS(flr_clear_interrupt, nbl_res_flr_clear_interrupt); \ + NBL_INTR_SET_OPS(unmask_all_interrupts, nbl_res_unmask_all_interrupts); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_intr_setup_mgt(struct device *dev, struct nbl_interrupt_mgt **intr_mgt) +{ + *intr_mgt = devm_kzalloc(dev, sizeof(struct nbl_interrupt_mgt), GFP_KERNEL); + if (!*intr_mgt) + return -ENOMEM; + + return 0; +} + +static void nbl_intr_remove_mgt(struct device *dev, struct nbl_interrupt_mgt **intr_mgt) +{ + devm_kfree(dev, *intr_mgt); + *intr_mgt = NULL; +} + +int nbl_intr_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_interrupt_mgt **intr_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + intr_mgt = &NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + return nbl_intr_setup_mgt(dev, intr_mgt); +} + +void nbl_intr_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_interrupt_mgt **intr_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + intr_mgt = &NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + if (!(*intr_mgt)) + return; + + nbl_intr_remove_mgt(dev, intr_mgt); +} + +int nbl_intr_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_INTR_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_INTR_OPS_TBL; +#undef NBL_INTR_SET_OPS + + return 0; +} + +void nbl_intr_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_INTR_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_INTR_OPS_TBL; +#undef NBL_INTR_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h new file mode 100644 index 000000000000..30ca7aec72bc --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_INTERRUPT_H_ +#define _NBL_INTERRUPT_H_ + +#include "nbl_resource.h" + +#define NBL_MSIX_MAP_TABLE_MAX_ENTRIES (1024) + +#define NBL_INTR_SUPPRESS_LEVEL1_THRESHOLD (100000) /* 100k pps */ +#define NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD (60000) /* 60kpps */ +#define NBL_INTR_SUPPRESS_LEVEL0 (0) +#define NBL_INTR_SUPPRESS_LEVEL1 (1) + +#define NBL_INTR_SUPPRESS_LEVEL0_PNUM (0) +#define NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM (8) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM (16) +#define NBL_INTR_SUPPRESS_LEVEL0_RATE (0) +#define NBL_INTR_SUPPRESS_LEVEL1_25G_RATE (1) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_RATE (2) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h new file mode 100644 index 000000000000..383dbd5dd08f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_P4_ACTION_H +#define _NBL_P4_ACTION_H + +// Code generated by P4 compiler. DO NOT EDIT. +#define NBL_ACT_SET_FLAGS 1 +#define NBL_ACT_CLEAR_FLAGS 1 +#define NBL_ACT_SET_AUX_FIELD 1 +#define NBL_ACT_SET_FLOW_STAT0 2 +#define NBL_ACT_SET_FLOW_STAT1 3 +#define NBL_ACT_SET_RSS 4 +#define NBL_ACT_SET_CAR 5 +#define NBL_ACT_SET_FLOW_CAR 6 +#define NBL_ACT_SET_TAB_INDEX 7 +#define NBL_ACT_SET_MIRROR 8 +#define NBL_ACT_SET_DPORT 9 +#define NBL_ACT_SET_QUE_IDX 10 +#define NBL_ACT_SET_MCC 13 +#define NBL_ACT_SET_VNI0 14 +#define NBL_ACT_SET_VNI1 15 +#define NBL_ACT_SET_SPECIAL_FLOW_STAT 16 +#define NBL_ACT_SET_PRBAC 17 +#define NBL_ACT_SET_DP_HASH0 19 +#define NBL_ACT_SET_DP_HASH1 20 +#define NBL_ACT_SET_PRI_MDF0 21 +#define NBL_ACT_SET_PRI_MDF1 21 +#define NBL_ACT_NEXT_AT_HALF0 60 +#define NBL_ACT_NEXT_AT_HALF1 61 +#define NBL_ACT_NEXT_AT_FULL0 62 +#define NBL_ACT_NEXT_AT_FULL1 63 +#define NBL_ACT_REP_IPV4_SIP 32 +#define NBL_ACT_REP_IPV4_DIP 33 +#define NBL_ACT_REP_IPV6_SIP 34 +#define NBL_ACT_REP_IPV6_DIP 35 +#define NBL_ACT_REP_DPORT 36 +#define NBL_ACT_REP_SPORT 37 +#define NBL_ACT_REP_DMAC 38 +#define NBL_ACT_REP_SMAC 39 +#define NBL_ACT_REP_IPV4_DSCP 40 +#define NBL_ACT_REP_IPV6_DSCP 41 +#define NBL_ACT_REP_IPV4_TTL 42 +#define NBL_ACT_REP_IPV6_TTL 43 +#define NBL_ACT_DEL_CVLAN 44 +#define NBL_ACT_DEL_SVLAN 45 +#define NBL_ACT_REP_SVLAN 46 +#define NBL_ACT_REP_CVLAN 47 +#define NBL_ACT_REP_SINGLE_CVLAN 48 +#define NBL_ACT_ADD_SVLAN 49 +#define NBL_ACT_ADD_CVLAN 50 +#define NBL_ACT_TNL_ENCAP 51 +#define NBL_ACT_TNL_DECAP 52 +#define NBL_ACT_REP_OUTER_SPORT 53 + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h new file mode 100644 index 000000000000..e15bc2b174f5 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_PHY_H_ +#define _NBL_PHY_H_ + +#include "nbl_core.h" + +#define NBL_PHY_MGT_TO_COMMON(phy_mgt) ((phy_mgt)->common) +#define NBL_PHY_MGT_TO_DEV(phy_mgt) NBL_COMMON_TO_DEV(NBL_PHY_MGT_TO_COMMON(phy_mgt)) +#define NBL_MEMORY_BAR (0) +#define NBL_MAILBOX_BAR (2) +#define NBL_RDMA_NOTIFY_OFF (8192) + +struct nbl_phy_mgt { + struct nbl_common_info *common; + u8 __iomem *hw_addr; + u8 __iomem *mailbox_bar_hw_addr; + u64 notify_offset; + u32 version; + u32 hw_size; + spinlock_t reg_lock; /* Protect reg access */ + bool should_lock; +}; + +static inline __maybe_unused u32 rd32(u8 __iomem *addr, u64 reg) +{ + return readl(addr + (reg)); +} + +static inline __maybe_unused void wr32_barrier(u8 __iomem *addr, u64 reg, u32 value) +{ + writel((value), (addr + (reg))); +} + +static inline __maybe_unused void nbl_hw_read_regs(struct nbl_phy_mgt *phy_mgt, u64 reg, + u8 *data, u32 len) +{ + u32 size = len / 4; + u32 i = 0; + + if (len % 4) + return; + + if (size > 1 && phy_mgt->should_lock) + spin_lock(&phy_mgt->reg_lock); + + for (i = 0; i < size; i++) + *(u32 *)(data + i * sizeof(u32)) = rd32(phy_mgt->hw_addr, reg + i * sizeof(u32)); + + if (size > 1 && phy_mgt->should_lock) + spin_unlock(&phy_mgt->reg_lock); +} + +static inline __maybe_unused void nbl_hw_write_regs(struct nbl_phy_mgt *phy_mgt, + u64 reg, const u8 *data, u32 len) +{ + u32 size = len / 4; + u32 i = 0; + + if (len % 4) + return; + + if (size > 1 && phy_mgt->should_lock) + spin_lock(&phy_mgt->reg_lock); + + for (i = 0; i < size; i++) + /* Used for emu, make sure that we won't write too frequently */ + wr32_barrier(phy_mgt->hw_addr, reg + i * sizeof(u32), + *(u32 *)(data + i * sizeof(u32))); + + if (size > 1 && phy_mgt->should_lock) + spin_unlock(&phy_mgt->reg_lock); +} + +static __maybe_unused void nbl_hw_wr32(struct nbl_phy_mgt *phy_mgt, u64 reg, u32 value) +{ + /* Used for emu, make sure that we won't write too frequently */ + wr32_barrier(phy_mgt->hw_addr, reg, value); +} + +static __maybe_unused u32 nbl_hw_rd32(struct nbl_phy_mgt *phy_mgt, u64 reg) +{ + return rd32(phy_mgt->hw_addr, reg); +} + +static __maybe_unused void nbl_mbx_wr32(void *priv, u64 reg, u32 value) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + writel((value), ((phy_mgt)->mailbox_bar_hw_addr + (reg))); +} + +static __maybe_unused u32 nbl_mbx_rd32(void *priv, u64 reg) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return readl((phy_mgt)->mailbox_bar_hw_addr + (reg)); +} + +static __maybe_unused void nbl_hw_read_mbx_regs(struct nbl_phy_mgt *phy_mgt, + u64 reg, u8 *data, u32 len) +{ + u32 i = 0; + + if (len % 4) + return; + + for (i = 0; i < len / 4; i++) + *(u32 *)(data + i * sizeof(u32)) = nbl_mbx_rd32(phy_mgt, reg + i * sizeof(u32)); +} + +static __maybe_unused void nbl_hw_write_mbx_regs(struct nbl_phy_mgt *phy_mgt, + u64 reg, const u8 *data, u32 len) +{ + u32 i = 0; + + if (len % 4) + return; + + for (i = 0; i < len / 4; i++) + /* Used for emu, make sure that we won't write too frequently */ + nbl_mbx_wr32(phy_mgt, reg + i * sizeof(u32), + *(u32 *)(data + i * sizeof(u32))); +} + +/* Mgt structure for each product. + * Every indivisual mgt must have the common mgt as its first member, and contains its unique + * data structure in the reset of it. 
+ */ +struct nbl_phy_mgt_leonis { + struct nbl_phy_mgt phy_mgt; + bool ro_enable; +}; + +struct nbl_phy_mgt_bootis { + struct nbl_phy_mgt phy_mgt; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.c new file mode 100644 index 000000000000..a5b2140fc2eb --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_queue.h" + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_queue_setup_mgt(struct device *dev, struct nbl_queue_mgt **queue_mgt) +{ + *queue_mgt = devm_kzalloc(dev, sizeof(struct nbl_queue_mgt), GFP_KERNEL); + if (!*queue_mgt) + return -ENOMEM; + + return 0; +} + +static void nbl_queue_remove_mgt(struct device *dev, struct nbl_queue_mgt **queue_mgt) +{ + devm_kfree(dev, *queue_mgt); + *queue_mgt = NULL; +} + +int nbl_queue_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_queue_mgt **queue_mgt; + struct nbl_res_product_ops *product_ops = NBL_RES_MGT_TO_PROD_OPS(res_mgt); + int ret = 0; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + queue_mgt = &NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + + ret = nbl_queue_setup_mgt(dev, queue_mgt); + if (ret) + return ret; + + NBL_OPS_CALL(product_ops->queue_mgt_init, (*queue_mgt)); + + return 0; +} + +void nbl_queue_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_queue_mgt **queue_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + queue_mgt = &NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + + if (!(*queue_mgt)) + return; + + nbl_queue_remove_mgt(dev, queue_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h new file mode 100644 index 000000000000..097fef9a1662 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_QUEUE_H_ +#define _NBL_QUEUE_H_ + +#include "nbl_resource.h" + +int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *res_ops); +void nbl_queue_mgt_init_leonis(struct nbl_queue_mgt *queue_mgt); +void nbl_queue_mgt_init_bootis(struct nbl_queue_mgt *queue_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c new file mode 100644 index 000000000000..57e21862f4d0 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#include "nbl_resource.h" + +static u16 pfvfid_to_vsi_id(void *p, int pfid, int vfid, u16 type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + enum nbl_vsi_serv_type dst_type = NBL_VSI_SERV_PF_DATA_TYPE; + u16 vsi_id; + int diff; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(NBL_RES_MGT_TO_COMMON(res_mgt), pfid); + if (vfid == U32_MAX) { + if (diff < vsi_info->num) { + nbl_res_pf_dev_vsi_type_to_hw_vsi_type(type, &dst_type); + vsi_id = vsi_info->serv_info[diff][dst_type].base_id; + } else { + vsi_id = vsi_info->serv_info[0][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id + + (diff - vsi_info->num); + } + } else { + vsi_id = vsi_info->serv_info[diff][NBL_VSI_SERV_VF_DATA_TYPE].base_id + vfid; + } + + return vsi_id; +} + +static u16 func_id_to_vsi_id(void *p, u16 func_id, u16 type) +{ + int pfid = U32_MAX; + int vfid = U32_MAX; + + nbl_res_func_id_to_pfvfid(p, func_id, &pfid, &vfid); + + return nbl_res_pfvfid_to_vsi_id(p, pfid, vfid, type); +} + +static u16 vsi_id_to_func_id(void *p, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int i, j; + u16 func_id = U16_MAX; + bool vsi_find = false; + + for (i = 0; i < vsi_info->num; i++) { + for (j = 0; j < NBL_VSI_SERV_MAX_TYPE; j++) { + if (vsi_id >= vsi_info->serv_info[i][j].base_id && + (vsi_id < vsi_info->serv_info[i][j].base_id + + vsi_info->serv_info[i][j].num)) { + vsi_find = true; + break; + } + } + + if (vsi_find) + break; + } + + if (vsi_find) { + /* if pf_id < eth_num */ + if (j >= NBL_VSI_SERV_PF_DATA_TYPE && j <= NBL_VSI_SERV_PF_USER_TYPE) + func_id = i + NBL_COMMON_TO_MGT_PF(common); + /* if vf */ + else if (j == NBL_VSI_SERV_VF_DATA_TYPE) { + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + i; + func_id = sriov_info->start_vf_func_id + + (vsi_id - vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].base_id); + /* if extra pf */ + } else { + func_id = vsi_info->num + + (vsi_id - vsi_info->serv_info[i][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id); + } + } + + return func_id; +} + +static int vsi_id_to_pf_id(void *p, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + int i, j; + u32 pf_id = U32_MAX; + bool vsi_find = false; + + for (i = 0; i < vsi_info->num; i++) { + for (j = 0; j < NBL_VSI_SERV_MAX_TYPE; j++) + if (vsi_id >= vsi_info->serv_info[i][j].base_id && + (vsi_id < vsi_info->serv_info[i][j].base_id + + vsi_info->serv_info[i][j].num)){ + vsi_find = true; + break; + } + + if (vsi_find) + break; + } + + if (vsi_find) { + /* if pf_id < eth_num */ + if (j >= NBL_VSI_SERV_PF_DATA_TYPE && j <= NBL_VSI_SERV_VF_DATA_TYPE) + pf_id = i + NBL_COMMON_TO_MGT_PF(common); + /* if extra pf */ + else if (j == NBL_VSI_SERV_PF_EXTRA_TYPE) + pf_id = vsi_info->num + + (vsi_id - vsi_info->serv_info[i][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id); + } + + return pf_id; +} + +static int func_id_to_pfvfid(void *p, u16 func_id, int *pfid, int *vfid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int diff; + int pf_id_tmp; + + if (func_id < NBL_RES_MGT_TO_PF_NUM(res_mgt)) { + 
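/* a func_id below the PF count is a PF itself, so no VF id applies */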
*pfid = func_id; + *vfid = U32_MAX; + return 0; + } + + for (pf_id_tmp = 0; pf_id_tmp < NBL_RES_MGT_TO_PF_NUM(res_mgt); pf_id_tmp++) { + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pf_id_tmp); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + if (func_id >= sriov_info->start_vf_func_id && + func_id < sriov_info->start_vf_func_id + sriov_info->num_vfs) { + *pfid = pf_id_tmp; + *vfid = func_id - sriov_info->start_vf_func_id; + return 0; + } + } + + return U32_MAX; +} + +static int func_id_to_bdf(void *p, u16 func_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int pfid = U32_MAX; + int vfid = U32_MAX; + int diff; + u8 pf_bus, pf_devfn, devfn; + + if (nbl_res_func_id_to_pfvfid(p, func_id, &pfid, &vfid)) + return U32_MAX; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pfid); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + pf_bus = PCI_BUS_NUM(sriov_info->bdf); + pf_devfn = sriov_info->bdf & 0xff; + + if (vfid != U32_MAX) { + *bus = pf_bus + ((pf_devfn + sriov_info->offset + sriov_info->stride * vfid) >> 8); + devfn = (pf_devfn + sriov_info->offset + sriov_info->stride * vfid) & 0xff; + } else { + *bus = pf_bus; + devfn = pf_devfn; + } + + *dev = PCI_SLOT(devfn); + *function = PCI_FUNC(devfn); + return 0; +} + +static u16 pfvfid_to_func_id(void *p, int pfid, int vfid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int diff; + + if (vfid == U32_MAX) + return pfid; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pfid); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + + return sriov_info->start_vf_func_id + vfid; +} + +static u64 get_func_bar_base_addr(void *p, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + u64 base_addr = 0; + int pfid = U32_MAX; + int vfid = U32_MAX; + int diff; + + if (nbl_res_func_id_to_pfvfid(p, func_id, &pfid, &vfid)) + return 0; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pfid); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + if (!sriov_info->pf_bar_start) { + nbl_err(common, NBL_DEBUG_QUEUE, + "Try to get bar addr for func %d, but PF_%d sriov not init", + func_id, pfid); + return 0; + } + + if (vfid == U32_MAX) + base_addr = sriov_info->pf_bar_start; + else + base_addr = sriov_info->vf_bar_start + sriov_info->vf_bar_len * vfid; + + nbl_info(common, NBL_DEBUG_QUEUE, "pfid %d vfid %d base_addr %llx\n", + pfid, vfid, base_addr); + return base_addr; +} + +static u8 vsi_id_to_eth_id(void *p, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + if (eth_info) + return eth_info->eth_id[nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id)]; + else + return 0; +} + +static u8 eth_id_to_pf_id(void *p, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i; + u8 pf_id_offset = 0; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + if (i == eth_id) + break; + pf_id_offset++; + } + + return pf_id_offset + 
NBL_COMMON_TO_MGT_PF(common); +} + +int nbl_res_func_id_to_pfvfid(struct nbl_resource_mgt *res_mgt, u16 func_id, int *pfid, int *vfid) +{ + if (!res_mgt->common_ops.func_id_to_pfvfid) + return func_id_to_pfvfid(res_mgt, func_id, pfid, vfid); + + return res_mgt->common_ops.func_id_to_pfvfid(res_mgt, func_id, pfid, vfid); +} + +u16 nbl_res_pfvfid_to_func_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid) +{ + if (!res_mgt->common_ops.pfvfid_to_func_id) + return pfvfid_to_func_id(res_mgt, pfid, vfid); + + return res_mgt->common_ops.pfvfid_to_func_id(res_mgt, pfid, vfid); +} + +u16 nbl_res_pfvfid_to_vsi_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid, u16 type) +{ + if (!res_mgt->common_ops.pfvfid_to_vsi_id) + return pfvfid_to_vsi_id(res_mgt, pfid, vfid, type); + + return res_mgt->common_ops.pfvfid_to_vsi_id(res_mgt, pfid, vfid, type); +} + +int nbl_res_func_id_to_bdf(struct nbl_resource_mgt *res_mgt, u16 func_id, u8 *bus, + u8 *dev, u8 *function) +{ + if (!res_mgt->common_ops.func_id_to_bdf) + return func_id_to_bdf(res_mgt, func_id, bus, dev, function); + + return res_mgt->common_ops.func_id_to_bdf(res_mgt, func_id, bus, dev, function); +} + +u16 nbl_res_vsi_id_to_func_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + if (!res_mgt->common_ops.vsi_id_to_func_id) + return vsi_id_to_func_id(res_mgt, vsi_id); + + return res_mgt->common_ops.vsi_id_to_func_id(res_mgt, vsi_id); +} + +int nbl_res_vsi_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + if (!res_mgt->common_ops.vsi_id_to_pf_id) + return vsi_id_to_pf_id(res_mgt, vsi_id); + + return res_mgt->common_ops.vsi_id_to_pf_id(res_mgt, vsi_id); +} + +u16 nbl_res_func_id_to_vsi_id(struct nbl_resource_mgt *res_mgt, u16 func_id, u16 type) +{ + if (!res_mgt->common_ops.func_id_to_vsi_id) + return func_id_to_vsi_id(res_mgt, func_id, type); + + return res_mgt->common_ops.func_id_to_vsi_id(res_mgt, func_id, type); +} + +u64 nbl_res_get_func_bar_base_addr(struct nbl_resource_mgt *res_mgt, u16 func_id) +{ + if (!res_mgt->common_ops.get_func_bar_base_addr) + return get_func_bar_base_addr(res_mgt, func_id); + + return res_mgt->common_ops.get_func_bar_base_addr(res_mgt, func_id); +} + +u16 nbl_res_get_particular_queue_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + return res_mgt->common_ops.get_particular_queue_id(res_mgt, vsi_id); +} + +u8 nbl_res_vsi_id_to_eth_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + if (!res_mgt->common_ops.vsi_id_to_eth_id) + return vsi_id_to_eth_id(res_mgt, vsi_id); + + return res_mgt->common_ops.vsi_id_to_eth_id(res_mgt, vsi_id); +} + +u8 nbl_res_eth_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + if (!res_mgt->common_ops.eth_id_to_pf_id) + return eth_id_to_pf_id(res_mgt, eth_id); + + return res_mgt->common_ops.eth_id_to_pf_id(res_mgt, eth_id); +} + +bool nbl_res_get_flex_capability(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return test_bit(cap_type, res_mgt->flex_capability); +} + +bool nbl_res_get_fix_capability(void *priv, enum nbl_fix_cap_type cap_type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return test_bit(cap_type, res_mgt->fix_capability); +} + +void nbl_res_set_flex_capability(struct nbl_resource_mgt *res_mgt, enum nbl_flex_cap_type cap_type) +{ + set_bit(cap_type, res_mgt->flex_capability); +} + +void nbl_res_set_fix_capability(struct nbl_resource_mgt *res_mgt, enum nbl_fix_cap_type cap_type) +{ + set_bit(cap_type, res_mgt->fix_capability); +} + 
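/*
 * Illustration (hypothetical names, not part of the driver): the nbl_res_*()
 * ID-mapping wrappers above all follow the same fallback-dispatch pattern --
 * call the product-specific common_ops callback when one is registered,
 * otherwise fall back to the generic static helper in this file. A minimal
 * sketch of that pattern, kept out of the build with #if 0:
 */
#if 0
struct demo_ops {
	int (*id_to_index)(void *priv, int id);	/* optional product override */
};

struct demo_mgt {
	struct demo_ops ops;
};

/* generic default used when no override is registered */
static int demo_default_id_to_index(struct demo_mgt *mgt, int id)
{
	return id;
}

static int demo_id_to_index(struct demo_mgt *mgt, int id)
{
	if (!mgt->ops.id_to_index)
		return demo_default_id_to_index(mgt, id);

	return mgt->ops.id_to_index(mgt, id);
}
#endif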
+void nbl_res_pf_dev_vsi_type_to_hw_vsi_type(u16 src_type, enum nbl_vsi_serv_type *dst_type) +{ + if (src_type == NBL_VSI_DATA) + *dst_type = NBL_VSI_SERV_PF_DATA_TYPE; + else if (src_type == NBL_VSI_USER) + *dst_type = NBL_VSI_SERV_PF_USER_TYPE; + else if (src_type == NBL_VSI_CTRL) + *dst_type = NBL_VSI_SERV_PF_CTLR_TYPE; +} + +bool nbl_res_vf_is_active(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = res_mgt->resource_info; + + return test_bit(func_id, resource_info->func_bitmap); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h new file mode 100644 index 000000000000..868ca7af412e --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h @@ -0,0 +1,769 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_RESOURCE_H_ +#define _NBL_RESOURCE_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" + +#define NBL_RES_MGT_TO_COMMON(res_mgt) ((res_mgt)->common) +#define NBL_RES_MGT_TO_COMMON_OPS(res_mgt) (&((res_mgt)->common_ops)) +#define NBL_RES_MGT_TO_DEV(res_mgt) NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)) +#define NBL_RES_MGT_TO_DMA_DEV(res_mgt) \ + NBL_COMMON_TO_DMA_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)) +#define NBL_RES_MGT_TO_INTR_MGT(res_mgt) ((res_mgt)->intr_mgt) +#define NBL_RES_MGT_TO_QUEUE_MGT(res_mgt) ((res_mgt)->queue_mgt) +#define NBL_RES_MGT_TO_TXRX_MGT(res_mgt) ((res_mgt)->txrx_mgt) +#define NBL_RES_MGT_TO_FLOW_MGT(res_mgt) ((res_mgt)->flow_mgt) +#define NBL_RES_MGT_TO_VSI_MGT(res_mgt) ((res_mgt)->vsi_mgt) +#define NBL_RES_MGT_TO_PORT_MGT(res_mgt) ((res_mgt)->port_mgt) +#define NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt) ((res_mgt)->adminq_mgt) +#define NBL_RES_MGT_TO_RES_INFO(res_mgt) ((res_mgt)->resource_info) +#define NBL_RES_MGT_TO_PROD_OPS(res_mgt) ((res_mgt)->product_ops) +#define NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->sriov_info) +#define NBL_RES_MGT_TO_ETH_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eth_info) +#define NBL_RES_MGT_TO_VSI_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->vsi_info) +#define NBL_RES_MGT_TO_PF_NUM(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->max_pf) + +#define NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt) ((res_mgt)->phy_ops_tbl) +#define NBL_RES_MGT_TO_PHY_OPS(res_mgt) (NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt)->ops) +#define NBL_RES_MGT_TO_PHY_PRIV(res_mgt) (NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt)->priv) +#define NBL_RES_MGT_TO_CHAN_OPS_TBL(res_mgt) ((res_mgt)->chan_ops_tbl) +#define NBL_RES_MGT_TO_CHAN_OPS(res_mgt) (NBL_RES_MGT_TO_CHAN_OPS_TBL(res_mgt)->ops) +#define NBL_RES_MGT_TO_CHAN_PRIV(res_mgt) (NBL_RES_MGT_TO_CHAN_OPS_TBL(res_mgt)->priv) +#define NBL_RES_MGT_TO_TX_RING(res_mgt, index) \ + (NBL_RES_MGT_TO_TXRX_MGT(res_mgt)->tx_rings[(index)]) +#define NBL_RES_MGT_TO_RX_RING(res_mgt, index) \ + (NBL_RES_MGT_TO_TXRX_MGT(res_mgt)->rx_rings[(index)]) +#define NBL_RES_MGT_TO_VECTOR(res_mgt, index) \ + (NBL_RES_MGT_TO_TXRX_MGT(res_mgt)->vectors[(index)]) + +#define NBL_RES_BASE_QID(res_mgt) NBL_RES_MGT_TO_RES_INFO(res_mgt)->base_qid +#define NBL_RES_NOFITY_QID(res_mgt, local_qid) (NBL_RES_BASE_QID(res_mgt) * 2 + (local_qid)) + +#define NBL_MAX_FUNC (520) +#define NBL_MAX_JUMBO_FRAME_SIZE (9600) +#define NBL_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +/* temperature threshold1 */ +#define NBL_LEONIS_TEMP_MAX (100) +/* temperature threshold2 */ 
+#define NBL_LEONIS_TEMP_CRIT (115) + +/* --------- QUEUE ---------- */ +#define NBL_MAX_TXRX_QUEUE (2048) +#define NBL_DEFAULT_DESC_NUM (1024) +#define NBL_MAX_TXRX_QUEUE_PER_FUNC (256) + +#define NBL_DEFAULT_REP_HW_QUEUE_NUM (16) +#define NBL_DEFAULT_PF_HW_QUEUE_NUM (16) +#define NBL_DEFAULT_USER_HW_QUEUE_NUM (16) +#define NBL_DEFAULT_VF_HW_QUEUE_NUM (2) + +#define NBL_SPECIFIC_VSI_NET_ID_OFFSET (4) +#define NBL_MAX_CACHE_SIZE (256) +#define NBL_MAX_BATCH_DESC (64) + +enum nbl_qid_map_table_type { + NBL_MASTER_QID_MAP_TABLE, + NBL_SLAVE_QID_MAP_TABLE, + NBL_QID_MAP_TABLE_MAX +}; + +struct nbl_queue_vsi_info { + u32 curr_qps; + u16 curr_qps_static; /* This will not be reset when netdev down */ + u16 vsi_index; + u16 vsi_id; + u16 rss_ret_base; + u16 rss_entry_size; + u16 net_id; + u16 queue_offset; + u16 queue_num; + bool rss_vld; + bool vld; +}; + +struct nbl_queue_info { + struct nbl_queue_vsi_info vsi_info[NBL_VSI_MAX]; + u64 notify_addr; + u32 qid_map_index; + u16 num_txrx_queues; + u16 *txrx_queues; + u16 *queues_context; + u16 rss_ret_base; + u16 rss_entry_size; + u32 curr_qps; + u16 split; + u16 queue_size; +}; + +struct nbl_adapt_desc_gother { + u16 level; + u32 uvn_desc_rd_entry; + u64 get_desc_stats_jiffies; +}; + +struct nbl_queue_mgt { + DECLARE_BITMAP(txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); + DECLARE_BITMAP(rss_ret_bitmap, NBL_EPRO_RSS_RET_TBL_DEPTH); + struct nbl_qid_map_table qid_map_table[NBL_QID_MAP_TABLE_ENTRIES]; + struct nbl_queue_info queue_info[NBL_MAX_FUNC]; + u32 total_qid_map_entries; + int qid_map_select; + bool qid_map_ready; + u32 qid_map_tail[NBL_QID_MAP_TABLE_MAX]; + struct nbl_adapt_desc_gother adapt_desc_gother; +}; + +/* --------- INTERRUPT ---------- */ +#define NBL_MAX_OTHER_INTERRUPT 1024 +#define NBL_MAX_NET_INTERRUPT 4096 + +struct nbl_msix_map { + u16 valid:1; + u16 global_msix_index:13; + u16 rsv:2; +}; + +struct nbl_msix_map_table { + struct nbl_msix_map *base_addr; + dma_addr_t dma; + size_t size; +}; + +struct nbl_func_interrupt_resource_mng { + u16 num_interrupts; + u16 msix_base; + u16 msix_max; + u16 *interrupts; + struct nbl_msix_map_table msix_map_table; +}; + +struct nbl_interrupt_mgt { + DECLARE_BITMAP(interrupt_net_bitmap, NBL_MAX_NET_INTERRUPT); + DECLARE_BITMAP(interrupt_others_bitmap, NBL_MAX_OTHER_INTERRUPT); + struct nbl_func_interrupt_resource_mng func_intr_res[NBL_MAX_FUNC]; +}; + +struct nbl_port_mgt { +}; + +/* --------- TXRX ---------- */ +struct nbl_ring_desc { + /* buffer address */ + __le64 addr; + /* buffer length */ + __le32 len; + /* buffer ID */ + __le16 id; + /* the flags depending on descriptor type */ + __le16 flags; +}; + +struct nbl_tx_buffer { + struct nbl_ring_desc *next_to_watch; + struct sk_buff *skb; + dma_addr_t dma; + u32 len; + + unsigned int bytecount; + unsigned short gso_segs; + bool page; + u32 tx_flags; +}; + +struct nbl_dma_info { + dma_addr_t addr; + struct page *page; +}; + +struct nbl_page_cache { + u32 head; + u32 tail; + struct nbl_dma_info page_cache[NBL_MAX_CACHE_SIZE]; +}; + +struct nbl_rx_buffer { + struct nbl_dma_info *di; + u32 offset; + bool last_in_page; +}; + +struct nbl_res_vector { + struct napi_struct napi; + struct nbl_res_tx_ring *tx_ring; + struct nbl_res_rx_ring *rx_ring; + u8 *irq_enable_base; + u32 irq_data; + bool started; + bool net_msix_mask_en; +}; + +struct nbl_res_tx_ring { + /*data path*/ + struct nbl_ring_desc *desc; + struct nbl_tx_buffer *tx_bufs; + struct device *dma_dev; + struct net_device *netdev; + u8 __iomem *notify_addr; + + enum nbl_product_type product_type; + 
u16 queue_index; + u16 desc_num; + u16 notify_qid; + u16 avail_used_flags; + /* device ring wrap counter */ + bool used_wrap_counter; + u16 next_to_use; + u16 next_to_clean; + u16 tail_ptr; + u16 mode; + u8 eth_id; + u8 extheader_tx_len; + + struct nbl_queue_stats stats; + struct u64_stats_sync syncp; + struct nbl_tx_queue_stats tx_stats; + + /* control path */ + // dma for desc[] + dma_addr_t dma; + // size for desc[] + unsigned int size; + bool valid; +} ____cacheline_internodealigned_in_smp; + +struct nbl_res_rx_ring { + /* data path */ + struct nbl_ring_desc *desc; + struct nbl_rx_buffer *rx_bufs; + struct nbl_dma_info *di; + struct device *dma_dev; + struct net_device *netdev; + struct page_pool *page_pool; + struct nbl_queue_stats stats; + struct nbl_rx_queue_stats rx_stats; + struct u64_stats_sync syncp; + struct nbl_page_cache page_cache; + + enum nbl_product_type product_type; + u32 buf_len; + u16 avail_used_flags; + bool used_wrap_counter; + u16 next_to_use; + u16 next_to_clean; + u16 tail_ptr; + u16 mode; + u16 desc_num; + u16 queue_index; + + /* control path */ + struct nbl_common_info *common; + void *txrx_mgt; + // dma for desc[] + dma_addr_t dma; + // size for desc[] + unsigned int size; + bool valid; + u16 notify_qid; +} ____cacheline_internodealigned_in_smp; + +struct nbl_txrx_vsi_info { + u16 ring_offset; + u16 ring_num; +}; + +struct nbl_txrx_mgt { + struct nbl_res_vector **vectors; + struct nbl_res_tx_ring **tx_rings; + struct nbl_res_rx_ring **rx_rings; + struct nbl_txrx_vsi_info vsi_info[NBL_VSI_MAX]; + u16 tx_ring_num; + u16 rx_ring_num; +}; + +struct nbl_vsi_mgt { +}; + +struct nbl_emp_version { + char app_version[16]; + char kernel_version[16]; + char build_version[16]; +}; + +struct nbl_adminq_mgt { + struct nbl_emp_version emp_verion; + u32 fw_last_hb_seq; + unsigned long fw_last_hb_time; + + struct work_struct eth_task; + struct nbl_resource_mgt *res_mgt; + u8 module_inplace_changed[NBL_MAX_ETHERNET]; + u8 link_state_changed[NBL_MAX_ETHERNET]; + + bool fw_resetting; + struct wait_queue_head wait_queue; + + struct mutex eth_lock; /* To prevent link_state_changed mismodified. 
*/ + + void *cmd_filter; +}; + +/* --------- FLOW ---------- */ +#define NBL_FEM_HT_PP0_LEN (1 * 1024) +#define NBL_MACVLAN_TABLE_LEN (4096) + +enum nbl_next_stg_id_e { + NBL_NEXT_STG_PA = 1, + NBL_NEXT_STG_IPRO = 2, + NBL_NEXT_STG_PP0_S0 = 3, + NBL_NEXT_STG_PP0_S1 = 4, + NBL_NEXT_STG_PP1_S0 = 5, + NBL_NEXT_STG_PP1_S1 = 6, + NBL_NEXT_STG_PP2_S0 = 7, + NBL_NEXT_STG_PP2_S1 = 8, + NBL_NEXT_STG_MCC = 9, + NBL_NEXT_STG_ACL_S0 = 10, + NBL_NEXT_STG_ACL_S1 = 11, + NBL_NEXT_STG_EPRO = 12, + NBL_NEXT_STG_BYPASS = 0xf, +}; + +enum { + NBL_FLOW_UP_TNL, + NBL_FLOW_UP, + NBL_FLOW_DOWN, + NBL_FLOW_MACVLAN_MAX, + NBL_FLOW_L2_UP = NBL_FLOW_MACVLAN_MAX, + NBL_FLOW_L2_DOWN, + NBL_FLOW_L3_UP, + NBL_FLOW_L3_DOWN, + NBL_FLOW_TYPE_MAX, + NBL_FLOW_LLDP_LACP_UP, +}; + +struct nbl_flow_ht_key { + u16 vid; + u16 ht_other_index; + u32 kt_index; +}; + +struct nbl_flow_ht_tbl { + struct nbl_flow_ht_key key[4]; + u32 ref_cnt; +}; + +struct nbl_flow_ht_mng { + struct nbl_flow_ht_tbl *hash_map[NBL_FEM_HT_PP0_LEN]; +}; + +struct nbl_flow_fem_entry { + s32 type; + u16 flow_id; + u16 ht0_hash; + u16 ht1_hash; + u16 hash_table; + u16 hash_bucket; + u16 tcam_index; + u8 tcam_flag; + u8 flow_type; +}; + +struct nbl_flow_mcc_node { + struct list_head node; + u16 mcc_id; +}; + +struct nbl_flow_multi_group { + struct list_head mcc_list; + struct nbl_flow_fem_entry entry[NBL_FLOW_TYPE_MAX - NBL_FLOW_MACVLAN_MAX]; + u8 ether_id; + u16 mcc_id; + u16 network_status; + u16 pfc_mode; + u16 bp_mode; +}; + +struct nbl_flow_lacp_rule { + struct nbl_flow_fem_entry entry; + struct list_head node; + u16 vsi; +}; + +struct nbl_flow_lldp_rule { + struct nbl_flow_fem_entry entry; + struct list_head node; + u16 vsi; +}; + +struct nbl_flow_UL4S_rule { + struct nbl_flow_fem_entry UL4S_entry; + struct list_head node; + u16 vsi; + u32 index; +}; + +struct nbl_flow_dipsec_rule { + struct nbl_flow_fem_entry dipsec_entry; + struct list_head node; + u16 vsi; + u32 index; +}; + +#define NBL_FLOW_PMD_ND_UPCALL_NA (0) +#define NBL_FLOW_PMD_ND_UPCALL_NS (1) +#define NBL_FLOW_PMD_ND_UPCALL_FLOW_NUM (2) + +struct nbl_flow_nd_upcall_rule { + struct nbl_flow_fem_entry entry[NBL_FLOW_PMD_ND_UPCALL_FLOW_NUM]; + struct list_head node; +}; + +struct nbl_flow_mgt { + DECLARE_BITMAP(flow_id, NBL_MACVLAN_TABLE_LEN); + DECLARE_BITMAP(tcam_id, NBL_TCAM_TABLE_LEN); + u32 pp_tcam_count; + u32 unicast_mac_threshold; + struct nbl_flow_ht_mng pp0_ht0_mng; + struct nbl_flow_ht_mng pp0_ht1_mng; + struct nbl_flow_multi_group multi_flow[NBL_MAX_ETHERNET]; + void *mac_hash_tbl[NBL_MAX_ETHERNET]; + struct list_head lldp_list; + struct list_head lacp_list; + void *mcc_tbl_priv; +}; + +#define NBL_FLOW_INIT_BIT BIT(1) +#define NBL_FLOW_AVAILABLE_BIT BIT(2) +#define NBL_ALL_PROFILE_NUM (64) +#define NBL_ASSOC_PROFILE_GRAPH_NUM (32) +#define NBL_ASSOC_PROFILE_NUM (16) +#define NBL_ASSOC_PROFILE_STAGE_NUM (8) +#define NBL_PROFILE_KEY_MAX_NUM (32) +#define NBL_FLOW_KEY_NAME_SIZE (32) +#define NBL_FLOW_INDEX_LEN 131072 +#define NBL_FLOW_TABLE_NUM (64 * 1024) +#define NBL_FEM_TCAM_MAX_NUM (64) +#define NBL_AT_MAX_NUM 8 +#define NBL_MAX_ACTION_NUM 16 +#define NBL_ACT_BYTE_LEN 32 + +enum nbl_flow_key_type { + NBL_FLOW_KEY_TYPE_PID, // profile id + NBL_FLOW_KEY_TYPE_ACTION, // AT action data, in 22 bits + NBL_FLOW_KEY_TYPE_PHV, // keys: PHV fields, inport, tab_index + // and other extracted 16 bits actions + NBL_FLOW_KEY_TYPE_MASK, // mask 4 bits + NBL_FLOW_KEY_TYPE_BTS // bit setter +}; + +#define NBL_PP0_KT_NUM (0) +#define NBL_PP1_KT_NUM (12 * 1024) +#define NBL_PP2_KT_NUM (112 * 1024) 
+#define NBL_PP0_KT_OFFSET (124 * 1024) +#define NBL_PP1_KT_OFFSET (112 * 1024) +#define NBL_FEM_HT_PP0_LEN (1 * 1024) +#define NBL_FEM_HT_PP1_LEN (3 * 1024) +#define NBL_FEM_HT_PP2_LEN (16 * 1024) +#define NBL_FEM_HT_PP0_DEPTH (1 * 1024) +#define NBL_FEM_HT_PP1_DEPTH (3 * 1024) +#define NBL_FEM_HT_PP2_DEPTH (0) +#define NBL_FEM_AT_PP1_LEN (6 * 1024) +#define NBL_FEM_AT2_PP1_LEN (2 * 1024) +#define NBL_FEM_AT_PP2_LEN (72 * 1024) +#define NBL_FEM_AT2_PP2_LEN (16 * 1024) + +struct nbl_flow_key_info { + bool valid; + enum nbl_flow_key_type key_type; + u16 offset; + u16 length; + u8 key_id; + char name[NBL_FLOW_KEY_NAME_SIZE]; +}; + +struct nbl_profile_msg { + bool valid; + // pp loopback or not + bool pp_mode; + bool key_full; + bool pt_cmd; + bool from_start; + bool to_end; + bool need_upcall; + + // id in range of 0 to 2 + u8 pp_id; + + // id in range of 0 to 15 + u8 profile_id; + + // id in range of 0 to 47 + u8 g_profile_id; + + // count of valid profile keys in the flow_keys list + u8 key_count; + u16 key_len; + u64 key_flag; + u8 act_count; + u8 pre_assoc_profile_id[NBL_ASSOC_PROFILE_NUM]; + u8 next_assoc_profile_id[NBL_ASSOC_PROFILE_NUM]; + // store all profile key info + struct nbl_flow_key_info flow_keys[NBL_PROFILE_KEY_MAX_NUM]; +}; + +struct nbl_flow_tab_hash_info { + struct hlist_head *flow_tab_head; + s32 tab_cnt; +}; + +struct nbl_flow_index_mng { + struct hlist_head flow_index_head[NBL_FLOW_INDEX_LEN]; + DECLARE_BITMAP(flow_index_bmp, NBL_FLOW_INDEX_LEN); +}; + +struct nbl_profile_assoc_graph { + u64 key_flag; + u8 profile_count; + u8 profile_id[NBL_ASSOC_PROFILE_STAGE_NUM]; +}; + +/* pp ht hash-list struct */ +struct nbl_flow_pp_ht_key { + u16 vid; + u16 ht_other_index; + u32 kt_index; +}; + +struct nbl_flow_pp_ht_tbl { + struct nbl_flow_pp_ht_key key[4]; + u32 ref_cnt; +}; + +struct nbl_flow_pp_ht_mng { + struct nbl_flow_pp_ht_tbl **hash_map; +}; + +/* at hash-list struct */ +struct nbl_flow_pp_at_key { + union { + u32 act[NBL_AT_MAX_NUM]; + u8 act_data[NBL_ACT_BYTE_LEN]; + }; +}; + +struct nbl_flow_at_tbl { + struct hlist_node node; + struct nbl_flow_pp_at_key key; + u32 at_bitmap_index; + u32 ref_cnt; +}; + +/* --------- INFO ---------- */ +#define NBL_RES_RDMA_MAX (63) +#define NBL_RES_RDMA_INTR_NUM (3) +#define NBL_MAX_VF (NBL_MAX_FUNC - NBL_MAX_PF) + +struct nbl_sriov_info { + unsigned int bdf; + unsigned int num_vfs; + unsigned int start_vf_func_id; + unsigned short offset; + unsigned short stride; + unsigned short active_vf_num; + u64 vf_bar_start; + u64 vf_bar_len; + u64 pf_bar_start; +}; + +struct nbl_eth_info { + DECLARE_BITMAP(eth_bitmap, NBL_MAX_ETHERNET); + u64 port_caps[NBL_MAX_ETHERNET]; + u64 port_advertising[NBL_MAX_ETHERNET]; + u64 port_lp_advertising[NBL_MAX_ETHERNET]; + u32 link_speed[NBL_MAX_ETHERNET]; /* in Mbps units */ + u8 active_fc[NBL_MAX_ETHERNET]; + u8 active_fec[NBL_MAX_ETHERNET]; + u8 link_state[NBL_MAX_ETHERNET]; + u8 module_inplace[NBL_MAX_ETHERNET]; + u8 port_type[NBL_MAX_ETHERNET]; /* enum nbl_port_type */ + u8 port_max_rate[NBL_MAX_ETHERNET]; /* enum nbl_port_max_rate */ + + u8 pf_bitmap[NBL_MAX_ETHERNET]; + u8 eth_num; + u8 resv[3]; + u8 eth_id[NBL_MAX_PF]; + u8 logic_eth_id[NBL_MAX_PF]; +}; + +enum nbl_vsi_serv_type { + NBL_VSI_SERV_PF_DATA_TYPE, + NBL_VSI_SERV_PF_CTLR_TYPE, + NBL_VSI_SERV_PF_USER_TYPE, + NBL_VSI_SERV_VF_DATA_TYPE, + /* use for pf_num > eth_num, the extra pf belong pf0's switch */ + NBL_VSI_SERV_PF_EXTRA_TYPE, + NBL_VSI_SERV_MAX_TYPE, +}; + +struct nbl_vsi_serv_info { + u16 base_id; + u16 num; +}; + +struct nbl_vsi_info { 
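+	/* num appears to track the number of eth-backed PFs; serv_info gives each one's VSI id range per service type */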
+ u16 num;
+ struct nbl_vsi_serv_info serv_info[NBL_MAX_ETHERNET][NBL_VSI_SERV_MAX_TYPE];
+};
+
+struct nbl_net_ring_num_info {
+ u16 pf_def_max_net_qp_num;
+ u16 vf_def_max_net_qp_num;
+ u16 net_max_qp_num[NBL_MAX_FUNC];
+};
+
+struct nbl_resource_info {
+ /* ctrl-dev owned pfs */
+ DECLARE_BITMAP(func_bitmap, NBL_MAX_FUNC);
+ struct nbl_sriov_info *sriov_info;
+ struct nbl_eth_info *eth_info;
+ struct nbl_vsi_info *vsi_info;
+ u32 base_qid;
+
+ struct nbl_net_ring_num_info net_ring_num_info;
+
+ /* for af use */
+ int p4_used;
+ u16 eth_mode;
+ u16 init_acl_refcnt;
+ u8 max_pf;
+ u16 nd_upcall_refnt;
+ struct nbl_board_port_info board_info;
+};
+
+struct nbl_upcall_port_info {
+ bool upcall_port_active;
+ u16 func_id;
+};
+
+struct nbl_rep_offload_status {
+#define NBL_OFFLOAD_STATUS_MAX_VSI (1024)
+#define NBL_OFFLOAD_STATUS_MAX_ETH (4)
+ DECLARE_BITMAP(rep_vsi_bitmap, NBL_OFFLOAD_STATUS_MAX_VSI);
+ DECLARE_BITMAP(rep_eth_bitmap, NBL_OFFLOAD_STATUS_MAX_ETH);
+ bool status[NBL_MAX_ETHERNET];
+ bool pmd_debug;
+ unsigned long timestamp;
+};
+
+struct nbl_resource_common_ops {
+ u16 (*vsi_id_to_func_id)(void *res_mgt, u16 vsi_id);
+ int (*vsi_id_to_pf_id)(void *res_mgt, u16 vsi_id);
+ u16 (*pfvfid_to_func_id)(void *res_mgt, int pfid, int vfid);
+ u16 (*pfvfid_to_vsi_id)(void *res_mgt, int pfid, int vfid, u16 type);
+ u16 (*func_id_to_vsi_id)(void *res_mgt, u16 func_id, u16 type);
+ int (*func_id_to_pfvfid)(void *res_mgt, u16 func_id, int *pfid, int *vfid);
+ int (*func_id_to_bdf)(void *res_mgt, u16 func_id, u8 *bus, u8 *dev, u8 *function);
+ u64 (*get_func_bar_base_addr)(void *res_mgt, u16 func_id);
+ u16 (*get_particular_queue_id)(void *res_mgt, u16 vsi_id);
+ u8 (*vsi_id_to_eth_id)(void *res_mgt, u16 vsi_id);
+ u8 (*eth_id_to_pf_id)(void *res_mgt, u8 eth_id);
+};
+
+struct nbl_res_product_ops {
+ /* for queue */
+ void (*queue_mgt_init)(struct nbl_queue_mgt *queue_mgt);
+ int (*setup_qid_map_table)(struct nbl_resource_mgt *res_mgt, u16 func_id, u64 notify_addr);
+ void (*remove_qid_map_table)(struct nbl_resource_mgt *res_mgt, u16 func_id);
+ int (*init_qid_map_table)(struct nbl_resource_mgt *res_mgt,
+ struct nbl_queue_mgt *queue_mgt, struct nbl_phy_ops *phy_ops);
+
+ /* for intr */
+ void (*nbl_intr_mgt_init)(struct nbl_resource_mgt *res_mgt);
+};
+
+struct nbl_resource_mgt {
+ struct nbl_resource_common_ops common_ops;
+ struct nbl_common_info *common;
+ struct nbl_resource_info *resource_info;
+ struct nbl_channel_ops_tbl *chan_ops_tbl;
+ struct nbl_phy_ops_tbl *phy_ops_tbl;
+ struct nbl_queue_mgt *queue_mgt;
+ struct nbl_interrupt_mgt *intr_mgt;
+ struct nbl_txrx_mgt *txrx_mgt;
+ struct nbl_flow_mgt *flow_mgt;
+ struct nbl_vsi_mgt *vsi_mgt;
+ struct nbl_adminq_mgt *adminq_mgt;
+ struct nbl_accel_mgt *accel_mgt;
+ struct nbl_port_mgt *port_mgt;
+ struct nbl_res_product_ops *product_ops;
+ DECLARE_BITMAP(flex_capability, NBL_FLEX_CAP_NBITS);
+ DECLARE_BITMAP(fix_capability, NBL_FIX_CAP_NBITS);
+};
+
+/* Mgt structure for each product.
+ * Every individual mgt must have the common mgt as its first member, and contain its
+ * unique data structures in the rest of it.
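+ *
+ * For example, struct nbl_resource_mgt_leonis below embeds struct nbl_resource_mgt as
+ * its first member, so a product-specific mgt can be handed around as a plain
+ * struct nbl_resource_mgt pointer and cast back when product-specific fields are needed.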
+ */ +struct nbl_resource_mgt_leonis { + struct nbl_resource_mgt res_mgt; +}; + +#define NBL_RES_FW_CMD_FILTER_MAX 8 +struct nbl_res_fw_cmd_filter { + int (*in)(struct nbl_resource_mgt *res_mgt, void *data, int len); + int (*out)(struct nbl_resource_mgt *res_mgt, void *data, int len); +}; + +u16 nbl_res_vsi_id_to_func_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +int nbl_res_vsi_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +u16 nbl_res_pfvfid_to_func_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid); +u16 nbl_res_pfvfid_to_vsi_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid, u16 type); +u16 nbl_res_func_id_to_vsi_id(struct nbl_resource_mgt *res_mgt, u16 func_id, u16 type); +int nbl_res_func_id_to_pfvfid(struct nbl_resource_mgt *res_mgt, u16 func_id, int *pfid, int *vfid); +u8 nbl_res_eth_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u8 eth_id); +int nbl_res_func_id_to_bdf(struct nbl_resource_mgt *res_mgt, u16 func_id, u8 *bus, + u8 *dev, u8 *function); +u64 nbl_res_get_func_bar_base_addr(struct nbl_resource_mgt *res_mgt, u16 func_id); +u16 nbl_res_get_particular_queue_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +u8 nbl_res_vsi_id_to_eth_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); + +int nbl_adminq_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_adminq_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_adminq_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_adminq_remove_ops(struct nbl_resource_ops *resource_ops); + +int nbl_intr_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_intr_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_intr_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_intr_remove_ops(struct nbl_resource_ops *resource_ops); + +int nbl_queue_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_queue_mgt_stop(struct nbl_resource_mgt *res_mgt); + +int nbl_txrx_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_txrx_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_txrx_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_txrx_remove_ops(struct nbl_resource_ops *resource_ops); + +int nbl_vsi_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_vsi_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_vsi_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_vsi_remove_ops(struct nbl_resource_ops *resource_ops); + +bool nbl_res_get_flex_capability(void *priv, enum nbl_flex_cap_type cap_type); +bool nbl_res_get_fix_capability(void *priv, enum nbl_fix_cap_type cap_type); +void nbl_res_set_flex_capability(struct nbl_resource_mgt *res_mgt, enum nbl_flex_cap_type cap_type); +void nbl_res_set_fix_capability(struct nbl_resource_mgt *res_mgt, enum nbl_fix_cap_type cap_type); + +int nbl_res_open_sfp(struct nbl_resource_mgt *res_mgt, u8 eth_id); +int nbl_res_get_eth_mac(struct nbl_resource_mgt *res_mgt, u8 *mac, u8 eth_id); +void nbl_res_pf_dev_vsi_type_to_hw_vsi_type(u16 src_type, enum nbl_vsi_serv_type *dst_type); +bool nbl_res_vf_is_active(void *priv, u16 func_id); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c new file mode 100644 index 000000000000..f1b9f92ac057 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c @@ -0,0 +1,2243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
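+ *
+ * TX/RX datapath for the nbl core driver: ring and vector allocation, packed-ring
+ * descriptor handling, NAPI polling, TSO/checksum offload setup and per-ring statistics.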
+ * Author: + */ + +#include +#include +#include +#include +#include + +#include "nbl_txrx.h" + +int nbl_alloc_tx_rings(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, + u16 tx_num, u16 desc_num) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_tx_ring *ring; + u32 ring_index; + + if (txrx_mgt->tx_rings) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to allocate tx_rings which already exists\n"); + return -EINVAL; + } + + txrx_mgt->tx_ring_num = tx_num; + + txrx_mgt->tx_rings = devm_kcalloc(dev, tx_num, + sizeof(struct nbl_res_tx_ring *), GFP_KERNEL); + if (!txrx_mgt->tx_rings) + return -ENOMEM; + + for (ring_index = 0; ring_index < tx_num; ring_index++) { + ring = txrx_mgt->tx_rings[ring_index]; + WARN_ON(ring); + ring = devm_kzalloc(dev, sizeof(struct nbl_res_tx_ring), GFP_KERNEL); + if (!ring) + goto alloc_tx_ring_failed; + + ring->dma_dev = common->dma_dev; + ring->product_type = common->product_type; + ring->eth_id = common->eth_id; + ring->queue_index = ring_index; + ring->notify_addr = phy_ops->get_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2 + 1); + ring->netdev = netdev; + ring->desc_num = desc_num; + ring->used_wrap_counter = 1; + ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); + WRITE_ONCE(txrx_mgt->tx_rings[ring_index], ring); + } + + return 0; + +alloc_tx_ring_failed: + while (ring_index--) + devm_kfree(dev, txrx_mgt->tx_rings[ring_index]); + devm_kfree(dev, txrx_mgt->tx_rings); + txrx_mgt->tx_rings = NULL; + return -ENOMEM; +} + +static void nbl_free_tx_rings(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_res_tx_ring *ring; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + u16 ring_count; + u16 ring_index; + + ring_count = txrx_mgt->tx_ring_num; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = txrx_mgt->tx_rings[ring_index]; + devm_kfree(dev, ring); + } + devm_kfree(dev, txrx_mgt->tx_rings); + txrx_mgt->tx_rings = NULL; +} + +static int nbl_alloc_rx_rings(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, + u16 rx_num, u16 desc_num) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_rx_ring *ring; + u32 ring_index; + + if (txrx_mgt->rx_rings) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to allocate rx_rings which already exists\n"); + return -EINVAL; + } + + txrx_mgt->rx_ring_num = rx_num; + + txrx_mgt->rx_rings = devm_kcalloc(dev, rx_num, + sizeof(struct nbl_res_rx_ring *), GFP_KERNEL); + if (!txrx_mgt->rx_rings) + return -ENOMEM; + + for (ring_index = 0; ring_index < rx_num; ring_index++) { + ring = txrx_mgt->rx_rings[ring_index]; + WARN_ON(ring); + ring = devm_kzalloc(dev, sizeof(struct nbl_res_rx_ring), GFP_KERNEL); + if (!ring) + goto alloc_rx_ring_failed; + + ring->common = common; + ring->txrx_mgt = txrx_mgt; + ring->dma_dev = common->dma_dev; + ring->queue_index = ring_index; + ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2); + ring->netdev = netdev; + ring->desc_num = desc_num; + /* TODO: maybe TX buffer length should be determined by other factors */ + ring->buf_len = NBL_RX_BUFSZ - NBL_RX_PAD; + + ring->used_wrap_counter = 1; + 
ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); + WRITE_ONCE(txrx_mgt->rx_rings[ring_index], ring); + } + + return 0; + +alloc_rx_ring_failed: + while (ring_index--) + devm_kfree(dev, txrx_mgt->rx_rings[ring_index]); + devm_kfree(dev, txrx_mgt->rx_rings); + txrx_mgt->rx_rings = NULL; + return -ENOMEM; +} + +static void nbl_free_rx_rings(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_res_rx_ring *ring; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + u16 ring_count; + u16 ring_index; + + ring_count = txrx_mgt->rx_ring_num; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = txrx_mgt->rx_rings[ring_index]; + devm_kfree(dev, ring); + } + devm_kfree(dev, txrx_mgt->rx_rings); + txrx_mgt->rx_rings = NULL; +} + +static int nbl_alloc_vectors(struct nbl_resource_mgt *res_mgt, u16 num) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_vector *vector; + u32 index; + + if (txrx_mgt->vectors) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to allocate vectors which already exists\n"); + return -EINVAL; + } + + txrx_mgt->vectors = devm_kcalloc(dev, num, sizeof(struct nbl_res_vector *), GFP_KERNEL); + if (!txrx_mgt->vectors) + return -ENOMEM; + + for (index = 0; index < num; index++) { + vector = txrx_mgt->vectors[index]; + WARN_ON(vector); + vector = devm_kzalloc(dev, sizeof(struct nbl_res_vector), GFP_KERNEL); + if (!vector) + goto alloc_vector_failed; + + vector->rx_ring = txrx_mgt->rx_rings[index]; + vector->tx_ring = txrx_mgt->tx_rings[index]; + WRITE_ONCE(txrx_mgt->vectors[index], vector); + } + + return 0; + +alloc_vector_failed: + while (index--) + devm_kfree(dev, txrx_mgt->vectors[index]); + devm_kfree(dev, txrx_mgt->vectors); + txrx_mgt->vectors = NULL; + return -ENOMEM; +} + +static void nbl_free_vectors(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_res_vector *vector; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + u16 count, index; + + count = txrx_mgt->rx_ring_num; + for (index = 0; index < count; index++) { + vector = txrx_mgt->vectors[index]; + devm_kfree(dev, vector); + } + devm_kfree(dev, txrx_mgt->vectors); + txrx_mgt->vectors = NULL; +} + +static int nbl_res_txrx_alloc_rings(void *priv, struct net_device *netdev, u16 tx_num, + u16 rx_num, u16 tx_desc_num, u16 rx_desc_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + int err = 0; + + err = nbl_alloc_tx_rings(res_mgt, netdev, tx_num, tx_desc_num); + if (err) + return err; + + err = nbl_alloc_rx_rings(res_mgt, netdev, rx_num, rx_desc_num); + if (err) + goto alloc_rx_rings_err; + + err = nbl_alloc_vectors(res_mgt, rx_num); + if (err) + goto alloc_vectors_err; + + nbl_info(res_mgt->common, NBL_DEBUG_RESOURCE, + "Alloc rings for %d tx, %d rx, %d tx_desc %d rx_desc\n", + tx_num, rx_num, tx_desc_num, rx_desc_num); + return 0; + +alloc_vectors_err: + nbl_free_rx_rings(res_mgt); +alloc_rx_rings_err: + nbl_free_tx_rings(res_mgt); + return err; +} + +static void nbl_res_txrx_remove_rings(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + nbl_free_vectors(res_mgt); + nbl_free_tx_rings(res_mgt); + nbl_free_rx_rings(res_mgt); + nbl_info(res_mgt->common, NBL_DEBUG_RESOURCE, "Remove rings"); +} + +static dma_addr_t nbl_res_txrx_start_tx_ring(void *priv, u8 ring_index) +{ + struct nbl_resource_mgt 
*res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index); + + if (tx_ring->tx_bufs) { + nbl_err(res_mgt->common, NBL_DEBUG_RESOURCE, + "Try to setup a TX ring with buffer management array already allocated\n"); + return (dma_addr_t)NULL; + } + + tx_ring->tx_bufs = devm_kcalloc(dev, tx_ring->desc_num, sizeof(*tx_ring->tx_bufs), + GFP_KERNEL); + if (!tx_ring->tx_bufs) + return (dma_addr_t)NULL; + + /* Alloc twice memory, and second half is used to back up the desc for desc checking */ + tx_ring->size = ALIGN(tx_ring->desc_num * sizeof(struct nbl_ring_desc), PAGE_SIZE); + tx_ring->desc = dmam_alloc_coherent(dma_dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL | __GFP_ZERO); + if (!tx_ring->desc) + goto alloc_dma_err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->tail_ptr = 0; + + tx_ring->valid = true; + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Start tx ring %d", ring_index); + return tx_ring->dma; + +alloc_dma_err: + devm_kfree(dev, tx_ring->tx_bufs); + tx_ring->tx_bufs = NULL; + tx_ring->size = 0; + return (dma_addr_t)NULL; +} + +static inline bool nbl_rx_cache_get(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) +{ + struct nbl_page_cache *cache = &rx_ring->page_cache; + struct nbl_rx_queue_stats *stats = &rx_ring->rx_stats; + + if (unlikely(cache->head == cache->tail)) { + stats->rx_cache_empty++; + return false; + } + + if (page_ref_count(cache->page_cache[cache->head].page) != 1) { + stats->rx_cache_busy++; + return false; + } + + *dma_info = cache->page_cache[cache->head]; + cache->head = (cache->head + 1) & (NBL_MAX_CACHE_SIZE - 1); + stats->rx_cache_reuse++; + + dma_sync_single_for_device(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, DMA_FROM_DEVICE); + return true; +} + +static inline int nbl_page_alloc_pool(struct nbl_res_rx_ring *rx_ring, + struct nbl_dma_info *dma_info) +{ + if (nbl_rx_cache_get(rx_ring, dma_info)) + return 0; + + dma_info->page = page_pool_dev_alloc_pages(rx_ring->page_pool); + if (unlikely(!dma_info->page)) + return -ENOMEM; + + dma_info->addr = dma_map_page_attrs(rx_ring->dma_dev, dma_info->page, 0, PAGE_SIZE, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); + + if (unlikely(dma_mapping_error(rx_ring->dma_dev, dma_info->addr))) { + page_pool_recycle_direct(rx_ring->page_pool, dma_info->page); + dma_info->page = NULL; + return -ENOMEM; + } + + return 0; +} + +static inline int nbl_get_rx_frag(struct nbl_res_rx_ring *rx_ring, struct nbl_rx_buffer *buffer) +{ + int err = 0; + + /* first buffer alloc page */ + if (buffer->offset == NBL_RX_PAD) + err = nbl_page_alloc_pool(rx_ring, buffer->di); + + return err; +} + +static inline bool nbl_alloc_rx_bufs(struct nbl_res_rx_ring *rx_ring, u16 count) +{ + u32 buf_len; + u16 next_to_use, head; + __le16 head_flags = 0; + struct nbl_ring_desc *rx_desc, *head_desc; + struct nbl_rx_buffer *rx_buf; + int i; + + if (unlikely(!rx_ring || !count)) { + nbl_warn(NBL_RING_TO_COMMON(rx_ring), NBL_DEBUG_RESOURCE, + "invalid input parameters, rx_ring is %p, count is %d.\n", rx_ring, count); + return -EINVAL; + } + + buf_len = rx_ring->buf_len; + next_to_use = rx_ring->next_to_use; + + head = next_to_use; + head_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_buf = NBL_RX_BUF(rx_ring, next_to_use); + + if (unlikely(!rx_desc || !rx_buf)) { + nbl_warn(NBL_RING_TO_COMMON(rx_ring), 
NBL_DEBUG_RESOURCE, + "invalid input parameters, next_to_use:%d, rx_desc is %p, rx_buf is %p.\n", + next_to_use, rx_desc, rx_buf); + return -EINVAL; + } + + do { + if (nbl_get_rx_frag(rx_ring, rx_buf)) + break; + + for (i = 0; i < NBL_RX_PAGE_PER_FRAGS; i++, rx_desc++, rx_buf++) { + rx_desc->addr = cpu_to_le64(rx_buf->di->addr + rx_buf->offset); + rx_desc->len = cpu_to_le32(buf_len); + rx_desc->id = cpu_to_le16(next_to_use); + + if (likely(head != next_to_use || i)) + rx_desc->flags = cpu_to_le16(rx_ring->avail_used_flags | + NBL_PACKED_DESC_F_WRITE); + else + head_flags = cpu_to_le16(rx_ring->avail_used_flags | + NBL_PACKED_DESC_F_WRITE); + } + + next_to_use += NBL_RX_PAGE_PER_FRAGS; + rx_ring->tail_ptr += NBL_RX_PAGE_PER_FRAGS; + count -= NBL_RX_PAGE_PER_FRAGS; + if (next_to_use == rx_ring->desc_num) { + next_to_use = 0; + rx_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_buf = NBL_RX_BUF(rx_ring, next_to_use); + rx_ring->avail_used_flags ^= + BIT(NBL_PACKED_DESC_F_AVAIL) | + BIT(NBL_PACKED_DESC_F_USED); + } + } while (count); + + if (next_to_use != head) { + /* wmb */ + wmb(); + head_desc->flags = head_flags; + rx_ring->next_to_use = next_to_use; + } + + return !!count; +} + +static dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use_napi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + struct page_pool_params pp_params = {0}; + int i, j; + + if (rx_ring->rx_bufs) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to setup a RX ring with buffer management array already allocated\n"); + return (dma_addr_t)NULL; + } + + pp_params.order = 0; + pp_params.flags = 0; + pp_params.pool_size = rx_ring->desc_num; + pp_params.nid = dev_to_node(dev); + pp_params.dev = dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + + rx_ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_ring->page_pool)) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Page_pool Allocate %u Failed failed\n", + rx_ring->queue_index); + return (dma_addr_t)NULL; + } + + rx_ring->di = kvzalloc_node(array_size(rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS, + sizeof(struct nbl_dma_info)), + GFP_KERNEL, dev_to_node(dev)); + if (!rx_ring->di) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Dma info Allocate %u Failed failed\n", + rx_ring->queue_index); + goto alloc_di_err; + } + + rx_ring->rx_bufs = devm_kcalloc(dev, rx_ring->desc_num, sizeof(*rx_ring->rx_bufs), + GFP_KERNEL); + if (!rx_ring->rx_bufs) + goto alloc_buffers_err; + + /* Alloc twice memory, and second half is used to back up the desc for desc checking */ + rx_ring->size = ALIGN(rx_ring->desc_num * sizeof(struct nbl_ring_desc), PAGE_SIZE); + rx_ring->desc = dmam_alloc_coherent(dma_dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL | __GFP_ZERO); + if (!rx_ring->desc) + goto alloc_dma_err; + + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + rx_ring->tail_ptr = 0; + + j = 0; + for (i = 0; i < rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS; i++) { + struct nbl_dma_info *di = &rx_ring->di[i]; + struct nbl_rx_buffer *buffer; + int f; + + for (f = 0; f < NBL_RX_PAGE_PER_FRAGS; f++, j++) { + buffer = &rx_ring->rx_bufs[j]; + buffer->di = di; + buffer->offset = NBL_RX_PAD + f * NBL_RX_BUFSZ; + buffer->last_in_page = 
false;
+ }
+
+ buffer->last_in_page = true;
+ }
+
+ if (nbl_alloc_rx_bufs(rx_ring, rx_ring->desc_num - NBL_MAX_BATCH_DESC))
+ goto alloc_rx_bufs_err;
+
+ rx_ring->valid = true;
+ if (use_napi)
+ vector->started = true;
+
+ nbl_debug(common, NBL_DEBUG_RESOURCE, "Start rx ring %d", ring_index);
+ return rx_ring->dma;
+
+alloc_rx_bufs_err:
+ dmam_free_coherent(dma_dev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ rx_ring->dma = (dma_addr_t)NULL;
+alloc_dma_err:
+ devm_kfree(dev, rx_ring->rx_bufs);
+ rx_ring->rx_bufs = NULL;
+alloc_buffers_err:
+ kvfree(rx_ring->di);
+alloc_di_err:
+ page_pool_destroy(rx_ring->page_pool);
+ rx_ring->size = 0;
+ return (dma_addr_t)NULL;
+}
+
+static void nbl_unmap_and_free_tx_resource(struct nbl_res_tx_ring *ring,
+ struct nbl_tx_buffer *tx_buffer,
+ bool free_skb, bool in_napi)
+{
+ struct device *dma_dev = NBL_RING_TO_DMA_DEV(ring);
+
+ if (tx_buffer->skb) {
+ if (likely(free_skb)) {
+ if (in_napi)
+ napi_consume_skb(tx_buffer->skb, NBL_TX_POLL_WEIGHT);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
+ }
+
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(dma_dev, dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (tx_buffer->page && dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(dma_dev, dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_single(dma_dev, dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ tx_buffer->page = 0;
+ dma_unmap_len_set(tx_buffer, len, 0);
+}
+
+static void nbl_free_tx_ring_bufs(struct nbl_res_tx_ring *tx_ring)
+{
+ struct nbl_tx_buffer *tx_buffer;
+ u16 i;
+
+ i = tx_ring->next_to_clean;
+ tx_buffer = NBL_TX_BUF(tx_ring, i);
+ while (i != tx_ring->next_to_use) {
+ nbl_unmap_and_free_tx_resource(tx_ring, tx_buffer, true, false);
+ i++;
+ tx_buffer++;
+ if (i == tx_ring->desc_num) {
+ i = 0;
+ tx_buffer = NBL_TX_BUF(tx_ring, i);
+ }
+ }
+
+ tx_ring->next_to_clean = 0;
+ tx_ring->next_to_use = 0;
+ tx_ring->tail_ptr = 0;
+
+ tx_ring->used_wrap_counter = 1;
+ tx_ring->avail_used_flags = BIT(NBL_PACKED_DESC_F_AVAIL);
+ memset(tx_ring->desc, 0, tx_ring->size);
+}
+
+static void nbl_res_txrx_stop_tx_ring(void *priv, u8 ring_index)
+{
+ struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+ struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
+ struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt);
+ struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index);
+ struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index);
+
+ vector->started = false;
+ /* Flush the napi task to ensure any scheduled napi poll has finished, so napi will
+ * not access the ring memory (a dangling pointer), because vector->started is now false.
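+ *
+ * Note: nbl_res_napi_poll() only touches tx_ring/rx_ring while vector->started is
+ * true, so clearing the flag first and then calling napi_synchronize() should
+ * guarantee that no in-flight poll still uses the descriptors freed below.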
+ */ + napi_synchronize(&vector->napi); + + tx_ring->valid = false; + + nbl_free_tx_ring_bufs(tx_ring); + WRITE_ONCE(NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index), tx_ring); + + devm_kfree(dev, tx_ring->tx_bufs); + tx_ring->tx_bufs = NULL; + + dmam_free_coherent(dma_dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + tx_ring->dma = (dma_addr_t)NULL; + tx_ring->size = 0; + + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Stop tx ring %d", ring_index); +} + +static inline bool nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) +{ + struct nbl_page_cache *cache = &rx_ring->page_cache; + u32 tail_next = (cache->tail + 1) & (NBL_MAX_CACHE_SIZE - 1); + struct nbl_rx_queue_stats *stats = &rx_ring->rx_stats; + + if (tail_next == cache->head) { + stats->rx_cache_full++; + return false; + } + + if (!dev_page_is_reusable(dma_info->page)) { + stats->rx_cache_waive++; + return false; + } + + cache->page_cache[cache->tail] = *dma_info; + cache->tail = tail_next; + + return true; +} + +static inline void nbl_page_release_dynamic(struct nbl_res_rx_ring *rx_ring, + struct nbl_dma_info *dma_info, bool recycle) +{ + if (likely(recycle)) { + if (nbl_rx_cache_put(rx_ring, dma_info)) + return; + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); + page_pool_recycle_direct(rx_ring->page_pool, dma_info->page); + } else { + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); + page_pool_put_page(rx_ring->page_pool, dma_info->page, PAGE_SIZE, true); + } +} + +static inline void nbl_put_rx_frag(struct nbl_res_rx_ring *rx_ring, + struct nbl_rx_buffer *buffer, bool recycle) +{ + if (buffer->last_in_page) + nbl_page_release_dynamic(rx_ring, buffer->di, recycle); +} + +static void nbl_free_rx_ring_bufs(struct nbl_res_rx_ring *rx_ring) +{ + struct nbl_rx_buffer *rx_buf; + u16 i; + + i = rx_ring->next_to_clean; + rx_buf = NBL_RX_BUF(rx_ring, i); + while (i != rx_ring->next_to_use) { + nbl_put_rx_frag(rx_ring, rx_buf, false); + i++; + rx_buf++; + if (i == rx_ring->desc_num) { + i = 0; + rx_buf = NBL_RX_BUF(rx_ring, i); + } + } + + for (i = rx_ring->page_cache.head; i != rx_ring->page_cache.tail; + i = (i + 1) & (NBL_MAX_CACHE_SIZE - 1)) { + struct nbl_dma_info *dma_info = &rx_ring->page_cache.page_cache[i]; + + nbl_page_release_dynamic(rx_ring, dma_info, false); + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->tail_ptr = 0; + rx_ring->page_cache.head = 0; + rx_ring->page_cache.tail = 0; + + rx_ring->used_wrap_counter = 1; + rx_ring->avail_used_flags = BIT(NBL_PACKED_DESC_F_AVAIL); + memset(rx_ring->desc, 0, rx_ring->size); +} + +static void nbl_res_txrx_stop_rx_ring(void *priv, u8 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + + rx_ring->valid = false; + + nbl_free_rx_ring_bufs(rx_ring); + WRITE_ONCE(NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index), rx_ring); + + devm_kfree(dev, rx_ring->rx_bufs); + kvfree(rx_ring->di); + rx_ring->rx_bufs = NULL; + + dmam_free_coherent(dma_dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + rx_ring->dma = (dma_addr_t)NULL; + rx_ring->size = 0; + + page_pool_destroy(rx_ring->page_pool); + + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Stop rx ring %d", 
ring_index); +} + +static inline bool nbl_ring_desc_used(struct nbl_ring_desc *ring_desc, bool used_wrap_counter) +{ + bool avail; + bool used; + u16 flags; + + flags = le16_to_cpu(ring_desc->flags); + avail = !!(flags & BIT(NBL_PACKED_DESC_F_AVAIL)); + used = !!(flags & BIT(NBL_PACKED_DESC_F_USED)); + + return avail == used && used == used_wrap_counter; +} + +static int nbl_res_txrx_clean_tx_irq(struct nbl_res_tx_ring *tx_ring) +{ + struct nbl_tx_buffer *tx_buffer; + struct nbl_ring_desc *tx_desc; + unsigned int i = tx_ring->next_to_clean; + unsigned int total_tx_pkts = 0; + unsigned int total_tx_bytes = 0; + unsigned int total_tx_descs = 0; + int count = 64; + + tx_buffer = NBL_TX_BUF(tx_ring, i); + tx_desc = NBL_TX_DESC(tx_ring, i); + i -= tx_ring->desc_num; + + do { + struct nbl_ring_desc *end_desc = tx_buffer->next_to_watch; + + if (!end_desc) + break; + + /* smp_rmb */ + smp_rmb(); + + if (!nbl_ring_desc_used(tx_desc, tx_ring->used_wrap_counter)) + break; + + total_tx_pkts += tx_buffer->gso_segs; + total_tx_bytes += tx_buffer->bytecount; + + while (true) { + total_tx_descs++; + nbl_unmap_and_free_tx_resource(tx_ring, tx_buffer, true, true); + if (tx_desc == end_desc) + break; + i++; + tx_buffer++; + tx_desc++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buffer = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->used_wrap_counter ^= 1; + } + } + + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buffer = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->used_wrap_counter ^= 1; + } + + prefetch(tx_desc); + + } while (--count); + + i += tx_ring->desc_num; + + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_tx_bytes; + tx_ring->stats.packets += total_tx_pkts; + tx_ring->stats.descs += total_tx_descs; + u64_stats_update_end(&tx_ring->syncp); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_tx_pkts && netif_carrier_ok(tx_ring->netdev) && + tx_ring->queue_index < NBL_DEFAULT_PF_HW_QUEUE_NUM && + (nbl_unused_tx_desc_count(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
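+ *
+ * This barrier presumably pairs with the smp_mb() in nbl_maybe_stop_tx(): either the
+ * xmit path sees the updated next_to_clean and keeps the queue running, or the
+ * stopped state is seen here and the queue is woken up below.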
+ */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "wake queue %u\n", tx_ring->queue_index); + } + } + + return count; +} + +static void nbl_rx_csum(struct nbl_res_rx_ring *rx_ring, struct sk_buff *skb, + struct nbl_rx_extend_head *hdr) +{ + skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + + /* if user disable RX Checksum Offload, then stack verify the rx checksum */ + if (!(rx_ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (!hdr->checksum_status) + return; + + if (hdr->error_code) { + rx_ring->rx_stats.rx_csum_errors++; + return; + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + rx_ring->rx_stats.rx_csum_packets++; +} + +static inline void nbl_add_rx_frag(struct nbl_rx_buffer *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ + page_ref_inc(rx_buffer->di->page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->di->page, + rx_buffer->offset, size, NBL_RX_BUFSZ); +} + +static void nbl_txrx_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + + txrx_mgt->vsi_info[vsi_index].ring_offset = ring_offset; + txrx_mgt->vsi_info[vsi_index].ring_num = ring_num; +} + +/** + * Current version support merging multiple descriptor for one packet. + */ +static struct sk_buff *nbl_construct_skb(struct nbl_res_rx_ring *rx_ring, struct napi_struct *napi, + struct nbl_rx_buffer *rx_buf, unsigned int size) +{ + struct sk_buff *skb; + char *p, *buf; + int tailroom, shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + unsigned int truesize = NBL_RX_BUFSZ; + unsigned int headlen; + + /* p point dma buff start, buf point whole buffer start*/ + p = page_address(rx_buf->di->page) + rx_buf->offset; + buf = p - NBL_RX_PAD; + + /* p point pkt start */ + p += NBL_BUFFER_HDR_LEN; + tailroom = truesize - size - NBL_RX_PAD; + size -= NBL_BUFFER_HDR_LEN; + + if (size > NBL_RX_HDR_SIZE && tailroom >= shinfo_size) { + skb = build_skb(buf, truesize); + if (unlikely(!skb)) + return NULL; + + page_ref_inc(rx_buf->di->page); + skb_reserve(skb, p - buf); + skb_put(skb, size); + goto ok; + } + + skb = napi_alloc_skb(napi, NBL_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + headlen = size; + if (headlen > NBL_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, p, NBL_RX_HDR_SIZE); + + memcpy(__skb_put(skb, headlen), p, ALIGN(headlen, sizeof(long))); + size -= headlen; + if (size) { + page_ref_inc(rx_buf->di->page); + skb_add_rx_frag(skb, 0, rx_buf->di->page, + rx_buf->offset + NBL_BUFFER_HDR_LEN + headlen, + size, truesize); + } +ok: + skb_record_rx_queue(skb, rx_ring->queue_index); + + return skb; +} + +static inline struct nbl_rx_buffer *nbl_get_rx_buf(struct nbl_res_rx_ring *rx_ring) +{ + struct nbl_rx_buffer *rx_buf; + + rx_buf = NBL_RX_BUF(rx_ring, rx_ring->next_to_clean); + prefetchw(rx_buf->di->page); + + dma_sync_single_range_for_cpu(rx_ring->dma_dev, rx_buf->di->addr, rx_buf->offset, + rx_ring->buf_len, DMA_FROM_DEVICE); + + return rx_buf; +} + +static inline void nbl_put_rx_buf(struct nbl_res_rx_ring *rx_ring, struct nbl_rx_buffer *rx_buf) +{ + u16 ntc = rx_ring->next_to_clean + 1; + + /* if at the end of the ring, reset ntc and flip used wrap bit */ + if (unlikely(ntc >= rx_ring->desc_num)) { + ntc = 0; + rx_ring->used_wrap_counter ^= 1; + } + + 
rx_ring->next_to_clean = ntc; + prefetch(NBL_RX_DESC(rx_ring, ntc)); + + nbl_put_rx_frag(rx_ring, rx_buf, true); +} + +static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, + struct napi_struct *napi, + int budget) +{ + struct nbl_ring_desc *rx_desc; + struct nbl_rx_buffer *rx_buf; + struct nbl_rx_extend_head *hdr; + struct sk_buff *skb = NULL; + unsigned int total_rx_pkts = 0; + unsigned int total_rx_bytes = 0; + unsigned int size; + u16 desc_count = 0; + u16 num_buffers = 0; + u32 rx_multicast_packets = 0; + u32 rx_unicast_packets = 0; + u16 cleaned_count = nbl_unused_rx_desc_count(rx_ring); + u16 sport_id; + bool failure = 0; + + while (likely(total_rx_pkts < budget)) { + rx_desc = NBL_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!nbl_ring_desc_used(rx_desc, rx_ring->used_wrap_counter)) + break; + + // nbl_trace(clean_rx_irq, rx_ring, rx_desc); + + dma_rmb(); + size = le32_to_cpu(rx_desc->len); + rx_buf = nbl_get_rx_buf(rx_ring); + + desc_count++; + + if (skb) { + nbl_add_rx_frag(rx_buf, skb, size); + } else { + hdr = page_address(rx_buf->di->page) + rx_buf->offset; + net_prefetch(hdr); + skb = nbl_construct_skb(rx_ring, napi, rx_buf, size); + if (unlikely(!skb)) { + rx_ring->rx_stats.rx_alloc_buf_err_cnt++; + break; + } + + num_buffers = le16_to_cpu(hdr->num_buffers); + sport_id = hdr->sport_id; + nbl_rx_csum(rx_ring, skb, hdr); + } + + cleaned_count++; + nbl_put_rx_buf(rx_ring, rx_buf); + if (desc_count < num_buffers) + continue; + desc_count = 0; + + if (unlikely(eth_skb_pad(skb))) { + skb = NULL; + continue; + } + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + if (unlikely(skb->pkt_type == PACKET_BROADCAST || + skb->pkt_type == PACKET_MULTICAST)) + rx_multicast_packets++; + else + rx_unicast_packets++; + + total_rx_bytes += skb->len; + + // nbl_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); + napi_gro_receive(napi, skb); + skb = NULL; + total_rx_pkts++; + } + + if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))) + failure = nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_pkts; + rx_ring->stats.bytes += total_rx_bytes; + rx_ring->rx_stats.rx_multicast_packets += rx_multicast_packets; + rx_ring->rx_stats.rx_unicast_packets += rx_unicast_packets; + u64_stats_update_end(&rx_ring->syncp); + + return failure ? 
budget : total_rx_pkts; +} + +static int nbl_res_napi_poll(struct napi_struct *napi, int budget) +{ + struct nbl_res_vector *vector = container_of(napi, struct nbl_res_vector, napi); + struct nbl_res_tx_ring *tx_ring; + struct nbl_res_rx_ring *rx_ring; + int complete = 1, cleaned = 0, tx_done = 1; + + tx_ring = vector->tx_ring; + rx_ring = vector->rx_ring; + + if (vector->started) { + tx_done = nbl_res_txrx_clean_tx_irq(tx_ring); + cleaned = nbl_res_txrx_clean_rx_irq(rx_ring, napi, budget); + } + + if (!tx_done) + complete = 0; + + if (cleaned >= budget) + complete = 0; + + if (!complete) + return budget; + + if (!napi_complete_done(napi, cleaned)) + return min_t(int, cleaned, budget - 1); + + /* unmask irq passthrough for performace */ + if (vector->net_msix_mask_en) + writel(vector->irq_data, vector->irq_enable_base); + + return min_t(int, cleaned, budget - 1); +} + +static inline unsigned int nbl_txd_use_count(unsigned int size) +{ + /* TODO: how to compute tx desc needed more efficiently */ + return DIV_ROUND_UP(size, NBL_TXD_DATALEN_MAX); +} + +static unsigned int nbl_xmit_desc_count(struct sk_buff *skb) +{ + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int size; + unsigned int count; + + /* We need: 1 descriptor per page * PAGE_SIZE/NBL_MAX_DATA_PER_TX_DESC, + * + 1 desc for skb_headlen/NBL_MAX_DATA_PER_TX_DESC, + * + 2 desc gap to keep tail from touching head, + * otherwise try next time. + */ + size = skb_headlen(skb); + count = 2; + for (;;) { + count += nbl_txd_use_count(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +static inline int nbl_maybe_stop_tx(struct nbl_res_tx_ring *tx_ring, unsigned int size) +{ + if (likely(nbl_unused_tx_desc_count(tx_ring) >= size)) + return 0; + + if (tx_ring->queue_index >= NBL_DEFAULT_PF_HW_QUEUE_NUM) + return -EBUSY; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, stop queue %u\n", + nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* smp_mb */ + smp_mb(); + + if (likely(nbl_unused_tx_desc_count(tx_ring) < size)) + return -EBUSY; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, start queue %u\n", + nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +/* set up TSO(TCP Segmentation Offload) */ +static int nbl_tx_tso(struct nbl_tx_buffer *first, struct nbl_tx_hdr_param *hdr_param) +{ + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u8 l4_start; + u32 payload_len; + u8 header_len = 0; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 1; + + if (!skb_is_gso(skb)) + return 1; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize IP header fields*/ + if (ip.v4->version == IP_VERSION_V4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + + /* length of (MAC + IP) header */ + l4_start = (u8)(l4.hdr - skb->data); + + /* l4 packet length */ + payload_len = skb->len - l4_start; + + /* remove l4 packet length from L4 pseudo-header checksum */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + 
csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(payload_len));
+ /* compute length of UDP segmentation header */
+ header_len = (u8)sizeof(struct udphdr) + l4_start;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(payload_len));
+ /* compute length of TCP segmentation header */
+ header_len = (u8)(l4.tcp->doff * 4 + l4_start);
+ }
+
+ hdr_param->tso = 1;
+ hdr_param->mss = skb_shinfo(skb)->gso_size;
+ hdr_param->total_hlen = header_len;
+
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * header_len;
+ first->tx_flags = NBL_TX_FLAGS_TSO;
+
+ return first->gso_segs;
+}
+
+/* set up Tx checksum offload */
+static int nbl_tx_csum(struct nbl_tx_buffer *first, struct nbl_tx_hdr_param *hdr_param)
+{
+ struct sk_buff *skb = first->skb;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ __be16 frag_off, protocol;
+ u8 inner_ip_type = 0, l4_type = 0, l4_csum = 0, l4_proto = 0;
+ u32 l2_len = 0, l3_len = 0, l4_len = 0;
+ unsigned char *exthdr;
+ int ret;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* compute outer L2 header size */
+ l2_len = ip.hdr - skb->data;
+
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP)) {
+ inner_ip_type = NBL_TX_IIPT_IPV4;
+ l4_proto = ip.v4->protocol;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ inner_ip_type = NBL_TX_IIPT_IPV6;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+
+ if (l4.hdr != exthdr) {
+ ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off);
+ if (ret < 0)
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+
+ l3_len = l4.hdr - ip.hdr;
+
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ l4_type = NBL_TX_L4T_TCP;
+ l4_len = l4.tcp->doff;
+ l4_csum = 1;
+ break;
+ case IPPROTO_UDP:
+ l4_type = NBL_TX_L4T_UDP;
+ l4_len = (sizeof(struct udphdr) >> 2);
+ l4_csum = 1;
+ break;
+ case IPPROTO_SCTP:
+ if (first->tx_flags & NBL_TX_FLAGS_TSO)
+ return -1;
+ l4_type = NBL_TX_L4T_RSV;
+ l4_len = (sizeof(struct sctphdr) >> 2);
+ l4_csum = 1;
+ break;
+ default:
+ if (first->tx_flags & NBL_TX_FLAGS_TSO)
+ return -2;
+
+ /* unsupported L4 protocol, the device cannot offload the L4 checksum,
+ * so compute the L4 checksum in software
+ */
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ hdr_param->mac_len = l2_len >> 1;
+ hdr_param->ip_len = l3_len >> 2;
+ hdr_param->l4_len = l4_len;
+ hdr_param->l4_type = l4_type;
+ hdr_param->inner_ip_type = inner_ip_type;
+ hdr_param->l3_csum_en = 0;
+ hdr_param->l4_csum_en = l4_csum;
+
+ return 1;
+}
+
+static int nbl_map_skb(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb,
+ u16 first, u16 *desc_index)
+{
+ u16 index = *desc_index;
+ const skb_frag_t *frag;
+ unsigned int frag_num = skb_shinfo(skb)->nr_frags;
+ struct device *dma_dev = NBL_RING_TO_DMA_DEV(tx_ring);
+ struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(tx_ring, index);
+ struct nbl_ring_desc *tx_desc = NBL_TX_DESC(tx_ring, index);
+ unsigned int i;
+ unsigned int size;
+ dma_addr_t dma;
+
+ size = skb_headlen(skb);
+ dma = dma_map_single(dma_dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma))
+ return -1;
+
+ tx_buffer->dma = dma;
+ tx_buffer->len = size;
+
+ tx_desc->addr = cpu_to_le64(dma);
+ tx_desc->len = size;
+ if (!first)
+ tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT);
+
+ index++;
+ tx_desc++;
+
tx_buffer++; + if (index == tx_ring->desc_num) { + index = 0; + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_buffer = NBL_TX_BUF(tx_ring, 0); + } + + if (!frag_num) { + *desc_index = index; + return 0; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < frag_num; i++) { + size = skb_frag_size(frag); + dma = skb_frag_dma_map(dma_dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma)) { + *desc_index = index; + return -1; + } + + tx_buffer->dma = dma; + tx_buffer->len = size; + tx_buffer->page = 1; + + tx_desc->addr = cpu_to_le64(dma); + tx_desc->len = size; + tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT); + index++; + tx_desc++; + tx_buffer++; + if (index == tx_ring->desc_num) { + index = 0; + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_buffer = NBL_TX_BUF(tx_ring, 0); + } + frag++; + } + + *desc_index = index; + return 0; +} + +static inline void nbl_tx_fill_tx_extend_header_leonis(union nbl_tx_extend_head *pkthdr, + struct nbl_tx_hdr_param *param) +{ + pkthdr->mac_len = param->mac_len; + pkthdr->ip_len = param->ip_len; + pkthdr->l4_len = param->l4_len; + pkthdr->l4_type = param->l4_type; + pkthdr->inner_ip_type = param->inner_ip_type; + + pkthdr->l4s_sid = param->l4s_sid; + pkthdr->l4s_sync_ind = param->l4s_sync_ind; + pkthdr->l4s_hdl_ind = param->l4s_hdl_ind; + pkthdr->l4s_pbrac_mode = param->l4s_pbrac_mode; + + pkthdr->mss = param->mss; + pkthdr->tso = param->tso; + + pkthdr->fwd = param->fwd; + pkthdr->rss_lag_en = param->rss_lag_en; + pkthdr->dport = param->dport; + pkthdr->dport_id = param->dport_id; + + pkthdr->l3_csum_en = param->l3_csum_en; + pkthdr->l4_csum_en = param->l4_csum_en; +} + +static bool nbl_skb_is_lacp_or_lldp(struct sk_buff *skb) +{ + __be16 protocol; + + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_SLOW) || protocol == htons(ETH_P_LLDP)) + return true; + + return false; +} + +static int nbl_tx_map(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, + struct nbl_tx_hdr_param *hdr_param) +{ + struct device *dma_dev = NBL_RING_TO_DMA_DEV(tx_ring); + struct nbl_tx_buffer *first; + struct nbl_ring_desc *first_desc; + struct nbl_ring_desc *tx_desc; + union nbl_tx_extend_head *pkthdr; + dma_addr_t hdrdma; + int tso, csum; + u16 desc_index = tx_ring->next_to_use; + u16 head = desc_index; + u16 avail_used_flags = tx_ring->avail_used_flags; + u32 pkthdr_len; + bool can_push; + + first_desc = NBL_TX_DESC(tx_ring, desc_index); + first = NBL_TX_BUF(tx_ring, desc_index); + first->gso_segs = 1; + first->bytecount = skb->len; + first->tx_flags = 0; + first->skb = skb; + skb_tx_timestamp(skb); + + can_push = !skb_header_cloned(skb) && skb_headroom(skb) >= sizeof(*pkthdr); + + if (can_push) + pkthdr = (union nbl_tx_extend_head *)(skb->data - sizeof(*pkthdr)); + else + pkthdr = (union nbl_tx_extend_head *)(skb->cb); + + tso = nbl_tx_tso(first, hdr_param); + if (tso < 0) { + netdev_err(tx_ring->netdev, "tso ret:%d\n", tso); + goto out_drop; + } + + csum = nbl_tx_csum(first, hdr_param); + if (csum < 0) { + netdev_err(tx_ring->netdev, "csum ret:%d\n", csum); + goto out_drop; + } + + memset(pkthdr, 0, sizeof(*pkthdr)); + switch (tx_ring->product_type) { + case NBL_LEONIS_TYPE: + nbl_tx_fill_tx_extend_header_leonis(pkthdr, hdr_param); + break; + default: + netdev_err(tx_ring->netdev, "fill tx extend header failed, product type: 
%d, eth: %u.\n", + tx_ring->product_type, hdr_param->dport_id); + goto out_drop; + } + + pkthdr_len = sizeof(union nbl_tx_extend_head); + + if (can_push) { + __skb_push(skb, pkthdr_len); + if (nbl_map_skb(tx_ring, skb, 1, &desc_index)) + goto dma_map_error; + __skb_pull(skb, pkthdr_len); + } else { + hdrdma = dma_map_single(dma_dev, pkthdr, pkthdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + tx_ring->tx_stats.tx_dma_busy++; + return NETDEV_TX_BUSY; + } + + first_desc->addr = cpu_to_le64(hdrdma); + first_desc->len = pkthdr_len; + + first->dma = hdrdma; + first->len = pkthdr_len; + + desc_index++; + if (desc_index == tx_ring->desc_num) { + desc_index = 0; + tx_ring->avail_used_flags ^= 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } + if (nbl_map_skb(tx_ring, skb, 0, &desc_index)) + goto dma_map_error; + } + + /* stats */ + if (is_multicast_ether_addr(skb->data)) + tx_ring->tx_stats.tx_multicast_packets += tso; + else + tx_ring->tx_stats.tx_unicast_packets += tso; + + if (tso > 1) { + tx_ring->tx_stats.tso_packets++; + tx_ring->tx_stats.tso_bytes += skb->len; + } + tx_ring->tx_stats.tx_csum_packets += csum; + + tx_desc = NBL_TX_DESC(tx_ring, (desc_index == 0 ? tx_ring->desc_num : desc_index) - 1); + tx_desc->flags &= cpu_to_le16(~NBL_PACKED_DESC_F_NEXT); + first->next_to_watch = tx_desc; + first_desc->len += (hdr_param->total_hlen << NBL_TX_TOTAL_HEADERLEN_SHIFT); + first_desc->id = cpu_to_le16(skb_shinfo(skb)->gso_size); + + /* wmb */ + wmb(); + + /* first desc last set flag */ + if (first_desc == tx_desc) + first_desc->flags = cpu_to_le16(avail_used_flags); + else + first_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT); + + tx_ring->next_to_use = desc_index; + + nbl_maybe_stop_tx(tx_ring, DESC_NEEDED); + /* kick doorbell passthrough for performace */ + writel(tx_ring->notify_qid, tx_ring->notify_addr); + + // nbl_trace(tx_map_ok, tx_ring, skb, head, first_desc, pkthdr); + + return NETDEV_TX_OK; + +dma_map_error: + while (desc_index != head) { + if (unlikely(!desc_index)) + desc_index = tx_ring->desc_num; + desc_index--; + nbl_unmap_and_free_tx_resource(tx_ring, NBL_TX_BUF(tx_ring, desc_index), + false, false); + } + + tx_ring->avail_used_flags = avail_used_flags; + tx_ring->tx_stats.tx_dma_busy++; + return NETDEV_TX_BUSY; + +out_drop: + netdev_err(tx_ring->netdev, "tx_map, free_skb\n"); + tx_ring->tx_stats.tx_skb_free++; + // nbl_trace(tx_map_drop, tx_ring, skb); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static netdev_tx_t nbl_res_txrx_rep_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *tx_ring = txrx_mgt->tx_rings[skb_get_queue_mapping(skb)]; + struct nbl_tx_hdr_param hdr_param = { + .mac_len = 14 >> 1, + .ip_len = 20 >> 2, + .l4_len = 20 >> 2, + .mss = 256, + }; + unsigned int count; + int ret = 0; + + count = nbl_xmit_desc_count(skb); + /* TODO: we can not tranmit a packet with more than 32 descriptors */ + WARN_ON(count > MAX_DESC_NUM_PER_PKT); + if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { + if (net_ratelimit()) + dev_warn(NBL_RING_TO_DEV(tx_ring), "There is not enough descriptor to transmit packet in queue %u\n", + tx_ring->queue_index); + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + eth_skb_pad(skb); + + hdr_param.dport_id = *(u16 *)(&skb->cb[NBL_SKB_FILL_VSI_ID_OFF]); + hdr_param.dport = 
NBL_TX_DPORT_HOST; + hdr_param.rss_lag_en = 1; + hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED; + + ret = nbl_tx_map(tx_ring, skb, &hdr_param); + + return ret; +} + +static netdev_tx_t nbl_res_txrx_self_test_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *tx_ring = txrx_mgt->tx_rings[skb_get_queue_mapping(skb)]; + struct nbl_tx_hdr_param hdr_param = { + .mac_len = 14 >> 1, + .ip_len = 20 >> 2, + .l4_len = 20 >> 2, + .mss = 256, + }; + unsigned int count; + + count = nbl_xmit_desc_count(skb); + /* TODO: we can not tranmit a packet with more than 32 descriptors */ + WARN_ON(count > MAX_DESC_NUM_PER_PKT); + if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { + if (net_ratelimit()) + dev_warn(NBL_RING_TO_DEV(tx_ring), "There is not enough descriptor to transmit packet in queue %u\n", + tx_ring->queue_index); + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* for dstore and eth, min packet len is 60 */ + eth_skb_pad(skb); + + hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED; + hdr_param.dport = NBL_TX_DPORT_ETH; + hdr_param.dport_id = tx_ring->eth_id; + hdr_param.rss_lag_en = 0; + + return nbl_tx_map(tx_ring, skb, &hdr_param); +} + +static netdev_tx_t nbl_res_txrx_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *tx_ring = txrx_mgt->tx_rings[skb_get_queue_mapping(skb)]; + struct nbl_tx_hdr_param hdr_param = { + .mac_len = 14 >> 1, + .ip_len = 20 >> 2, + .l4_len = 20 >> 2, + .mss = 256, + }; + unsigned int count; + int ret = 0; + + // nbl_trace(xmit_frame_ring, tx_ring, skb); + + count = nbl_xmit_desc_count(skb); + /* TODO: we can not tranmit a packet with more than 32 descriptors */ + WARN_ON(count > MAX_DESC_NUM_PER_PKT); + if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { + if (net_ratelimit()) + dev_warn(NBL_RING_TO_DEV(tx_ring), "There is not enough descriptor to transmit packet in queue %u\n", + tx_ring->queue_index); + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* for dstore and eth, min packet len is 60 */ + eth_skb_pad(skb); + + hdr_param.dport_id = tx_ring->eth_id; + hdr_param.fwd = 1; + hdr_param.rss_lag_en = 0; + + if (nbl_skb_is_lacp_or_lldp(skb)) { + hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED; + hdr_param.dport = NBL_TX_DPORT_ETH; + } + + ret = nbl_tx_map(tx_ring, skb, &hdr_param); + + return ret; +} + +static void nbl_res_txrx_kick_rx_ring(void *priv, u16 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_notify_param notify_param = {0}; + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index); + + notify_param.notify_qid = rx_ring->notify_qid; + notify_param.tail_ptr = rx_ring->tail_ptr; + phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), ¬ify_param); +} + +static int nbl_res_txring_is_invalid(struct nbl_resource_mgt *res_mgt, + struct seq_file *m, int index) +{ + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *tx_ring; + u8 ring_num = txrx_mgt->tx_ring_num; + + if (index >= ring_num) { + seq_printf(m, "Invalid tx index %d, max ring num is %d\n", index, ring_num); + return 
-EINVAL; + } + + tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, index); + if (!tx_ring || !tx_ring->valid) { + seq_puts(m, "Ring doesn't exist, wrong index or the netdev might be stopped\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_res_rxring_is_invalid(struct nbl_resource_mgt *res_mgt, + struct seq_file *m, int index) +{ + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring; + u8 ring_num = txrx_mgt->rx_ring_num; + + if (index >= ring_num) { + seq_printf(m, "Invalid rx index %d, max ring num is %d\n", index, ring_num); + return -EINVAL; + } + + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index); + if (!rx_ring || !rx_ring->valid) { + seq_puts(m, "Ring doesn't exist, wrong index or the netdev might be stopped\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_res_rx_dump_ring(struct nbl_resource_mgt *res_mgt, struct seq_file *m, int index) +{ + struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index); + struct nbl_ring_desc *desc; + int i; + + if (nbl_res_rxring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "queue_index %d desc_num %d used_wrap_counter 0x%x avail_used_flags 0x%x\n", + ring->queue_index, ring->desc_num, + ring->used_wrap_counter, ring->avail_used_flags); + seq_printf(m, "ntu 0x%x, ntc 0x%x, tail_ptr 0x%x\n", + ring->next_to_use, ring->next_to_clean, ring->tail_ptr); + seq_printf(m, "desc dma 0x%llx, HZ %u\n", ring->dma, HZ); + + seq_puts(m, "desc:\n"); + for (i = 0; i < ring->desc_num; i++) { + desc = ring->desc + i; + seq_printf(m, "desc id %d, addr 0x%llx len %d flag 0x%x\n", + desc->id, desc->addr, desc->len, desc->flags); + } + + return 0; +} + +static int nbl_res_tx_dump_ring(struct nbl_resource_mgt *res_mgt, struct seq_file *m, int index) +{ + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, index); + struct nbl_ring_desc *desc; + u32 total_header_len; + u32 desc_len; + int i; + + if (nbl_res_txring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "queue_index %d desc_num %d used_wrap_counter 0x%x avail_used_flags 0x%x\n", + ring->queue_index, ring->desc_num, + ring->used_wrap_counter, ring->avail_used_flags); + seq_printf(m, "ntu 0x%x, ntc 0x%x tail_ptr 0x%x\n", + ring->next_to_use, ring->next_to_clean, ring->tail_ptr); + seq_printf(m, "desc dma 0x%llx, HZ %u\n", ring->dma, HZ); + seq_printf(m, "tx_skb_free %llu\n", ring->tx_stats.tx_skb_free); + + seq_puts(m, "desc:\n"); + for (i = 0; i < ring->desc_num; i++) { + desc = ring->desc + i; + total_header_len = desc->len >> NBL_TX_TOTAL_HEADERLEN_SHIFT; + desc_len = desc->len & 0xFFFFFF; + seq_printf(m, "desc %d: id/gso_size %d, addr 0x%llx len %d header_len %d flag 0x%x\n", + i, desc->id, desc->addr, desc_len, total_header_len, desc->flags); + } + + return 0; +} + +static int nbl_res_txrx_dump_ring(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + if (is_tx) + return nbl_res_tx_dump_ring(res_mgt, m, index); + else + return nbl_res_rx_dump_ring(res_mgt, m, index); +} + +static int nbl_res_tx_dump_ring_stats(struct nbl_resource_mgt *res_mgt, + struct seq_file *m, int index) +{ + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, index); + + if (nbl_res_txring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "pkts: %lld, bytes: %lld, descs: %lld\n", + ring->stats.packets, ring->stats.bytes, ring->stats.descs); + seq_printf(m, "tso_pkts: %lld, tso_bytes: %lld, tx_checksum_pkts: %lld\n", + 
ring->tx_stats.tso_packets, ring->tx_stats.tso_bytes, + ring->tx_stats.tx_csum_packets); + seq_printf(m, "tx_busy: %lld, tx_dma_busy: %lld\n", + ring->tx_stats.tx_busy, ring->tx_stats.tx_dma_busy); + seq_printf(m, "tx_multicast_pkts: %lld, tx_unicast_pkts: %lld\n", + ring->tx_stats.tx_multicast_packets, + ring->tx_stats.tx_unicast_packets); + seq_printf(m, "tx_skb_free: %lld, tx_desc_addr_err: %lld, tx_desc_len_err: %lld\n", + ring->tx_stats.tx_skb_free, ring->tx_stats.tx_desc_addr_err_cnt, + ring->tx_stats.tx_desc_len_err_cnt); + return 0; +} + +static int nbl_res_rx_dump_ring_stats(struct nbl_resource_mgt *res_mgt, + struct seq_file *m, int index) +{ + struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index); + + if (nbl_res_rxring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "rx_checksum_pkts: %lld, rx_checksum_errors: %lld\n", + ring->rx_stats.rx_csum_packets, ring->rx_stats.rx_csum_errors); + seq_printf(m, "rx_multicast_pkts: %lld, rx_unicast_pkts: %lld\n", + ring->rx_stats.rx_multicast_packets, + ring->rx_stats.rx_unicast_packets); + seq_printf(m, "rx_desc_addr_err: %lld\n", + ring->rx_stats.rx_desc_addr_err_cnt); + seq_printf(m, "rx_alloc_buf_err_cnt: %lld\n", + ring->rx_stats.rx_alloc_buf_err_cnt); + + return 0; +} + +static int nbl_res_txrx_dump_ring_stats(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + if (is_tx) + return nbl_res_tx_dump_ring_stats(res_mgt, m, index); + else + return nbl_res_rx_dump_ring_stats(res_mgt, m, index); +} + +static struct napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + + if (!txrx_mgt->vectors || index >= txrx_mgt->rx_ring_num) { + nbl_err(common, NBL_DEBUG_RESOURCE, "vectors not allocated\n"); + return NULL; + } + + return &txrx_mgt->vectors[index]->napi; +} + +static void nbl_res_txrx_set_vector_info(void *priv, u8 *irq_enable_base, + u32 irq_data, u16 index, bool mask_en) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + + if (!txrx_mgt->vectors || index >= txrx_mgt->rx_ring_num) { + nbl_err(common, NBL_DEBUG_RESOURCE, "vectors not allocated\n"); + return; + } + + txrx_mgt->vectors[index]->irq_enable_base = irq_enable_base; + txrx_mgt->vectors[index]->irq_data = irq_data; + txrx_mgt->vectors[index]->net_msix_mask_en = mask_en; +} + +static void nbl_res_get_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) +{ + pt_ops->start_xmit = nbl_res_txrx_start_xmit; + pt_ops->rep_xmit = nbl_res_txrx_rep_xmit; + pt_ops->self_test_xmit = nbl_res_txrx_self_test_start_xmit; + pt_ops->napi_poll = nbl_res_napi_poll; +} + +static u32 nbl_res_txrx_get_tx_headroom(void *priv) +{ + return sizeof(union nbl_tx_extend_head); +} + +static void nbl_res_txrx_get_queue_stats(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct u64_stats_sync *syncp; + struct nbl_queue_stats *stats; + unsigned int start; + + if (is_tx) { + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, queue_id); + + syncp = &ring->syncp; + stats = &ring->stats; + } else { + struct nbl_res_rx_ring *ring = 
NBL_RES_MGT_TO_RX_RING(res_mgt, queue_id); + + syncp = &ring->syncp; + stats = &ring->stats; + } + + do { + start = u64_stats_fetch_begin(syncp); + memcpy(queue_stats, stats, sizeof(*stats)); + } while (u64_stats_fetch_retry(syncp, start)); +} + +static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + int i; + u64 bytes = 0, packets = 0; + u64 tso_packets = 0, tso_bytes = 0; + u64 tx_csum_packets = 0; + u64 rx_csum_packets = 0, rx_csum_errors = 0; + u64 tx_multicast_packets = 0, tx_unicast_packets = 0; + u64 rx_multicast_packets = 0, rx_unicast_packets = 0; + u64 tx_busy = 0, tx_dma_busy = 0; + u64 tx_desc_addr_err_cnt = 0; + u64 tx_desc_len_err_cnt = 0; + u64 rx_desc_addr_err_cnt = 0; + u64 rx_alloc_buf_err_cnt = 0; + u64 rx_cache_reuse = 0; + u64 rx_cache_full = 0; + u64 rx_cache_empty = 0; + u64 rx_cache_busy = 0; + u64 rx_cache_waive = 0; + u64 tx_skb_free = 0; + unsigned int start; + + rcu_read_lock(); + for (i = 0; i < txrx_mgt->rx_ring_num; i++) { + struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); + + do { + start = u64_stats_fetch_begin(&ring->syncp); + bytes += ring->stats.bytes; + packets += ring->stats.packets; + rx_csum_packets += ring->rx_stats.rx_csum_packets; + rx_csum_errors += ring->rx_stats.rx_csum_errors; + rx_multicast_packets += ring->rx_stats.rx_multicast_packets; + rx_unicast_packets += ring->rx_stats.rx_unicast_packets; + rx_desc_addr_err_cnt += ring->rx_stats.rx_desc_addr_err_cnt; + rx_alloc_buf_err_cnt += ring->rx_stats.rx_alloc_buf_err_cnt; + rx_cache_reuse += ring->rx_stats.rx_cache_reuse; + rx_cache_full += ring->rx_stats.rx_cache_full; + rx_cache_empty += ring->rx_stats.rx_cache_empty; + rx_cache_busy += ring->rx_stats.rx_cache_busy; + rx_cache_waive += ring->rx_stats.rx_cache_waive; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + } + + net_stats->rx_packets = packets; + net_stats->rx_bytes = bytes; + + net_stats->rx_csum_packets = rx_csum_packets; + net_stats->rx_csum_errors = rx_csum_errors; + net_stats->rx_multicast_packets = rx_multicast_packets; + net_stats->rx_unicast_packets = rx_unicast_packets; + + bytes = 0; + packets = 0; + + for (i = 0; i < txrx_mgt->tx_ring_num; i++) { + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); + + do { + start = u64_stats_fetch_begin(&ring->syncp); + bytes += ring->stats.bytes; + packets += ring->stats.packets; + tso_packets += ring->tx_stats.tso_packets; + tso_bytes += ring->tx_stats.tso_bytes; + tx_csum_packets += ring->tx_stats.tx_csum_packets; + tx_busy += ring->tx_stats.tx_busy; + tx_dma_busy += ring->tx_stats.tx_dma_busy; + tx_multicast_packets += ring->tx_stats.tx_multicast_packets; + tx_unicast_packets += ring->tx_stats.tx_unicast_packets; + tx_skb_free += ring->tx_stats.tx_skb_free; + tx_desc_addr_err_cnt += ring->tx_stats.tx_desc_addr_err_cnt; + tx_desc_len_err_cnt += ring->tx_stats.tx_desc_len_err_cnt; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + } + + rcu_read_unlock(); + + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + net_stats->tso_packets = tso_packets; + net_stats->tso_bytes = tso_bytes; + net_stats->tx_csum_packets = tx_csum_packets; + net_stats->tx_busy = tx_busy; + net_stats->tx_dma_busy = tx_dma_busy; + net_stats->tx_multicast_packets = tx_multicast_packets; + net_stats->tx_unicast_packets = tx_unicast_packets; + net_stats->tx_skb_free = tx_skb_free; + 
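+	/*
+	 * All of the sums written into net_stats were gathered above inside
+	 * u64_stats_fetch_begin()/u64_stats_fetch_retry() loops, so each
+	 * ring's 64-bit counters are read as a consistent snapshot even on
+	 * 32-bit hosts where a concurrent writer could otherwise tear them.
+	 */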
net_stats->tx_desc_addr_err_cnt = tx_desc_addr_err_cnt; + net_stats->tx_desc_len_err_cnt = tx_desc_len_err_cnt; + net_stats->rx_desc_addr_err_cnt = rx_desc_addr_err_cnt; + net_stats->rx_alloc_buf_err_cnt = rx_alloc_buf_err_cnt; + net_stats->rx_cache_reuse = rx_cache_reuse; + net_stats->rx_cache_full = rx_cache_full; + net_stats->rx_cache_empty = rx_cache_empty; + net_stats->rx_cache_busy = rx_cache_busy; + net_stats->rx_cache_waive = rx_cache_waive; +} + +static u16 nbl_res_txrx_get_max_desc_num(void) +{ + return NBL_MAX_DESC_NUM; +} + +static u16 nbl_res_txrx_get_min_desc_num(void) +{ + return NBL_MIN_DESC_NUM; +} + +static u16 nbl_res_txrx_get_tx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *ring = txrx_mgt->tx_rings[ring_index]; + + return ring->desc_num; +} + +static u16 nbl_res_txrx_get_rx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *ring = txrx_mgt->rx_rings[ring_index]; + + return ring->desc_num; +} + +static void nbl_res_txrx_set_tx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *ring = txrx_mgt->tx_rings[ring_index]; + + ring->desc_num = desc_num; +} + +static void nbl_res_txrx_set_rx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *ring = txrx_mgt->rx_rings[ring_index]; + + ring->desc_num = desc_num; +} + +static struct sk_buff *nbl_fetch_rx_buffer_lb_test(struct nbl_res_rx_ring *rx_ring, + const struct nbl_ring_desc *rx_desc, + u16 *num_buffers) +{ + struct nbl_rx_buffer *rx_buf; + struct sk_buff *skb; + const struct page *page; + const void *page_addr; + struct nbl_rx_extend_head *hdr; + u32 size = 256; + + rx_buf = nbl_get_rx_buf(rx_ring); + page = rx_buf->di->page; + prefetchw(page); + + page_addr = page_address(page) + rx_buf->offset; + prefetch(page_addr); + + skb = alloc_skb(size, GFP_KERNEL); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + /* get number of buffers */ + hdr = (struct nbl_rx_extend_head *)page_addr; + *num_buffers = le16_to_cpu(hdr->num_buffers); + nbl_rx_csum(rx_ring, skb, hdr); + + memcpy(__skb_put(skb, size), page_addr + sizeof(*hdr), ALIGN(size, sizeof(long))); + + nbl_put_rx_buf(rx_ring, rx_buf); + + return skb; +} + +static struct sk_buff *nbl_res_txrx_clean_rx_lb_test(void *priv, u32 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring = txrx_mgt->rx_rings[ring_index]; + struct nbl_ring_desc *rx_desc; + struct sk_buff *skb; + u16 num_buffers = 0; + u16 cleaned_count = nbl_unused_rx_desc_count(rx_ring); + + if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))) { + nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))); + cleaned_count = 0; + } + + rx_desc = NBL_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!nbl_ring_desc_used(rx_desc, rx_ring->used_wrap_counter)) + return NULL; + 
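+	/*
+	 * The used/avail flag check above confirmed the device is done with
+	 * this descriptor; the read barrier below orders that check before
+	 * any read of the descriptor fields or the received buffer, so the
+	 * loopback test never consumes data the device has not yet written.
+	 */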
+ /* rmb for read desc */ + rmb(); + + skb = nbl_fetch_rx_buffer_lb_test(rx_ring, rx_desc, &num_buffers); + if (!skb) + return NULL; + + cleaned_count++; + + if (num_buffers > 1) + nbl_err(common, NBL_DEBUG_RESOURCE, "More than one desc in lb rx, not supported\n"); + + if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))) + nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))); + + return skb; +} + +static dma_addr_t nbl_res_txrx_restore_abnormal_ring(void *priv, int ring_index, int type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + + vector->started = false; + napi_synchronize(&vector->napi); + + switch (type) { + case NBL_TX: + nbl_res_txrx_stop_tx_ring(res_mgt, ring_index); + return nbl_res_txrx_start_tx_ring(res_mgt, ring_index); + case NBL_RX: + nbl_res_txrx_stop_rx_ring(res_mgt, ring_index); + return nbl_res_txrx_start_rx_ring(res_mgt, ring_index, true); + default: + break; + } + + return -EINVAL; +} + +static int nbl_res_txrx_restart_abnormal_ring(void *priv, int ring_index, int type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index); + struct nbl_res_vector *vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + + switch (type) { + case NBL_TX: + writel(tx_ring->notify_qid, tx_ring->notify_addr); + break; + case NBL_RX: + nbl_res_txrx_kick_rx_ring(res_mgt, ring_index); + break; + default: + break; + } + + vector->started = true; + + return 0; +} + +/* NBL_TXRX_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_TXRX_OPS_TBL \ +do { \ + NBL_TXRX_SET_OPS(get_resource_pt_ops, nbl_res_get_pt_ops); \ + NBL_TXRX_SET_OPS(alloc_rings, nbl_res_txrx_alloc_rings); \ + NBL_TXRX_SET_OPS(remove_rings, nbl_res_txrx_remove_rings); \ + NBL_TXRX_SET_OPS(start_tx_ring, nbl_res_txrx_start_tx_ring); \ + NBL_TXRX_SET_OPS(stop_tx_ring, nbl_res_txrx_stop_tx_ring); \ + NBL_TXRX_SET_OPS(start_rx_ring, nbl_res_txrx_start_rx_ring); \ + NBL_TXRX_SET_OPS(stop_rx_ring, nbl_res_txrx_stop_rx_ring); \ + NBL_TXRX_SET_OPS(kick_rx_ring, nbl_res_txrx_kick_rx_ring); \ + NBL_TXRX_SET_OPS(dump_ring, nbl_res_txrx_dump_ring); \ + NBL_TXRX_SET_OPS(dump_ring_stats, nbl_res_txrx_dump_ring_stats); \ + NBL_TXRX_SET_OPS(get_vector_napi, nbl_res_txrx_get_vector_napi); \ + NBL_TXRX_SET_OPS(set_vector_info, nbl_res_txrx_set_vector_info); \ + NBL_TXRX_SET_OPS(get_tx_headroom, nbl_res_txrx_get_tx_headroom); \ + NBL_TXRX_SET_OPS(get_queue_stats, nbl_res_txrx_get_queue_stats); \ + NBL_TXRX_SET_OPS(get_net_stats, nbl_res_txrx_get_net_stats); \ + NBL_TXRX_SET_OPS(get_max_desc_num, nbl_res_txrx_get_max_desc_num); \ + NBL_TXRX_SET_OPS(get_min_desc_num, nbl_res_txrx_get_min_desc_num); \ + NBL_TXRX_SET_OPS(get_tx_desc_num, nbl_res_txrx_get_tx_desc_num); \ + NBL_TXRX_SET_OPS(get_rx_desc_num, nbl_res_txrx_get_rx_desc_num); \ + NBL_TXRX_SET_OPS(set_tx_desc_num, nbl_res_txrx_set_tx_desc_num); \ + NBL_TXRX_SET_OPS(set_rx_desc_num, nbl_res_txrx_set_rx_desc_num); \ + NBL_TXRX_SET_OPS(clean_rx_lb_test, nbl_res_txrx_clean_rx_lb_test); \ + NBL_TXRX_SET_OPS(restore_abnormal_ring, nbl_res_txrx_restore_abnormal_ring); \ + NBL_TXRX_SET_OPS(restart_abnormal_ring, nbl_res_txrx_restart_abnormal_ring); \ + NBL_TXRX_SET_OPS(register_vsi_ring, nbl_txrx_register_vsi_ring); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int 
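+/*
+ * nbl_txrx_setup_ops()/nbl_txrx_remove_ops() further below expand
+ * NBL_TXRX_OPS_TBL twice: once with NBL_TXRX_SET_OPS() defined to assign
+ * each handler into struct nbl_resource_ops, and once with it defined to
+ * clear the same entries, so adding an op only needs one new line in the
+ * table above.
+ */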
nbl_txrx_setup_mgt(struct device *dev, struct nbl_txrx_mgt **txrx_mgt) +{ + *txrx_mgt = devm_kzalloc(dev, sizeof(struct nbl_txrx_mgt), GFP_KERNEL); + if (!*txrx_mgt) + return -ENOMEM; + + return 0; +} + +static void nbl_txrx_remove_mgt(struct device *dev, struct nbl_txrx_mgt **txrx_mgt) +{ + devm_kfree(dev, *txrx_mgt); + *txrx_mgt = NULL; +} + +int nbl_txrx_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_txrx_mgt **txrx_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + txrx_mgt = &NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + + return nbl_txrx_setup_mgt(dev, txrx_mgt); +} + +void nbl_txrx_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_txrx_mgt **txrx_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + txrx_mgt = &NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + + if (!(*txrx_mgt)) + return; + + nbl_txrx_remove_mgt(dev, txrx_mgt); +} + +int nbl_txrx_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_TXRX_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_TXRX_OPS_TBL; +#undef NBL_TXRX_SET_OPS + + return 0; +} + +void nbl_txrx_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_TXRX_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_TXRX_OPS_TBL; +#undef NBL_TXRX_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h new file mode 100644 index 000000000000..86130a9a7240 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_TXRX_H_ +#define _NBL_TXRX_H_ + +#include "nbl_resource.h" + +#define NBL_RING_TO_COMMON(ring) ((ring)->common) +#define NBL_RING_TO_DEV(ring) ((ring)->dma_dev) +#define NBL_RING_TO_DMA_DEV(ring) ((ring)->dma_dev) + +#define NBL_MIN_DESC_NUM 128 +#define NBL_MAX_DESC_NUM 32768 + +#define NBL_PACKED_DESC_F_NEXT 1 +#define NBL_PACKED_DESC_F_WRITE 2 + +#define DEFAULT_MAX_PF_QUEUE_PAIRS_NUM 16 +#define DEFAULT_MAX_VF_QUEUE_PAIRS_NUM 2 + +#define NBL_PACKED_DESC_F_AVAIL 7 +#define NBL_PACKED_DESC_F_USED 15 + +#define NBL_TX_DESC(tx_ring, i) (&(((tx_ring)->desc)[i])) +#define NBL_RX_DESC(rx_ring, i) (&(((rx_ring)->desc)[i])) +#define NBL_TX_BUF(tx_ring, i) (&(((tx_ring)->tx_bufs)[i])) +#define NBL_RX_BUF(rx_ring, i) (&(((rx_ring)->rx_bufs)[i])) + +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +#define NBL_TX_POLL_WEIGHT 256 + +#define NBL_RX_BUF_256 256 +#define NBL_RX_HDR_SIZE NBL_RX_BUF_256 +#define NBL_RX_BUF_WRITE 16 +#define NBL_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD - NBL_BUFFER_HDR_LEN) + +#define NBL_TXD_DATALEN_BITS 16 +#define NBL_TXD_DATALEN_MAX BIT(NBL_TXD_DATALEN_BITS) + +#define MAX_DESC_NUM_PER_PKT (32) + +#define NBL_RX_BUFSZ (2048) +#define NBL_RX_BUFSZ_ORDER (11) + +#define NBL_BUFFER_HDR_LEN (sizeof(struct nbl_rx_extend_head)) + +#define NBL_ETH_FRAME_MIN_SIZE 60 + +#define NBL_TX_TSO_MSS_MIN (256) +#define NBL_TX_TSO_MSS_MAX (16383) +#define NBL_TX_TSO_L2L3L4_HDR_LEN_MIN (42) +#define NBL_TX_TSO_L2L3L4_HDR_LEN_MAX (128) +#define NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX (255) +#define IP_VERSION_V4 (4) +#define NBL_TX_FLAGS_TSO BIT(0) + +#define NBL_TX_TOTAL_HEADERLEN_SHIFT 24 + +#define NBL_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define NBL_RX_PAGE_PER_FRAGS (PAGE_SIZE >> NBL_RX_BUFSZ_ORDER) + +/* TX inner IP header type */ +enum nbl_tx_iipt { + NBL_TX_IIPT_NONE = 0x0, + NBL_TX_IIPT_IPV6 = 0x1, + 
NBL_TX_IIPT_IPV4 = 0x2, + NBL_TX_IIPT_RSV = 0x3 +}; + +/* TX L4 packet type */ +enum nbl_tx_l4t { + NBL_TX_L4T_NONE = 0x0, + NBL_TX_L4T_TCP = 0x1, + NBL_TX_L4T_UDP = 0x2, + NBL_TX_L4T_RSV = 0x3 +}; + +struct nbl_tx_hdr_param { + u8 l4s_pbrac_mode; + u8 l4s_hdl_ind; + u8 l4s_sync_ind; + u8 tso; + u16 l4s_sid; + u16 mss; + u8 mac_len; + u8 ip_len; + u8 l4_len; + u8 l4_type; + u8 inner_ip_type; + u8 l3_csum_en; + u8 l4_csum_en; + u16 total_hlen; + u16 dport_id:10; + u16 fwd:2; + u16 dport:3; + u16 rss_lag_en:1; +}; + +union nbl_tx_extend_head { + struct { + /* DW0 */ + u32 mac_len :5; + u32 ip_len :5; + u32 l4_len :4; + u32 l4_type :2; + u32 inner_ip_type :2; + u32 external_ip_type :2; + u32 external_ip_len :5; + u32 l4_tunnel_type :2; + u32 l4_tunnel_len :5; + /* DW1 */ + u32 l4s_sid :10; + u32 l4s_sync_ind :1; + u32 l4s_redun_ind :1; + u32 l4s_redun_head_ind :1; + u32 l4s_hdl_ind :1; + u32 l4s_pbrac_mode :1; + u32 rsv0 :2; + u32 mss :14; + u32 tso :1; + /* DW2 */ + /* if dport = NBL_TX_DPORT_ETH; dport_info = 0 + * if dport = NBL_TX_DPORT_HOST; dport_info = host queue id + * if dport = NBL_TX_DPORT_ECPU; dport_info = ecpu queue_id + */ + u32 dport_info :11; + /* if dport = NBL_TX_DPORT_ETH; dport_id[3:0] = eth port id, dport_id[9:4] = lag id + * if dport = NBL_TX_DPORT_HOST; dport_id[9:0] = host vsi_id + * if dport = NBL_TX_DPORT_ECPU; dport_id[9:0] = ecpu vsi_id + */ + u32 dport_id :10; +#define NBL_TX_DPORT_ID_LAG_OFFSET (4) + u32 dport :3; +#define NBL_TX_DPORT_ETH (0) +#define NBL_TX_DPORT_HOST (1) +#define NBL_TX_DPORT_ECPU (2) +#define NBL_TX_DPORT_EMP (3) +#define NBL_TX_DPORT_BMC (4) + u32 fwd :2; +#define NBL_TX_FWD_TYPE_DROP (0) +#define NBL_TX_FWD_TYPE_NORMAL (1) +#define NBL_TX_FWD_TYPE_RSV (2) +#define NBL_TX_FWD_TYPE_CPU_ASSIGNED (3) + u32 rss_lag_en :1; + u32 l4_csum_en :1; + u32 l3_csum_en :1; + u32 rsv1 :3; + }; + struct bootis_hdr { + /* DW0 */ + u32 mac_len :5; + u32 ip_len :5; + u32 l4_len :4; + u32 l4_type :2; + u32 inner_ip_type :2; + u32 external_ip_type :2; + u32 external_ip_len :5; + u32 l4_tunnel_type :2; + u32 l4_tunnel_len :5; + /* DW1 */ + u32 l4s_sid :10; + u32 inner_l3_cs :1; + u32 inner_l4_cs :1; + u32 dport :3; + u32 tag_idx :2; + u32 mss :14; + u32 tso :1; + /* DW2 */ + u32 dport_info :11; + u32 dport_id :12; + u32 tag_en :1; + u32 fwd :2; + u32 rss_lag_en :1; + u32 l4_csum_en :1; + u32 l3_csum_en :1; + u32 rsv1 :3; + } bootis; +}; + +struct nbl_rx_extend_head { + /* DW0 */ + /* 0x0:eth, 0x1:host, 0x2:ecpu, 0x3:emp, 0x4:bcm */ + uint32_t sport :3; + uint32_t dport_info :11; + /* sport = 0, sport_id[3:0] = eth id, + * sport = 1, sport_id[9:0] = host vsi_id, + * sport = 2, sport_id[9:0] = ecpu vsi_id, + */ + uint32_t sport_id :10; + /* 0x0:drop, 0x1:normal, 0x2:cpu upcall */ + uint32_t fwd :2; + uint32_t rsv0 :6; + /* DW1 */ + uint32_t error_code :6; + uint32_t ptype :10; + uint32_t profile_id :4; + uint32_t checksum_status :1; + uint32_t rsv1 :1; + uint32_t l4s_sid :10; + /* DW2 */ + uint32_t rsv3 :2; + uint32_t l4s_hdl_ind :1; + uint32_t l4s_tcp_offset :14; + uint32_t l4s_resync_ind :1; + uint32_t l4s_check_ind :1; + uint32_t l4s_dec_ind :1; + uint32_t rsv2 :4; + uint32_t num_buffers :8; +} __packed; + +static inline u16 nbl_unused_rx_desc_count(struct nbl_res_rx_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->desc_num) + ntc - ntu - 1; +} + +static inline u16 nbl_unused_tx_desc_count(struct nbl_res_tx_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->desc_num) + ntc - ntu - 1; +} + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c new file mode 100644 index 000000000000..1c9caf981576 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include "nbl_vsi.h" + +static int nbl_res_set_promisc_mode(void *priv, u16 vsi_id, u16 mode) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + u16 eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); + + if (pf_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) + return -EINVAL; + + phy_ops->set_promisc_mode(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, eth_id, mode); + + return 0; +} + +static int nbl_res_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->set_spoof_check_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, mac); +} + +static int nbl_res_set_vf_spoof_check(void *priv, u16 vsi_id, int vfid, u8 enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int pfid = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + u16 vf_vsi = vfid == -1 ? vsi_id : nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, + NBL_VSI_DATA); + + return phy_ops->set_spoof_check_enable(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vf_vsi, enable); +} + +static u16 nbl_res_get_vf_function_id(void *priv, u16 vsi_id, int vfid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_sriov_info *sriov_info; + u16 vf_vsi; + int pfid = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[pfid]; + + if (vfid >= sriov_info->active_vf_num) + return U16_MAX; + + vf_vsi = vfid == -1 ? vsi_id : nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + + return nbl_res_vsi_id_to_func_id(res_mgt, vf_vsi); +} + +static int nbl_res_vsi_init_chip_module(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt; + struct nbl_phy_ops *phy_ops; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + ret = phy_ops->init_chip_module(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + res_mgt->resource_info->board_info.eth_speed, + res_mgt->resource_info->board_info.eth_num); + + return ret; +} + +static int nbl_res_vsi_init(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_vsi_mgt *vsi_mgt; + struct nbl_phy_ops *phy_ops; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + vsi_mgt = NBL_RES_MGT_TO_VSI_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + /* TODO: unnecessary? 
*/ + + return ret; +} + +static void nbl_res_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +{ + /*TODO need to get it through adminq*/ + phy_caps->speed = 0xFF; + phy_caps->fec_ability = BIT(ETHTOOL_FEC_RS_BIT) | BIT(ETHTOOL_FEC_BASER_BIT); + phy_caps->pause_param = 0x3; +} + +static void nbl_res_get_phy_state(void *priv, u8 eth_id, struct nbl_phy_state *phy_state) +{ + /*TODO need to get it through adminq*/ + phy_state->current_speed = SPEED_10000; + phy_state->fec_mode = ETHTOOL_FEC_OFF; + phy_state->fc.tx_pause = 1; + phy_state->fc.rx_pause = 1; +} + +/* NBL_vsi_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_VSI_OPS_TBL \ +do { \ + NBL_VSI_SET_OPS(init_chip_module, nbl_res_vsi_init_chip_module); \ + NBL_VSI_SET_OPS(vsi_init, nbl_res_vsi_init); \ + NBL_VSI_SET_OPS(set_promisc_mode, nbl_res_set_promisc_mode); \ + NBL_VSI_SET_OPS(set_spoof_check_addr, nbl_res_set_spoof_check_addr); \ + NBL_VSI_SET_OPS(set_vf_spoof_check, nbl_res_set_vf_spoof_check); \ + NBL_VSI_SET_OPS(get_phy_caps, nbl_res_get_phy_caps); \ + NBL_VSI_SET_OPS(get_phy_state, nbl_res_get_phy_state); \ + NBL_VSI_SET_OPS(get_vf_function_id, nbl_res_get_vf_function_id); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_vsi_setup_mgt(struct device *dev, struct nbl_vsi_mgt **vsi_mgt) +{ + *vsi_mgt = devm_kzalloc(dev, sizeof(struct nbl_vsi_mgt), GFP_KERNEL); + if (!*vsi_mgt) + return -ENOMEM; + + return 0; +} + +static void nbl_vsi_remove_mgt(struct device *dev, struct nbl_vsi_mgt **vsi_mgt) +{ + devm_kfree(dev, *vsi_mgt); + *vsi_mgt = NULL; +} + +int nbl_vsi_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_vsi_mgt **vsi_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + vsi_mgt = &NBL_RES_MGT_TO_VSI_MGT(res_mgt); + + return nbl_vsi_setup_mgt(dev, vsi_mgt); +} + +void nbl_vsi_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_vsi_mgt **vsi_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + vsi_mgt = &NBL_RES_MGT_TO_VSI_MGT(res_mgt); + + if (!(*vsi_mgt)) + return; + + nbl_vsi_remove_mgt(dev, vsi_mgt); +} + +int nbl_vsi_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_VSI_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_VSI_OPS_TBL; +#undef NBL_VSI_SET_OPS + + return 0; +} + +void nbl_vsi_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_VSI_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_VSI_OPS_TBL; +#undef NBL_VSI_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h new file mode 100644 index 000000000000..a23bb223f9a5 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_VSI_H_ +#define _NBL_VSI_H_ + +#include "nbl_resource.h" + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h new file mode 100644 index 000000000000..f4413ea38315 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h @@ -0,0 +1,616 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_DEF_CHANNEL_H_ +#define _NBL_DEF_CHANNEL_H_ + +#include "nbl_include.h" + +#define NBL_CHAN_OPS_TBL_TO_OPS(chan_ops_tbl) ((chan_ops_tbl)->ops) +#define NBL_CHAN_OPS_TBL_TO_PRIV(chan_ops_tbl) ((chan_ops_tbl)->priv) + +#define NBL_CHAN_SEND(chan_send, dst_id, mesg_type, \ + argument, arg_length, response, resp_length, need_ack) \ +do { \ + typeof(chan_send) *__chan_send = &(chan_send); \ + __chan_send->dstid = (dst_id); \ + __chan_send->msg_type = (mesg_type); \ + __chan_send->arg = (argument); \ + __chan_send->arg_len = (arg_length); \ + __chan_send->resp = (response); \ + __chan_send->resp_len = (resp_length); \ + __chan_send->ack = (need_ack); \ +} while (0) + +#define NBL_CHAN_ACK(chan_ack, dst_id, mesg_type, msg_id, err_code, ack_data, data_length) \ +do { \ + typeof(chan_ack) *__chan_ack = &(chan_ack); \ + __chan_ack->dstid = (dst_id); \ + __chan_ack->msg_type = (mesg_type); \ + __chan_ack->msgid = (msg_id); \ + __chan_ack->err = (err_code); \ + __chan_ack->data = (ack_data); \ + __chan_ack->data_len = (data_length); \ +} while (0) + +typedef void (*nbl_chan_resp)(void *, u16, u16, void *, u32); + +enum { + NBL_CHAN_RESP_OK, + NBL_CHAN_RESP_ERR, +}; + +enum nbl_chan_msg_type { + NBL_CHAN_MSG_ACK, + NBL_CHAN_MSG_ADD_MACVLAN, + NBL_CHAN_MSG_DEL_MACVLAN, + NBL_CHAN_MSG_ADD_MULTI_RULE, + NBL_CHAN_MSG_DEL_MULTI_RULE, + NBL_CHAN_MSG_SETUP_MULTI_GROUP, + NBL_CHAN_MSG_REMOVE_MULTI_GROUP, + NBL_CHAN_MSG_REGISTER_NET, + NBL_CHAN_MSG_UNREGISTER_NET, + NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, + NBL_CHAN_MSG_FREE_TXRX_QUEUES, + NBL_CHAN_MSG_SETUP_QUEUE, + NBL_CHAN_MSG_REMOVE_ALL_QUEUES, + NBL_CHAN_MSG_CFG_DSCH, + NBL_CHAN_MSG_SETUP_CQS, + NBL_CHAN_MSG_REMOVE_CQS, + NBL_CHAN_MSG_CFG_QDISC_MQPRIO, + NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, + NBL_CHAN_MSG_DESTROY_MSIX_MAP, + NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, + NBL_CHAN_MSG_GET_GLOBAL_VECTOR, + NBL_CHAN_MSG_GET_VSI_ID, + NBL_CHAN_MSG_SET_PROSISC_MODE, + NBL_CHAN_MSG_GET_FIRMWARE_VERSION, + NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, + NBL_CHAN_MSG_GET_COALESCE, + NBL_CHAN_MSG_SET_COALESCE, + NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, + NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, + NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE, + NBL_CHAN_MSG_GET_RXFH_INDIR, + NBL_CHAN_MSG_GET_RXFH_RSS_KEY, + NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, + NBL_CHAN_MSG_GET_PHY_CAPS, + NBL_CHAN_MSG_GET_PHY_STATE, + NBL_CHAN_MSG_REGISTER_RDMA, + NBL_CHAN_MSG_UNREGISTER_RDMA, + NBL_CHAN_MSG_GET_REAL_HW_ADDR, + NBL_CHAN_MSG_GET_REAL_BDF, + NBL_CHAN_MSG_GRC_PROCESS, + NBL_CHAN_MSG_SET_SFP_STATE, + NBL_CHAN_MSG_SET_ETH_LOOPBACK, + NBL_CHAN_MSG_CHECK_ACTIVE_VF, + NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, + NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, + NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, + NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, + NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, + NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, + NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, + NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, + NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, + NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, + NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, + NBL_CHAN_MSG_FREE_IPSEC_TX_INDEX, + NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, + NBL_CHAN_MSG_FREE_IPSEC_RX_INDEX, + NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, + NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, + NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, + NBL_CHAN_MSG_DEL_IPSEC_TX_FLOW, + NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, + NBL_CHAN_MSG_DEL_IPSEC_RX_FLOW, + NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, + NBL_CHAN_MSG_GET_MBX_IRQ_NUM, + NBL_CHAN_MSG_CLEAR_FLOW, + NBL_CHAN_MSG_CLEAR_QUEUE, + NBL_CHAN_MSG_GET_ETH_ID, + NBL_CHAN_MSG_SET_OFFLOAD_STATUS, + + NBL_CHAN_MSG_INIT_OFLD, + NBL_CHAN_MSG_INIT_CMDQ, + NBL_CHAN_MSG_DESTROY_CMDQ, 
+ NBL_CHAN_MSG_RESET_CMDQ, + NBL_CHAN_MSG_INIT_FLOW, + NBL_CHAN_MSG_DEINIT_FLOW, + NBL_CHAN_MSG_OFFLOAD_FLOW_RULE, + NBL_CHAN_MSG_GET_ACL_SWITCH, + NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, + NBL_CHAN_MSG_INIT_REP, + NBL_CHAN_MSG_GET_LINE_RATE_INFO, + + NBL_CHAN_MSG_REGISTER_NET_REP, + NBL_CHAN_MSG_UNREGISTER_NET_REP, + NBL_CHAN_MSG_REGISTER_ETH_REP, + NBL_CHAN_MSG_UNREGISTER_ETH_REP, + NBL_CHAN_MSG_REGISTER_UPCALL_PORT, + NBL_CHAN_MSG_UNREGISTER_UPCALL_PORT, + NBL_CHAN_MSG_GET_PORT_STATE, + NBL_CHAN_MSG_SET_PORT_ADVERTISING, + NBL_CHAN_MSG_GET_MODULE_INFO, + NBL_CHAN_MSG_GET_MODULE_EEPROM, + NBL_CHAN_MSG_GET_LINK_STATE, + NBL_CHAN_MSG_NOTIFY_LINK_STATE, + + NBL_CHAN_MSG_GET_QUEUE_CXT, + NBL_CHAN_MSG_CFG_LOG, + NBL_CHAN_MSG_INIT_VDPAQ, + NBL_CHAN_MSG_DESTROY_VDPAQ, + NBL_CHAN_GET_UPCALL_PORT, + NBL_CHAN_MSG_NOTIFY_ETH_REP_LINK_STATE, + NBL_CHAN_MSG_SET_ETH_MAC_ADDR, + NBL_CHAN_MSG_GET_FUNCTION_ID, + NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, + + NBL_CHAN_MSG_DISABLE_PHY_FLOW, + NBL_CHAN_MSG_ENABLE_PHY_FLOW, + NBL_CHAN_MSG_SET_UPCALL_RULE, + NBL_CHAN_MSG_UNSET_UPCALL_RULE, + + NBL_CHAN_MSG_GET_REG_DUMP, + NBL_CHAN_MSG_GET_REG_DUMP_LEN, + + NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, + NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, + NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, + NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, + NBL_CHAN_MSG_ADD_LAG_FLOW, + NBL_CHAN_MSG_DEL_LAG_FLOW, + + NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, + NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, + NBL_CHAN_MSG_SET_TC_FLOW_INFO, + NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, + NBL_CHAN_MSG_INIT_ACL, + NBL_CHAN_MSG_UNINIT_ACL, + + NBL_CHAN_MSG_CFG_LAG_MCC, + + NBL_CHAN_MSG_REGISTER_VSI2Q, + NBL_CHAN_MSG_SETUP_Q2VSI, + NBL_CHAN_MSG_REMOVE_Q2VSI, + NBL_CHAN_MSG_SETUP_RSS, + NBL_CHAN_MSG_REMOVE_RSS, + NBL_CHAN_MSG_GET_REP_QUEUE_INFO, + NBL_CHAN_MSG_CTRL_PORT_LED, + NBL_CHAN_MSG_NWAY_RESET, + NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, + NBL_CHAN_MSG_GET_ETH_STATS, + NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, + NBL_CHAN_MSG_GET_BOARD_INFO, + + NBL_CHAN_MSG_GET_P4_USED, + NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, + + NBL_CHAN_MSG_ADD_LLDP_FLOW, + NBL_CHAN_MSG_DEL_LLDP_FLOW, + + NBL_CHAN_MSG_CFG_ETH_BOND_INFO, + NBL_CHAN_MSG_CFG_DUPPKT_MCC, + + NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, + NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, + + NBL_CHAN_MSG_GET_BOARD_ID, + + NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, + NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, + + NBL_CHAN_MSG_REGISTER_RDMA_BOND, + NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, + + NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, + NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, + NBL_CHAN_MSG_RESTORE_HW_QUEUE, + + NBL_CHAN_MSG_KEEP_ALIVE, + + NBL_CHAN_MSG_GET_BASE_MAC_ADDR, + + NBL_CHAN_MSG_CFG_BOND_SHAPING, + NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, + + NBL_CHAN_MSG_ALLOC_KT_BLOCK, + NBL_CHAN_MSG_FREE_KT_BLOCK, + + NBL_CHAN_MSG_GET_USER_QUEUE_INFO, + NBL_CHAN_MSG_GET_ETH_BOND_INFO, + + NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, + NBL_CHAN_MSG_SET_BRIDGE_MODE, + + NBL_CHAN_MSG_GET_VF_FUNCTION_ID, + NBL_CHAN_MSG_SET_VF_LINK_STATE, + + NBL_CHAN_MSG_SET_PMD_DEBUG, + + /* mailbox msg end */ + NBL_CHAN_MSG_MAILBOX_MAX, + + /* adminq msg */ + NBL_CHAN_MSG_ADMINQ_GET_EMP_VERSION = 0x8101, /* Deprecated, should not be used */ + NBL_CHAN_MSG_ADMINQ_GET_NVM_VERSION = 0x8102, + NBL_CHAN_MSG_ADMINQ_REBOOT = 0x8104, + NBL_CHAN_MSG_ADMINQ_FLR_NOTIFY = 0x8105, + NBL_CHAN_MSG_ADMINQ_LOAD_P4 = 0x8107, + NBL_CHAN_MSG_ADMINQ_LOAD_P4_DEFAULT = 0x8108, + NBL_CHAN_MSG_ADMINQ_FLASH_ERASE = 0x8201, + NBL_CHAN_MSG_ADMINQ_FLASH_READ = 0x8202, + NBL_CHAN_MSG_ADMINQ_FLASH_WRITE = 0x8203, + NBL_CHAN_MSG_ADMINQ_FLASH_ACTIVATE = 0x8204, + NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE = 0x8205, 
+ NBL_CHAN_MSG_ADMINQ_RESOURCE_READ = 0x8206, + NBL_CHAN_MSG_ADMINQ_GET_NVM_BANK_INDEX = 0x820B, + NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK = 0x820C, + NBL_CHAN_MSG_ADMINQ_FLASH_LOCK = 0x820D, + NBL_CHAN_MSG_ADMINQ_FLASH_UNLOCK = 0x820E, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES = 0x8300, + NBL_CHAN_MSG_ADMINQ_PORT_NOTIFY = 0x8301, + NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM = 0x8302, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS = 0x8303, + /* TODO: new kernel and ethtool support show fec stats */ + NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS = 0x408, + NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_WRITE = 0x8F01, + NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_READ = 0x8F02, + + NBL_CHAN_MSG_MAX, +}; + +#define NBL_CHAN_ADMINQ_FUNCTION_ID (0xFFFF) + +struct nbl_chan_vsi_qid_info { + u16 vsi_id; + u16 local_qid; +}; + +enum nbl_chan_state { + NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_RESETTING, + NBL_CHAN_STATE_NBITS +}; + +struct nbl_chan_param_add_macvlan { + u8 mac[ETH_ALEN]; + u16 vlan; + u16 vsi; +}; + +struct nbl_chan_param_del_macvlan { + u8 mac[ETH_ALEN]; + u16 vlan; + u16 vsi; +}; + +struct nbl_chan_param_register_net_info { + u16 pf_bdf; + u64 vf_bar_start; + u64 vf_bar_size; + u16 total_vfs; + u16 offset; + u16 stride; + u64 pf_bar_start; +}; + +struct nbl_chan_param_alloc_txrx_queues { + u16 vsi_id; + u16 queue_num; +}; + +struct nbl_chan_param_register_vsi2q { + u16 vsi_index; + u16 vsi_id; + u16 queue_offset; + u16 queue_num; +}; + +struct nbl_chan_param_setup_queue { + struct nbl_txrx_queue_param queue_param; + bool is_tx; +}; + +struct nbl_chan_param_cfg_dsch { + u16 vsi_id; + bool vld; +}; + +struct nbl_chan_param_setup_cqs { + u16 vsi_id; + u16 real_qps; +}; + +struct nbl_chan_param_set_promisc_mode { + u16 vsi_id; + u16 mode; +}; + +struct nbl_chan_param_cfg_msix_map { + u16 num_net_msix; + u16 num_others_msix; + u16 msix_mask_en; +}; + +struct nbl_chan_param_enable_mailbox_irq { + u16 vector_id; + bool enable_msix; +}; + +struct nbl_chan_param_get_global_vector { + u16 vsi_id; + u16 vector_id; +}; + +struct nbl_chan_param_get_vsi_id { + u16 vsi_id; + u16 type; +}; + +struct nbl_chan_param_get_eth_id { + u16 vsi_id; + u8 eth_mode; + u8 eth_id; +}; + +struct nbl_chan_param_get_queue_info { + u16 queue_num; + u16 queue_size; +}; + +struct nbl_chan_param_set_eth_loopback { + u32 eth_port_id; + u32 enable; +}; + +struct nbl_chan_param_get_queue_err_stats { + u8 queue_id; + bool is_tx; +}; + +struct nbl_chan_param_set_coalesce { + u16 local_vector_id; + u16 vector_num; + u16 rx_max_coalesced_frames; + u16 rx_coalesce_usecs; +}; + +struct nbl_chan_param_set_spoof_check_addr { + u16 vsi_id; + u8 mac[ETH_ALEN]; +}; + +struct nbl_chan_param_set_vf_spoof_check { + u16 vsi_id; + u16 vf_id; + bool enable; +}; + +struct nbl_chan_param_get_rxfh_indir { + u16 vsi_id; + u32 rxfh_indir_size; +}; + +struct nbl_chan_result_get_real_bdf { + u8 bus; + u8 dev; + u8 function; +}; + +struct nbl_chan_param_set_upcall { + u16 vsi_id; + u8 eth_id; +}; + +struct nbl_chan_param_set_func_vld { + u8 eth_id; + bool vld; +}; + +struct nbl_chan_param_nvm_version_resp { + char magic[8]; /* "M181FWV0" */ + u32 version; /* major << 16 | minor << 8 | revision */ + u32 build_date; /* 0x20231231 - 2023.12.31 */ + u32 build_time; /* 0x00123456 - 12:34:56 */ + u32 build_hash; /* git commit hash */ + u32 rsv[2]; +}; + +struct nbl_chan_param_flash_read { + u32 bank_id; + u32 offset; + u32 len; +#define NBL_CHAN_FLASH_READ_LEN 0x800 +}; + +struct nbl_chan_param_flash_erase { + u32 bank_id; + u32 offset; + u32 len; +#define NBL_CHAN_FLASH_ERASE_LEN 0x1000 +}; + +struct 
nbl_chan_resource_write_param { + u32 resid; + u32 offset; + u32 len; + u8 data[]; +}; + +struct nbl_chan_resource_read_param { + u32 resid; + u32 offset; + u32 len; +}; + +struct nbl_chan_param_flash_write { + u32 bank_id; + u32 offset; + u32 len; +#define NBL_CHAN_FLASH_WRITE_LEN 0x800 + u8 data[NBL_CHAN_FLASH_WRITE_LEN]; +}; + +struct nbl_chan_param_load_p4 { + u8 name[NBL_P4_SECTION_NAME_LEN]; + u32 addr; + u32 size; + u32 section_index; + u32 section_offset; + u32 load_start; + u32 load_end; + u8 data[]; +}; + +struct nbl_chan_result_flash_activate { + u32 err_code; + u32 reset_flag; +}; + +struct nbl_chan_param_set_sfp_state { + u8 eth_id; + u8 state; +}; + +struct nbl_chan_param_get_module_eeprom { + u8 eth_id; + struct ethtool_eeprom eeprom; +}; + +struct nbl_chan_param_module_eeprom_info { + u8 eth_id; + u8 i2c_address; + u8 page; + u8 bank; + u32 write:1; + u32 rsvd:31; + u16 offset; + u16 length; +#define NBL_MODULE_EEPRO_WRITE_MAX_LEN (4) + u8 data[NBL_MODULE_EEPRO_WRITE_MAX_LEN]; +}; + +struct nbl_chan_param_eth_rep_notify_link_state { + u8 eth_id; + u8 link_state; +}; + +struct nbl_chan_param_set_eth_mac_addr { + u8 mac[ETH_ALEN]; + u8 eth_id; +}; + +struct nbl_chan_param_ctrl_port_led { + u32 eth_id; + enum nbl_led_reg_ctrl led_status; +}; + +struct nbl_chan_param_set_intr_suppress_level { + u16 local_vector_id; + u16 vector_num; + u16 level; +}; + +struct nbl_chan_param_get_private_stat_data { + u32 eth_id; + u32 data_len; +}; + +struct nbl_chan_param_get_module_tempetature { + u8 eth_id; + enum nbl_module_temp_type type; +}; + +struct nbl_chan_param_restore_queue { + u16 local_queue_id; + int type; +}; + +struct nbl_chan_param_restart_queue { + u16 local_queue_id; + int type; +}; + +struct nbl_chan_param_restore_hw_queue { + u16 vsi_id; + u16 local_queue_id; + dma_addr_t dma; + int type; +}; + +struct nbl_chan_param_get_vf_func_id { + u16 vsi_id; + int vf_id; +}; + +struct nbl_chan_param_notify_link_state { + u8 link_state; + u32 link_speed; +}; + +struct nbl_chan_send_info { + u16 dstid; + u16 msg_type; + void *arg; + size_t arg_len; + void *resp; + size_t resp_len; + u16 ack; +}; + +struct nbl_chan_ack_info { + u16 dstid; + u16 msg_type; + u16 msgid; + int err; + void *data; + u32 data_len; +}; + +enum nbl_channel_type { + NBL_CHAN_TYPE_MAILBOX, + NBL_CHAN_TYPE_ADMINQ, + NBL_CHAN_TYPE_MAX +}; + +#define NBL_LINE_RATE_INFO_LENGTH (3) +struct nbl_rep_line_rate_info { + u16 vsi_id; + u16 func_id; + u32 data[NBL_LINE_RATE_INFO_LENGTH]; +}; + +struct nbl_channel_ops { + int (*send_msg)(void *priv, struct nbl_chan_send_info *chan_send); + int (*send_ack)(void *priv, struct nbl_chan_ack_info *chan_ack); + int (*register_msg)(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv); + int (*cfg_chan_qinfo_map_table)(void *priv, u8 chan_type); + bool (*check_queue_exist)(void *priv, u8 chan_type); + int (*setup_queue)(void *priv, u8 chan_type); + int (*set_listener_info)(void *priv, void *shm_ring, struct eventfd_ctx *eventfd); + int (*set_listener_msgtype)(void *priv, int msgtype); + void (*clear_listener_info)(void *priv); + int (*teardown_queue)(void *priv, u8 chan_type); + int (*set_queue_interrupt_state)(void *priv, u8 chan_type, bool ready); + void (*clean_queue_subtask)(void *priv, u8 chan_type); + int (*dump_txq)(void *priv, struct seq_file *m, u8 type); + int (*dump_rxq)(void *priv, struct seq_file *m, u8 type); + u32 (*get_adminq_tx_buf_size)(void *priv); + int (*setup_keepalive)(void *priv, u16 dest_id, u8 chan_type); + void (*remove_keepalive)(void *priv, u8 
chan_type); + void (*register_chan_task)(void *priv, u8 chan_type, struct work_struct *task); +}; + +struct nbl_channel_ops_tbl { + struct nbl_channel_ops *ops; + void *priv; +}; + +int nbl_chan_init_common(void *p, struct nbl_init_param *param); +void nbl_chan_remove_common(void *p); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h new file mode 100644 index 000000000000..12b8fead495f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h @@ -0,0 +1,474 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEF_COMMON_H_ +#define _NBL_DEF_COMMON_H_ + +#include "nbl_include.h" +#include +#include + +#define NBL_OK 0 +#define NBL_CONTINUE 1 +#define NBL_FAIL -1 + +#define NBL_HASH_CFT_MAX 4 +#define NBL_HASH_CFT_AVL 2 + +#define NBL_CRC16_CCITT(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 1, 0x0000) +#define NBL_CRC16_CCITT_FALSE(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0xFFFF, 0, 0x0000) +#define NBL_CRC16_XMODEM(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 0, 0x0000) +#define NBL_CRC16_IBM(data, size) \ + nbl_calc_crc16(data, size, 0x8005, 0x0000, 1, 0x0000) + +static inline void nbl_tcam_truth_value_convert(u64 *data, u64 *mask) +{ + u64 tcam_x = 0; + u64 tcam_y = 0; + + tcam_x = *data & ~(*mask); + tcam_y = ~(*data) & ~(*mask); + + *data = tcam_x; + *mask = tcam_y; +} + +static inline u8 nbl_invert_uint8(const u8 data) +{ + u8 i, result = 0; + + for (i = 0; i < 8; i++) { + if (data & (1 << i)) + result |= 1 << (7 - i); + } + + return result; +} + +static inline u16 nbl_invert_uint16(const u16 data) +{ + u16 i, result = 0; + + for (i = 0; i < 16; i++) { + if (data & (1 << i)) + result |= 1 << (15 - i); + } + + return result; +} + +static inline u16 nbl_calc_crc16(const u8 *data, u32 size, u16 crc_poly, + u16 init_value, u8 ref_flag, u16 xorout) +{ + u16 crc_reg = init_value, tmp = 0; + u8 j, byte = 0; + + while (size--) { + byte = *(data++); + if (ref_flag) + byte = nbl_invert_uint8(byte); + crc_reg ^= byte << 8; + for (j = 0; j < 8; j++) { + tmp = crc_reg & 0x8000; + crc_reg <<= 1; + if (tmp) + crc_reg ^= crc_poly; + } + } + + if (ref_flag) + crc_reg = nbl_invert_uint16(crc_reg); + + crc_reg = crc_reg ^ xorout; + return crc_reg; +} + +static inline u16 nbl_hash_transfer(u16 hash, u16 power, u16 depth) +{ + u16 temp = 0; + u16 val = 0; + u32 val2 = 0; + u16 off = 16 - power; + + temp = (hash >> power); + val = hash << off; + val = val >> off; + + if (depth == 0) { + val = temp + val; + val = val << off; + val = val >> off; + } else { + val2 = val; + val2 *= depth; + val2 = val2 >> power; + val = (u16)val2; + } + + return val; +} + +/* debug masks - set these bits in adapter->debug_mask to control output */ +enum nbl_debug_mask { + /* BIT0~BIT30 use to define adapter debug_mask */ + NBL_DEBUG_MAIN = 0x00000001, + NBL_DEBUG_COMMON = 0x00000002, + NBL_DEBUG_DEBUGFS = 0x00000004, + NBL_DEBUG_PHY = 0x00000008, + NBL_DEBUG_FLOW = 0x00000010, + NBL_DEBUG_RESOURCE = 0x00000020, + NBL_DEBUG_QUEUE = 0x00000040, + NBL_DEBUG_INTR = 0x00000080, + NBL_DEBUG_ADMINQ = 0x00000100, + NBL_DEBUG_DEVLINK = 0x00000200, + NBL_DEBUG_ACCEL = 0x00000400, + NBL_DEBUG_MBX = 0x00000800, + NBL_DEBUG_ST = 0x00001000, + NBL_DEBUG_VSI = 0x00002000, + NBL_DEBUG_CUSTOMIZED_P4 = 0x00004000, + + /* BIT31 use to distinguish netif debug level or adapter debug_mask */ + 
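+	/*
+	 * The nbl_err()/nbl_warn()/nbl_info()/nbl_debug() wrappers defined
+	 * after this enum only print when the requested mask bit is set in
+	 * common->debug_lvl, so individual subsystems can be made verbose by
+	 * setting the corresponding bits.
+	 */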
NBL_DEBUG_USER = 0x80000000, + + /* Means turn on all adapter debug_mask */ + NBL_DEBUG_ALL = 0xFFFFFFFF +}; + +#define nbl_err(common, lvl, fmt, ...) \ +do { \ + typeof(common) _common = (common); \ + if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \ + dev_err(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \ +} while (0) + +#define nbl_warn(common, lvl, fmt, ...) \ +do { \ + typeof(common) _common = (common); \ + if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \ + dev_warn(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \ +} while (0) + +#define nbl_info(common, lvl, fmt, ...) \ +do { \ + typeof(common) _common = (common); \ + if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \ + dev_info(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \ +} while (0) + +#define nbl_debug(common, lvl, fmt, ...) \ +do { \ + typeof(common) _common = (common); \ + if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \ + dev_dbg(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \ +} while (0) + +#define NBL_COMMON_TO_PDEV(common) ((common)->pdev) +#define NBL_COMMON_TO_DEV(common) ((common)->dev) +#define NBL_COMMON_TO_DMA_DEV(common) ((common)->dma_dev) +#define NBL_COMMON_TO_VSI_ID(common) ((common)->vsi_id) +#define NBL_COMMON_TO_ETH_ID(common) ((common)->eth_id) +#define NBL_COMMON_TO_ETH_MODE(common) ((common)->eth_mode) +#define NBL_COMMON_TO_DEBUG_LVL(common) ((common)->debug_lvl) +#define NBL_COMMON_TO_VF_CAP(common) ((common)->is_vf) +#define NBL_COMMON_TO_PCI_USING_DAC(common) ((common)->pci_using_dac) +#define NBL_COMMON_TO_MGT_PF(common) ((common)->mgt_pf) +#define NBL_COMMON_TO_PCI_FUNC_ID(common) ((common)->function) +#define NBL_COMMON_TO_BOARD_ID(common) ((common)->board_id) + +#define NBL_ONE_ETHERNET_PORT (1) +#define NBL_TWO_ETHERNET_PORT (2) +#define NBL_FOUR_ETHERNET_PORT (4) +#define NBL_TWO_ETHERNET_VSI_ID_GAP (512) +#define NBL_FOUR_ETHERNET_VSI_ID_GAP (256) +#define NBL_VSI_ID_GAP(mode) ((mode) == NBL_FOUR_ETHERNET_PORT ? 
\ + NBL_FOUR_ETHERNET_VSI_ID_GAP : \ + NBL_TWO_ETHERNET_VSI_ID_GAP) + +#define NBL_BOOTIS_ECPU_ETH0_FUNCTION (2) +#define NBL_BOOTIS_ECPU_ETH1_FUNCTION (3) +#define NBL_BOOTIS_ECPU_ETH0_VSI (2020) +#define NBL_BOOTIS_ECPU_ETH1_VSI (2021) + +#define NBL_REP_FILL_EXT_HDR (1) +#define NBL_PF_FILL_EXT_HDR (2) + +#define NBL_SKB_FILL_VSI_ID_OFF (32) +#define NBL_SKB_FILL_EXT_HDR_OFF (34) + +#define NBL_INDEX_SIZE_MAX (64 * 1024) /* index max sise */ + +#define NBL_INDEX_TBL_KEY_INIT(key, dev_arg, start_index_arg, index_size_arg, key_size_arg) \ +do { \ + typeof(key) __key = key; \ + __key->dev = dev_arg; \ + __key->start_index = start_index_arg; \ + __key->index_size = index_size_arg; \ + __key->key_size = key_size_arg; \ +} while (0) + +struct nbl_common_info { + struct pci_dev *pdev; + struct device *dev; + struct device *dma_dev; + u32 debug_lvl; + u32 msg_enable; + u16 vsi_id; + u8 eth_id; + u8 eth_mode; + u8 is_vf; + + u8 function; + u8 devid; + u8 bus; + + u16 mgt_pf; + u8 board_id; + + bool pci_using_dac; + u8 tc_inst_id; /* for tc flow and cmdq */ + + enum nbl_product_type product_type; +}; + +struct nbl_netdev_rep_attr { + struct attribute attr; + ssize_t (*show)(struct device *dev, + struct nbl_netdev_rep_attr *attr, char *buf); + ssize_t (*store)(struct device *dev, + struct nbl_netdev_rep_attr *attr, const char *buf, size_t len); + int rep_id; +}; + +struct nbl_index_tbl_key { + struct device *dev; + u32 start_index; + u32 index_size; /* the avail index is [start_index, start_index + index_size) */ + u32 key_size; +}; + +struct nbl_hash_tbl_key { + struct device *dev; + u16 key_size; + u16 data_size; /* no include key or node member */ + u16 bucket_size; + u8 lock_need; /* true: support multi thread operation */ + u8 resv; +}; + +#define NBL_HASH_TBL_KEY_INIT(key, dev_arg, key_size_arg, data_size_arg, bucket_size_arg, \ + lock_need_args) \ +do { \ + typeof(key) __key = key; \ + __key->dev = dev_arg; \ + __key->key_size = key_size_arg; \ + __key->data_size = data_size_arg; \ + __key->bucket_size = bucket_size_arg; \ + __key->lock_need = lock_need_args; \ + __key->resv = 0; \ +} while (0) + +enum nbl_hash_tbl_op_type { + NBL_HASH_TBL_OP_SHOW = 0, + NBL_HASH_TBL_OP_DELETE, +}; + +struct nbl_hash_tbl_del_key { + void *action_priv; + void (*action_func)(void *priv, void *key, void *data); +}; + +#define NBL_HASH_TBL_DEL_KEY_INIT(key, priv_arg, act_func_arg) \ +do { \ + typeof(key) __key = key; \ + __key->action_priv = priv_arg; \ + __key->action_func = act_func_arg; \ +} while (0) + +struct nbl_hash_tbl_scan_key { + enum nbl_hash_tbl_op_type op_type; + void *match_condition; + /* match ret value must be 0 if the node accord with the condition */ + int (*match_func)(void *condition, void *key, void *data); + void *action_priv; + void (*action_func)(void *priv, void *key, void *data); +}; + +#define NBL_HASH_TBL_SCAN_KEY_INIT(key, op_type_arg, con_arg, match_func_arg, priv_arg, \ + act_func_arg) \ +do { \ + typeof(key) __key = key; \ + __key->op_type = op_type_arg; \ + __key->match_condition = con_arg; \ + __key->match_func = match_func_arg; \ + __key->action_priv = priv_arg; \ + __key->action_func = act_func_arg; \ +} while (0) + +struct nbl_hash_xy_tbl_key { + struct device *dev; + u16 x_axis_key_size; + u16 y_axis_key_size; /* y_axis_key_len = key_len - x_axis_key_len */ + u16 data_size; /* no include key or node member */ + u16 bucket_size; + u16 x_axis_bucket_size; + u16 y_axis_bucket_size; + u8 lock_need; /* true: support multi thread operation */ + u8 resv[3]; +}; + +#define 
NBL_HASH_XY_TBL_KEY_INIT(key, dev_arg, x_key_size_arg, y_key_size_arg, data_size_arg, \ + bucket_size_args, x_bucket_size_arg, y_bucket_size_arg, \ + lock_need_args) \ +do { \ + typeof(key) __key = key; \ + __key->dev = dev_arg; \ + __key->x_axis_key_size = x_key_size_arg; \ + __key->y_axis_key_size = y_key_size_arg; \ + __key->data_size = data_size_arg; \ + __key->bucket_size = bucket_size_args; \ + __key->x_axis_bucket_size = x_bucket_size_arg; \ + __key->y_axis_bucket_size = y_bucket_size_arg; \ + __key->lock_need = lock_need_args; \ + memset(__key->resv, 0, sizeof(__key->resv)); \ +} while (0) + +enum nbl_hash_xy_tbl_scan_type { + NBL_HASH_TBL_ALL_SCAN = 0, + NBL_HASH_TBL_X_AXIS_SCAN, + NBL_HASH_TBL_Y_AXIS_SCAN, +}; + +/* true: only query the match one, eg. if x_axis: mac; y_axist: vlan*/ +/** + * member "only_query_exist" use + * if true: only query the match one, eg. if x_axis: mac; y_axis: vlan, if only to query the tbl + * has a gevin "mac", the nbl_hash_xy_tbl_scan_key struct use as flow: + * op_type = NBL_HASH_TBL_OP_SHOW; + * scan_type = NBL_HASH_TBL_X_AXIS_SCAN; + * only_query_exist = true; + * x_key = the mac_addr; + * y_key = NULL; + * match_func = NULL; + * action_func = NULL; + */ +struct nbl_hash_xy_tbl_scan_key { + enum nbl_hash_tbl_op_type op_type; + enum nbl_hash_xy_tbl_scan_type scan_type; + bool only_query_exist; + u8 resv[3]; + void *x_key; + void *y_key; + void *match_condition; + /* match ret value must be 0 if the node accord with the condition */ + int (*match_func)(void *condition, void *x_key, void *y_key, void *data); + void *action_priv; + void (*action_func)(void *priv, void *x_key, void *y_key, void *data); +}; + +#define NBL_HASH_XY_TBL_SCAN_KEY_INIT(key, op_type_arg, scan_type_arg, query_flag_arg, \ + x_key_arg, y_key_arg, con_arg, match_func_arg, \ + priv_arg, act_func_arg) \ +do { \ + typeof(key) __key = key; \ + __key->op_type = op_type_arg; \ + __key->scan_type = scan_type_arg; \ + __key->only_query_exist = query_flag_arg; \ + memset(__key->resv, 0, sizeof(__key->resv)); \ + __key->x_key = x_key_arg; \ + __key->y_key = y_key_arg; \ + __key->match_condition = con_arg; \ + __key->match_func = match_func_arg; \ + __key->action_priv = priv_arg; \ + __key->action_func = act_func_arg; \ +} while (0) + +struct nbl_hash_xy_tbl_del_key { + void *action_priv; + void (*action_func)(void *priv, void *x_key, void *y_key, void *data); +}; + +#define NBL_HASH_XY_TBL_DEL_KEY_INIT(key, priv_arg, act_func_arg) \ +do { \ + typeof(key) __key = key; \ + __key->action_priv = priv_arg; \ + __key->action_func = act_func_arg; \ +} while (0) + +void nbl_convert_mac(u8 *mac, u8 *reverse_mac); + +void nbl_common_queue_work(struct work_struct *task, bool ctrl_task, bool singlethread); +void nbl_common_queue_work_rdma(struct work_struct *task); +void nbl_common_queue_delayed_work(struct delayed_work *task, u32 msec, + bool ctrl_task, bool singlethread); +void nbl_common_queue_delayed_work_keepalive(struct delayed_work *task, u32 msec); +void nbl_common_release_task(struct work_struct *task); +void nbl_common_alloc_task(struct work_struct *task, void *func); +void nbl_common_release_delayed_task(struct delayed_work *task); +void nbl_common_alloc_delayed_task(struct delayed_work *task, void *func); +void nbl_common_flush_task(struct work_struct *task); + +void nbl_common_destroy_wq(void); +int nbl_common_create_wq(void); + +void nbl_debugfs_func_init(void *p, struct nbl_init_param *param); +void nbl_debugfs_func_remove(void *p); + +bool nbl_dma_iommu_status(struct pci_dev *pdev); 
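+/*
+ * Minimal usage sketch for the index-table helpers declared just below,
+ * paired with the NBL_INDEX_TBL_KEY_INIT() initializer defined above
+ * (illustrative only; "dev", the u32 lookup key and the omitted error
+ * handling are assumptions, not part of the driver's call flow):
+ *
+ *	struct nbl_index_tbl_key tbl_key;
+ *	u32 my_key = 0;
+ *	void *tbl;
+ *	int idx;
+ *
+ *	NBL_INDEX_TBL_KEY_INIT(&tbl_key, dev, 0, 128, sizeof(my_key));
+ *	tbl = nbl_common_init_index_table(&tbl_key);
+ *	if (!tbl)
+ *		return -ENOMEM;
+ *	idx = nbl_common_get_index(tbl, &my_key, sizeof(my_key));
+ *	...
+ *	nbl_common_free_index(tbl, &my_key, sizeof(my_key));
+ *	nbl_common_remove_index_table(tbl);
+ */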
+bool nbl_dma_remap_status(struct pci_dev *pdev); +void nbl_net_addr_rep_attr(struct nbl_netdev_rep_attr *rep_attr, int rep_id); +u32 nbl_common_pf_id_subtraction_mgtpf_id(struct nbl_common_info *common, u32 pf_id); +void *nbl_common_init_index_table(struct nbl_index_tbl_key *key); +void nbl_common_remove_index_table(void *priv); +int nbl_common_get_index(void *priv, void *key, u32 key_size); +void nbl_common_free_index(void *priv, void *key, u32 key_size); + +/* ---- EVENT-NOTIFIER ---- */ +enum nbl_event_type { + NBL_EVENT_LAG_UPDATE = 0, + NBL_EVENT_OFFLOAD_STATUS_CHANGED, + NBL_EVENT_LINK_STATE_UPDATE, + NBL_EVENT_DEV_MODE_SWITCH, + NBL_EVENT_RDMA_ADEV_UPDATE, + NBL_EVENT_MAX, +}; + +struct nbl_event_callback { + int (*callback)(u16 type, void *event_data, void *callback_data); + void *callback_data; +}; + +struct nbl_event_dev_mode_switch_data { + int op; + int ret; +}; + +void nbl_event_notify(enum nbl_event_type type, void *event_data, u16 src_vsi_id, u16 board_id); +int nbl_event_register(enum nbl_event_type type, struct nbl_event_callback *callback, + u16 src_vsi_id, u16 board_id); +void nbl_event_unregister(enum nbl_event_type type, struct nbl_event_callback *callback, + u16 src_vsi_id, u16 board_id); +int nbl_event_init(void); +void nbl_event_remove(void); + +void *nbl_common_init_hash_table(struct nbl_hash_tbl_key *key); +void nbl_common_remove_hash_table(void *priv, struct nbl_hash_tbl_del_key *key); +int nbl_common_alloc_hash_node(void *priv, void *key, void *data); +void *nbl_common_get_hash_node(void *priv, void *key); +void nbl_common_free_hash_node(void *priv, void *key); +void nbl_common_scan_hash_node(void *priv, struct nbl_hash_tbl_scan_key *key); +u16 nbl_common_get_hash_node_num(void *priv); + +void *nbl_common_init_hash_xy_table(struct nbl_hash_xy_tbl_key *key); +void nbl_common_remove_hash_xy_table(void *priv, struct nbl_hash_xy_tbl_del_key *key); +int nbl_common_alloc_hash_xy_node(void *priv, void *x_key, void *y_key, void *data); +void *nbl_common_get_hash_xy_node(void *priv, void *x_key, void *y_key); +void nbl_common_free_hash_xy_node(void *priv, void *x_key, void *y_key); +u16 nbl_common_scan_hash_xy_node(void *priv, struct nbl_hash_xy_tbl_scan_key *key); +u16 nbl_common_get_hash_xy_node_num(void *priv); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h new file mode 100644 index 000000000000..4e42b2b15ef2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_DEF_DEV_H_ +#define _NBL_DEF_DEV_H_ + +#include "nbl_include.h" + +#define NBL_DEV_OPS_TBL_TO_OPS(dev_ops_tbl) ((dev_ops_tbl)->ops) +#define NBL_DEV_OPS_TBL_TO_PRIV(dev_ops_tbl) ((dev_ops_tbl)->priv) + +struct nbl_dev_ops { +}; + +struct nbl_dev_ops_tbl { + struct nbl_dev_ops *ops; + void *priv; +}; + +int nbl_dev_init(void *p, struct nbl_init_param *param); +void nbl_dev_remove(void *p); +int nbl_dev_start(void *p, struct nbl_init_param *param); +void nbl_dev_stop(void *p); + +void nbl_dev_user_module_init(void); +void nbl_dev_user_module_destroy(void); + +int nbl_dev_resume(void *p); +int nbl_dev_suspend(void *p); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h new file mode 100644 index 000000000000..9107cb22b80f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEF_DISPATCH_H_ +#define _NBL_DEF_DISPATCH_H_ + +#include "nbl_include.h" + +#define NBL_DISP_OPS_TBL_TO_OPS(disp_ops_tbl) ((disp_ops_tbl)->ops) +#define NBL_DISP_OPS_TBL_TO_PRIV(disp_ops_tbl) ((disp_ops_tbl)->priv) + +enum { + NBL_DISP_CTRL_LVL_NEVER = 0, + NBL_DISP_CTRL_LVL_MGT, + NBL_DISP_CTRL_LVL_NET, + NBL_DISP_CTRL_LVL_ALWAYS, + NBL_DISP_CTRL_LVL_MAX, +}; + +struct nbl_dispatch_ops { + int (*init_chip_module)(void *priv); + void (*get_resource_pt_ops)(void *priv, struct nbl_resource_pt_ops *pt_ops); + int (*queue_init)(void *priv); + int (*vsi_init)(void *priv); + int (*configure_msix_map)(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en); + int (*destroy_msix_map)(void *priv); + int (*enable_mailbox_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_abnormal_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); + u16 (*get_global_vector)(void *priv, u16 vsi_id, u16 local_vector_id); + u16 (*get_msix_entry_id)(void *priv, u16 vsi_id, u16 local_vector_id); + u32 (*get_chip_temperature)(void *priv); + u32 (*get_chip_temperature_max)(void *priv); + u32 (*get_chip_temperature_crit)(void *priv); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_module_temp_type type); + + int (*get_mbx_irq_num)(void *priv); + int (*get_adminq_irq_num)(void *priv); + int (*get_abnormal_irq_num)(void *priv); + int (*alloc_rings)(void *priv, struct net_device *netdev, u16 tx_num, + u16 rx_num, u16 tx_desc_num, u16 rx_desc_num); + void (*remove_rings)(void *priv); + dma_addr_t (*start_tx_ring)(void *priv, u8 ring_index); + void (*stop_tx_ring)(void *priv, u8 ring_index); + dma_addr_t (*start_rx_ring)(void *priv, u8 ring_index, bool use_napi); + void (*stop_rx_ring)(void *priv, u8 ring_index); + void (*kick_rx_ring)(void *priv, u16 index); + int (*dump_ring)(void *priv, struct seq_file *m, bool is_tx, int index); + int (*dump_ring_stats)(void *priv, struct seq_file *m, bool is_tx, int index); + struct napi_struct *(*get_vector_napi)(void *priv, u16 index); + void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, + u16 index, bool mask_en); + int (*register_net)(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result); + void (*register_vsi_ring)(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num); + int (*unregister_net)(void *priv); + int 
(*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num); + void (*free_txrx_queues)(void *priv, u16 vsi_id); + int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + void (*remove_all_queues)(void *priv, u16 vsi_id); + int (*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num); + int (*setup_q2vsi)(void *priv, u16 vsi_id); + void (*remove_q2vsi)(void *priv, u16 vsi_id); + int (*setup_rss)(void *priv, u16 vsi_id); + void (*remove_rss)(void *priv, u16 vsi_id); + int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps); + void (*remove_cqs)(void *priv, u16 vsi_id); + void (*clear_queues)(void *priv, u16 vsi_id); + u16 (*get_local_queue_id)(void *priv, u16 vsi_id, u16 global_queue_id); + u16 (*get_vsi_global_queue_id)(void *priv, u16 vsi_id, u16 local_qid); + + int (*enable_msix_irq)(void *priv, u16 global_vector_id); + u8* (*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); + int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); + int (*set_vf_spoof_check)(void *priv, u16 vsi_id, int vfid, u8 enable); + void (*get_base_mac_addr)(void *priv, u8 *mac); + + int (*add_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + void (*del_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + int (*add_lag_flow)(void *priv, u16 vsi); + void (*del_lag_flow)(void *priv, u16 vsi); + int (*add_lldp_flow)(void *priv, u16 vsi); + void (*del_lldp_flow)(void *priv, u16 vsi); + int (*add_multi_rule)(void *priv, u16 vsi); + void (*del_multi_rule)(void *priv, u16 vsi); + int (*setup_multi_group)(void *priv); + void (*remove_multi_group)(void *priv); + void (*clear_flow)(void *priv, u16 vsi_id); + void (*dump_flow)(void *priv, struct seq_file *m); + + u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id); + int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + u32 (*get_tx_headroom)(void *priv); + void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + + void (*get_firmware_version)(void *priv, char *firmware_verion, u8 max_len); + int (*get_driver_info)(void *priv, struct nbl_driver_info *driver_info); + void (*get_queue_stats)(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx); + int (*get_queue_err_stats)(void *priv, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, bool is_tx); + void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + void (*get_private_stat_len)(void *priv, u32 *len); + void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data, u32 data_len); + void (*fill_private_stat_strings)(void *priv, u8 *strings); + u16 (*get_max_desc_num)(void *priv); + u16 (*get_min_desc_num)(void *priv); + u16 (*get_tx_desc_num)(void *priv, u32 ring_index); + u16 (*get_rx_desc_num)(void *priv, u32 ring_index); + void (*set_tx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*set_rx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*get_coalesce)(void *priv, u16 vector_id, struct ethtool_coalesce *ec); + void (*set_coalesce)(void *priv, u16 vector_id, u16 num_net_msix, u16 pnum, u16 rate); + u16 (*get_intr_suppress_level)(void *priv, u64 rate, u16 last_level); + void (*set_intr_suppress_level)(void *priv, u16 vector_id, + u16 num_net_msix, u16 level); + void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); + void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir, u32 
indir_size); + void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); + void (*get_rxfh_rss_key)(void *priv, u8 *rss_key, u32 rss_key_size); + void (*get_rxfh_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + int (*get_port_attributes)(void *priv); + int (*enable_port)(void *priv, bool enable); + void (*recv_port_notify)(void *priv); + int (*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); + int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); + int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); + int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); + int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); + int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + int (*nway_reset)(void *priv, u8 eth_id); + void (*adapt_desc_gother)(void *priv); + void (*flr_clear_net)(void *priv, u16 vfid); + void (*flr_clear_queues)(void *priv, u16 vfid); + void (*flr_clear_flows)(void *priv, u16 vfid); + void (*flr_clear_interrupt)(void *priv, u16 vfid); + void (*unmask_all_interrupts)(void *priv); + void (*keep_alive)(void *priv); + int (*set_bridge_mode)(void *priv, u16 bmode); + + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); + u16 (*get_function_id)(void *priv, u16 vsi_id); + void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + + bool (*check_fw_heartbeat)(void *priv); + bool (*check_fw_reset)(void *priv); + int (*flash_lock)(void *priv); + int (*flash_unlock)(void *priv); + int (*flash_prepare)(void *priv); + int (*flash_image)(void *priv, u32 module, const u8 *data, size_t len); + int (*flash_activate)(void *priv); + void (*get_phy_caps)(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps); + void (*get_phy_state)(void *priv, u8 eth_id, struct nbl_phy_state *phy_state); + int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); + int (*set_eth_loopback)(void *priv, u8 enable); + struct sk_buff *(*clean_rx_lb_test)(void *priv, u32 ring_index); + int (*passthrough_fw_cmd)(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result); + int (*update_ring_num)(void *priv); + int (*set_ring_num)(void *priv, struct nbl_fw_cmd_ring_num_param *param); + + u32 (*check_active_vf)(void *priv); + int (*get_board_id)(void *priv); + + void (*get_reg_dump)(void *priv, u32 *data, u32 len); + int (*get_reg_dump_len)(void *priv); + + u32 (*get_adminq_tx_buf_size)(void *priv); + int (*emp_console_write)(void *priv, char *buf, size_t count); + bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); + bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + + void (*dummy_func)(void *priv); + + int (*get_p4_info)(void *priv, char *verify_code); + int (*load_p4)(void *priv, struct nbl_load_p4_param *param); + int (*load_p4_default)(void *priv); + int (*get_p4_used)(void *priv); + int (*set_p4_used)(void *priv, int p4_type); + u16 (*get_vf_base_vsi_id)(void *priv, u16 pf_id); + + dma_addr_t (*restore_abnormal_ring)(void *priv, int ring_index, int type); + int (*restart_abnormal_ring)(void *priv, int ring_index, int type); + int (*restore_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, 
int type); + u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); +}; + +struct nbl_dispatch_ops_tbl { + struct nbl_dispatch_ops *ops; + void *priv; +}; + +int nbl_disp_init(void *p, struct nbl_init_param *param); +void nbl_disp_remove(void *p); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h new file mode 100644 index 000000000000..d61b17b01b07 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEF_PHY_H_ +#define _NBL_DEF_PHY_H_ + +#include "nbl_include.h" + +#define NBL_PHY_OPS_TBL_TO_OPS(phy_ops_tbl) ((phy_ops_tbl)->ops) +#define NBL_PHY_OPS_TBL_TO_PRIV(phy_ops_tbl) ((phy_ops_tbl)->priv) + +struct nbl_phy_ops { + int (*init_chip_module)(void *priv, u8 eth_speed, u8 eth_num); + int (*get_firmware_version)(void *priv, char *firmware_verion); + int (*flow_init)(void *priv); + int (*init_qid_map_table)(void *priv); + int (*set_qid_map_table)(void *priv, void *data, int qid_map_select); + int (*set_qid_map_ready)(void *priv, bool ready); + int (*cfg_ipro_queue_tbl)(void *priv, u16 queue_id, u16 vsi_id, u8 enable); + int (*cfg_ipro_dn_sport_tbl)(void *priv, u16 vsi_id, u16 dst_eth_id, u16 bmode, bool binit); + int (*set_vnet_queue_info)(void *priv, struct nbl_vnet_queue_info_param *param, + u16 queue_id); + int (*clear_vnet_queue_info)(void *priv, u16 queue_id); + int (*cfg_vnet_qinfo_log)(void *priv, u16 queue_id, bool vld); + int (*reset_dvn_cfg)(void *priv, u16 queue_id); + int (*reset_uvn_cfg)(void *priv, u16 queue_id); + int (*restore_dvn_context)(void *priv, u16 queue_id, u16 split, u16 last_avail_index); + int (*restore_uvn_context)(void *priv, u16 queue_id, u16 split, u16 last_avail_index); + int (*get_tx_queue_cfg)(void *priv, void *data, u16 queue_id); + int (*get_rx_queue_cfg)(void *priv, void *data, u16 queue_id); + int (*cfg_tx_queue)(void *priv, void *data, u16 queue_id); + int (*cfg_rx_queue)(void *priv, void *data, u16 queue_id); + bool (*check_q2tc)(void *priv, u16 queue_id); + int (*cfg_q2tc_netid)(void *priv, u16 queue_id, u16 netid, u16 vld); + int (*cfg_q2tc_tcid)(void *priv, u16 queue_id, u16 tcid); + int (*set_tc_wgt)(void *priv, u16 func_id, u8 *weight, u16 num_tc); + int (*set_tc_spwrr)(void *priv, u16 func_id, u8 spwrr); + int (*set_shaping)(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active); + void (*active_shaping)(void *priv, u16 func_id); + void (*deactive_shaping)(void *priv, u16 func_id); + int (*cfg_dsch_net_to_group)(void *priv, u16 func_id, u16 group_id, u16 vld); + int (*cfg_dsch_group_to_port)(void *priv, u16 group_id, u16 dport, u16 vld); + int (*init_epro_rss_key)(void *priv); + void (*read_rss_key)(void *priv, u8 *rss_key); + void (*read_rss_indir)(void *priv, u16 vsi_id, u32 *rss_indir, + u16 rss_ret_base, u16 rss_entry_size); + void (*get_rss_alg_sel)(void *priv, u8 eth_id, u8 *rss_alg_sel); + int (*init_epro_vpt_tbl)(void *priv, u16 vsi_id); + int (*set_epro_rss_default)(void *priv, u16 vsi_id); + int (*cfg_epro_rss_ret)(void *priv, u32 index, u8 size_type, u32 q_num, u16 *queue_list); + int (*set_epro_rss_pt)(void *priv, u16 vsi_id, u16 rss_ret_base, u16 rss_entry_size); + int (*clear_epro_rss_pt)(void *priv, u16 vsi_id); + int (*disable_dvn)(void *priv, u16 queue_id); + int (*disable_uvn)(void *priv, u16 queue_id); + int (*lso_dsch_drain)(void 
*priv, u16 queue_id); + int (*rsc_cache_drain)(void *priv, u16 queue_id); + u16 (*save_dvn_ctx)(void *priv, u16 queue_id, u16 split); + u16 (*save_uvn_ctx)(void *priv, u16 queue_id, u16 split, u16 queue_size); + void (*get_rx_queue_err_stats)(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats); + void (*get_tx_queue_err_stats)(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats); + void (*setup_queue_switch)(void *priv, u16 eth_id); + void (*init_pfc)(void *priv, u8 ether_ports); + u32 (*get_chip_temperature)(void *priv); + + int (*cfg_epro_vpt_tbl)(void *priv, u16 vsi_id); + void (*set_promisc_mode)(void *priv, u16 vsi_id, u16 eth_id, u16 mode); + void (*configure_msix_map)(void *priv, u16 func_id, bool valid, dma_addr_t dma_addr, + u8 bus, u8 devid, u8 function); + void (*configure_msix_info)(void *priv, u16 func_id, bool valid, u16 interrupt_id, + u8 bus, u8 devid, u8 function, bool net_msix_mask_en); + void (*get_msix_resource)(void *priv, u16 func_id, u16 *msix_base, u16 *msix_max); + void (*get_coalesce)(void *priv, u16 interrupt_id, u16 *pnum, u16 *rate); + void (*set_coalesce)(void *priv, u16 interrupt_id, u16 pnum, u16 rate); + + void (*update_mailbox_queue_tail_ptr)(void *priv, u16 tail_ptr, u8 txrx); + void (*config_mailbox_rxq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*config_mailbox_txq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*stop_mailbox_rxq)(void *priv); + void (*stop_mailbox_txq)(void *priv); + u16 (*get_mailbox_rx_tail_ptr)(void *priv); + bool (*check_mailbox_dma_err)(void *priv, bool tx); + u32 (*get_host_pf_mask)(void *priv); + u32 (*get_host_pf_fid)(void *priv, u8 func_id); + void (*cfg_mailbox_qinfo)(void *priv, u16 func_id, u16 bus, u16 devid, u16 function); + void (*enable_mailbox_irq)(void *priv, u16 func_id, bool enable_msix, u16 global_vector_id); + void (*enable_abnormal_irq)(void *priv, bool enable_msix, u16 global_vector_id); + void (*enable_msix_irq)(void *priv, u16 global_vector_id); + u8 *(*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); + void (*config_adminq_rxq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*config_adminq_txq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*stop_adminq_rxq)(void *priv); + void (*stop_adminq_txq)(void *priv); + void (*cfg_adminq_qinfo)(void *priv, u16 bus, u16 devid, u16 function); + void (*enable_adminq_irq)(void *priv, bool enable_msix, u16 global_vector_id); + void (*update_adminq_queue_tail_ptr)(void *priv, u16 tail_ptr, u8 txrx); + u16 (*get_adminq_rx_tail_ptr)(void *priv); + bool (*check_adminq_dma_err)(void *priv, bool tx); + + void (*update_tail_ptr)(void *priv, struct nbl_notify_param *param); + u8* (*get_tail_ptr)(void *priv); + + int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); + int (*set_spoof_check_enable)(void *priv, u16 vsi_id, u8 enable); + + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + + /* For leonis */ + int (*set_ht)(void *priv, u16 hash, u16 hash_other, u8 ht_table, + u8 bucket, u32 key_index, u8 valid); + int (*set_kt)(void *priv, u8 *key, u32 key_index, u8 key_type); + int (*search_key)(void *priv, u8 *key, u8 key_type); + int (*add_tcam)(void *priv, u32 index, u8 *key, u32 *action, u8 key_type, u8 pp_type); + void (*del_tcam)(void *priv, u32 index, u8 key_type, u8 pp_type); + int (*add_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action); + void (*del_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id); + int (*init_fem)(void *priv); + 
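+ /* firmware ping/pong heartbeat and register-dump/abnormal-event diagnostics */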
+ unsigned long (*get_fw_ping)(void *priv); + void (*set_fw_ping)(void *priv, unsigned long ping); + unsigned long (*get_fw_pong)(void *priv); + void (*set_fw_pong)(void *priv, unsigned long pong); + + void (*get_reg_dump)(void *priv, u32 *data, u32 len); + int (*get_reg_dump_len)(void *priv); + int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); + u32 (*get_uvn_desc_entry_stats)(void *priv); + void (*set_uvn_desc_wr_timeout)(void *priv, u16 timeout); + + int (*setup_loopback)(void *priv, u32 eth_id, u32 enable); + int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + + /* for board cfg */ + u32 (*get_fw_eth_num)(void *priv); + u32 (*get_fw_eth_map)(void *priv); + void (*get_board_info)(void *priv, struct nbl_board_port_info *board); +}; + +struct nbl_phy_ops_tbl { + struct nbl_phy_ops *ops; + void *priv; +}; + +int nbl_phy_init_leonis(void *p, struct nbl_init_param *param); +void nbl_phy_remove_leonis(void *p); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h new file mode 100644 index 000000000000..6a0cc88776c5 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEF_RESOURCE_H_ +#define _NBL_DEF_RESOURCE_H_ + +#include "nbl_include.h" + +#define NBL_RES_OPS_TBL_TO_OPS(res_ops_tbl) ((res_ops_tbl)->ops) +#define NBL_RES_OPS_TBL_TO_PRIV(res_ops_tbl) ((res_ops_tbl)->priv) + +struct nbl_resource_pt_ops { + netdev_tx_t (*start_xmit)(struct sk_buff *skb, struct net_device *netdev); + netdev_tx_t (*rep_xmit)(struct sk_buff *skb, struct net_device *netdev); + netdev_tx_t (*self_test_xmit)(struct sk_buff *skb, struct net_device *netdev); + int (*napi_poll)(struct napi_struct *napi, int budget); +}; + +struct nbl_resource_ops { + int (*init_chip_module)(void *priv); + void (*get_resource_pt_ops)(void *priv, struct nbl_resource_pt_ops *pt_ops); + int (*queue_init)(void *priv); + int (*vsi_init)(void *priv); + int (*configure_msix_map)(void *priv, u16 func_id, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en); + int (*destroy_msix_map)(void *priv, u16 func_id); + int (*enable_mailbox_irq)(void *priv, u16 func_id, u16 vector_id, bool enable_msix); + int (*enable_abnormal_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); + u16 (*get_global_vector)(void *priv, u16 vsi_id, u16 local_vector_id); + u16 (*get_msix_entry_id)(void *priv, u16 vsi_id, u16 local_vector_id); + u32 (*get_chip_temperature)(void *priv); + u32 (*get_chip_temperature_max)(void *priv); + u32 (*get_chip_temperature_crit)(void *priv); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_module_temp_type type); + int (*get_mbx_irq_num)(void *priv); + int (*get_adminq_irq_num)(void *priv); + int (*get_abnormal_irq_num)(void *priv); + + int (*alloc_rings)(void *priv, struct net_device *netdev, u16 tx_num, + u16 rx_num, u16 tx_desc_num, u16 rx_desc_num); + void (*remove_rings)(void *priv); + dma_addr_t (*start_tx_ring)(void *priv, u8 ring_index); + void (*stop_tx_ring)(void *priv, u8 ring_index); + dma_addr_t (*start_rx_ring)(void *priv, u8 ring_index, bool use_napi); + void (*stop_rx_ring)(void *priv, u8 ring_index); + void (*update_rx_ring)(void *priv, u16 index); + void 
(*kick_rx_ring)(void *priv, u16 index); + int (*dump_ring)(void *priv, struct seq_file *m, bool is_tx, int index); + int (*dump_ring_stats)(void *priv, struct seq_file *m, bool is_tx, int index); + struct napi_struct *(*get_vector_napi)(void *priv, u16 index); + void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, + u16 index, bool mask_en); + void (*register_vsi_ring)(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num); + int (*register_net)(void *priv, u16 func_id, + struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result); + int (*unregister_net)(void *priv, u16 func_id); + int (*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num); + void (*free_txrx_queues)(void *priv, u16 vsi_id); + int (*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num); + int (*setup_q2vsi)(void *priv, u16 vsi_id); + void (*remove_q2vsi)(void *priv, u16 vsi_id); + int (*setup_rss)(void *priv, u16 vsi_id); + void (*remove_rss)(void *priv, u16 vsi_id); + int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + void (*remove_all_queues)(void *priv, u16 vsi_id); + int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps); + void (*remove_cqs)(void *priv, u16 vsi_id); + void (*clear_queues)(void *priv, u16 vsi_id); + u16 (*get_local_queue_id)(void *priv, u16 vsi_id, u16 global_queue_id); + + int (*enable_msix_irq)(void *priv, u16 global_vector_id); + u8* (*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); + + int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); + int (*set_vf_spoof_check)(void *priv, u16 vsi_id, int vfid, u8 enable); + void (*get_base_mac_addr)(void *priv, u8 *mac); + + int (*add_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + void (*del_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + int (*add_lag_flow)(void *priv, u16 vsi); + void (*del_lag_flow)(void *priv, u16 vsi); + int (*add_lldp_flow)(void *priv, u16 vsi); + void (*del_lldp_flow)(void *priv, u16 vsi); + int (*add_multi_rule)(void *priv, u16 vsi); + void (*del_multi_rule)(void *priv, u16 vsi); + int (*setup_multi_group)(void *priv); + void (*remove_multi_group)(void *priv); + void (*clear_flow)(void *priv, u16 vsi_id); + void (*dump_flow)(void *priv, struct seq_file *m); + + u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id); + int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + u32 (*get_tx_headroom)(void *priv); + void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + + void (*get_queue_stats)(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx); + int (*get_queue_err_stats)(void *priv, u16 func_id, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, bool is_tx); + void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + void (*get_private_stat_len)(void *priv, u32 *len); + void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data); + void (*fill_private_stat_strings)(void *priv, u8 *strings); + u16 (*get_max_desc_num)(void); + u16 (*get_min_desc_num)(void); + u16 (*get_tx_desc_num)(void *priv, u32 ring_index); + u16 (*get_rx_desc_num)(void *priv, u32 ring_index); + void (*set_tx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*set_rx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*get_coalesce)(void *priv, u16 func_id, u16 
vector_id, + struct ethtool_coalesce *ec); + void (*set_coalesce)(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 pnum, u16 rate); + u16 (*get_intr_suppress_level)(void *priv, u64 rate, u16 last_level); + void (*set_intr_suppress_level)(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level); + void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); + void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir); + void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); + void (*get_rxfh_rss_key)(void *priv, u8 *rss_key); + void (*get_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + int (*get_firmware_version)(void *priv, char *firmware_verion); + int (*get_driver_info)(void *priv, struct nbl_driver_info *driver_info); + int (*nway_reset)(void *priv, u8 eth_id); + + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); + u16 (*get_function_id)(void *priv, u16 vsi_id); + void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + + int (*get_port_attributes)(void *priv); + int (*update_ring_num)(void *priv); + int (*set_ring_num)(void *priv, struct nbl_fw_cmd_ring_num_param *param); + int (*enable_port)(void *priv, bool enable); + void (*recv_port_notify)(void *priv, void *data); + int (*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); + int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); + int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); + int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); + int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); + int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + void (*adapt_desc_gother)(void *priv); + void (*flr_clear_net)(void *priv, u16 vfid); + void (*flr_clear_queues)(void *priv, u16 vfid); + void (*flr_clear_flows)(void *priv, u16 vfid); + void (*flr_clear_interrupt)(void *priv, u16 vfid); + void (*unmask_all_interrupts)(void *priv); + int (*set_bridge_mode)(void *priv, u16 func_id, u16 bmode); + u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); + + bool (*check_fw_heartbeat)(void *priv); + bool (*check_fw_reset)(void *priv); + int (*flash_lock)(void *priv); + int (*flash_unlock)(void *priv); + int (*flash_prepare)(void *priv); + int (*flash_image)(void *priv, u32 module, const u8 *data, size_t len); + int (*flash_activate)(void *priv); + void (*get_phy_caps)(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps); + void (*get_phy_state)(void *priv, u8 eth_id, struct nbl_phy_state *phy_state); + int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); + int (*setup_loopback)(void *priv, u32 eth_id, u32 enable); + struct sk_buff *(*clean_rx_lb_test)(void *priv, u32 ring_index); + int (*passthrough_fw_cmd)(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result); + + u32 (*check_active_vf)(void *priv, u16 func_id); + int (*get_board_id)(void *priv); + + void (*get_reg_dump)(void *priv, u32 *data, u32 len); + int (*get_reg_dump_len)(void *priv); + + bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); + bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + + dma_addr_t 
(*restore_abnormal_ring)(void *priv, int ring_index, int type); + int (*restart_abnormal_ring)(void *priv, int ring_index, int type); + int (*restore_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type); + + void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); + + int (*get_p4_info)(void *priv, char *verify_code); + int (*load_p4)(void *priv, struct nbl_load_p4_param *param); + int (*load_p4_default)(void *priv); + int (*get_p4_used)(void *priv); + int (*set_p4_used)(void *priv, int p4_type); + + u16 (*get_vf_base_vsi_id)(void *priv, u16 pf_id); + u16 (*get_vsi_global_queue_id)(void *priv, u16 vsi_id, u16 local_qid); +}; + +struct nbl_resource_ops_tbl { + struct nbl_resource_ops *ops; + void *priv; +}; + +int nbl_res_init_leonis(void *p, struct nbl_init_param *param); +void nbl_res_remove_leonis(void *p); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h new file mode 100644 index 000000000000..76a1887098af --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_DEF_SERVICE_H_ +#define _NBL_DEF_SERVICE_H_ + +#include "nbl_include.h" + +#define NBL_SERV_OPS_TBL_TO_OPS(serv_ops_tbl) ((serv_ops_tbl)->ops) +#define NBL_SERV_OPS_TBL_TO_PRIV(serv_ops_tbl) ((serv_ops_tbl)->priv) + +struct nbl_service_ops { + int (*init_chip)(void *p); + int (*destroy_chip)(void *p); + int (*init_p4)(void *priv); + int (*configure_msix_map)(void *p, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en); + int (*destroy_msix_map)(void *priv); + int (*enable_mailbox_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_abnormal_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); + int (*request_net_irq)(void *priv, struct nbl_msix_info_param *msix_info); + void (*free_net_irq)(void *priv, struct nbl_msix_info_param *msix_info); + u16 (*get_global_vector)(void *priv, u16 local_vector_id); + u16 (*get_msix_entry_id)(void *priv, u16 local_vector_id); + void (*get_common_irq_num)(void *priv, struct nbl_common_irq_num *irq_num); + void (*get_ctrl_irq_num)(void *priv, struct nbl_ctrl_irq_num *irq_num); + int (*get_port_attributes)(void *p); + int (*update_ring_num)(void *priv); + int (*enable_port)(void *p, bool enable); + void (*set_netdev_carrier_state)(void *p, struct net_device *netdev, u8 link_state); + + int (*vsi_open)(void *priv, struct net_device *netdev, u16 vsi_index, + u16 real_qps, bool use_napi); + int (*vsi_stop)(void *priv, u16 vsi_index); + int (*switch_traffic_default_dest)(void *priv, u16 from_vsi, u16 to_vsi); + + int (*netdev_open)(struct net_device *netdev); + int (*netdev_stop)(struct net_device *netdev); + netdev_tx_t (*start_xmit)(struct sk_buff *skb, struct net_device *netdev); + int (*change_mtu)(struct net_device *netdev, int new_mtu); + void (*get_stats64)(struct net_device *netdev, struct rtnl_link_stats64 *stats); + void (*set_rx_mode)(struct net_device *dev); + void (*change_rx_flags)(struct net_device *dev, int flag); + int (*set_mac)(struct net_device *dev, void *p); + int (*rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); + netdev_features_t (*features_check)(struct sk_buff *skb, struct 
net_device *dev, + netdev_features_t features); + void (*tx_timeout)(struct net_device *netdev, u32 txqueue); + + int (*get_phys_port_name)(struct net_device *dev, char *name, size_t len); + int (*get_port_parent_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + + int (*register_net)(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result); + int (*unregister_net)(void *priv); + int (*setup_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num, u16 net_vector_id); + void (*remove_txrx_queues)(void *priv, u16 vsi_id); + int (*register_vsi_info)(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num); + int (*setup_q2vsi)(void *priv, u16 vsi_id); + void (*remove_q2vsi)(void *priv, u16 vsi_id); + int (*setup_rss)(void *priv, u16 vsi_id); + void (*remove_rss)(void *priv, u16 vsi_id); + u32 (*get_chip_temperature)(void *priv); + u32 (*get_chip_temperature_max)(void *priv); + u32 (*get_chip_temperature_crit)(void *priv); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_module_temp_type type); + + int (*alloc_rings)(void *priv, struct net_device *dev, + u16 tx_num, u16 rx_num, u16 desc_num); + void (*free_rings)(void *priv); + int (*enable_napis)(void *priv, u16 vsi_index); + void (*disable_napis)(void *priv, u16 vsi_index); + void (*set_mask_en)(void *priv, bool enable); + int (*start_net_flow)(void *priv, struct net_device *dev, u16 vsi_id); + void (*stop_net_flow)(void *priv, u16 vsi_id); + int (*set_lldp_flow)(void *priv, u16 vsi_id); + void (*remove_lldp_flow)(void *priv, u16 vsi_id); + int (*start_mgt_flow)(void *priv); + void (*stop_mgt_flow)(void *priv); + u32 (*get_tx_headroom)(void *priv); + int (*set_spoof_check_addr)(void *priv, u8 *mac); + + u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id); + void (*debugfs_init)(void *priv); + void (*debugfs_netops_create)(void *priv, u16 tx_queue_num, u16 rx_queue_num); + void (*debugfs_ctrlops_create)(void *priv); + void (*debugfs_exit)(void *priv); + int (*setup_net_resource_mgt)(void *priv, struct net_device *dev); + void (*remove_net_resource_mgt)(void *priv); + int (*enable_lag_protocol)(void *priv, u16 vsi_id, bool lag_en); + void (*set_sfp_state)(void *priv, struct net_device *netdev, u8 eth_id, + bool open, bool is_force); + int (*get_board_id)(void *priv); + void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + + /* ethtool */ + void (*get_drvinfo)(struct net_device *netdev, struct ethtool_drvinfo *drvinfo); + int (*get_module_eeprom)(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data); + int (*get_module_info)(struct net_device *netdev, struct ethtool_modinfo *info); + int (*get_eeprom_length)(struct net_device *netdev); + int (*get_eeprom)(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes); + void (*get_strings)(struct net_device *netdev, u32 stringset, u8 *data); + int (*get_sset_count)(struct net_device *netdev, int sset); + void (*get_ethtool_stats)(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); + void (*get_channels)(struct net_device *netdev, struct ethtool_channels *channels); + int (*set_channels)(struct net_device *netdev, struct ethtool_channels *channels); + u32 (*get_link)(struct net_device *netdev); + int (*get_ksettings)(struct net_device *netdev, struct ethtool_link_ksettings *cmd); + int (*set_ksettings)(struct net_device *netdev, const struct 
ethtool_link_ksettings *cmd); + void (*get_ringparam)(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack); + int (*set_ringparam)(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack); + + int (*get_coalesce)(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack); + int (*set_coalesce)(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack); + + int (*get_rxnfc)(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs); + u32 (*get_rxfh_indir_size)(struct net_device *netdev); + u32 (*get_rxfh_key_size)(struct net_device *netdev); + int (*get_rxfh)(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); + u32 (*get_msglevel)(struct net_device *netdev); + void (*set_msglevel)(struct net_device *netdev, u32 msglevel); + int (*get_regs_len)(struct net_device *netdev); + void (*get_ethtool_dump_regs)(struct net_device *netdev, + struct ethtool_regs *regs, void *p); + int (*get_per_queue_coalesce)(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec); + int (*set_per_queue_coalesce)(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec); + void (*self_test)(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data); + u32 (*get_priv_flags)(struct net_device *netdev); + int (*set_priv_flags)(struct net_device *netdev, u32 priv_flags); + int (*set_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); + void (*get_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); + int (*set_fec_param)(struct net_device *netdev, struct ethtool_fecparam *fec); + int (*get_fec_param)(struct net_device *netdev, struct ethtool_fecparam *fec); + int (*get_ts_info)(struct net_device *netdev, struct ethtool_ts_info *ts_info); + int (*set_phys_id)(struct net_device *netdev, enum ethtool_phys_id_state state); + int (*nway_reset)(struct net_device *netdev); + + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); + u16 (*get_function_id)(void *priv, u16 vsi_id); + void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); + int (*process_abnormal_event)(void *priv); + void (*adapt_desc_gother)(void *priv); + void (*process_flr)(void *priv, u16 vfid); + void (*recovery_abnormal)(void *priv); + void (*keep_alive)(void *priv); + + int (*get_devlink_info)(struct devlink *devlink, struct devlink_info_req *req, + struct netlink_ext_ack *extack); + int (*update_devlink_flash)(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack); + + u32 (*get_adminq_tx_buf_size)(void *priv); + bool (*check_fw_heartbeat)(void *priv); + bool (*check_fw_reset)(void *priv); + + bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); + bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + + int (*setup_st)(void *priv, void *st_table_param); + void (*remove_st)(void *priv, void *st_table_param); + u16 (*get_vf_base_vsi_id)(void *priv, u16 func_id); +}; + +struct nbl_service_ops_tbl { + struct nbl_resource_pt_ops pt_ops; + struct nbl_service_ops *ops; + void *priv; +}; + +int nbl_serv_init(void 
*priv, struct nbl_init_param *param); +void nbl_serv_remove(void *priv); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h new file mode 100644 index 000000000000..78dbae0a1e13 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h @@ -0,0 +1,794 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#ifndef _NBL_INCLUDE_H_ +#define _NBL_INCLUDE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* ------ Basic definitions ------- */ +#define NBL_DRIVER_NAME "nbl_core" +/* "product NO-V NO.R NO.B NO.SP NO" + * product NO define: + * 1 reserve for develop branch + * 2 df200 + * 3 ASIC snic + * 4 x4 + */ +#define NBL_DRIVER_VERSION "1-1.1.100.0" + +#define NBL_DRIVER_DEV_MAX 8 + +#define NBL_PAIR_ID_GET_TX(id) ((id) * 2 + 1) +#define NBL_PAIR_ID_GET_RX(id) ((id) * 2) + +#define NBL_MAX_PF 8 + +#define NBL_IPV6_ADDR_LEN_AS_U8 16 + +#define NBL_P4_NAME_LEN 64 + +#define NBL_FLOW_INDEX_BYTE_LEN 8 + +#define SET_DEV_MIN_MTU(netdev, mtu) ((netdev)->min_mtu = (mtu)) +#define SET_DEV_MAX_MTU(netdev, mtu) ((netdev)->max_mtu = (mtu)) + +#define NBL_USER_DEV_SHMMSGRING_SIZE (PAGE_SIZE) +#define NBL_USER_DEV_SHMMSGBUF_SIZE (NBL_USER_DEV_SHMMSGRING_SIZE - 8) + +/* Used for macros to pass checkpatch */ +#define NBL_NAME(x) x + +enum nbl_product_type { + NBL_LEONIS_TYPE, + NBL_PRODUCT_MAX, +}; + +enum nbl_flex_cap_type { + NBL_SECURITY_ACCEL_CAP, + NBL_FLEX_CAP_NBITS +}; + +enum nbl_fix_cap_type { + NBL_TASK_FW_HB_CAP, + NBL_TASK_FW_RESET_CAP, + NBL_TASK_CLEAN_ADMINDQ_CAP, + NBL_TASK_CLEAN_MAILBOX_CAP, + NBL_RESTOOL_CAP, + NBL_HWMON_TEMP_CAP, + NBL_ITR_DYNAMIC, + NBL_TASK_ADAPT_DESC_GOTHER, + NBL_P4_CAP, + NBL_PROCESS_FLR_CAP, + NBL_RECOVERY_ABNORMAL_STATUS, + NBL_TASK_KEEP_ALIVE, + NBL_DUMP_FLOW_CAP, + NBL_FIX_CAP_NBITS +}; + +enum nbl_sfp_module_state { + NBL_SFP_MODULE_OFF, + NBL_SFP_MODULE_ON, +}; + +enum { + NBL_VSI_DATA = 0,/* default vsi in kernel or independent dpdk */ + NBL_VSI_CTRL, + NBL_VSI_USER,/* dpdk used vsi in coexist dpdk */ + NBL_VSI_MAX, +}; + +enum { + NBL_P4_DEFAULT = 0, + NBL_P4_TYPE_MAX, +}; + +enum { + NBL_TX = 0, + NBL_RX, +}; + +/* ------ Params that go through multiple layers ------ */ +struct nbl_driver_info { +#define NBL_DRIVER_VERSION_LEN_MAX (32) + char driver_version[NBL_DRIVER_VERSION_LEN_MAX]; +}; + +struct nbl_func_caps { + u32 has_ctrl:1; + u32 has_net:1; + u32 is_vf:1; + u32 is_nic:1; + u32 is_blk:1; + u32 has_user:1; + u32 support_lag:1; + u32 has_grc:1; + u32 has_factory_ctrl:1; + u32 need_pmd_debug:1; + u32 rsv:23; +}; + +struct nbl_init_param { + struct nbl_func_caps caps; + enum nbl_product_type product_type; + bool pci_using_dac; +}; + +struct nbl_txrx_queue_param { + u16 vsi_id; + u64 dma; + u64 avail; + u64 used; + u16 desc_num; + u16 local_queue_id; + u16 intr_en; + u16 intr_mask; + u16 global_vector_id; + u16 half_offload_en; + u16 split; + u16 extend_header; + u16 cxt; + u16 rxcsum; +}; + +struct nbl_qid_map_table { + u32 local_qid; + u32 notify_addr_l; + u32 notify_addr_h; + u32 global_qid; + u32 ctrlq_flag; +}; + +struct nbl_qid_map_param { + 
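+ /* describes a span of qid-map table entries: len entries starting at index start */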
struct nbl_qid_map_table *qid_map; + u16 start; + u16 len; +}; + +struct nbl_ecpu_qid_map_param { + u8 valid; + u16 table_id; + u16 max_qid; + u16 base_qid; + u16 device_type; + u64 notify_addr; +}; + +struct nbl_rss_alg_param { + u8 hash_field_type_v4; + u8 hash_field_type_v6; + u8 hash_field_mask_dport; + u8 hash_field_mask_sport; + u8 hash_field_mask_dip; + u8 hash_field_mask_sip; + u8 hash_alg_type; +}; + +struct nbl_vnet_queue_info_param { + u32 function_id; + u32 device_id; + u32 bus_id; + u32 msix_idx; + u32 msix_idx_valid; + u32 valid; +}; + +struct nbl_queue_cfg_param { + /* queue args*/ + u64 desc; + u64 avail; + u64 used; + u16 size; + u16 extend_header; + u16 split; + u16 last_avail_idx; + u16 global_queue_id; + + /*interrupt args*/ + u16 global_vector; + u16 intr_en; + u16 intr_mask; + + /* dvn args */ + u16 tx; + + /* uvn args*/ + u16 rxcsum; + u16 half_offload_en; +}; + +struct nbl_register_net_param { + u16 pf_bdf; + u64 vf_bar_start; + u64 vf_bar_size; + u16 total_vfs; + u16 offset; + u16 stride; + u64 pf_bar_start; +}; + +struct nbl_register_net_result { + u16 tx_queue_num; + u16 rx_queue_num; + u16 queue_size; + u16 rdma_enable; + u64 hw_features; + u64 features; + u16 max_mtu; + u16 queue_offset; + u8 mac[ETH_ALEN]; +}; + +struct nbl_msix_info_param { + u16 msix_num; + struct msix_entry *msix_entries; +}; + +struct nbl_queue_stats { + u64 packets; + u64 bytes; + u64 descs; +}; + +struct nbl_tx_queue_stats { + u64 tso_packets; + u64 tso_bytes; + u64 tx_csum_packets; + u64 tx_busy; + u64 tx_dma_busy; + u64 tx_multicast_packets; + u64 tx_unicast_packets; + u64 tx_skb_free; + u64 tx_desc_addr_err_cnt; + u64 tx_desc_len_err_cnt; +}; + +struct nbl_rx_queue_stats { + u64 rx_csum_packets; + u64 rx_csum_errors; + u64 rx_multicast_packets; + u64 rx_unicast_packets; + u64 rx_desc_addr_err_cnt; + u64 rx_alloc_buf_err_cnt; + u64 rx_cache_reuse; + u64 rx_cache_full; + u64 rx_cache_empty; + u64 rx_cache_busy; + u64 rx_cache_waive; +}; + +struct nbl_stats { + /* for toe stats */ + u64 tso_packets; + u64 tso_bytes; + u64 tx_csum_packets; + u64 rx_csum_packets; + u64 rx_csum_errors; + u64 tx_busy; + u64 tx_dma_busy; + u64 tx_multicast_packets; + u64 tx_unicast_packets; + u64 rx_multicast_packets; + u64 rx_unicast_packets; + u64 tx_skb_free; + u64 tx_desc_addr_err_cnt; + u64 tx_desc_len_err_cnt; + u64 rx_desc_addr_err_cnt; + u64 rx_alloc_buf_err_cnt; + u64 rx_cache_reuse; + u64 rx_cache_full; + u64 rx_cache_empty; + u64 rx_cache_busy; + u64 rx_cache_waive; + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; +}; + +struct nbl_queue_err_stats { + u16 dvn_pkt_drop_cnt; + u32 uvn_stat_pkt_drop; +}; + +struct nbl_priv_stats { + u64 total_dvn_pkt_drop_cnt; + u64 total_uvn_stat_pkt_drop; +}; + +struct nbl_fc_info { + u32 rx_pause; + u32 tx_pause; +}; + +struct nbl_notify_param { + u16 notify_qid; + u16 tail_ptr; +}; + +enum nbl_eth_speed { + LINK_SPEED_100M = 0, + LINK_SPEED_1000M = 1, + LINK_SPEED_5G = 2, + LINK_SPEEP_10G = 3, + LINK_SPEED_25G = 4, + LINK_SPEED_50G = 5, + LINK_SPEED_100G = 6, + LINK_SPEED_200G = 7 +}; + +struct nbl_phy_caps { + u32 speed; /* enum nbl_eth_speed */ + u32 fec_ability; + u32 pause_param; /* bit0 tx, bit1 rx */ +}; + +struct nbl_phy_state { + u32 current_speed; + u32 fec_mode; + struct nbl_fc_info fc; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); +}; + +struct nbl_common_irq_num { + int mbx_irq_num; +}; + +struct nbl_ctrl_irq_num { + int adminq_irq_num; + int abnormal_irq_num; +}; + +#define 
NBL_PORT_KEY_ILLEGAL 0x0 +#define NBL_PORT_KEY_CAPABILITIES 0x1 +#define NBL_PORT_KEY_ENABLE 0x2 /* BIT(0): NBL_PORT_FLAG_ENABLE_NOTIFY */ +#define NBL_PORT_KEY_DISABLE 0x3 +#define NBL_PORT_KEY_ADVERT 0x4 +#define NBL_PORT_KEY_LOOPBACK 0x5 /* 0: disable eth loopback, 1: enable eth loopback */ +#define NBL_PORT_KEY_MODULE_SWITCH 0x6 /* 0: sfp off, 1: sfp on */ +#define NBL_PORT_KEY_MAC_ADDRESS 0x7 +#define NBL_PORT_KRY_LED_BLINK 0x8 + +enum { + NBL_PORT_SUBOP_READ = 1, + NBL_PORT_SUBOP_WRITE = 2, +}; + +#define NBL_PORT_FLAG_ENABLE_NOTIFY BIT(0) +#define NBL_PORT_ENABLE_LOOPBACK 1 +#define NBL_PORT_DISABLE_LOOPBCK 0 +#define NBL_PORT_SFP_ON 1 +#define NBL_PORT_SFP_OFF 0 +#define NBL_PORT_KEY_KEY_SHIFT 56 +#define NBL_PORT_KEY_DATA_MASK 0xFFFFFFFFFFFF + +struct nbl_port_key { + u32 id; /* port id */ + u32 subop; /* 1: read, 2: write */ + u64 data[]; /* [47:0]: data, [55:48]: rsvd, [63:56]: key */ +}; + +enum nbl_flow_ctrl { + NBL_PORT_TX_PAUSE = 0x1, + NBL_PORT_RX_PAUSE = 0x2, + NBL_PORT_TXRX_PAUSE_OFF = 0x4, /* used for ethtool, means ethtool close tx and rx pause */ +}; + +enum nbl_port_fec { + NBL_PORT_FEC_OFF = 1, + NBL_PORT_FEC_RS = 2, + NBL_PORT_FEC_BASER = 3, + NBL_PORT_FEC_AUTO = 4, /* ethtool may set Auto mode, used for PF mailbox msg*/ +}; + +enum nbl_port_autoneg { + NBL_PORT_AUTONEG_DISABLE = 0x1, + NBL_PORT_AUTONEG_ENABLE = 0x2, +}; + +enum nbl_port_type { + NBL_PORT_TYPE_UNKNOWN = 0, + NBL_PORT_TYPE_FIBRE, + NBL_PORT_TYPE_COPPER, +}; + +enum nbl_port_max_rate { + NBL_PORT_MAX_RATE_UNKNOWN = 0, + NBL_PORT_MAX_RATE_1G, + NBL_PORT_MAX_RATE_10G, + NBL_PORT_MAX_RATE_25G, + NBL_PORT_MAX_RATE_100G, + NBL_PORT_MAX_RATE_100G_PAM4, +}; + +enum nbl_port_mode { + NBL_PORT_NRZ_NORSFEC, + NBL_PORT_NRZ_544, + NBL_PORT_NRZ_528, + NBL_PORT_PAM4_544, + NBL_PORT_MODE_MAX, +}; + +enum nbl_led_reg_ctrl { + NBL_LED_REG_ACTIVE, + NBL_LED_REG_ON, + NBL_LED_REG_OFF, + NBL_LED_REG_INACTIVE, +}; + +/* emp to ctrl dev notify */ +struct nbl_port_notify { + u32 id; + u32 speed; /* in 10 Mbps units */ + u8 link_state:1; /* 0:down, 1:up */ + u8 module_inplace:1; /* 0: not inplace, 1:inplace */ + u8 revd0:6; + u8 flow_ctrl; /* enum nbl_flow_ctrl */ + u8 fec; /* enum nbl_port_fec */ + u8 active_lanes; + u8 rsvd1[4]; + u64 advertising; /* enum nbl_port_cap */ + u64 lp_advertising; /* enum nbl_port_cap */ +}; + +#define NBL_PORT_CAP_AUTONEG_MASK (BIT(NBL_PORT_CAP_AUTONEG)) +#define NBL_PORT_CAP_FEC_MASK \ + (BIT(NBL_PORT_CAP_FEC_NONE) | BIT(NBL_PORT_CAP_FEC_RS) | BIT(NBL_PORT_CAP_FEC_BASER)) +#define NBL_PORT_CAP_PAUSE_MASK (BIT(NBL_PORT_CAP_TX_PAUSE) | BIT(NBL_PORT_CAP_RX_PAUSE)) +#define NBL_PORT_CAP_SPEED_1G_MASK\ + (BIT(NBL_PORT_CAP_1000BASE_T) | BIT(NBL_PORT_CAP_1000BASE_X)) +#define NBL_PORT_CAP_SPEED_10G_MASK\ + (BIT(NBL_PORT_CAP_10GBASE_T) | BIT(NBL_PORT_CAP_10GBASE_KR) | BIT(NBL_PORT_CAP_10GBASE_SR)) +#define NBL_PORT_CAP_SPEED_25G_MASK \ + (BIT(NBL_PORT_CAP_25GBASE_KR) | BIT(NBL_PORT_CAP_25GBASE_SR) |\ + BIT(NBL_PORT_CAP_25GBASE_CR) | BIT(NBL_PORT_CAP_25G_AUI)) +#define NBL_PORT_CAP_SPEED_50G_MASK \ + (BIT(NBL_PORT_CAP_50GBASE_KR2) | BIT(NBL_PORT_CAP_50GBASE_SR2) |\ + BIT(NBL_PORT_CAP_50GBASE_CR2) | BIT(NBL_PORT_CAP_50G_AUI2) |\ + BIT(NBL_PORT_CAP_50GBASE_KR_PAM4) | BIT(NBL_PORT_CAP_50GBASE_SR_PAM4) |\ + BIT(NBL_PORT_CAP_50GBASE_CR_PAM4) | BIT(NBL_PORT_CAP_50G_AUI_PAM4)) +#define NBL_PORT_CAP_SPEED_100G_MASK \ + (BIT(NBL_PORT_CAP_100GBASE_KR4) | BIT(NBL_PORT_CAP_100GBASE_SR4) |\ + BIT(NBL_PORT_CAP_100GBASE_CR4) | BIT(NBL_PORT_CAP_100G_AUI4) |\ + BIT(NBL_PORT_CAP_100G_CAUI4) | 
BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4) |\ + BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4) | BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4) |\ + BIT(NBL_PORT_CAP_100G_AUI2_PAM4)) +#define NBL_PORT_CAP_SPEED_MASK \ + (NBL_PORT_CAP_SPEED_1G_MASK | NBL_PORT_CAP_SPEED_10G_MASK |\ + NBL_PORT_CAP_SPEED_25G_MASK | NBL_PORT_CAP_SPEED_50G_MASK |\ + NBL_PORT_CAP_SPEED_100G_MASK) +#define NBL_PORT_CAP_PAM4_MASK\ + (BIT(NBL_PORT_CAP_50GBASE_KR_PAM4) | BIT(NBL_PORT_CAP_50GBASE_SR_PAM4) |\ + BIT(NBL_PORT_CAP_50GBASE_CR_PAM4) | BIT(NBL_PORT_CAP_50G_AUI_PAM4) |\ + BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4) | BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4) |\ + BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4) | BIT(NBL_PORT_CAP_100G_AUI2_PAM4)) +#define NBL_ETH_1G_DEFAULT_FEC_MODE NBL_PORT_FEC_OFF +#define NBL_ETH_10G_DEFAULT_FEC_MODE NBL_PORT_FEC_OFF +#define NBL_ETH_25G_DEFAULT_FEC_MODE NBL_PORT_FEC_RS +#define NBL_ETH_100G_DEFAULT_FEC_MODE NBL_PORT_FEC_RS + +enum nbl_port_cap { + NBL_PORT_CAP_TX_PAUSE, + NBL_PORT_CAP_RX_PAUSE, + NBL_PORT_CAP_AUTONEG, + NBL_PORT_CAP_FEC_NONE, + NBL_PORT_CAP_FEC_RS, + NBL_PORT_CAP_FEC_BASER, + NBL_PORT_CAP_1000BASE_T, + NBL_PORT_CAP_1000BASE_X, + NBL_PORT_CAP_10GBASE_T, + NBL_PORT_CAP_10GBASE_KR, + NBL_PORT_CAP_10GBASE_SR, + NBL_PORT_CAP_25GBASE_KR, + NBL_PORT_CAP_25GBASE_SR, + NBL_PORT_CAP_25GBASE_CR, + NBL_PORT_CAP_25G_AUI, + NBL_PORT_CAP_50GBASE_KR2, + NBL_PORT_CAP_50GBASE_SR2, + NBL_PORT_CAP_50GBASE_CR2, + NBL_PORT_CAP_50G_AUI2, + NBL_PORT_CAP_50GBASE_KR_PAM4, + NBL_PORT_CAP_50GBASE_SR_PAM4, + NBL_PORT_CAP_50GBASE_CR_PAM4, + NBL_PORT_CAP_50G_AUI_PAM4, + NBL_PORT_CAP_100GBASE_KR4, + NBL_PORT_CAP_100GBASE_SR4, + NBL_PORT_CAP_100GBASE_CR4, + NBL_PORT_CAP_100G_AUI4, + NBL_PORT_CAP_100G_CAUI4, + NBL_PORT_CAP_100GBASE_KR2_PAM4, + NBL_PORT_CAP_100GBASE_SR2_PAM4, + NBL_PORT_CAP_100GBASE_CR2_PAM4, + NBL_PORT_CAP_100G_AUI2_PAM4, + NBL_PORT_CAP_MAX +}; + +enum nbl_fw_port_speed { + NBL_FW_PORT_SPEED_10G, + NBL_FW_PORT_SPEED_25G, + NBL_FW_PORT_SPEED_50G, + NBL_FW_PORT_SPEED_100G, +}; + +struct nbl_eth_link_info { + u8 link_status; + u32 link_speed; +}; + +struct nbl_port_state { + u64 port_caps; + u64 port_advertising; + u64 port_lp_advertising; + u32 link_speed; + u8 active_fc; + u8 active_fec; /* enum nbl_port_fec */ + u8 link_state; + u8 module_inplace; + u8 port_type; /* enum nbl_port_type */ + u8 port_max_rate; /* enum nbl_port_max_rate */ + u8 fw_port_max_speed; /* enum nbl_fw_port_speed */ +}; + +struct nbl_port_advertising { + u8 eth_id; + u64 speed_advert; + u8 active_fc; + u8 active_fec; /* enum nbl_port_fec */ + u8 autoneg; +}; + +#define PASSTHROUGH_FW_CMD_DATA_LEN (3072) +struct nbl_passthrough_fw_cmd_param { + u16 opcode; + u16 errcode; + u16 in_size; + u16 out_size; + u8 data[PASSTHROUGH_FW_CMD_DATA_LEN]; +}; + +#define NBL_RING_NUM_CMD_LEN (520) +struct nbl_fw_cmd_ring_num_param { + u16 pf_def_max_net_qp_num; + u16 vf_def_max_net_qp_num; + u16 net_max_qp_num[NBL_RING_NUM_CMD_LEN]; +}; + +static inline u64 nbl_speed_to_link_mode(unsigned int speed, u8 autoneg) +{ + u64 link_mode = 0; + int speed_support = 0; + + switch (speed) { + case SPEED_100000: + link_mode |= BIT(NBL_PORT_CAP_100GBASE_KR4) | BIT(NBL_PORT_CAP_100GBASE_SR4) | + BIT(NBL_PORT_CAP_100GBASE_CR4) | BIT(NBL_PORT_CAP_100G_AUI4) | + BIT(NBL_PORT_CAP_100G_CAUI4) | BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4) | + BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4) | BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4) | + BIT(NBL_PORT_CAP_100G_AUI2_PAM4); + fallthrough; + case SPEED_50000: + link_mode |= BIT(NBL_PORT_CAP_50GBASE_KR2) | BIT(NBL_PORT_CAP_50GBASE_SR2) | + BIT(NBL_PORT_CAP_50GBASE_CR2) | 
BIT(NBL_PORT_CAP_50G_AUI2) | + BIT(NBL_PORT_CAP_50GBASE_KR_PAM4) | BIT(NBL_PORT_CAP_50GBASE_SR_PAM4) | + BIT(NBL_PORT_CAP_50GBASE_CR_PAM4) | BIT(NBL_PORT_CAP_50G_AUI_PAM4); + fallthrough; + case SPEED_25000: + link_mode |= BIT(NBL_PORT_CAP_25GBASE_KR) | BIT(NBL_PORT_CAP_25GBASE_SR) | + BIT(NBL_PORT_CAP_25GBASE_CR) | BIT(NBL_PORT_CAP_25G_AUI); + fallthrough; + case SPEED_10000: + link_mode |= BIT(NBL_PORT_CAP_10GBASE_T) | BIT(NBL_PORT_CAP_10GBASE_KR) | + BIT(NBL_PORT_CAP_10GBASE_SR); + fallthrough; + case SPEED_1000: + link_mode |= BIT(NBL_PORT_CAP_1000BASE_T) | BIT(NBL_PORT_CAP_1000BASE_X); + speed_support = 1; + } + + if (autoneg && speed_support) + link_mode |= BIT(NBL_PORT_CAP_AUTONEG); + + return link_mode; +} + +#define NBL_DEFINE_NAME_WITH_WIDTH_CHECK(_struct, _size) \ +_struct; \ +static inline int nbl_##_struct##_size_is_not_equal_to_define(void) \ +{ \ + int check[((sizeof(_struct) * 8) == (_size)) ? 1 : -1]; \ + return check[0]; \ +} + +/** + * list_is_first -- tests whether @ list is the first entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int nbl_list_is_first(const struct list_head *list, + const struct list_head *head) +{ + return list->prev == head; +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int nbl_list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int nbl_list_empty(const struct list_head *head) +{ + return READ_ONCE(head->next) == head; +} + +#define NBL_OPS_CALL(func, para) \ + ({ typeof(func) _func = (func); \ + (!_func) ? 0 : _func para; }) + +enum nbl_module_temp_type { + NBL_MODULE_TEMP, + NBL_MODULE_TEMP_MAX, + NBL_MODULE_TEMP_CRIT, + NBL_MODULE_TEMP_TYPE_MAX, +}; + +struct nbl_load_p4_param { +#define NBL_P4_SECTION_NAME_LEN 32 + u8 name[NBL_P4_SECTION_NAME_LEN]; + u32 addr; + u32 size; + u16 section_index; + u16 section_offset; + u8 *data; + bool start; + bool end; +}; + +struct nbl_board_port_info { + u8 eth_num; + u8 eth_speed; + u8 rsv[6]; +}; + +enum { + NBL_NETIF_F_SG_BIT, /* Scatter/gather IO. */ + NBL_NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */ + NBL_NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */ + NBL_NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ + NBL_NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ + NBL_NETIF_F_HW_VLAN_CTAG_TX_BIT, /* Transmit VLAN CTAG HW acceleration */ + NBL_NETIF_F_HW_VLAN_CTAG_RX_BIT, /* Receive VLAN CTAG HW acceleration */ + NBL_NETIF_F_HW_VLAN_CTAG_FILTER_BIT, /* Receive filtering on VLAN CTAGs */ + NBL_NETIF_F_TSO_BIT, /* ... TCPv4 segmentation */ + NBL_NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ + NBL_NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ + NBL_NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ + NBL_NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */ + NBL_NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ + NBL_NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT, /* ... UDP TUNNEL with TSO & CSUM */ + NBL_NETIF_F_GSO_PARTIAL_BIT, /* ... Only segment inner-most L4 + * in hardware and all other + * headers in software. + */ + NBL_NETIF_F_GSO_UDP_L4_BIT, /* ... 
UDP payload GSO (not UFO) */ + NBL_NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ + NBL_NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ + NBL_NETIF_F_RXHASH_BIT, /* Receive hashing offload */ + NBL_NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ + NBL_NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */ + NBL_NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ + NBL_NETIF_F_HW_VLAN_STAG_FILTER_BIT, /* Receive filtering on VLAN STAGs */ + NBL_NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ + NBL_FEATURES_COUNT +}; + +static const netdev_features_t nbl_netdev_features[] = { + [NBL_NETIF_F_SG_BIT] = NETIF_F_SG, + [NBL_NETIF_F_IP_CSUM_BIT] = NETIF_F_IP_CSUM, + [NBL_NETIF_F_IPV6_CSUM_BIT] = NETIF_F_IPV6_CSUM, + [NBL_NETIF_F_HIGHDMA_BIT] = NETIF_F_HIGHDMA, + [NBL_NETIF_F_HW_VLAN_CTAG_TX_BIT] = NETIF_F_HW_VLAN_CTAG_TX, + [NBL_NETIF_F_HW_VLAN_CTAG_RX_BIT] = NETIF_F_HW_VLAN_CTAG_RX, + [NBL_NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = NETIF_F_HW_VLAN_CTAG_FILTER, + [NBL_NETIF_F_TSO_BIT] = NETIF_F_TSO, + [NBL_NETIF_F_GSO_ROBUST_BIT] = NETIF_F_GSO_ROBUST, + [NBL_NETIF_F_TSO6_BIT] = NETIF_F_TSO6, + [NBL_NETIF_F_GSO_GRE_BIT] = NETIF_F_GSO_GRE, + [NBL_NETIF_F_GSO_GRE_CSUM_BIT] = NETIF_F_GSO_GRE_CSUM, + [NBL_NETIF_F_GSO_UDP_TUNNEL_BIT] = NETIF_F_GSO_UDP_TUNNEL, + [NBL_NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = NETIF_F_GSO_UDP_TUNNEL_CSUM, + [NBL_NETIF_F_GSO_PARTIAL_BIT] = NETIF_F_GSO_PARTIAL, + [NBL_NETIF_F_GSO_UDP_L4_BIT] = NETIF_F_GSO_UDP_L4, + [NBL_NETIF_F_SCTP_CRC_BIT] = NETIF_F_SCTP_CRC, + [NBL_NETIF_F_NTUPLE_BIT] = NETIF_F_NTUPLE, + [NBL_NETIF_F_RXHASH_BIT] = NETIF_F_RXHASH, + [NBL_NETIF_F_RXCSUM_BIT] = NETIF_F_RXCSUM, + [NBL_NETIF_F_HW_VLAN_STAG_TX_BIT] = NETIF_F_HW_VLAN_STAG_TX, + [NBL_NETIF_F_HW_VLAN_STAG_RX_BIT] = NETIF_F_HW_VLAN_STAG_RX, + [NBL_NETIF_F_HW_VLAN_STAG_FILTER_BIT] = NETIF_F_HW_VLAN_STAG_FILTER, + [NBL_NETIF_F_HW_TC_BIT] = NETIF_F_HW_TC, +}; + +#define NBL_FEATURE(name) (1 << (NBL_##name##_BIT)) +#define NBL_FEATURE_TEST_BIT(val, loc) (((val) >> (loc)) & 0x1) + +static inline netdev_features_t nbl_features_to_netdev_features(u64 features) +{ + netdev_features_t netdev_features = 0; + int i = 0; + + for (i = 0; i < NBL_FEATURES_COUNT; i++) { + if (NBL_FEATURE_TEST_BIT(features, i)) + netdev_features += nbl_netdev_features[i]; + } + + return netdev_features; +}; + +enum nbl_abnormal_event_module { + NBL_ABNORMAL_EVENT_DVN = 0, + NBL_ABNORMAL_EVENT_UVN, + NBL_ABNORMAL_EVENT_MAX, +}; + +struct nbl_abnormal_details { + bool abnormal; + u16 qid; + u16 vsi_id; +}; + +struct nbl_abnormal_event_info { + struct nbl_abnormal_details details[NBL_ABNORMAL_EVENT_MAX]; + u32 other_abnormal_info; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h new file mode 100644 index 000000000000..d76bb7a28622 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: + */ + +#ifndef _NBL_DEF_PRODUCT_BASE_H_ +#define _NBL_DEF_PRODUCT_BASE_H_ + +#include "nbl_include.h" + +struct nbl_product_base_ops { + int (*phy_init)(void *p, struct nbl_init_param *param); + void (*phy_remove)(void *p); + int (*res_init)(void *p, struct nbl_init_param *param); + void (*res_remove)(void *p); + int (*chan_init)(void *p, struct nbl_init_param *param); + void (*chan_remove)(void *p); +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c new file mode 100644 index 000000000000..bf3b91ef945c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: + */ + +#include +#include "nbl_core.h" + +static struct nbl_software_tool_table nbl_st_table; +static struct dentry *nbl_debugfs_root; + +static struct nbl_product_base_ops nbl_product_base_ops[NBL_PRODUCT_MAX] = { + { + .phy_init = nbl_phy_init_leonis, + .phy_remove = nbl_phy_remove_leonis, + .res_init = nbl_res_init_leonis, + .res_remove = nbl_res_remove_leonis, + .chan_init = nbl_chan_init_common, + .chan_remove = nbl_chan_remove_common, + }, +}; + +int nbl_core_start(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + int ret = 0; + + ret = nbl_dev_start(adapter, param); + return ret; +} + +void nbl_core_stop(struct nbl_adapter *adapter) +{ + nbl_dev_stop(adapter); +} + +void nbl_core_setup_product_ops(struct nbl_adapter *adapter, struct nbl_init_param *param, + struct nbl_product_base_ops **product_base_ops) +{ + adapter->product_base_ops = &nbl_product_base_ops[param->product_type]; + *product_base_ops = adapter->product_base_ops; +} + +struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter; + struct nbl_common_info *common; + struct nbl_product_base_ops *product_base_ops; + int ret = 0; + + if (!pdev) + return NULL; + + adapter = devm_kzalloc(&pdev->dev, sizeof(struct nbl_adapter), GFP_KERNEL); + if (!adapter) + return NULL; + + adapter->pdev = pdev; + common = NBL_ADAPTER_TO_COMMON(adapter); + + NBL_COMMON_TO_PDEV(common) = pdev; + NBL_COMMON_TO_DEV(common) = &pdev->dev; + NBL_COMMON_TO_DMA_DEV(common) = &pdev->dev; + NBL_COMMON_TO_DEBUG_LVL(common) |= NBL_DEBUG_ALL; + NBL_COMMON_TO_VF_CAP(common) = param->caps.is_vf; + NBL_COMMON_TO_PCI_USING_DAC(common) = param->pci_using_dac; + NBL_COMMON_TO_PCI_FUNC_ID(common) = PCI_FUNC(pdev->devfn); + common->devid = PCI_SLOT(pdev->devfn); + common->bus = pdev->bus->number; + common->product_type = param->product_type; + + memcpy(&adapter->init_param, param, sizeof(adapter->init_param)); + + nbl_core_setup_product_ops(adapter, param, &product_base_ops); + + /* every product's phy/chan/res layer has a great difference, so call their own init ops */ + ret = product_base_ops->phy_init(adapter, param); + if (ret) + goto phy_init_fail; + + ret = product_base_ops->chan_init(adapter, param); + if (ret) + goto chan_init_fail; + + ret = product_base_ops->res_init(adapter, param); + if (ret) + goto res_init_fail; + + ret = nbl_disp_init(adapter, param); + if (ret) + goto disp_init_fail; + + ret = nbl_serv_init(adapter, param); + if (ret) + goto serv_init_fail; + + ret = nbl_dev_init(adapter, param); + if (ret) + goto dev_init_fail; + + nbl_debugfs_func_init(adapter, param); + + return adapter; + +dev_init_fail: + nbl_serv_remove(adapter); +serv_init_fail: + nbl_disp_remove(adapter); +disp_init_fail: + 
product_base_ops->res_remove(adapter); +res_init_fail: + product_base_ops->chan_remove(adapter); +chan_init_fail: + product_base_ops->phy_remove(adapter); +phy_init_fail: + devm_kfree(&pdev->dev, adapter); + return NULL; +} + +void nbl_core_remove(struct nbl_adapter *adapter) +{ + struct device *dev; + struct nbl_common_info *common; + struct nbl_product_base_ops *product_base_ops; + + if (!adapter) + return; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + product_base_ops = NBL_ADAPTER_TO_RPDUCT_BASE_OPS(adapter); + + nbl_debugfs_func_remove(adapter); + nbl_dev_remove(adapter); + nbl_serv_remove(adapter); + nbl_disp_remove(adapter); + product_base_ops->res_remove(adapter); + product_base_ops->chan_remove(adapter); + product_base_ops->phy_remove(adapter); + devm_kfree(dev, adapter); +} + +int nbl_st_init(struct nbl_software_tool_table *st_table) +{ + dev_t devid; + int ret = 0; + + ret = alloc_chrdev_region(&devid, 0, NBL_ST_MAX_DEVICE_NUM, "nblst"); + if (ret < 0) + return ret; + + st_table->major = MAJOR(devid); + st_table->devno = devid; + + st_table->cls = class_create("nblst_cls"); + + if (IS_ERR(st_table->cls)) { + unregister_chrdev(st_table->major, "nblst"); + unregister_chrdev_region(st_table->devno, NBL_ST_MAX_DEVICE_NUM); + ret = -EBUSY; + } + + return ret; +} + +void nbl_st_remove(struct nbl_software_tool_table *st_table) +{ + class_destroy(st_table->cls); + unregister_chrdev(st_table->major, "nblst"); + unregister_chrdev_region(st_table->devno, NBL_ST_MAX_DEVICE_NUM); +} + +struct nbl_software_tool_table *nbl_get_st_table(void) +{ + return &nbl_st_table; +} + +static void nbl_debugfs_init(void) +{ + nbl_debugfs_root = debugfs_create_dir(NBL_DRIVER_NAME, NULL); +} + +static void nbl_debugfs_remove(void) +{ + debugfs_remove_recursive(nbl_debugfs_root); + nbl_debugfs_root = NULL; +} + +struct dentry *nbl_get_debugfs_root(void) +{ + return nbl_debugfs_root; +} + +static void nbl_get_func_param(struct pci_dev *pdev, kernel_ulong_t driver_data, + struct nbl_init_param *param) +{ + param->caps.has_ctrl = NBL_CAP_IS_CTRL(driver_data); + param->caps.has_net = NBL_CAP_IS_NET(driver_data); + param->caps.is_vf = NBL_CAP_IS_VF(driver_data); + param->caps.support_lag = NBL_CAP_SUPPORT_LAG(driver_data); + param->caps.has_user = NBL_CAP_IS_USER(driver_data); + param->caps.has_grc = NBL_CAP_IS_GRC(driver_data); + param->caps.is_nic = NBL_CAP_IS_NIC(driver_data); + param->caps.has_factory_ctrl = NBL_CAP_IS_FACTORY_CTRL(driver_data); + + if (NBL_CAP_IS_LEONIS(driver_data)) + param->product_type = NBL_LEONIS_TYPE; + + /* + * On Leonis, only PF0 has the ctrl capability, but PF0's PCIe device_id is the same as that of the other PFs. + * So handle it specially.
+ */ + if (param->product_type == NBL_LEONIS_TYPE && !param->caps.is_vf && + (PCI_FUNC(pdev->devfn) == 0) && !param->caps.has_factory_ctrl) { + param->caps.has_ctrl = 1; + param->caps.has_grc = 1; + } + + if (param->caps.has_ctrl && param->caps.has_factory_ctrl) { + dev_err(&pdev->dev, "Do not support ctrl & factory_ctrl simultaneously, skip ctrl\n"); + memset(&param->caps, 0, sizeof(param->caps)); + param->caps.has_factory_ctrl = true; + } +} + +static int nbl_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct nbl_adapter *adapter = NULL; + struct nbl_init_param param = {{0}}; + int err; + + dev_info(dev, "nbl probe\n"); + + err = pci_enable_device(pdev); + if (err) + return err; + + param.pci_using_dac = true; + nbl_get_func_param(pdev, id->driver_data, &param); + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (err) { + dev_info(dev, "Configure DMA 64 bit mask failed, err = %d\n", err); + param.pci_using_dac = false; + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(dev, "Configure DMA 32 bit mask failed, err = %d\n", err); + goto configure_dma_err; + } + } + + pci_set_master(pdev); + + pci_save_state(pdev); + + adapter = nbl_core_init(pdev, &param); + if (!adapter) { + dev_err(dev, "nbl adapter init failed\n"); + err = -EAGAIN; + goto adapter_init_err; + } + + pci_set_drvdata(pdev, adapter); + + err = nbl_core_start(adapter, &param); + if (err) + goto core_start_err; + + dev_info(dev, "nbl probe finished\n"); + + return 0; + +core_start_err: + nbl_core_remove(adapter); +adapter_init_err: + pci_clear_master(pdev); +configure_dma_err: + pci_disable_device(pdev); + return err; +} + +static void nbl_remove(struct pci_dev *pdev) +{ + struct nbl_adapter *adapter = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "nbl remove\n"); + pci_disable_sriov(pdev); + + nbl_core_stop(adapter); + nbl_core_remove(adapter); + + pci_clear_master(pdev); + pci_disable_device(pdev); + + dev_info(&pdev->dev, "nbl remove OK!\n"); +} + +static void nbl_shutdown(struct pci_dev *pdev) +{ + struct nbl_adapter *adapter = pci_get_drvdata(pdev); + + if (!NBL_COMMON_TO_VF_CAP(NBL_ADAPTER_TO_COMMON(adapter))) + nbl_remove(pdev); + + dev_info(&pdev->dev, "nbl shutdown OK\n"); +} + +static __maybe_unused int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + int err; + + if (!num_vfs) { + pci_disable_sriov(pdev); + return 0; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_err(&pdev->dev, "nbl enable sriov failed %d!\n", err); + return err; + } + + return num_vfs; +} + +#define NBL_VENDOR_ID (0x1F0F) + +/* + * Leonis DeviceID + * 0x3403-0x3413 for snic v3r1 product + */ +#define NBL_DEVICE_ID_M18110 (0x3403) +#define NBL_DEVICE_ID_M18110_LX (0x3404) +#define NBL_DEVICE_ID_M18110_BASE_T (0x3405) +#define NBL_DEVICE_ID_M18110_LX_BASE_T (0x3406) +#define NBL_DEVICE_ID_M18110_OCP (0x3407) +#define NBL_DEVICE_ID_M18110_LX_OCP (0x3408) +#define NBL_DEVICE_ID_M18110_BASE_T_OCP (0x3409) +#define NBL_DEVICE_ID_M18110_LX_BASE_T_OCP (0x340a) +#define NBL_DEVICE_ID_M18120 (0x340b) +#define NBL_DEVICE_ID_M18120_LX (0x340c) +#define NBL_DEVICE_ID_M18120_BASE_T (0x340d) +#define NBL_DEVICE_ID_M18120_LX_BASE_T (0x340e) +#define NBL_DEVICE_ID_M18120_OCP (0x340f) +#define NBL_DEVICE_ID_M18120_LX_OCP (0x3410) +#define NBL_DEVICE_ID_M18120_BASE_T_OCP (0x3411) +#define NBL_DEVICE_ID_M18120_LX_BASE_T_OCP (0x3412) +#define NBL_DEVICE_ID_M18100_VF (0x3413) + +static const struct pci_device_id nbl_id_table[] = { + {
PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_BASE_T), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_BASE_T), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_BASE_T_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT)}, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_BASE_T_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_BASE_T), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_BASE_T), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + 
NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_BASE_T_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_BASE_T_OCP), .driver_data = + NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) | + NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) }, + /* required as sentinel */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, nbl_id_table); + +static int nbl_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct nbl_adapter *adapter = pci_get_drvdata(pdev); + + return nbl_dev_suspend(adapter); +} + +static int nbl_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct nbl_adapter *adapter = pci_get_drvdata(pdev); + + return nbl_dev_resume(adapter); +} + +static SIMPLE_DEV_PM_OPS(nbl_pm_ops, nbl_suspend, nbl_resume); +static struct pci_driver nbl_driver = { + .name = NBL_DRIVER_NAME, + .id_table = nbl_id_table, + .probe = nbl_probe, + .remove = nbl_remove, + .shutdown = nbl_shutdown, + .driver.pm = &nbl_pm_ops, +}; + +static int __init nbl_module_init(void) +{ + int status; + + nbl_dev_user_module_init(); + status = nbl_common_create_wq(); + if (status) { + pr_err("Failed to create wq, err = %d\n", status); + goto wq_create_failed; + } + + nbl_st_init(nbl_get_st_table()); + nbl_debugfs_init(); + + nbl_event_init(); + + status = pci_register_driver(&nbl_driver); + if (status) { + pr_err("Failed to register PCI driver, err = %d\n", status); + goto pci_register_driver_failed; + } + + return 0; + +pci_register_driver_failed: + nbl_debugfs_remove(); + nbl_common_destroy_wq(); +wq_create_failed: + nbl_dev_user_module_destroy(); + return status; +} + +static void __exit nbl_module_exit(void) +{ + pci_unregister_driver(&nbl_driver); + + nbl_st_remove(nbl_get_st_table()); + + nbl_common_destroy_wq(); + + nbl_dev_user_module_destroy(); + + nbl_debugfs_remove(); + + nbl_event_remove(); + + pr_info("nbl module unloaded\n"); +} + +module_init(nbl_module_init); +module_exit(nbl_module_exit); +MODULE_LICENSE("GPL"); + +#define NBL_FW_PATH "nbl/" +#define NBL_FW_SNIC_PATH NBL_FW_PATH "snic_v3r1/" +#define NBL_FW_TUNNEL_TOE_P4 NBL_FW_SNIC_PATH + +MODULE_FIRMWARE(NBL_FW_SNIC_PATH "nbl_single_tunnel_toe_enhance.elf"); -- Gitee
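
Usage sketch (illustration only; text below the "-- " trailer is not applied by git am): how a u64 capability word assembled from NBL_FEATURE() bits is turned into netdev_features_t by nbl_features_to_netdev_features() from nbl_include.h before being handed to the stack. The caller name nbl_example_apply_features() and the idea that the word comes from a firmware/adminq capability report are assumptions for illustration; the real call sites live in the service/dev layers, outside this hunk.

/* Assumes <linux/netdevice.h> plus the NBL_FEATURE()/nbl_features_to_netdev_features()
 * definitions from nbl_include.h; nbl_example_apply_features() is hypothetical.
 */
static void nbl_example_apply_features(struct net_device *netdev, u64 nbl_features)
{
	netdev_features_t features;

	/* e.g. nbl_features = NBL_FEATURE(NETIF_F_SG) | NBL_FEATURE(NETIF_F_TSO) |
	 *		       NBL_FEATURE(NETIF_F_RXCSUM);
	 */
	features = nbl_features_to_netdev_features(nbl_features);

	/* expose the translated bits to the networking core */
	netdev->hw_features |= features;
	netdev->features |= features;
}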
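
A second sketch, for the product abstraction in nbl_main.c: struct nbl_product_base_ops is indexed by nbl_init_param::product_type, so supporting another ASIC family would presumably mean adding an enum value alongside NBL_LEONIS_TYPE in nbl_include.h and a matching entry in the ops table. NBL_EXAMPLE_TYPE and the nbl_*_example() constructors below are hypothetical names used only to show the shape of such an entry; only the Leonis entry exists in this patch.

/* How the ops table would look with a hypothetical second family added. */
static struct nbl_product_base_ops nbl_product_base_ops[NBL_PRODUCT_MAX] = {
	[NBL_LEONIS_TYPE] = {
		.phy_init	= nbl_phy_init_leonis,
		.phy_remove	= nbl_phy_remove_leonis,
		.res_init	= nbl_res_init_leonis,
		.res_remove	= nbl_res_remove_leonis,
		.chan_init	= nbl_chan_init_common,
		.chan_remove	= nbl_chan_remove_common,
	},
	[NBL_EXAMPLE_TYPE] = {			/* hypothetical */
		.phy_init	= nbl_phy_init_example,
		.phy_remove	= nbl_phy_remove_example,
		.res_init	= nbl_res_init_example,
		.res_remove	= nbl_res_remove_example,
		.chan_init	= nbl_chan_init_common,	/* assume the common channel layer is reused */
		.chan_remove	= nbl_chan_remove_common,
	},
};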