diff --git a/Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst b/Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst new file mode 100644 index 0000000000000000000000000000000000000000..c2dd701ccd07ca335d750393b54b661b059701c8 --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst @@ -0,0 +1,70 @@ +.. SPDX-License-Identifier: GPL-2.0
+
+============================================================
+Linux Base Driver for Nebula-matrix M18120-NIC family
+============================================================
+
+Overview:
+=========
+M18120-NIC is a series of network interface cards for the data center area.
+
+The driver supports link speeds of 100GbE/25GbE/10GbE.
+
+M18120-NIC devices support SR-IOV. This driver is used for both the Physical
+Function (PF) and the Virtual Function (VF).
+
+M18120-NIC devices support an MSI-X interrupt vector for each Tx/Rx queue, as
+well as interrupt moderation.
+
+M18120-NIC devices also support various offload features such as checksum
+offload and Receive-Side Scaling (RSS).
+
+
+Supported PCI vendor ID/device IDs:
+===================================
+
+1f0f:3403 - M18110 Family PF
+1f0f:3404 - M18110 Lx Family PF
+1f0f:3405 - M18110 Family BASE-T PF
+1f0f:3406 - M18110 Lx Family BASE-T PF
+1f0f:3407 - M18110 Family OCP PF
+1f0f:3408 - M18110 Lx Family OCP PF
+1f0f:3409 - M18110 Family BASE-T OCP PF
+1f0f:340a - M18110 Lx Family BASE-T OCP PF
+1f0f:340b - M18120 Family PF
+1f0f:340c - M18120 Lx Family PF
+1f0f:340d - M18120 Family BASE-T PF
+1f0f:340e - M18120 Lx Family BASE-T PF
+1f0f:340f - M18120 Family OCP PF
+1f0f:3410 - M18120 Lx Family OCP PF
+1f0f:3411 - M18120 Family BASE-T OCP PF
+1f0f:3412 - M18120 Lx Family BASE-T OCP PF
+1f0f:3413 - M18100 Family Virtual Function
+
+ethtool support
+===============
+
+Obtain basic information about the network card:
+    ethtool -i enp130s0f0
+
+Get the network card ring parameters:
+    ethtool -g enp130s0f0
+
+Set the ring parameters:
+    ethtool -G enp130s0f0 rx 1024 tx 1024
+
+View statistics:
+    ethtool -S enp130s0f0
+
+View optical module information:
+    ethtool -m enp130s0f0
+
+Support
+=======
+
+For more information about the M18120-NIC family, please visit the following
+URL:
+https://www.nebula-matrix.com/
+
+If an issue is identified with the released source code on the supported kernel
+with a supported adapter, email the specific information related to the issue
+to open@nebula-matrix.com.
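+
+When reporting an issue, please attach the driver information and statistics
+collected with the ethtool commands above, for example (the interface name
+and output file names here are only illustrative):
+
+    ethtool -i enp130s0f0 > nbl-driver-info.txt
+    ethtool -S enp130s0f0 > nbl-stats.txt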
diff --git a/anolis/configs/L1-RECOMMEND/CONFIG_NBL_CORE b/anolis/configs/L1-RECOMMEND/CONFIG_NBL_CORE
new file mode 100644
index 0000000000000000000000000000000000000000..d4f53c7b6a1e5a1cbd061bc23ddd8d5cc7ab2c30
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/CONFIG_NBL_CORE
@@ -0,0 +1 @@
+CONFIG_NBL_CORE=m
diff --git a/anolis/configs/L1-RECOMMEND/CONFIG_NET_VENDOR_NEBULA_MATRIX b/anolis/configs/L1-RECOMMEND/CONFIG_NET_VENDOR_NEBULA_MATRIX
new file mode 100644
index 0000000000000000000000000000000000000000..06c7c0a1dd6f6ad340cf0d783b5094f6883a9519
--- /dev/null
+++ b/anolis/configs/L1-RECOMMEND/CONFIG_NET_VENDOR_NEBULA_MATRIX
@@ -0,0 +1 @@
+CONFIG_NET_VENDOR_NEBULA_MATRIX=y
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 268c84e49194e534d4d702ec4f6c11520632da9d..c79fbba85d7cb50b25b3665affe620f0dd8766a8 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -195,5 +195,6 @@ source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/nebula-matrix/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 423e9edd67771d60498eea0f5ff8d73a56d4cb7b..de3a73b689189aecbb86c1b0133e06514fda9ed5 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -107,3 +107,4 @@ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
 obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
 obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/
+obj-$(CONFIG_NET_VENDOR_NEBULA_MATRIX) += nebula-matrix/
diff --git a/drivers/net/ethernet/nebula-matrix/Kconfig b/drivers/net/ethernet/nebula-matrix/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..e92a661256295658415bd58ac423ad287bff217c
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Nebula-matrix network device configuration
+#
+
+config NET_VENDOR_NEBULA_MATRIX
+	bool "Nebula-matrix devices"
+	default y
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Nebula-matrix cards. If you say Y, you will be
+	  asked for your specific card in the following questions.
+
+if NET_VENDOR_NEBULA_MATRIX
+
+config NBL_CORE
+	tristate "Nebula-matrix Ethernet Controller m18110 Family support"
+	depends on PCI && VFIO
+	depends on ARM64 || X86_64
+	default m
+	help
+	  This driver supports the Nebula-matrix Ethernet Controller m18110 family
+	  of devices. For more information about this product, see the smart-NIC
+	  product description:
+
+	  <https://www.nebula-matrix.com/>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/device_drivers/ethernet/nebula-matrix/m18120.rst>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called nbl_core.
+
+endif # NET_VENDOR_NEBULA_MATRIX
diff --git a/drivers/net/ethernet/nebula-matrix/Makefile b/drivers/net/ethernet/nebula-matrix/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..dc6bf7dcd6bfdbd1e44ea996739f503d6e768cc0
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Nebula-matrix network device drivers.
+#
+
+obj-$(CONFIG_NBL_CORE) += nbl/
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/Makefile b/drivers/net/ethernet/nebula-matrix/nbl/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..4a8b896e356462f39b58ca7cd56b4976015dd2f0
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/Makefile
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2021 Nebula Matrix Limited.
+# Author: Bennie Yan
+
+ifeq ($(KERNELRELEASE),)
+
+KERNELDIR ?= /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+modules:
+	$(MAKE) -C $(KERNELDIR) M=$(PWD) modules
+
+modules_install:
+	$(MAKE) -C $(KERNELDIR) M=$(PWD) modules_install
+
+clean:
+	$(MAKE) -C $(KERNELDIR) M=$(PWD) clean
+
+else
+
+obj-m := nbl_core.o
+
+nbl_core-objs += nbl_common/nbl_common.o \
+		 nbl_common/nbl_event.o \
+		 nbl_common/nbl_net_sysfs.o \
+		 nbl_channel/nbl_channel.o \
+		 nbl_channel/nbl_cmdq.o \
+		 nbl_hw/nbl_hw_leonis/nbl_phy_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_fc_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_flow_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_queue_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_resource_leonis.o \
+		 nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.o \
+		 nbl_hw/nbl_fc.o \
+		 nbl_hw/nbl_resource.o \
+		 nbl_hw/nbl_interrupt.o \
+		 nbl_hw/nbl_txrx.o \
+		 nbl_hw/nbl_queue.o \
+		 nbl_hw/nbl_vsi.o \
+		 nbl_hw/nbl_adminq.o \
+		 nbl_hw/nbl_accel.o \
+		 nbl_hw/nbl_fd.o \
+		 nbl_core/nbl_lag.o \
+		 nbl_core/nbl_dispatch.o \
+		 nbl_core/nbl_debugfs.o \
+		 nbl_core/nbl_ethtool.o \
+		 nbl_core/nbl_service.o \
+		 nbl_core/nbl_dev_rdma.o \
+		 nbl_core/nbl_sysfs.o \
+		 nbl_core/nbl_dev_user.o \
+		 nbl_core/nbl_dev.o \
+		 nbl_core/nbl_ktls.o \
+		 nbl_core/nbl_ipsec.o \
+		 nbl_core/nbl_tc_tun.o \
+		 nbl_core/nbl_tc.o \
+		 nbl_core/nbl_hwmon.o \
+		 nbl_main.o
+
+# Do not modify the include paths, unless you are adding a new file that needs
+# headers from its direct upper directory (see the exceptions below).
+#
+# The structure requires that code only accesses the header files in nbl_include,
+# or the .h that has the same name as the .c file. The only exception is that
+# product-specific files can access the same headers as the common part, e.g.
+# nbl_phy_leonis.c can access nbl_phy.h.
+# Make sure to put everything you need to expose to others in nbl_def_xxx.h, and
+# keep everything in your own .h private.
+#
+# Try not to break these rules, sincerely.
+ccflags-y := -Werror -Wall -I $(src) -I $(src)/nbl_include -I $(src)/nbl_export -I $(src)/nbl_hw
+
+CFLAGS_nbl_hw/nbl_hw_leonis/nbl_phy_leonis.o += -I $(src)/nbl_hw
+CFLAGS_nbl_hw/nbl_hw_leonis/nbl_flow_leonis.o += -I $(src)/nbl_hw
+CFLAGS_nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.o += -I $(src)/nbl_hw
+CFLAGS_nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.o += -I $(src)/nbl_hw
+CFLAGS_nbl_hw/nbl_hw_leonis/nbl_queue_leonis.o += -I $(src)/nbl_hw
+CFLAGS_nbl_hw/nbl_hw_leonis/nbl_resource_leonis.o += -I $(src)/nbl_hw
+
+endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c
new file mode 100644
index 0000000000000000000000000000000000000000..0219833c2eeca1d23759e2ac37bf1fda5e8e91b6
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.c
@@ -0,0 +1,1602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ + +#include "nbl_channel.h" +#include "nbl_cmdq.h" + +static int nbl_chan_send_ack(void *priv, struct nbl_chan_ack_info *chan_ack); + +static int nbl_chan_add_msg_handler(struct nbl_channel_mgt *chan_mgt, u16 msg_type, + nbl_chan_resp func, void *priv) +{ + struct nbl_chan_msg_node_data handler = {0}; + int ret; + + handler.func = func; + handler.priv = priv; + + ret = nbl_common_alloc_hash_node(chan_mgt->handle_hash_tbl, &msg_type, &handler, NULL); + + return ret; +} + +static int nbl_chan_init_msg_handler(struct nbl_channel_mgt *chan_mgt, u8 user_notify) +{ + struct nbl_hash_tbl_key tbl_key; + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int ret = 0; + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_chan_notify_userdev *notify; + + if (user_notify) { + notify = devm_kzalloc(dev, sizeof(struct nbl_chan_notify_userdev), GFP_KERNEL); + if (!notify) + return -ENOMEM; + + mutex_init(¬ify->lock); + chan_mgt->notify = notify; + } + + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(u16), + sizeof(struct nbl_chan_msg_node_data), + NBL_CHAN_HANDLER_TBL_BUCKET_SIZE, false); + + chan_mgt->handle_hash_tbl = nbl_common_init_hash_table(&tbl_key); + if (!chan_mgt->handle_hash_tbl) { + ret = -ENOMEM; + goto alloc_hashtbl_failed; + } + + return 0; + +alloc_hashtbl_failed: + if (user_notify) { + chan_mgt->notify = NULL; + devm_kfree(dev, notify); + } + return ret; +} + +static void nbl_chan_remove_msg_handler(struct nbl_channel_mgt *chan_mgt) +{ + nbl_common_remove_hash_table(chan_mgt->handle_hash_tbl, NULL); + + chan_mgt->handle_hash_tbl = NULL; + if (chan_mgt->notify) { + devm_kfree(NBL_COMMON_TO_DEV(chan_mgt->common), chan_mgt->notify); + chan_mgt->notify = NULL; + } +} + +static bool nbl_chan_is_admiq(struct nbl_chan_info *chan_info) +{ + return chan_info->chan_type == NBL_CHAN_TYPE_ADMINQ; +} + +static void nbl_chan_init_queue_param(struct nbl_chan_info *chan_info, + u16 num_txq_entries, u16 num_rxq_entries, + u16 txq_buf_size, u16 rxq_buf_size) +{ + spin_lock_init(&chan_info->txq_lock); + chan_info->num_txq_entries = num_txq_entries; + chan_info->num_rxq_entries = num_rxq_entries; + chan_info->txq_buf_size = txq_buf_size; + chan_info->rxq_buf_size = rxq_buf_size; +} + +static int nbl_chan_init_tx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct nbl_chan_ring *txq = &chan_info->txq; + size_t size = chan_info->num_txq_entries * sizeof(struct nbl_chan_tx_desc); + + txq->desc = dmam_alloc_coherent(dma_dev, size, &txq->dma, GFP_KERNEL | __GFP_ZERO); + if (!txq->desc) + return -ENOMEM; + + chan_info->wait = devm_kcalloc(dev, chan_info->num_txq_entries, + sizeof(struct nbl_chan_waitqueue_head), GFP_KERNEL); + if (!chan_info->wait) + goto req_wait_queue_failed; + + txq->buf = devm_kcalloc(dev, chan_info->num_txq_entries, + sizeof(struct nbl_chan_buf), GFP_KERNEL); + if (!txq->buf) + goto req_num_txq_entries; + + return 0; + +req_num_txq_entries: + devm_kfree(dev, chan_info->wait); +req_wait_queue_failed: + dmam_free_coherent(dma_dev, size, txq->desc, txq->dma); + + txq->desc = NULL; + txq->dma = 0; + chan_info->wait = NULL; + + return -ENOMEM; +} + +static int nbl_chan_init_rx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct nbl_chan_ring *rxq 
= &chan_info->rxq; + size_t size = chan_info->num_rxq_entries * sizeof(struct nbl_chan_rx_desc); + + rxq->desc = dmam_alloc_coherent(dma_dev, size, &rxq->dma, GFP_KERNEL | __GFP_ZERO); + if (!rxq->desc) { + dev_err(dev, "Allocate DMA for chan rx descriptor ring failed\n"); + return -ENOMEM; + } + + rxq->buf = devm_kcalloc(dev, chan_info->num_rxq_entries, + sizeof(struct nbl_chan_buf), GFP_KERNEL); + if (!rxq->buf) { + dmam_free_coherent(dma_dev, size, rxq->desc, rxq->dma); + rxq->desc = NULL; + rxq->dma = 0; + return -ENOMEM; + } + + return 0; +} + +static void nbl_chan_remove_tx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct nbl_chan_ring *txq = &chan_info->txq; + size_t size = chan_info->num_txq_entries * sizeof(struct nbl_chan_tx_desc); + + devm_kfree(dev, txq->buf); + txq->buf = NULL; + + devm_kfree(dev, chan_info->wait); + chan_info->wait = NULL; + + dmam_free_coherent(dma_dev, size, txq->desc, txq->dma); + txq->desc = NULL; + txq->dma = 0; +} + +static void nbl_chan_remove_rx_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(common); + struct nbl_chan_ring *rxq = &chan_info->rxq; + size_t size = chan_info->num_rxq_entries * sizeof(struct nbl_chan_rx_desc); + + devm_kfree(dev, rxq->buf); + rxq->buf = NULL; + + dmam_free_coherent(dma_dev, size, rxq->desc, rxq->dma); + rxq->desc = NULL; + rxq->dma = 0; +} + +static int nbl_chan_init_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + int err; + + err = nbl_chan_init_tx_queue(common, chan_info); + if (err) + return err; + + err = nbl_chan_init_rx_queue(common, chan_info); + if (err) + goto setup_rx_queue_err; + + return 0; + +setup_rx_queue_err: + nbl_chan_remove_tx_queue(common, chan_info); + return err; +} + +static void nbl_chan_config_queue(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info, bool tx) +{ + struct nbl_phy_ops *phy_ops; + struct nbl_chan_ring *ring; + dma_addr_t dma_addr; + int size_bwid = ilog2(chan_info->num_rxq_entries); + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (tx) + ring = &chan_info->txq; + else + ring = &chan_info->rxq; + + dma_addr = ring->dma; + + if (nbl_chan_is_admiq(chan_info)) { + if (tx) + phy_ops->config_adminq_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + else + phy_ops->config_adminq_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + } else { + if (tx) + phy_ops->config_mailbox_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + else + phy_ops->config_mailbox_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + dma_addr, size_bwid); + } +} + +static int nbl_chan_alloc_all_tx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_buf *buf; + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u16 i; + + for (i = 0; i < chan_info->num_txq_entries; i++) { + buf = &txq->buf[i]; + buf->va = dmam_alloc_coherent(dma_dev, chan_info->txq_buf_size, + &buf->pa, GFP_KERNEL | __GFP_ZERO); + if (!buf->va) { + dev_err(dev, "Allocate buffer for chan tx queue failed\n"); + goto err; + } + } + + txq->next_to_clean = 0; + txq->next_to_use = 0; + txq->tail_ptr = 0; + + return 0; +err: + while (i--) { + 
buf = &txq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->txq_buf_size, buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } + + return -ENOMEM; +} + +static int nbl_chan_cfg_mailbox_qinfo_map_table(struct nbl_channel_mgt *chan_mgt) +{ + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + u16 func_id; + u32 pf_mask; + + pf_mask = phy_ops->get_host_pf_mask(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + for (func_id = 0; func_id < NBL_MAX_PF; func_id++) { + if (!(pf_mask & (1 << func_id))) + phy_ops->cfg_mailbox_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), func_id, + common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id); + } + + return 0; +} + +static int nbl_chan_cfg_adminq_qinfo_map_table(struct nbl_channel_mgt *chan_mgt) +{ + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + phy_ops->cfg_adminq_qinfo(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), + common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common)); + + return 0; +} + +static int nbl_chan_cfg_qinfo_map_table(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + int err; + + if (!nbl_chan_is_admiq(chan_info)) + err = nbl_chan_cfg_mailbox_qinfo_map_table(chan_mgt); + else + err = nbl_chan_cfg_adminq_qinfo_map_table(chan_mgt); + + return err; +} + +static void nbl_chan_free_all_tx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_buf *buf; + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u16 i; + + for (i = 0; i < chan_info->num_txq_entries; i++) { + buf = &txq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->txq_buf_size, + buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } +} + +#define NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, tail_ptr, qid) \ +do { \ + typeof(phy_ops) _phy_ops = (phy_ops); \ + typeof(chan_mgt) _chan_mgt = (chan_mgt); \ + typeof(tail_ptr) _tail_ptr = (tail_ptr); \ + typeof(qid) _qid = (qid); \ + if (nbl_chan_is_admiq(chan_info)) \ + (_phy_ops)->update_adminq_queue_tail_ptr(NBL_CHAN_MGT_TO_PHY_PRIV(_chan_mgt), \ + _tail_ptr, _qid); \ + else \ + (_phy_ops)->update_mailbox_queue_tail_ptr(NBL_CHAN_MGT_TO_PHY_PRIV(_chan_mgt), \ + _tail_ptr, _qid); \ +} while (0) + +static int nbl_chan_alloc_all_rx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_phy_ops *phy_ops; + struct nbl_chan_ring *rxq = &chan_info->rxq; + struct nbl_chan_buf *buf; + struct nbl_chan_rx_desc *desc; + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u32 retry_times = 0; + u16 i; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + for (i = 0; i < chan_info->num_rxq_entries; i++) { + buf = &rxq->buf[i]; + buf->va = dmam_alloc_coherent(dma_dev, chan_info->rxq_buf_size, + &buf->pa, GFP_KERNEL | __GFP_ZERO); + if (!buf->va) { + dev_err(dev, "Allocate buffer for chan rx queue failed\n"); + goto err; + } + } + + desc = rxq->desc; + for (i = 0; i < chan_info->num_rxq_entries - 1; i++) { + buf = &rxq->buf[i]; + desc[i].flags = NBL_CHAN_RX_DESC_AVAIL; + desc[i].buf_addr = buf->pa; + desc[i].buf_len = chan_info->rxq_buf_size; + } + + rxq->next_to_clean = 0; + rxq->next_to_use = chan_info->num_rxq_entries 
- 1; + rxq->tail_ptr = chan_info->num_rxq_entries - 1; + /* mb for notify */ + mb(); + + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, rxq->tail_ptr, NBL_MB_RX_QID); + + for (retry_times = 0; retry_times < 3; retry_times++) { + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, + rxq->tail_ptr, NBL_MB_RX_QID); + usleep_range(NBL_CHAN_TX_WAIT_US * 50, NBL_CHAN_TX_WAIT_US * 60); + } + + return 0; +err: + while (i--) { + buf = &rxq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->rxq_buf_size, + buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } + + return -ENOMEM; +} + +static void nbl_chan_free_all_rx_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *rxq = &chan_info->rxq; + struct nbl_chan_buf *buf; + struct device *dma_dev = NBL_COMMON_TO_DMA_DEV(chan_mgt->common); + u16 i; + + for (i = 0; i < chan_info->num_rxq_entries; i++) { + buf = &rxq->buf[i]; + dmam_free_coherent(dma_dev, chan_info->rxq_buf_size, + buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } +} + +static int nbl_chan_alloc_all_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + int err; + + err = nbl_chan_alloc_all_tx_bufs(chan_mgt, chan_info); + if (err) + return err; + + err = nbl_chan_alloc_all_rx_bufs(chan_mgt, chan_info); + if (err) + goto alloc_rx_bufs_err; + + return 0; + +alloc_rx_bufs_err: + nbl_chan_free_all_tx_bufs(chan_mgt, chan_info); + return err; +} + +static void nbl_chan_stop_queue(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (nbl_chan_is_admiq(chan_info)) { + phy_ops->stop_adminq_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + phy_ops->stop_adminq_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + } else { + phy_ops->stop_mailbox_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + phy_ops->stop_mailbox_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + } +} + +static void nbl_chan_free_all_bufs(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info) +{ + nbl_chan_free_all_tx_bufs(chan_mgt, chan_info); + nbl_chan_free_all_rx_bufs(chan_mgt, chan_info); +} + +static void nbl_chan_remove_queue(struct nbl_common_info *common, + struct nbl_chan_info *chan_info) +{ + nbl_chan_remove_tx_queue(common, chan_info); + nbl_chan_remove_rx_queue(common, chan_info); +} + +static int nbl_chan_teardown_queue(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_common_info *common = chan_mgt->common; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + nbl_chan_stop_queue(chan_mgt, chan_info); + + nbl_chan_free_all_bufs(chan_mgt, chan_info); + + nbl_chan_remove_queue(common, chan_info); + + return 0; +} + +static int nbl_chan_setup_queue(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int err; + + nbl_chan_init_queue_param(chan_info, NBL_CHAN_QUEUE_LEN, NBL_CHAN_QUEUE_LEN, + NBL_CHAN_BUF_LEN, NBL_CHAN_BUF_LEN); + + err = nbl_chan_init_queue(common, chan_info); + if (err) + return err; + + nbl_chan_config_queue(chan_mgt, chan_info, true); /* tx */ + nbl_chan_config_queue(chan_mgt, chan_info, false); /* rx */ + + err = nbl_chan_alloc_all_bufs(chan_mgt, chan_info); + if (err) + goto chan_q_setup_fail; + + return 0; + +chan_q_setup_fail: + 
nbl_chan_teardown_queue(chan_mgt, chan_type); + return err; +} + +static void nbl_chan_shutdown_queue(struct nbl_channel_mgt *chan_mgt, u8 chan_type, bool tx) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (tx) { + if (nbl_chan_is_admiq(chan_info)) + phy_ops->stop_adminq_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + else + phy_ops->stop_mailbox_txq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + + nbl_chan_free_all_tx_bufs(chan_mgt, chan_info); + nbl_chan_remove_tx_queue(common, chan_info); + } else { + if (nbl_chan_is_admiq(chan_info)) + phy_ops->stop_adminq_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + else + phy_ops->stop_mailbox_rxq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt)); + + nbl_chan_free_all_rx_bufs(chan_mgt, chan_info); + nbl_chan_remove_rx_queue(common, chan_info); + } +} + +static int nbl_chan_start_txq(struct nbl_channel_mgt *chan_mgt, u8 chan_type) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int ret; + + ret = nbl_chan_init_tx_queue(common, chan_info); + if (ret) + return ret; + + nbl_chan_config_queue(chan_mgt, chan_info, true); /* tx */ + + ret = nbl_chan_alloc_all_tx_bufs(chan_mgt, chan_info); + if (ret) + goto alloc_buf_failed; + + return 0; + +alloc_buf_failed: + nbl_chan_shutdown_queue(chan_mgt, chan_type, true); + return ret; +} + +static int nbl_chan_start_rxq(struct nbl_channel_mgt *chan_mgt, u8 chan_type) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + int ret; + + ret = nbl_chan_init_rx_queue(common, chan_info); + if (ret) + return ret; + + nbl_chan_config_queue(chan_mgt, chan_info, false); /* rx */ + + ret = nbl_chan_alloc_all_rx_bufs(chan_mgt, chan_info); + if (ret) + goto alloc_buf_failed; + + return 0; + +alloc_buf_failed: + nbl_chan_shutdown_queue(chan_mgt, chan_type, false); + return ret; +} + +static int nbl_chan_reset_queue(struct nbl_channel_mgt *chan_mgt, u8 chan_type, bool tx) +{ + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + int i = 0, j = 0, ret = 0; + + /* If someone else is doing resetting, don't bother */ + if (test_bit(NBL_CHAN_RESETTING, chan_info->state)) + return 0; + + /* Make sure rx won't enter if we are resetting */ + set_bit(NBL_CHAN_RESETTING, chan_info->state); + if (chan_info->clean_task) + nbl_common_flush_task(chan_info->clean_task); + + /* Make sure tx won't enter if we are resetting */ + spin_lock(&chan_info->txq_lock); + + /* If we are in a race, and someone else has finished it, just return */ + if (!test_bit(NBL_CHAN_RESETTING, chan_info->state)) { + spin_unlock(&chan_info->txq_lock); + return 0; + } + + /* Make sure no one is waiting before we reset. 
*/ + while (i++ < (NBL_CHAN_ACK_WAIT_TIME * 2) / HZ) { + for (j = 0; j < NBL_CHAN_QUEUE_LEN; j++) + if (chan_info->wait[j].status == NBL_MBX_STATUS_WAITING) + break; + + if (j == NBL_CHAN_QUEUE_LEN) + break; + mdelay(1000); + } + + if (j != NBL_CHAN_QUEUE_LEN) { + nbl_warn(NBL_CHAN_MGT_TO_COMMON(chan_mgt), NBL_DEBUG_MBX, + "Some wait_head unreleased, fail to reset"); + clear_bit(NBL_CHAN_RESETTING, chan_info->state); + spin_unlock(&chan_info->txq_lock); + return 0; + } + + nbl_chan_shutdown_queue(chan_mgt, chan_type, tx); + + if (tx) + ret = nbl_chan_start_txq(chan_mgt, chan_type); + else + ret = nbl_chan_start_rxq(chan_mgt, chan_type); + + /* Make sure we clear this bit inside lock, so that we don't reset it twice if race */ + clear_bit(NBL_CHAN_RESETTING, chan_info->state); + spin_unlock(&chan_info->txq_lock); + + return ret; +} + +static bool nbl_chan_check_dma_err(struct nbl_channel_mgt *chan_mgt, u8 chan_type, bool tx) +{ + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + if (phy_ops->get_hw_status(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt))) + return false; + + if (chan_type == NBL_CHAN_TYPE_MAILBOX) + return phy_ops->check_mailbox_dma_err(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), tx); + else + return phy_ops->check_adminq_dma_err(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), tx); +} + +static int nbl_chan_update_txqueue(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info, struct nbl_chan_tx_param *param) +{ + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_tx_desc *tx_desc = NBL_CHAN_TX_RING_TO_DESC(txq, txq->next_to_use); + struct nbl_chan_buf *tx_buf = NBL_CHAN_TX_RING_TO_BUF(txq, txq->next_to_use); + + if (param->arg_len > NBL_CHAN_BUF_LEN - sizeof(*tx_desc)) + return -EINVAL; + + tx_desc->dstid = param->dstid; + tx_desc->msg_type = param->msg_type; + tx_desc->msgid = param->msgid; + + if (param->arg_len > NBL_CHAN_TX_DESC_EMBEDDED_DATA_LEN) { + memcpy(tx_buf->va, param->arg, param->arg_len); + tx_desc->buf_addr = tx_buf->pa; + tx_desc->buf_len = param->arg_len; + tx_desc->data_len = 0; + } else { + memcpy(tx_desc->data, param->arg, param->arg_len); + tx_desc->buf_len = 0; + tx_desc->data_len = param->arg_len; + } + tx_desc->flags = NBL_CHAN_TX_DESC_AVAIL; + + /* wmb */ + wmb(); + txq->next_to_use = NBL_NEXT_ID(txq->next_to_use, chan_info->num_txq_entries - 1); + txq->tail_ptr++; + + return 0; +} + +static int nbl_chan_kick_tx_ring(struct nbl_channel_mgt *chan_mgt, struct nbl_chan_info *chan_info) +{ + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_chan_ring *txq = &chan_info->txq; + struct nbl_chan_tx_desc *tx_desc; + int i = 0; + + /* mb for tx notify */ + mb(); + + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, txq->tail_ptr, NBL_MB_TX_QID); + + tx_desc = NBL_CHAN_TX_RING_TO_DESC(txq, txq->next_to_clean); + + while (!(tx_desc->flags & NBL_CHAN_TX_DESC_USED)) { + udelay(NBL_CHAN_TX_WAIT_US); + i++; + + if (!(i % NBL_CHAN_TX_REKICK_WAIT_TIMES)) + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, txq->tail_ptr, + NBL_MB_TX_QID); + + if (i == NBL_CHAN_TX_WAIT_TIMES) { + nbl_err(common, NBL_DEBUG_MBX, "chan send message type: %d timeout\n", + tx_desc->msg_type); + return -EAGAIN; + } + } + + txq->next_to_clean = txq->next_to_use; + return 0; +} + +static void nbl_chan_recv_ack_msg(void *priv, u16 srcid, u16 msgid, void *data, u32 data_len) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_common_info *common 
= NBL_CHAN_MGT_TO_COMMON(chan_mgt);
+	struct nbl_chan_info *chan_info = NULL;
+	struct nbl_chan_waitqueue_head *wait_head = NULL;
+	union nbl_chan_msg_id ack_msgid = {{0}};
+	u32 *payload = (u32 *)data;
+	u32 ack_datalen = 0, ack_msgtype = 0, copy_len = 0;
+
+	if (srcid == NBL_CHAN_ADMINQ_FUNCTION_ID)
+		chan_info = NBL_CHAN_MGT_TO_ADMINQ(chan_mgt);
+	else
+		chan_info = NBL_CHAN_MGT_TO_MAILBOX(chan_mgt);
+
+	ack_datalen = data_len - 3 * sizeof(u32);
+	ack_msgtype = *payload;
+	ack_msgid.id = *(u16 *)(payload + 1);
+	wait_head = &chan_info->wait[ack_msgid.info.loc];
+	wait_head->ack_err = *(payload + 2);
+
+	if (wait_head->msg_type != ack_msgtype) {
+		nbl_warn(common, NBL_DEBUG_MBX, "Skip ack: msg type %d does not match msg type %d\n",
+			 ack_msgtype, wait_head->msg_type);
+		return;
+	}
+
+	if (wait_head->status != NBL_MBX_STATUS_WAITING) {
+		nbl_warn(common, NBL_DEBUG_MBX, "Skip ack with status %d", wait_head->status);
+		return;
+	}
+
+	if (wait_head->msg_index != ack_msgid.info.index) {
+		nbl_warn(common, NBL_DEBUG_MBX, "Skip ack: index %d does not match index %d",
+			 ack_msgid.info.index, wait_head->msg_index);
+		return;
+	}
+
+	if (ack_datalen != wait_head->ack_data_len)
+		nbl_debug(common, NBL_DEBUG_MBX, "Channel payload_len does not match ack_data_len, msgtype:%u, msgid:%u, rcv_data_len:%u, expect_data_len:%u\n",
+			  ack_msgtype, ack_msgid.id, ack_datalen, wait_head->ack_data_len);
+
+	copy_len = min_t(u32, wait_head->ack_data_len, ack_datalen);
+	if (wait_head->ack_err >= 0 && copy_len > 0)
+		memcpy((char *)wait_head->ack_data, payload + 3, copy_len);
+	wait_head->ack_data_len = (u16)copy_len;
+
+	/* Make the copied ack data visible before acked is set */
+	wmb();
+	wait_head->acked = 1;
+	if (wait_head->need_waked)
+		wake_up(&wait_head->wait_queue);
+}
+
+static inline u16 nbl_unused_msg_ring_count(u32 head, u32 tail)
+{
+	return ((tail > head) ?
0 : NBL_USER_DEV_SHMMSGBUF_SIZE) + tail - head - 1; +} + +static int nbl_chan_msg_forward_userdev(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_tx_desc *tx_desc) +{ + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + void *shm_msg_ring = chan_mgt->notify->shm_msg_ring; + char *data = (char *)shm_msg_ring + 8; + u32 *head = (u32 *)shm_msg_ring, tmp; + u32 tail = *(head + 1); + u32 total_len = sizeof(struct nbl_chan_tx_desc) + sizeof(u32), copy_len; + + if (!tx_desc->data_len) + total_len += ALIGN(tx_desc->buf_len, 4); + + tmp = *head; + if (total_len > nbl_unused_msg_ring_count(tmp, tail)) { + dev_err(dev, "user msg ring not enough for msg\n"); + return -E2BIG; + } + + /* save total_len */ + *(u32 *)(data + tmp) = total_len; + tmp += sizeof(u32); + total_len -= sizeof(u32); + if (tmp >= NBL_USER_DEV_SHMMSGBUF_SIZE) + tmp -= NBL_USER_DEV_SHMMSGBUF_SIZE; + + copy_len = NBL_USER_DEV_SHMMSGBUF_SIZE - tmp; + copy_len = min(copy_len, total_len); + memcpy(data + tmp, tx_desc, copy_len); + if (total_len > copy_len) + memcpy(data, (char *)tx_desc + copy_len, total_len - copy_len); + + tmp += total_len; + if (tmp >= NBL_USER_DEV_SHMMSGBUF_SIZE) + tmp -= NBL_USER_DEV_SHMMSGBUF_SIZE; + + /* make sure to update head after content */ + smp_wmb(); + *head = tmp; + + eventfd_signal(chan_mgt->notify->eventfd, 1); + + return 0; +} + +static void nbl_chan_recv_msg(struct nbl_channel_mgt *chan_mgt, void *data, u32 data_len) +{ + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_tx_desc *tx_desc; + struct nbl_chan_msg_node_data *msg_handler; + struct device *dev = NBL_COMMON_TO_DEV(chan_mgt->common); + u16 msg_type, payload_len, srcid, msgid, warn = 1; + void *payload; + + tx_desc = data; + msg_type = tx_desc->msg_type; + dev_dbg(dev, "recv msg_type: %d\n", tx_desc->msg_type); + + srcid = tx_desc->srcid; + msgid = tx_desc->msgid; + if (msg_type >= NBL_CHAN_MSG_MAX) { + dev_warn(dev, "Invalid chan message type %u\n", msg_type); + return; + } + + if (tx_desc->data_len) { + payload = (void *)tx_desc->data; + payload_len = tx_desc->data_len; + } else { + payload = (void *)(tx_desc + 1); + payload_len = tx_desc->buf_len; + } + + msg_handler = nbl_common_get_hash_node(chan_mgt->handle_hash_tbl, &msg_type); + if (msg_handler) { + warn = 0; + msg_handler->func(msg_handler->priv, srcid, msgid, payload, payload_len); + } + + if (chan_mgt->notify) { + mutex_lock(&chan_mgt->notify->lock); + if (chan_mgt->notify->eventfd && test_bit(msg_type, chan_mgt->notify->msgtype) && + chan_mgt->notify->shm_msg_ring) { + warn = 0; + nbl_chan_msg_forward_userdev(chan_mgt, tx_desc); + } + mutex_unlock(&chan_mgt->notify->lock); + } + + if (warn) { + NBL_CHAN_ACK(chan_ack, srcid, msg_type, msgid, -EPERM, NULL, 0); + nbl_chan_send_ack(chan_mgt, &chan_ack); + dev_warn(dev, "Recv channel msg_type: %d, but msg_handler is null!\n", msg_type); + } +} + +static void nbl_chan_advance_rx_ring(struct nbl_channel_mgt *chan_mgt, + struct nbl_chan_info *chan_info, + struct nbl_chan_ring *rxq) +{ + struct nbl_phy_ops *phy_ops; + struct nbl_chan_rx_desc *rx_desc; + struct nbl_chan_buf *rx_buf; + u16 next_to_use; + + phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt); + + next_to_use = rxq->next_to_use; + rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, next_to_use); + rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, next_to_use); + + rx_desc->flags = NBL_CHAN_RX_DESC_AVAIL; + rx_desc->buf_addr = rx_buf->pa; + rx_desc->buf_len = chan_info->rxq_buf_size; + + /* wmb */ + wmb(); + rxq->next_to_use++; + if (rxq->next_to_use == chan_info->num_rxq_entries) + 
rxq->next_to_use = 0; + rxq->tail_ptr++; + + NBL_UPDATE_QUEUE_TAIL_PTR(chan_info, phy_ops, chan_mgt, rxq->tail_ptr, NBL_MB_RX_QID); +} + +static void nbl_chan_clean_queue(struct nbl_channel_mgt *chan_mgt, struct nbl_chan_info *chan_info) +{ + struct nbl_chan_ring *rxq = &chan_info->rxq; + struct nbl_chan_rx_desc *rx_desc; + struct nbl_chan_buf *rx_buf; + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + u16 next_to_clean; + + next_to_clean = rxq->next_to_clean; + rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, next_to_clean); + rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, next_to_clean); + while (rx_desc->flags & NBL_CHAN_RX_DESC_USED) { + if (!(rx_desc->flags & NBL_CHAN_RX_DESC_WRITE)) + nbl_debug(common, NBL_DEBUG_MBX, + "mailbox rx flag 0x%x has no NBL_CHAN_RX_DESC_WRITE\n", + rx_desc->flags); + + dma_rmb(); + nbl_chan_recv_msg(chan_mgt, rx_buf->va, rx_desc->buf_len); + + nbl_chan_advance_rx_ring(chan_mgt, chan_info, rxq); + + next_to_clean++; + if (next_to_clean == chan_info->num_rxq_entries) + next_to_clean = 0; + rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, next_to_clean); + rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, next_to_clean); + } + rxq->next_to_clean = next_to_clean; +} + +static void nbl_chan_clean_queue_subtask(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + if (!test_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state) || + test_bit(NBL_CHAN_RESETTING, chan_info->state)) + return; + + nbl_chan_clean_queue(chan_mgt, chan_info); +} + +static int nbl_chan_get_msg_id(struct nbl_chan_info *chan_info, union nbl_chan_msg_id *msgid) +{ + struct nbl_chan_waitqueue_head *wait = NULL; + int valid_loc = chan_info->wait_head_index, i; + + for (i = 0; i < NBL_CHAN_QUEUE_LEN; i++) { + wait = &chan_info->wait[valid_loc]; + + if (wait->status != NBL_MBX_STATUS_WAITING) { + wait->msg_index = NBL_NEXT_ID(wait->msg_index, NBL_CHAN_MSG_INDEX_MAX - 1); + msgid->info.index = wait->msg_index; + msgid->info.loc = valid_loc; + + valid_loc = NBL_NEXT_ID(valid_loc, chan_info->num_txq_entries - 1); + chan_info->wait_head_index = valid_loc; + return 0; + } + + valid_loc = NBL_NEXT_ID(valid_loc, chan_info->num_txq_entries - 1); + } + + return -ENOSPC; +} + +static int nbl_chan_send_msg(void *priv, struct nbl_chan_send_info *chan_send) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_common_info *common = NBL_CHAN_MGT_TO_COMMON(chan_mgt); + struct nbl_chan_info *chan_info = NBL_CHAN_GET_INFO(chan_mgt, chan_send->dstid); + struct nbl_chan_waitqueue_head *wait_head; + union nbl_chan_msg_id msgid = {{0}}; + struct nbl_chan_tx_param tx_param = {0}; + int i = NBL_CHAN_TX_WAIT_ACK_TIMES, resend_times = 0, ret = 0; + + if (chan_send->arg_len > NBL_CHAN_BUF_LEN - sizeof(struct nbl_chan_tx_desc)) + return -EINVAL; + + if (test_bit(NBL_CHAN_ABNORMAL, chan_info->state)) + return -EFAULT; + +resend: + spin_lock(&chan_info->txq_lock); + + ret = nbl_chan_get_msg_id(chan_info, &msgid); + if (ret) { + spin_unlock(&chan_info->txq_lock); + nbl_err(common, NBL_DEBUG_MBX, "Channel tx wait head full, send msgtype:%u to dstid:%u failed\n", + chan_send->msg_type, chan_send->dstid); + return ret; + } + + tx_param.msg_type = chan_send->msg_type; + tx_param.arg = chan_send->arg; + tx_param.arg_len = chan_send->arg_len; + tx_param.dstid = chan_send->dstid; + tx_param.msgid = msgid.id; + + ret = nbl_chan_update_txqueue(chan_mgt, chan_info, &tx_param); + if (ret) { + 
spin_unlock(&chan_info->txq_lock); + nbl_err(common, NBL_DEBUG_MBX, "Channel tx queue full, send msgtype:%u to dstid:%u failed\n", + chan_send->msg_type, chan_send->dstid); + return ret; + } + + wait_head = &chan_info->wait[msgid.info.loc]; + init_waitqueue_head(&wait_head->wait_queue); + wait_head->acked = 0; + wait_head->ack_data = chan_send->resp; + wait_head->ack_data_len = chan_send->resp_len; + wait_head->msg_type = chan_send->msg_type; + wait_head->need_waked = chan_send->ack; + wait_head->msg_index = msgid.info.index; + wait_head->status = chan_send->ack ? NBL_MBX_STATUS_WAITING : NBL_MBX_STATUS_IDLE; + + ret = nbl_chan_kick_tx_ring(chan_mgt, chan_info); + + spin_unlock(&chan_info->txq_lock); + + if (ret) { + wait_head->status = NBL_MBX_STATUS_TIMEOUT; + goto check_tx_dma_err; + } + + if (!chan_send->ack) + return 0; + + if (test_bit(NBL_CHAN_INTERRUPT_READY, chan_info->state)) { + ret = wait_event_timeout(wait_head->wait_queue, wait_head->acked, + NBL_CHAN_ACK_WAIT_TIME); + if (!ret) { + nbl_err(common, NBL_DEBUG_MBX, "Channel waiting ack failed, message type: %d, msg id: %u\n", + chan_send->msg_type, msgid.id); + wait_head->status = NBL_MBX_STATUS_TIMEOUT; + goto check_rx_dma_err; + } + + /* rmb for waithead ack */ + rmb(); + chan_send->ack_len = wait_head->ack_data_len; + wait_head->status = NBL_MBX_STATUS_IDLE; + return wait_head->ack_err; + } + + /*polling wait mailbox ack*/ + while (i--) { + nbl_chan_clean_queue(chan_mgt, chan_info); + + if (wait_head->acked) { + chan_send->ack_len = wait_head->ack_data_len; + wait_head->status = NBL_MBX_STATUS_IDLE; + return wait_head->ack_err; + } + usleep_range(NBL_CHAN_TX_WAIT_ACK_US_MIN, NBL_CHAN_TX_WAIT_ACK_US_MAX); + } + + wait_head->status = NBL_MBX_STATUS_TIMEOUT; + nbl_err(common, NBL_DEBUG_MBX, "Channel polling ack failed, message type: %d msg id: %u\n", + chan_send->msg_type, msgid.id); + +check_rx_dma_err: + if (nbl_chan_check_dma_err(chan_mgt, chan_info->chan_type, false)) { + nbl_err(common, NBL_DEBUG_MBX, "nbl channel rx dma error\n"); + nbl_chan_reset_queue(chan_mgt, chan_info->chan_type, false); + chan_info->rxq_reset_times++; + } + +check_tx_dma_err: + if (nbl_chan_check_dma_err(chan_mgt, chan_info->chan_type, true)) { + nbl_err(common, NBL_DEBUG_MBX, "nbl channel tx dma error\n"); + nbl_chan_reset_queue(chan_mgt, chan_info->chan_type, true); + chan_info->txq_reset_times++; + } + + if (++resend_times >= NBL_CHAN_RESEND_MAX_TIMES) { + nbl_err(common, NBL_DEBUG_MBX, "nbl channel resend_times %d\n", resend_times); + return -EFAULT; + } + + i = NBL_CHAN_TX_WAIT_ACK_TIMES; + goto resend; +} + +static int nbl_chan_send_ack(void *priv, struct nbl_chan_ack_info *chan_ack) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_send_info chan_send; + u32 *tmp; + u32 len = 3 * sizeof(u32) + chan_ack->data_len; + + tmp = kzalloc(len, GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + + tmp[0] = chan_ack->msg_type; + tmp[1] = chan_ack->msgid; + tmp[2] = (u32)chan_ack->err; + if (chan_ack->data && chan_ack->data_len) + memcpy(&tmp[3], chan_ack->data, chan_ack->data_len); + + NBL_CHAN_SEND(chan_send, chan_ack->dstid, NBL_CHAN_MSG_ACK, tmp, len, NULL, 0, 0); + nbl_chan_send_msg(chan_mgt, &chan_send); + kfree(tmp); + + return 0; +} + +static int nbl_chan_register_msg(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + int ret; + + ret = nbl_chan_add_msg_handler(chan_mgt, msg_type, func, callback_priv); + + return ret; +} + 
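+/* Note on the ack wire format: nbl_chan_send_ack() builds, and
+ * nbl_chan_recv_ack_msg() parses, a payload that starts with three u32
+ * words (the original message type, the 16-bit message id widened to a
+ * u32, and the error code), followed by the optional response data.
+ */
+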
+static bool nbl_chan_check_queue_exist(void *priv, u8 chan_type)
+{
+	struct nbl_channel_mgt *chan_mgt;
+	struct nbl_chan_info *chan_info;
+
+	if (!priv)
+		return false;
+
+	chan_mgt = (struct nbl_channel_mgt *)priv;
+	chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type);
+
+	return chan_info ? true : false;
+}
+
+static int nbl_chan_dump_txq(void *priv, struct seq_file *m, u8 type)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_chan_info *chan_info = type == NBL_CHAN_TYPE_MAILBOX ?
+					  NBL_CHAN_MGT_TO_MAILBOX(chan_mgt) :
+					  NBL_CHAN_MGT_TO_ADMINQ(chan_mgt);
+	struct nbl_chan_ring *txq = &chan_info->txq;
+	struct nbl_chan_waitqueue_head *wait;
+	struct nbl_chan_tx_desc *desc;
+	int i;
+
+	seq_printf(m, "q_base_addr:%llx, txq size:%u, next_to_use:%u, tail_ptr:%u, "
+		   "next_to_clean:%u\n", txq->dma,
+		   chan_info->num_txq_entries, txq->next_to_use, txq->tail_ptr, txq->next_to_clean);
+	seq_printf(m, "reset times %d\n", chan_info->txq_reset_times);
+
+	for (i = 0; i < chan_info->num_txq_entries; i++) {
+		desc = NBL_CHAN_TX_RING_TO_DESC(txq, i);
+		wait = &chan_info->wait[i];
+		seq_printf(m, "%u: flags 0x%x, srcid %u, dstid %u, data_len %u,"
+			   " buf_len %u, msg_type %u, msgid %u, ", i,
+			   desc->flags, desc->srcid, desc->dstid,
+			   desc->data_len, desc->buf_len, desc->msg_type, desc->msgid);
+		seq_printf(m, "acked %u, ack_err %u, ack_data_len %u,"
+			   " need_waked %u, msg_type %u\n", wait->acked, wait->ack_err,
+			   wait->ack_data_len, wait->need_waked, wait->msg_type);
+	}
+
+	return 0;
+}
+
+static int nbl_chan_dump_rxq(void *priv, struct seq_file *m, u8 type)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_chan_info *chan_info = type == NBL_CHAN_TYPE_MAILBOX ?
+					  NBL_CHAN_MGT_TO_MAILBOX(chan_mgt) :
+					  NBL_CHAN_MGT_TO_ADMINQ(chan_mgt);
+	struct nbl_chan_ring *rxq = &chan_info->rxq;
+	struct nbl_chan_rx_desc *rx_desc;
+	struct nbl_chan_tx_desc *tx_desc;
+	struct nbl_chan_buf *rx_buf;
+	int i;
+
+	seq_printf(m, "q_base_addr:%llx, rxq size:%u, next_to_use:%u, tail_ptr:%u, "
+		   "next_to_clean:%u\n", rxq->dma,
+		   chan_info->num_rxq_entries, rxq->next_to_use, rxq->tail_ptr, rxq->next_to_clean);
+	seq_printf(m, "reset times %d\n", chan_info->rxq_reset_times);
+	for (i = 0; i < chan_info->num_rxq_entries; i++) {
+		rx_desc = NBL_CHAN_RX_RING_TO_DESC(rxq, i);
+		rx_buf = NBL_CHAN_RX_RING_TO_BUF(rxq, i);
+		tx_desc = (struct nbl_chan_tx_desc *)rx_buf->va;
+		seq_printf(m, "%u: rx_desc flags 0x%x, buf_len 0x%x, buf_id 0x%x, buffer_addr 0x%llx, "
+			   "tx_desc srcid %u, dstid %u, data_len %u, buf_len %u, msg_type %u, msgid %u\n",
+			   i, rx_desc->flags, rx_desc->buf_len, rx_desc->buf_id, rx_desc->buf_addr,
+			   tx_desc->srcid, tx_desc->dstid, tx_desc->data_len, tx_desc->buf_len,
+			   tx_desc->msg_type, tx_desc->msgid);
+	}
+
+	return 0;
+}
+
+static u32 nbl_chan_get_adminq_tx_buf_size(void *priv)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_chan_info *adminq = NBL_CHAN_MGT_TO_ADMINQ(chan_mgt);
+
+	return adminq->txq_buf_size;
+}
+
+static int nbl_chan_set_listener_info(void *priv, void *shm_ring, struct eventfd_ctx *eventfd)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+
+	mutex_lock(&chan_mgt->notify->lock);
+
+	chan_mgt->notify->shm_msg_ring = shm_ring;
+	if (chan_mgt->notify->eventfd)
+		eventfd_ctx_put(chan_mgt->notify->eventfd);
+	chan_mgt->notify->eventfd = eventfd;
+
+	mutex_unlock(&chan_mgt->notify->lock);
+
+	return 0;
+}
+
+static int nbl_chan_set_listener_msgtype(void *priv, int
msgtype) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + + if (msgtype >= NBL_CHAN_MSG_MAILBOX_MAX) + return -EINVAL; + + mutex_lock(&chan_mgt->notify->lock); + set_bit(msgtype, chan_mgt->notify->msgtype); + mutex_unlock(&chan_mgt->notify->lock); + + return 0; +} + +static void nbl_chan_clear_listener_info(void *priv) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + + mutex_lock(&chan_mgt->notify->lock); + if (chan_mgt->notify->eventfd) + eventfd_ctx_put(chan_mgt->notify->eventfd); + chan_mgt->notify->eventfd = NULL; + + bitmap_zero(chan_mgt->notify->msgtype, NBL_CHAN_MSG_MAILBOX_MAX); + if (chan_mgt->notify->shm_msg_ring) + memset(chan_mgt->notify->shm_msg_ring, 0, NBL_USER_DEV_SHMMSGRING_SIZE); + mutex_unlock(&chan_mgt->notify->lock); +} + +static void nbl_chan_keepalive_resp(void *priv, u16 srcid, u16 msgid, void *data, u32 data_len) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_ack_info chan_ack; + + NBL_CHAN_ACK(chan_ack, srcid, NBL_CHAN_MSG_KEEP_ALIVE, msgid, 0, NULL, 0); + + nbl_chan_send_ack(chan_mgt, &chan_ack); +} + +static void nbl_chan_keepalive(struct delayed_work *work) +{ + struct nbl_chan_keepalive_info *keepalive = + container_of(work, struct nbl_chan_keepalive_info, keepalive_task); + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)keepalive->chan_mgt; + struct nbl_chan_send_info chan_send; + u32 delay_time; + + NBL_CHAN_SEND(chan_send, keepalive->keepalive_dest, NBL_CHAN_MSG_KEEP_ALIVE, + NULL, 0, NULL, 0, 1); + + if (nbl_chan_send_msg(chan_mgt, &chan_send)) { + if (keepalive->fail_cnt < NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH) + keepalive->fail_cnt++; + + if (keepalive->fail_cnt >= NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH && + keepalive->timeout < NBL_CHAN_KEEPALIVE_MAX_TIMEOUT) { + get_random_bytes(&delay_time, sizeof(delay_time)); + keepalive->timeout += delay_time % NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP; + + keepalive->fail_cnt = 0; + } + } else { + if (keepalive->success_cnt < NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH) + keepalive->success_cnt++; + + if (keepalive->success_cnt >= NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH && + keepalive->timeout > NBL_CHAN_KEEPALIVE_DEFAULT_TIMEOUT * 2) { + get_random_bytes(&delay_time, sizeof(delay_time)); + keepalive->timeout -= delay_time % NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP; + + keepalive->success_cnt = 0; + } + } + + nbl_common_queue_delayed_work_keepalive(work, jiffies_to_msecs(keepalive->timeout)); +} + +static int nbl_chan_setup_keepalive(void *priv, u16 dest_id, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + struct nbl_chan_keepalive_info *keepalive = &chan_info->keepalive; + u32 delay_time; + + get_random_bytes(&delay_time, sizeof(delay_time)); + delay_time = delay_time % NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP; + + keepalive->timeout = NBL_CHAN_KEEPALIVE_DEFAULT_TIMEOUT + delay_time; + keepalive->chan_mgt = chan_mgt; + keepalive->keepalive_dest = dest_id; + keepalive->success_cnt = 0; + keepalive->fail_cnt = 0; + + nbl_chan_add_msg_handler(chan_mgt, NBL_CHAN_MSG_KEEP_ALIVE, + nbl_chan_keepalive_resp, chan_mgt); + + nbl_common_alloc_delayed_task(&keepalive->keepalive_task, nbl_chan_keepalive); + keepalive->task_setuped = true; + + nbl_common_queue_delayed_work_keepalive(&keepalive->keepalive_task, + jiffies_to_msecs(keepalive->timeout)); + + return 0; +} + +static void 
nbl_chan_remove_keepalive(void *priv, u8 chan_type) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + if (!chan_info->keepalive.task_setuped) + return; + + nbl_common_release_delayed_task(&chan_info->keepalive.keepalive_task); + chan_info->keepalive.task_setuped = false; +} + +static void nbl_chan_register_chan_task(void *priv, u8 chan_type, struct work_struct *task) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + chan_info->clean_task = task; +} + +static void nbl_chan_set_queue_state(void *priv, enum nbl_chan_state state, u8 chan_type, u8 set) +{ + struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv; + struct nbl_chan_info *chan_info = NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type); + + if (set) + set_bit(state, chan_info->state); + else + clear_bit(state, chan_info->state); +} + +static struct nbl_channel_ops chan_ops = { + .send_msg = nbl_chan_send_msg, + .send_ack = nbl_chan_send_ack, + .register_msg = nbl_chan_register_msg, + .cfg_chan_qinfo_map_table = nbl_chan_cfg_qinfo_map_table, + .check_queue_exist = nbl_chan_check_queue_exist, + .setup_queue = nbl_chan_setup_queue, + .teardown_queue = nbl_chan_teardown_queue, + .clean_queue_subtask = nbl_chan_clean_queue_subtask, + + /* for mailbox register msg for userdev */ + .set_listener_info = nbl_chan_set_listener_info, + .set_listener_msgtype = nbl_chan_set_listener_msgtype, + .clear_listener_info = nbl_chan_clear_listener_info, + .dump_txq = nbl_chan_dump_txq, + .dump_rxq = nbl_chan_dump_rxq, + .get_adminq_tx_buf_size = nbl_chan_get_adminq_tx_buf_size, + + .init_cmdq = nbl_chan_cmdq_mgt_start, + .deinit_cmdq = nbl_chan_cmdq_mgt_stop, + .send_cmd = nbl_chan_send_cmdq, + + .setup_keepalive = nbl_chan_setup_keepalive, + .remove_keepalive = nbl_chan_remove_keepalive, + .register_chan_task = nbl_chan_register_chan_task, + .set_queue_state = nbl_chan_set_queue_state, +}; + +static int nbl_chan_setup_chan_mgt(struct nbl_adapter *adapter, + struct nbl_init_param *param, + struct nbl_channel_mgt_leonis **chan_mgt_leonis) +{ + struct device *dev; + struct nbl_common_info *common; + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_chan_info *mailbox; + struct nbl_chan_info *adminq = NULL; + int ret; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + phy_ops_tbl = NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + + *chan_mgt_leonis = devm_kzalloc(dev, sizeof(struct nbl_channel_mgt_leonis), GFP_KERNEL); + if (!*chan_mgt_leonis) + goto alloc_channel_mgt_leonis_fail; + + NBL_CHAN_MGT_TO_COMMON(&(*chan_mgt_leonis)->chan_mgt) = common; + (*chan_mgt_leonis)->chan_mgt.phy_ops_tbl = phy_ops_tbl; + + mailbox = devm_kzalloc(dev, sizeof(struct nbl_chan_info), GFP_KERNEL); + if (!mailbox) + goto alloc_mailbox_fail; + mailbox->chan_type = NBL_CHAN_TYPE_MAILBOX; + NBL_CHAN_MGT_TO_MAILBOX(&(*chan_mgt_leonis)->chan_mgt) = mailbox; + + if (param->caps.has_ctrl || param->caps.has_factory_ctrl) { + adminq = devm_kzalloc(dev, sizeof(struct nbl_chan_info), GFP_KERNEL); + if (!adminq) + goto alloc_adminq_fail; + adminq->chan_type = NBL_CHAN_TYPE_ADMINQ; + NBL_CHAN_MGT_TO_ADMINQ(&(*chan_mgt_leonis)->chan_mgt) = adminq; + } + + ret = nbl_chan_init_msg_handler(&(*chan_mgt_leonis)->chan_mgt, param->caps.has_user); + if (ret) + goto init_chan_msg_handle; + + return 0; + +init_chan_msg_handle: + if (adminq) + 
devm_kfree(dev, adminq); +alloc_adminq_fail: + devm_kfree(dev, mailbox); +alloc_mailbox_fail: + devm_kfree(dev, *chan_mgt_leonis); + *chan_mgt_leonis = NULL; +alloc_channel_mgt_leonis_fail: + return -ENOMEM; +} + +static void nbl_chan_remove_chan_mgt(struct nbl_common_info *common, + struct nbl_channel_mgt_leonis **chan_mgt_leonis) +{ + struct device *dev = NBL_COMMON_TO_DEV(common); + + nbl_chan_remove_msg_handler(&(*chan_mgt_leonis)->chan_mgt); + if (NBL_CHAN_MGT_TO_ADMINQ(&(*chan_mgt_leonis)->chan_mgt)) + devm_kfree(dev, NBL_CHAN_MGT_TO_ADMINQ(&(*chan_mgt_leonis)->chan_mgt)); + devm_kfree(dev, NBL_CHAN_MGT_TO_MAILBOX(&(*chan_mgt_leonis)->chan_mgt)); + + /* check and remove command queue */ + if ((*chan_mgt_leonis)->chan_mgt.cmdq_mgt) + nbl_chan_cmdq_mgt_stop(dev, &(*chan_mgt_leonis)->chan_mgt, + common->tc_inst_id); + + devm_kfree(dev, *chan_mgt_leonis); + *chan_mgt_leonis = NULL; +} + +static void nbl_chan_remove_ops(struct device *dev, struct nbl_channel_ops_tbl **chan_ops_tbl) +{ + if (!dev || !chan_ops_tbl) + return; + + devm_kfree(dev, *chan_ops_tbl); + *chan_ops_tbl = NULL; +} + +static int nbl_chan_setup_ops(struct device *dev, struct nbl_channel_ops_tbl **chan_ops_tbl, + struct nbl_channel_mgt_leonis *chan_mgt) +{ + int ret; + *chan_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_channel_ops_tbl), GFP_KERNEL); + if (!*chan_ops_tbl) + return -ENOMEM; + + NBL_CHAN_OPS_TBL_TO_OPS(*chan_ops_tbl) = &chan_ops; + NBL_CHAN_OPS_TBL_TO_PRIV(*chan_ops_tbl) = chan_mgt; + + if (!chan_mgt) + return 0; + + ret = nbl_chan_add_msg_handler(&chan_mgt->chan_mgt, NBL_CHAN_MSG_ACK, + nbl_chan_recv_ack_msg, chan_mgt); + if (ret) + goto err; + + return 0; + +err: + devm_kfree(dev, *chan_ops_tbl); + *chan_ops_tbl = NULL; + + return -1; +} + +int nbl_chan_init_common(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_channel_mgt_leonis **chan_mgt_leonis; + struct nbl_channel_ops_tbl **chan_ops_tbl; + int ret = 0; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + chan_mgt_leonis = (struct nbl_channel_mgt_leonis **)&NBL_ADAPTER_TO_CHAN_MGT(adapter); + chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + ret = nbl_chan_setup_chan_mgt(adapter, param, chan_mgt_leonis); + if (ret) + goto setup_mgt_fail; + + ret = nbl_chan_setup_ops(dev, chan_ops_tbl, *chan_mgt_leonis); + if (ret) + goto setup_ops_fail; + + return 0; + +setup_ops_fail: + nbl_chan_remove_chan_mgt(common, chan_mgt_leonis); +setup_mgt_fail: + return ret; +} + +void nbl_chan_remove_common(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_channel_mgt_leonis **chan_mgt_leonis; + struct nbl_channel_ops_tbl **chan_ops_tbl; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + chan_mgt_leonis = (struct nbl_channel_mgt_leonis **)&NBL_ADAPTER_TO_CHAN_MGT(adapter); + chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + nbl_chan_remove_chan_mgt(common, chan_mgt_leonis); + nbl_chan_remove_ops(dev, chan_ops_tbl); +} + +int nbl_chan_init_bootis(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_channel_ops_tbl **chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + /* if no chan cap, also alloc chan_ops_tbl. 
other layer can call chan_ops->get_queue_cap */ + *chan_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_channel_ops_tbl), GFP_KERNEL); + if (!*chan_ops_tbl) + return -ENOMEM; + + nbl_chan_setup_ops(dev, chan_ops_tbl, NULL); + + return 0; +} + +void nbl_chan_remove_bootis(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + + devm_kfree(dev, NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter)); + NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter) = NULL; +} + +int nbl_chan_init_virtio(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_channel_ops_tbl **chan_ops_tbl = &NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + /* if no chan cap, also alloc chan_ops_tbl. other layer can call chan_ops->get_queue_cap */ + *chan_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_channel_ops_tbl), GFP_KERNEL); + if (!*chan_ops_tbl) + return -ENOMEM; + + nbl_chan_setup_ops(dev, chan_ops_tbl, NULL); + + return 0; +} + +void nbl_chan_remove_virtio(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + + devm_kfree(dev, NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter)); + NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter) = NULL; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..90aea419412b42038d44b99599319b811db2d2da --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_channel.h @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_CHANNEL_H_ +#define _NBL_CHANNEL_H_ + +#include "nbl_core.h" + +#define NBL_CHAN_MGT_TO_COMMON(chan_mgt) ((chan_mgt)->common) +#define NBL_CHAN_MGT_TO_DEV(chan_mgt) NBL_COMMON_TO_DEV(NBL_CHAN_MGT_TO_COMMON(chan_mgt)) +#define NBL_CHAN_MGT_TO_PHY_OPS_TBL(chan_mgt) ((chan_mgt)->phy_ops_tbl) +#define NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt) (NBL_CHAN_MGT_TO_PHY_OPS_TBL(chan_mgt)->ops) +#define NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt) (NBL_CHAN_MGT_TO_PHY_OPS_TBL(chan_mgt)->priv) +#define NBL_CHAN_MGT_TO_MAILBOX(chan_mgt) ((chan_mgt)->chan_info[NBL_CHAN_TYPE_MAILBOX]) +#define NBL_CHAN_MGT_TO_ADMINQ(chan_mgt) ((chan_mgt)->chan_info[NBL_CHAN_TYPE_ADMINQ]) +#define NBL_CHAN_MGT_TO_CHAN_INFO(chan_mgt, chan_type) ((chan_mgt)->chan_info[chan_type]) + +#define NBL_CHAN_TX_RING_TO_DESC(tx_ring, i) \ + (&(((struct nbl_chan_tx_desc *)((tx_ring)->desc))[i])) +#define NBL_CHAN_RX_RING_TO_DESC(rx_ring, i) \ + (&(((struct nbl_chan_rx_desc *)((rx_ring)->desc))[i])) +#define NBL_CHAN_TX_RING_TO_BUF(tx_ring, i) (&(((tx_ring)->buf)[i])) +#define NBL_CHAN_RX_RING_TO_BUF(rx_ring, i) (&(((rx_ring)->buf)[i])) + +#define NBL_CHAN_GET_INFO(chan_mgt, id) \ +({ \ + typeof(chan_mgt) _chan_mgt = (chan_mgt); \ + ((id) == NBL_CHAN_ADMINQ_FUNCTION_ID && NBL_CHAN_MGT_TO_ADMINQ(_chan_mgt) ? 
\ + NBL_CHAN_MGT_TO_ADMINQ(_chan_mgt) : NBL_CHAN_MGT_TO_MAILBOX(_chan_mgt)); \ +}) + +#define NBL_CHAN_TX_WAIT_US 100 +#define NBL_CHAN_TX_REKICK_WAIT_TIMES 2000 +#define NBL_CHAN_TX_WAIT_TIMES 10000 + +#define NBL_CHAN_TX_WAIT_ACK_US_MIN 100 +#define NBL_CHAN_TX_WAIT_ACK_US_MAX 120 +#define NBL_CHAN_TX_WAIT_ACK_TIMES 50000 + +#define NBL_CHAN_QUEUE_LEN 256 +#define NBL_CHAN_BUF_LEN 4096 + +#define NBL_CHAN_TX_DESC_EMBEDDED_DATA_LEN 16 +#define NBL_CHAN_RESEND_MAX_TIMES (3) + +#define NBL_CHAN_TX_DESC_AVAIL BIT(0) +#define NBL_CHAN_TX_DESC_USED BIT(1) +#define NBL_CHAN_RX_DESC_WRITE BIT(1) +#define NBL_CHAN_RX_DESC_AVAIL BIT(3) +#define NBL_CHAN_RX_DESC_USED BIT(4) + +#define NBL_CHAN_ACK_WAIT_TIME (2 * HZ) + +/* adminq */ +#define NBL_ADMINQ_QUEUE_LEN 256 +#define NBL_ADMINQ_BUF_LEN 4096 + +#define NBL_CHAN_HANDLER_TBL_BUCKET_SIZE 512 + +enum { + NBL_MB_RX_QID = 0, + NBL_MB_TX_QID = 1, +}; + +enum { + NBL_MBX_STATUS_IDLE = 0, + NBL_MBX_STATUS_WAITING, + NBL_MBX_STATUS_TIMEOUT = -1, +}; + +struct nbl_chan_tx_param { + enum nbl_chan_msg_type msg_type; + void *arg; + size_t arg_len; + u16 dstid; + u16 msgid; +}; + +struct nbl_chan_buf { + void *va; + dma_addr_t pa; + size_t size; +}; + +struct nbl_chan_tx_desc { + u16 flags; + u16 srcid; + u16 dstid; + u16 data_len; + u16 buf_len; + u64 buf_addr; + u16 msg_type; + u8 data[16]; + u16 msgid; + u8 rsv[26]; +} __packed; + +struct nbl_chan_rx_desc { + u16 flags; + u32 buf_len; + u16 buf_id; + u64 buf_addr; +} __packed; + +struct nbl_chan_ring { + void *desc; + struct nbl_chan_buf *buf; + + u16 next_to_use; + u16 tail_ptr; + u16 next_to_clean; + + dma_addr_t dma; +}; + +#define NBL_CHAN_MSG_INDEX_MAX 64 +#define NBL_CHAN_MSG_LOC_MAX 1024 + +union nbl_chan_msg_id { + struct nbl_chan_msg_id_info { + u16 index:6; + u16 loc:10; + } info; + u16 id; +}; + +struct nbl_chan_waitqueue_head { + struct wait_queue_head wait_queue; + char *ack_data; + int acked; + int ack_err; + u16 ack_data_len; + u16 need_waked; + u16 msg_type; + u8 status; + u8 msg_index; +}; + +struct nbl_chan_notify_userdev { + DECLARE_BITMAP(msgtype, NBL_CHAN_MSG_MAILBOX_MAX); + struct mutex lock; /* used to protect eventfd and shm_msg_ring */ + struct eventfd_ctx *eventfd; + void *shm_msg_ring; +}; + +#define NBL_CHAN_KEEPALIVE_DEFAULT_TIMEOUT (10 * HZ) +#define NBL_CHAN_KEEPALIVE_MAX_TIMEOUT (1024 * HZ) +#define NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_GAP (10 * HZ) +#define NBL_CHAN_KEEPALIVE_TIMEOUT_UPDATE_THRESH (3) + +struct nbl_chan_keepalive_info { + struct delayed_work keepalive_task; + void *chan_mgt; + u32 timeout; + u16 keepalive_dest; + u8 success_cnt; + u8 fail_cnt; + bool task_setuped; + u8 resv[3]; +}; + +struct nbl_chan_info { + struct nbl_chan_ring txq; + struct nbl_chan_ring rxq; + struct nbl_chan_waitqueue_head *wait; + /* spinlock_t */ + spinlock_t txq_lock; + + struct work_struct *clean_task; + struct nbl_chan_keepalive_info keepalive; + + u16 wait_head_index; + + u16 num_txq_entries; + u16 num_rxq_entries; + u16 txq_buf_size; + u16 rxq_buf_size; + + u16 txq_reset_times; + u16 rxq_reset_times; + + DECLARE_BITMAP(state, NBL_CHAN_STATE_NBITS); + + u8 chan_type; +}; + +struct nbl_chan_msg_node_data { + nbl_chan_resp func; + void *priv; +}; + +struct nbl_channel_mgt { + struct nbl_common_info *common; + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_chan_info *chan_info[NBL_CHAN_TYPE_MAX]; + struct nbl_cmdq_mgt *cmdq_mgt; + struct nbl_chan_notify_userdev *notify; + void *handle_hash_tbl; +}; + +/* Mgt structure for each product. 
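+ * A "product" here is a hardware variant such as leonis or bootis.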
+ * Every individual mgt must have the common mgt as its first member, and contain its unique
+ * data structure in the rest of it.
+ */
+struct nbl_channel_mgt_leonis {
+	struct nbl_channel_mgt chan_mgt;
+};
+
+struct nbl_channel_mgt_bootis {
+	struct nbl_channel_mgt chan_mgt;
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c
new file mode 100644
index 0000000000000000000000000000000000000000..ed7560cfd8dd38fe06c7bb49509095dcd7293581
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Nebula-matrix DPDK user-network
+ * Copyright(c) 2021-2030 nbl, Inc.
+ */
+
+#include 
+#include "nbl_cmdq.h"
+
+static u8 g_seq_index;
+spinlock_t nbl_tc_flow_inst_lock; /* used to protect global instance resources */
+
+static inline void *nbl_cmdq_alloc_dma_mem(struct device *dma_dev,
+					   struct nbl_cmdq_dma_mem *mem,
+					   u32 size) {
+	mem->size = size;
+	return dma_alloc_coherent(dma_dev, size, &mem->pa, GFP_KERNEL | __GFP_ZERO);
+}
+
+static inline void nbl_cmdq_free_dma_mem(struct device *dma_dev,
+					 struct nbl_cmdq_dma_mem *mem) {
+	dma_free_coherent(dma_dev, mem->size, mem->va, mem->pa);
+	mem->size = 0;
+	mem->va = NULL;
+	mem->pa = (dma_addr_t)0;
+}
+
+static inline void
+nbl_cmdq_free_queue_ring(struct device *dma_dev, struct nbl_cmd_ring *ring)
+{
+	nbl_cmdq_free_dma_mem(dma_dev, &ring->desc);
+}
+
+/**
+ * @brief: allocate the DMA data buffers for the send ring
+ * @queue: pointer to the command queue
+ */
+static enum nbl_cmd_status
+nbl_cmdq_alloc_queue_bufs(const struct nbl_cmd_queue *queue,
+			  struct nbl_cmd_ring *ring)
+{
+	int i;
+	struct nbl_cmdq_dma_mem *bi;
+	struct nbl_channel_mgt *chan_mgt = queue->chan_mgt;
+	struct device *dma_dev = chan_mgt->common->dma_dev;
+
+	/* No mapped memory needed yet, just the buffer info structures */
+	ring->in_buffer_dma_head = kcalloc(queue->cmd_ring_depth, sizeof(struct nbl_cmdq_dma_mem),
+					   GFP_ATOMIC);
+	if (!ring->in_buffer_dma_head)
+		return -ENOMEM;
+
+	ring->in_buffer_info = (struct nbl_cmdq_dma_mem *)ring->in_buffer_dma_head;
+
+	/* allocate the mapped in buffers */
+	ring->in_mem.va = nbl_cmdq_alloc_dma_mem(dma_dev, &ring->in_mem,
+						 queue->sq_buf_size * queue->cmd_ring_depth);
+	if (!ring->in_mem.va)
+		goto dealloc_cmd_queue_in_bufs;
+
+	for (i = 0; i < queue->cmd_ring_depth; i++) {
+		bi = &ring->in_buffer_info[i];
+		bi->va = (char *)ring->in_mem.va + i * queue->sq_buf_size;
+		bi->pa = ring->in_mem.pa + i * queue->sq_buf_size;
+		bi->size = queue->sq_buf_size;
+	}
+
+	/* alloc dma_mem array for out buffers */
+	ring->out_buffer_dma_head = kcalloc(queue->cmd_ring_depth, sizeof(struct nbl_cmdq_dma_mem),
+					    GFP_ATOMIC);
+	if (!ring->out_buffer_dma_head)
+		goto dealloc_cmd_queue_out_bufs;
+
+	ring->out_buffer_info = (struct nbl_cmdq_dma_mem *)ring->out_buffer_dma_head;
+
+	/* allocate the mapped out buffers */
+	ring->out_mem.va = nbl_cmdq_alloc_dma_mem(dma_dev, &ring->out_mem,
+						  queue->sq_buf_size * queue->cmd_ring_depth);
+	if (!ring->out_mem.va)
+		goto dealloc_cmd_queue_out_bufs;
+
+	for (i = 0; i < queue->cmd_ring_depth; i++) {
+		bi = &ring->out_buffer_info[i];
+		bi->va = (char *)ring->out_mem.va + i * queue->sq_buf_size;
+		bi->pa = ring->out_mem.pa + i * queue->sq_buf_size;
+		bi->size = queue->sq_buf_size;
+	}
+
+	return NBL_CMDQ_SUCCESS;
+
+dealloc_cmd_queue_out_bufs:
+	ring->out_buffer_info = NULL;
+	kfree(ring->out_buffer_dma_head);
+	ring->out_buffer_dma_head = NULL;
+
+	
nbl_cmdq_free_dma_mem(dma_dev, &ring->in_mem); + for (i = 0; i < queue->cmd_ring_depth; i++) { + bi = &ring->in_buffer_info[i]; + bi->va = NULL; + bi->pa = 0; + bi->size = 0; + } + +dealloc_cmd_queue_in_bufs: + ring->in_buffer_info = NULL; + kfree(ring->in_buffer_dma_head); + ring->in_buffer_dma_head = NULL; + return -ENOMEM; +} + +/** + * @brief: allocate buffers for the send ring + * @cmd_queue: pointer to the command queue + */ +static enum nbl_cmd_status +nbl_cmdq_alloc_queue_ring(const struct nbl_cmd_queue *queue, + struct nbl_cmd_ring *ring) +{ + u32 size = queue->cmd_ring_depth * sizeof(struct nbl_cmd_desc); + struct nbl_channel_mgt *chan_mgt = queue->chan_mgt; + struct device *dma_dev = chan_mgt->common->dma_dev; + + ring->desc.va = nbl_cmdq_alloc_dma_mem(dma_dev, &ring->desc, size); + if (!ring->desc.va) + return -ENOMEM; + + return NBL_CMDQ_SUCCESS; +} + +/** + * @brief: free the buffer for the send ring + * @cmd_queue: pointer to the command queue + */ +static void +nbl_cmdq_free_queue_bufs(struct device *dma_dev, struct nbl_cmd_ring *ring) +{ + /* free in buffers */ + if (ring->in_mem.va) + nbl_cmdq_free_dma_mem(dma_dev, &ring->in_mem); + + /* free out buffers */ + if (ring->out_mem.va) + nbl_cmdq_free_dma_mem(dma_dev, &ring->out_mem); + + /* free in and out DMA rings */ + kfree(ring->in_buffer_dma_head); + kfree(ring->out_buffer_dma_head); +} + +/** + * @brief: init the send ring of command queue + * @hw: input, pointer to the hardware related properties + * @nbl_cmd_queue: pointer to the command queue + */ +static enum nbl_cmd_status +nbl_cmdq_init_sq_ring(struct nbl_cmd_queue *queue) +{ + enum nbl_cmd_status status; + struct nbl_cmd_ring *ring = &queue->sq_ring; + struct nbl_channel_mgt *chan_mgt = queue->chan_mgt; + struct device *dma_dev = chan_mgt->common->dma_dev; + + /* check if the queue is already initialized */ + if (ring->count > 0) { + status = NBL_CMDQ_NOT_READY; + goto init_cmd_queue_exit; + } + + status = nbl_cmdq_alloc_queue_ring(queue, ring); + if (status) + goto init_cmd_queue_exit; + + status = nbl_cmdq_alloc_queue_bufs(queue, ring); + if (status) + goto init_cmd_queue_free_rings; + + ring->next_to_use = 0; + ring->next_to_clean = 0; + ring->doorbell = 0; + + /* on success */ + ring->count = queue->cmd_ring_depth; + goto init_cmd_queue_exit; + +init_cmd_queue_free_rings: + nbl_cmdq_free_queue_bufs(dma_dev, ring); + nbl_cmdq_free_queue_ring(dma_dev, ring); + +init_cmd_queue_exit: + return status; +} + +static void +nbl_cmdq_init_queue_parameters(struct nbl_cmd_queue *cmd_queue) +{ + cmd_queue->sq_buf_size = NBL_CMDQ_BUF_SIZE; + cmd_queue->cmd_ring_depth = NBL_CMDQ_RING_DEPTH; + cmd_queue->sq_ring.count = 0; +} + +/** + * @brief: shutdown the queue, will free the ring + * @hw: input, pointer to the hardware related properties + */ +static enum nbl_cmd_status +nbl_cmdq_shutdown_queue(struct nbl_cmd_queue *queue, + struct nbl_cmd_ring *ring) +{ + struct nbl_channel_mgt *chan_mgt = queue->chan_mgt; + struct device *dma_dev = chan_mgt->common->dma_dev; + + /* reset cmd queue related registers */ + spin_lock(&queue->sq_lock); + ring->count = 0; + + /* free cmd queue ring */ + nbl_cmdq_free_queue_bufs(dma_dev, ring); + nbl_cmdq_free_queue_ring(dma_dev, ring); + + spin_unlock(&queue->sq_lock); + return NBL_CMDQ_SUCCESS; +} + +static inline enum nbl_cmd_status +nbl_cmdq_check_queue(const struct nbl_cmd_ring *ring, const struct nbl_common_info *common) +{ + if (!ring->count) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq not initialized yet."); + return 
NBL_CMDQ_CQ_NOT_READY; + } + + return NBL_CMDQ_SUCCESS; +} + +static enum nbl_cmd_status +nbl_cmdq_destroy_queue(struct nbl_cmd_queue *queue) +{ + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + struct nbl_cmd_ring *ring = &queue->sq_ring; + struct nbl_common_info *common = queue->chan_mgt->common; + + /* check queue status, abort destroy if queue not ready */ + status = nbl_cmdq_check_queue(ring, common); + if (status == NBL_CMDQ_CQ_NOT_READY) + return status; + + /* shutdown queue */ + return nbl_cmdq_shutdown_queue(queue, ring); +} + +static inline bool +nbl_cmdq_flag_check_cmd_done(const struct nbl_cmd_desc *desc) { + return (desc->flags & NBL_CMDQ_DESC_FLAG_DONE); +} + +/** + * @brief: free command queue ring and return free count + * @cmd_queue: input, pointer to the hardware related properties + * @return: number of free desc in the queue + */ +static enum nbl_cmd_status +nbl_cmdq_clean_sq_ring(struct nbl_cmd_queue *cmd_queue) +{ + struct nbl_cmd_ring *ring = &cmd_queue->sq_ring; + u16 ntc = ring->next_to_clean; + struct nbl_cmd_desc *desc = NBL_CMDQ_GET_DESC(*ring, ntc); + + while (1) { + if (nbl_cmdq_flag_check_cmd_done(desc)) + memset(desc, 0, sizeof(*desc)); + else + break; + + ntc++; + if (ntc == ring->count) + ntc = 0; + + /* next descriptor */ + desc = NBL_CMDQ_GET_DESC(*ring, ntc); + } + + desc = NULL; + ring->next_to_clean = ntc; + return (ring->next_to_clean > ring->next_to_use ? 0 : ring->count) + + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * @brief: check the command queue to see if command processed + * @desc: input, pointer to the hardware related properties + * @desc: use this descriptor to check the DD bit + */ +static inline bool +nbl_cmdq_flag_check_dd(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_DD); +} + +static inline bool +nbl_cmdq_flag_check_out_buffer(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_BUF_OUT); +} + +static inline bool +nbl_cmdq_flag_check_error(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_ERR); +} + +static inline bool +nbl_cmdq_flag_check_hit(const struct nbl_cmd_desc *desc) +{ + return (desc->flags & NBL_CMDQ_DESC_FLAG_HIT); +} + +static inline void +nbl_cmdq_flag_mark_cmd_done(struct nbl_cmd_desc *desc) { + desc->flags |= NBL_CMDQ_DESC_FLAG_DONE; +} + +static inline bool +nbl_cmdq_flag_check_interface_error(struct nbl_cmd_desc *desc) { + return (desc->flags & NBL_CMDQ_DESC_FLAG_IF_ERR); +} + +static enum nbl_cmd_status +nbl_cmdq_execution_nolock(struct nbl_cmd_queue *queue, + struct nbl_cmd_ring *ring, + const struct nbl_cmd_hdr *hdr, + struct nbl_cmd_desc *desc, + const struct nbl_cmd_content *cmd) +{ + struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(queue->chan_mgt); + struct nbl_common_info *common = queue->chan_mgt->common; + + /* clean the cmd send queue to reclaim descriptors */ + if (nbl_cmdq_clean_sq_ring(queue) == 0) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmd send queue full!"); + return NBL_CMDQ_CQ_FULL; + } + + /* fill descriptor */ + desc->block = cpu_to_le16(hdr->block); + desc->module = cpu_to_le16(hdr->module); + desc->table = cpu_to_le16(hdr->table); + desc->opcode = cpu_to_le16(hdr->opcode); + desc->param_high = cpu_to_le32(NBL_CMDQ_HI_DWORD(cmd->in_params)); + desc->param_low = cpu_to_le32(NBL_CMDQ_LO_DWORD(cmd->in_params)); + desc->flags = 0; + desc->seq = g_seq_index++; + if (g_seq_index == 16) + g_seq_index = 0; + + /* data to send */ + if (cmd->in_va && cmd->in) { + desc->datalen = 
cmd->in_length + NBL_CMDQ_HALF_DESC_LENGTH; + desc->flags |= cpu_to_le16(NBL_CMDQ_DESC_FLAG_BUF_IN); + desc->send_high = cpu_to_le32(NBL_CMDQ_HI_DWORD(cmd->in)); + desc->send_low = cpu_to_le32(NBL_CMDQ_LO_DWORD(cmd->in)); + } + + /* data to receive */ + if (cmd->out_va && cmd->out) { + desc->flags |= cpu_to_le16(NBL_CMDQ_DESC_FLAG_BUF_OUT); + desc->recv_high = cpu_to_le32(NBL_CMDQ_HI_DWORD(cmd->out)); + desc->recv_low = cpu_to_le32(NBL_CMDQ_LO_DWORD(cmd->out)); + } + + /* update next_to_use */ + (ring->next_to_use)++; + (ring->doorbell)++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + /* wmb for cmdq notify */ + wmb(); + phy_ops->update_cmdq_tail(NBL_CHAN_MGT_TO_PHY_PRIV(queue->chan_mgt), + (ring->doorbell) & NBL_CMDQ_DOORBELL_MASK); + return NBL_CMDQ_SUCCESS; +} + +static inline enum nbl_cmd_status +nbl_cmdq_check_content(const struct nbl_cmd_queue *queue, + const struct nbl_cmd_hdr *hdr, + const struct nbl_cmd_content *cmd) +{ + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + + if ((cmd->in_va && !cmd->in_length) || + (!cmd->in_va && cmd->in_length) || + (cmd->in_va && cmd->in_length > queue->sq_buf_size)) { + status = NBL_CMDQ_CQ_ERR_PARAMS; + } + + /* check parameters: the receiving part */ + if ((hdr->opcode == NBL_CMD_OP_READ || + hdr->opcode == NBL_CMD_OP_SEARCH) && !cmd->out_va) + status = NBL_CMDQ_CQ_ERR_PARAMS; + + return status; +} + +static inline enum nbl_cmd_status +nbl_cmdq_check_interface_error(struct nbl_cmd_desc *desc, + struct nbl_common_info *common) +{ + u8 interface_err = 0; + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + + /* flag error bit: error in firmware cmdq interface */ + if (nbl_cmdq_flag_check_interface_error(desc)) { + /* mark current desc as done by driver */ + nbl_cmdq_flag_mark_cmd_done(desc); + + status = NBL_CMDQ_FAILED; + interface_err = (desc->flags >> NBL_CMDQ_DESC_FLAG_IF_ERR_OFT) & + NBL_CMDQ_DESC_FLAG_IF_ERR_MASK; + switch (interface_err) { + case 0b00: + /* dma error, re-send command */ + /* abort if failed sending command 3 times in a row */ + status = NBL_CMDQ_NEED_RESEND; + break; + case 0b01: + /* driver data error, dont re-send */ + status = NBL_CMDQ_NOBUF_ERR; + break; + case 0b10: + case 0b11: + /* firmware sequence error, reset cmdq */ + status = NBL_CMDQ_NEED_RESET; + break; + default: + /* unknown error */ + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow cmdq unknown error from firmware interface"); + break; + } + } + + return status; +} + +static enum nbl_cmd_status +nbl_cmdq_fetch_response(struct nbl_cmd_queue *queue, struct nbl_cmd_desc *desc, + struct nbl_cmd_content *cmd, struct nbl_cmdq_dma_mem *buffer) +{ + u8 error_code; + const char *buf_start; + enum nbl_cmd_status status = NBL_CMDQ_SUCCESS; + struct nbl_common_info *common = queue->chan_mgt->common; + + /* check descriptor flag error bit for firmware business */ + if (nbl_cmdq_flag_check_error(desc)) { + status = NBL_CMDQ_FAILED; + error_code = desc->errorcode; + if (error_code) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq error code: %d", + error_code); + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow cmdq desc error in flag but no errorcode"); + } + + goto fetch_response_end; + } + + /* check return buffer flag bit */ + if (cmd->out_va && cmd->out && !nbl_cmdq_flag_check_out_buffer(desc)) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq response buffer bit not matched"); + status = NBL_CMDQ_NOBUF_ERR; + goto fetch_response_end; + } + + /* process out buffer */ + if (cmd->out_va && cmd->out && buffer) { + cmd->out_length = 
le16_to_cpu(desc->datalen) - NBL_CMDQ_HALF_DESC_LENGTH;
+		if (cmd->out_length > queue->sq_buf_size) {
+			nbl_err(common, NBL_DEBUG_FLOW,
+				"tc flow cmdq buffer larger than allowed.\n");
+			status = NBL_CMDQ_CQ_ERR_BUFFER;
+			goto fetch_response_end;
+		}
+
+		if ((desc->opcode == NBL_CMD_OP_READ ||
+		     desc->opcode == NBL_CMD_OP_SEARCH) && cmd->out_va) {
+			buf_start = (char *)buffer->va + NBL_CMDQ_HALF_DESC_LENGTH;
+			memcpy(cmd->out_va, buf_start, cmd->out_length);
+		}
+	}
+
+fetch_response_end:
+	queue->sq_last_status = status;
+	return status;
+}
+
+/**
+ * @brief: send command to firmware, the sync version, will block and wait
+ * for response.
+ * @priv: pointer to the channel management structure
+ * @hdr: command header, including register block, module, table and opcode
+ * @cmd: command content, including input and output
+ */
+static enum nbl_cmd_status
+nbl_cmdq_do_send(void *priv, const struct nbl_cmd_hdr *hdr,
+		 struct nbl_cmd_content *cmd)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_cmdq_mgt *cmdq_mgt = chan_mgt->cmdq_mgt;
+	bool hit = false;
+	bool completed = false;
+	u32 desc_index = 0;
+	u32 total_delay = 0;
+	enum nbl_cmd_status status = NBL_CMDQ_SUCCESS;
+	struct nbl_cmd_queue *queue = &cmdq_mgt->cmd_queue;
+	struct nbl_cmd_ring *ring = &queue->sq_ring;
+	struct nbl_cmd_desc *desc = NULL;
+	struct nbl_cmdq_dma_mem *in_buffer = NULL;
+	struct nbl_cmdq_dma_mem *out_buffer = NULL;
+	struct nbl_common_info *common = queue->chan_mgt->common;
+
+	/* check cmd queue status */
+	status = nbl_cmdq_check_queue(ring, common);
+	if (status)
+		goto cmd_send_end;
+
+	/* check parameters: the sending part */
+	status = nbl_cmdq_check_content(queue, hdr, cmd);
+	if (status)
+		goto cmd_send_end;
+
+	/* lock the ring, assign buffer and send command */
+	spin_lock(&queue->sq_lock);
+
+	desc_index = ring->next_to_use;
+	/* assign pre-allocated dma for buffers */
+	if (cmd->in_va) {
+		in_buffer = &ring->in_buffer_info[desc_index];
+		memcpy(in_buffer->va, cmd->in_va, cmd->in_length);
+		cmd->in = in_buffer->pa;
+	}
+
+	if (cmd->out_va) {
+		out_buffer = &ring->out_buffer_info[desc_index];
+		cmd->out = out_buffer->pa;
+	}
+
+	desc = NBL_CMDQ_GET_DESC(*ring, desc_index);
+	status = nbl_cmdq_execution_nolock(queue, ring, hdr, desc, cmd);
+
+	/* check if queue is full */
+	if (status == NBL_CMDQ_CQ_FULL) {
+		spin_unlock(&queue->sq_lock);
+		goto cmd_send_end;
+	}
+
+	do {
+		if (nbl_cmdq_flag_check_dd(desc)) {
+			completed = true;
+			break;
+		}
+
+		total_delay++;
+		udelay(NBL_CMDQ_SQ_WAIT_USEC);
+	} while (total_delay < queue->sq_timeout);
+
+	hit = nbl_cmdq_flag_check_hit(desc);
+	/* the hit flag is sampled under the lock; interface errors are checked after unlock */
+	spin_unlock(&queue->sq_lock);
+	prefetch(desc);
+	if (completed && hit) {
+		status = nbl_cmdq_check_interface_error(desc, common);
+		if (status)
+			goto cmd_send_end;
+	}
+
+	if (completed && hit) {
+		/* if ready, return output */
+		status = nbl_cmdq_fetch_response(queue, desc, cmd, out_buffer);
+	} else if (!completed) {
+		/* timeout error */
+		status = NBL_CMDQ_TIMEOUT_ERR;
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq firmware timeout!\n");
+	} else {
+		status = NBL_CMDQ_NOHIT_ERR;
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq param error, block:%d module:%d "
+			"table:%d.\n", desc->block, desc->module, desc->table);
+	}
+
+	/* mark desc as done by driver */
+	nbl_cmdq_flag_mark_cmd_done(desc);
+
+cmd_send_end:
+	desc = NULL;
+	ring = NULL;
+	queue = NULL;
+	return status;
+}
+
+static enum nbl_cmd_status
+nbl_cmdq_send(void 
*priv, const void *vhdr, void *vcmd)
+{
+	enum nbl_cmd_status status;
+	const struct nbl_cmd_hdr *hdr = (const struct nbl_cmd_hdr *)vhdr;
+	struct nbl_cmd_content *cmd = (struct nbl_cmd_content *)vcmd;
+
+	/* command execution */
+	status = nbl_cmdq_do_send(priv, hdr, cmd);
+	return status;
+}
+
+static enum nbl_cmd_status
+nbl_cmdq_init_ring(struct nbl_cmdq_mgt *cmdq_mgt)
+{
+	enum nbl_cmd_status ret_code;
+	struct nbl_cmd_queue *cmd_queue = &cmdq_mgt->cmd_queue;
+
+	/* set send queue write back timeout */
+	cmd_queue->sq_timeout = NBL_CMDQ_TIMEOUT;
+	ret_code = nbl_cmdq_init_sq_ring(cmd_queue);
+	return ret_code;
+}
+
+/**
+ * @brief: create the command queue
+ * @cmdq_mgt: pointer to the cmdq management structure
+ */
+static enum nbl_cmd_status
+nbl_cmdq_init_queue(struct nbl_cmdq_mgt *cmdq_mgt)
+{
+	nbl_cmdq_init_queue_parameters(&cmdq_mgt->cmd_queue);
+
+	/* init queue lock */
+	spin_lock_init(&cmdq_mgt->cmd_queue.sq_lock);
+
+	return nbl_cmdq_init_ring(cmdq_mgt);
+}
+
+static int nbl_cmdq_init(void *priv, void *param)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt);
+
+	return phy_ops->init_cmdq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt), param, 0);
+}
+
+static int nbl_cmdq_destroy(void *priv)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt);
+
+	phy_ops->destroy_cmdq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt));
+	return 0;
+}
+
+static int nbl_cmdq_reset(void *priv)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_CHAN_MGT_TO_PHY_OPS(chan_mgt);
+
+	phy_ops->reset_cmdq(NBL_CHAN_MGT_TO_PHY_PRIV(chan_mgt));
+	return 0;
+}
+
+static void nbl_cmdq_get_param(void *priv, void *cmdq_param)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_cmdq_mgt *cmdq_mgt = chan_mgt->cmdq_mgt;
+	struct nbl_chan_cmdq_init_info *param =
+		(struct nbl_chan_cmdq_init_info *)cmdq_param;
+
+	param->pa = cmdq_mgt->cmd_queue.sq_ring.desc.pa;
+	param->len = NBL_CMDQ_RING_DEPTH;
+}
+
+int nbl_chan_send_cmdq(void *priv, const void *hdr, void *cmd)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_common_info *common = chan_mgt->common;
+	int ret;
+
+	if (!chan_mgt->cmdq_mgt) {
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq not initialized yet");
+		return NBL_CMDQ_NOT_READY;
+	}
+
+	ret = nbl_cmdq_send(priv, hdr, cmd);
+	if (ret == (int)NBL_CMDQ_NEED_RESET)
+		ret = nbl_cmdq_reset(priv);
+	else if (ret == (int)NBL_CMDQ_NEED_RESEND)
+		ret = nbl_cmdq_send(priv, hdr, cmd);
+
+	return ret;
+}
+
+/* Structure starts here, adding an op should not modify anything below */
+static int nbl_cmdq_setup_mgt(struct device *dev, struct nbl_cmdq_mgt **cmdq_mgt)
+{
+	*cmdq_mgt = devm_kzalloc(dev, sizeof(**cmdq_mgt), GFP_ATOMIC);
+	if (!*cmdq_mgt)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void nbl_cmdq_remove_mgt(struct device *dev, struct nbl_cmdq_mgt **cmdq_mgt)
+{
+	devm_kfree(dev, *cmdq_mgt);
+	*cmdq_mgt = NULL;
+}
+
+int nbl_chan_cmdq_mgt_start(struct device *dev, void *priv)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_common_info *common = chan_mgt->common;
+	struct nbl_cmdq_mgt **cmdq_mgt = &chan_mgt->cmdq_mgt;
+	struct nbl_chan_cmdq_init_info cmdq_param = {0};
+	u8 idx = 0;
+	int ret = 0;
+
+	/* if cmdq not ready, setup command queue */
+	if (!(*cmdq_mgt)) {
+		idx = nbl_tc_alloc_inst_id();
+		if 
(idx >= NBL_TC_FLOW_INST_COUNT) {
+			nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq start failed, max tc flow instances reached!");
+			return -EPERM;
+		}
+
+		common->tc_inst_id = idx;
+
+		/* alloc memory for cmdq management */
+		ret = nbl_cmdq_setup_mgt(dev, cmdq_mgt);
+		if (ret) {
+			nbl_tc_unset_cmdq_info(common->tc_inst_id);
+			common->tc_inst_id = NBL_TC_FLOW_INST_COUNT;
+			nbl_err(common, NBL_DEBUG_FLOW,
+				"tc flow cmdq start failed due to failed memory allocation");
+			return ret;
+		}
+
+		nbl_tc_set_cmdq_info(&nbl_chan_send_cmdq, (void *)chan_mgt, idx);
+		(*cmdq_mgt)->cmd_queue.chan_mgt = chan_mgt;
+		ret = nbl_cmdq_init_queue(*cmdq_mgt);
+		if (ret) {
+			nbl_cmdq_remove_mgt(dev, cmdq_mgt);
+			nbl_tc_unset_cmdq_info(idx);
+			common->tc_inst_id = NBL_TC_FLOW_INST_COUNT;
+			return ret;
+		}
+
+		cmdq_param.vsi_id = common->vsi_id;
+		cmdq_param.bdf_num = (common->bus << 8 | common->devid << 3 |
+				      NBL_COMMON_TO_PCI_FUNC_ID(common));
+		nbl_cmdq_get_param(chan_mgt, &cmdq_param);
+		nbl_cmdq_init(chan_mgt, &cmdq_param);
+		nbl_info(common, NBL_DEBUG_FLOW, "tc flow cmdq inited\n");
+	}
+
+	(*cmdq_mgt)->cmdq_refcount++;
+	nbl_info(common, NBL_DEBUG_FLOW,
+		 "tc flow cmdq ref count: %d\n", (*cmdq_mgt)->cmdq_refcount);
+	return (int)common->tc_inst_id;
+}
+
+int nbl_chan_cmdq_mgt_stop(struct device *dev, void *priv, u8 inst_id)
+{
+	struct nbl_channel_mgt *chan_mgt = (struct nbl_channel_mgt *)priv;
+	struct nbl_cmdq_mgt **cmdq_mgt = &chan_mgt->cmdq_mgt;
+	struct nbl_common_info *common = chan_mgt->common;
+
+	if (inst_id >= NBL_TC_FLOW_INST_COUNT)
+		return 0;
+
+	if (!(*cmdq_mgt)) {
+		nbl_err(common, NBL_DEBUG_FLOW, "tc flow cmdq deinit requested but never inited");
+		return 0;
+	} else if ((*cmdq_mgt)->cmdq_refcount == 1) {
+		/* wait for inflight cmd to finish */
+		mdelay(NBL_CMDQ_FLIGHT_DELAY);
+		nbl_cmdq_destroy(priv);
+		nbl_cmdq_destroy_queue(&(*cmdq_mgt)->cmd_queue);
+		nbl_cmdq_remove_mgt(dev, cmdq_mgt);
+		nbl_tc_unset_cmdq_info(inst_id);
+		common->tc_inst_id = NBL_TC_FLOW_INST_COUNT;
+		nbl_info(common, NBL_DEBUG_FLOW, "tc flow cmdq deinited\n");
+	} else {
+		(*cmdq_mgt)->cmdq_refcount--;
+		nbl_info(common, NBL_DEBUG_FLOW,
+			 "tc flow cmdq ref count: %d\n", (*cmdq_mgt)->cmdq_refcount);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h
new file mode 100644
index 0000000000000000000000000000000000000000..bcd400672d3b0d9679471709f8762aae7be4c9b4
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_channel/nbl_cmdq.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Nebula-matrix DPDK user-network
+ * Copyright(c) 2021-2030 nbl, Inc.
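+ * Command queue (cmdq) definitions used by the TC flow offload path.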
+ */ +#ifndef _NBL_CMDQ_H +#define _NBL_CMDQ_H + +#include "nbl_channel.h" +#include "nbl_core.h" + +#define NBL_CMDQ_HI_DWORD(x) ((u32)(((x) >> 32) & 0xFFFFFFFF)) +#define NBL_CMDQ_LO_DWORD(x) ((u32)(x) & 0xFFFFFFFF) + +#define NBL_CMDQ_TIMEOUT 100000 +#define NBL_CMDQ_FLIGHT_DELAY 500 +#define NBL_CMDQ_HALF_DESC_LENGTH 16 + +/* command resend and reset */ +#define NBL_CMDQ_RESEND_MAX_TIMES 3 +#define NBL_CMDQ_RESET_MAX_WAIT 5 + +/* initial value of descriptor */ +#define NBL_CMDQ_DESC_FLAG_DD BIT(0) +#define NBL_CMDQ_DESC_FLAG_ERR BIT(1) +#define NBL_CMDQ_DESC_FLAG_BUF_IN BIT(2) +#define NBL_CMDQ_DESC_FLAG_BUF_OUT BIT(3) +#define NBL_CMDQ_DESC_FLAG_SI BIT(4) +#define NBL_CMDQ_DESC_FLAG_EI BIT(5) +#define NBL_CMDQ_DESC_FLAG_IF_ERR BIT(6) +#define NBL_CMDQ_DESC_FLAG_HIT BIT(7) +#define NBL_CMDQ_DESC_FLAG_IF_ERR_OFT 8 +#define NBL_CMDQ_DESC_FLAG_IF_ERR_MASK (0b11) +#define NBL_CMDQ_DESC_FLAG_DONE BIT(15) + +#define NBL_CMDQ_SQ_WAIT_USEC 1 +#define NBL_CMDQ_BUF_SIZE 256 +#define NBL_CMDQ_RING_DEPTH 4096 /* max: 2^16 */ +#define NBL_CMDQ_RQ_RING_DEPTH 4096 /* max: 2^15 */ +#define NBL_CMDQ_DOORBELL_MASK 0x1FFFF + +struct nbl_cmdq_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +}; + +/** + * @brief: command ring, with pointers to ring/buffer memory + * @dma_head: + * @buffer: + * @cmd_buf: + */ +struct nbl_cmd_ring { + struct nbl_cmdq_dma_mem desc; /* descriptor ring memory */ + struct nbl_cmdq_dma_mem in_mem; + struct nbl_cmdq_dma_mem out_mem; + struct nbl_cmdq_dma_mem *in_buffer_info; /* buffer detail information */ + struct nbl_cmdq_dma_mem *out_buffer_info; /* buffer detail information */ + void *in_buffer_dma_head; /* buffer dma head */ + void *out_buffer_dma_head; /* buffer dma head */ + + u16 count; /* count of descriptors */ + u16 next_to_use; + u16 next_to_clean; + + /* only 17 bit valid for send queue, and 16 for receive queue */ + u32 doorbell; + + /* for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 cmdq_enable; + u32 cmdq_interrupt; + u32 msgq_curr_rst; + u32 msgq_interrupt; + u32 msgq_enable; + + /* ring base address */ + u32 bah; + u32 bal; +}; + +struct nbl_cmd_queue { + struct nbl_cmd_ring sq_ring; /* command send queue */ + u16 sq_buf_size; + u16 cmd_ring_depth; + spinlock_t sq_lock; /* used to lock the send queue */ + u32 sq_timeout; + enum nbl_cmd_status sq_last_status; + + struct nbl_channel_mgt *chan_mgt; +}; + +struct nbl_cmdq_mgt { + struct nbl_cmd_queue cmd_queue; + u16 cmdq_refcount; +}; + +#pragma pack(1) +/** + * struct nbl_cmd_desc - Admin queue descriptor + * @brief: admin queue descriptor, 32 Bytes + * @flags: basic properties of the descriptor + * @block: firmware divide the register tables into blocks, sections, tables + * @module: same as above + * @table: same as above + * @opcode: add, delete, flush, update etc. 
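+ * @seq: 4-bit sequence number filled by the driver; wraps after 16 commands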
+ * @errorcode: command error returned by the firmware + * @datalen: valid length of the buffer + * @param_high: and _low, optional parameters for the command + * @recv_high: and _low, buffer address for receiving data + * @send_high: and _low, buffer address for sending data + */ +struct nbl_cmd_desc { + u32 flags:16; + u32 block:5; + u32 module:5; + u32 table:4; + u32 rsv:2; + u32 opcode:8; + u32 errorcode:8; + u32 datalen:12; + u32 seq:4; + u32 param_low; + u32 param_high; + u32 recv_low; + u32 recv_high; + u32 send_low; + u32 send_high; +}; + +struct nbl_cmd_rq_desc { + u32 head_data; + u32 contents[7]; +}; + +struct nbl_cmd_rq_desc_age { + u32 start_offset:17; + u32 reserved0:15; + u32 bitmap0; + u32 bitmap1; + u32 bitmap2; + u32 bitmap3; + u32 reserved1; + u32 reserved2; +}; + +#pragma pack() + +#define NBL_CMDQ_GET_DESC(ring, index) \ + (&(((struct nbl_cmd_desc *)((ring).desc.va))[index])) + +#define NBL_CMDQ_GET_RQ_DESC(ring, index) \ + (&(((struct nbl_cmd_rq_desc *)((ring).desc.va))[(index) + 1])) + +int nbl_chan_cmdq_mgt_start(struct device *dev, void *priv); +int nbl_chan_cmdq_mgt_stop(struct device *dev, void *priv, u8 inst_id); +int nbl_chan_send_cmdq(void *priv, const void *hdr, void *cmd); + +# endif /* _NBL_CMDQ_H */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c new file mode 100644 index 0000000000000000000000000000000000000000..d49f0f3f8919d4c6c564cce3594b1c95950494b0 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.c @@ -0,0 +1,1127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_common.h" + +struct nbl_common_wq_mgt { + struct workqueue_struct *ctrl_dev_wq1; + struct workqueue_struct *ctrl_dev_wq2; + struct workqueue_struct *net_dev_wq; + struct workqueue_struct *keepalive_wq; + struct workqueue_struct *rdma_wq; + struct workqueue_struct *rdma_event_wq; +}; + +void nbl_convert_mac(u8 *mac, u8 *reverse_mac) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + reverse_mac[i] = mac[ETH_ALEN - 1 - i]; +} + +static struct nbl_common_wq_mgt *wq_mgt; + +void nbl_common_queue_work(struct work_struct *task, bool ctrl_task, bool singlethread) +{ + if (ctrl_task && singlethread) + queue_work(wq_mgt->ctrl_dev_wq1, task); + else if (ctrl_task && !singlethread) + queue_work(wq_mgt->ctrl_dev_wq2, task); + else if (!ctrl_task) + queue_work(wq_mgt->net_dev_wq, task); +} + +void nbl_common_queue_work_rdma(struct work_struct *task, bool singlethread) +{ + if (singlethread) + queue_work(wq_mgt->rdma_wq, task); + else + queue_work(wq_mgt->rdma_event_wq, task); +} + +void nbl_common_queue_delayed_work(struct delayed_work *task, u32 msec, + bool ctrl_task, bool singlethread) +{ + if (ctrl_task && singlethread) + queue_delayed_work(wq_mgt->ctrl_dev_wq1, task, msecs_to_jiffies(msec)); + else if (ctrl_task && !singlethread) + queue_delayed_work(wq_mgt->ctrl_dev_wq2, task, msecs_to_jiffies(msec)); + else if (!ctrl_task) + queue_delayed_work(wq_mgt->net_dev_wq, task, msecs_to_jiffies(msec)); +} + +void nbl_common_queue_delayed_work_keepalive(struct delayed_work *task, u32 msec) +{ + queue_delayed_work(wq_mgt->keepalive_wq, task, msecs_to_jiffies(msec)); +} + +void nbl_common_release_task(struct work_struct *task) +{ + cancel_work_sync(task); +} + +void nbl_common_alloc_task(struct work_struct *task, void *func) +{ + INIT_WORK(task, func); +} + +void nbl_common_release_delayed_task(struct 
delayed_work *task)
+{
+	cancel_delayed_work_sync(task);
+}
+
+void nbl_common_alloc_delayed_task(struct delayed_work *task, void *func)
+{
+	INIT_DELAYED_WORK(task, func);
+}
+
+void nbl_common_flush_task(struct work_struct *task)
+{
+	flush_work(task);
+}
+
+void nbl_common_destroy_wq(void)
+{
+	destroy_workqueue(wq_mgt->rdma_event_wq);
+	destroy_workqueue(wq_mgt->rdma_wq);
+	destroy_workqueue(wq_mgt->keepalive_wq);
+	destroy_workqueue(wq_mgt->net_dev_wq);
+	destroy_workqueue(wq_mgt->ctrl_dev_wq2);
+	destroy_workqueue(wq_mgt->ctrl_dev_wq1);
+	kfree(wq_mgt);
+}
+
+int nbl_common_create_wq(void)
+{
+	wq_mgt = kzalloc(sizeof(*wq_mgt), GFP_KERNEL);
+	if (!wq_mgt)
+		return -ENOMEM;
+
+	wq_mgt->ctrl_dev_wq1 = create_singlethread_workqueue("nbl_ctrldev_wq1");
+	if (!wq_mgt->ctrl_dev_wq1) {
+		pr_err("Failed to create workqueue nbl_ctrldev_wq1\n");
+		goto alloc_ctrl_dev_wq1_failed;
+	}
+
+	wq_mgt->ctrl_dev_wq2 = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
+					       0, "nbl_ctrldev_wq2");
+	if (!wq_mgt->ctrl_dev_wq2) {
+		pr_err("Failed to create workqueue nbl_ctrldev_wq2\n");
+		goto alloc_ctrl_dev_wq2_failed;
+	}
+
+	wq_mgt->net_dev_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
+					     0, "nbl_net_dev_wq1");
+	if (!wq_mgt->net_dev_wq) {
+		pr_err("Failed to create workqueue nbl_net_dev_wq1\n");
+		goto alloc_net_dev_wq_failed;
+	}
+
+	wq_mgt->rdma_wq = create_singlethread_workqueue("nbl_rdma_wq1");
+	if (!wq_mgt->rdma_wq) {
+		pr_err("Failed to create workqueue nbl_rdma_wq1\n");
+		goto alloc_rdma_wq_failed;
+	}
+
+	wq_mgt->rdma_event_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nbl_rdma_wq2");
+	if (!wq_mgt->rdma_event_wq) {
+		pr_err("Failed to create workqueue nbl_rdma_wq2\n");
+		goto alloc_rdma_event_wq_failed;
+	}
+
+	wq_mgt->keepalive_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
+					       0, "nbl_keepalive_wq1");
+	if (!wq_mgt->keepalive_wq) {
+		pr_err("Failed to create workqueue nbl_keepalive_wq1\n");
+		goto alloc_keepalive_wq_failed;
+	}
+
+	return 0;
+
+alloc_keepalive_wq_failed:
+	destroy_workqueue(wq_mgt->rdma_event_wq);
+alloc_rdma_event_wq_failed:
+	destroy_workqueue(wq_mgt->rdma_wq);
+alloc_rdma_wq_failed:
+	destroy_workqueue(wq_mgt->net_dev_wq);
+alloc_net_dev_wq_failed:
+	destroy_workqueue(wq_mgt->ctrl_dev_wq2);
+alloc_ctrl_dev_wq2_failed:
+	destroy_workqueue(wq_mgt->ctrl_dev_wq1);
+alloc_ctrl_dev_wq1_failed:
+	kfree(wq_mgt);
+	return -ENOMEM;
+}
+
+u32 nbl_common_pf_id_subtraction_mgtpf_id(struct nbl_common_info *common, u32 pf_id)
+{
+	u32 diff = U32_MAX;
+
+	if (pf_id >= NBL_COMMON_TO_MGT_PF(common))
+		diff = pf_id - NBL_COMMON_TO_MGT_PF(common);
+
+	return diff;
+}
+
+/**
+ * allocate an index resource pool; index_size may be at most 64 * 1024
+ * the pool supports a start_index other than zero
+ */
+void *nbl_common_init_index_table(struct nbl_index_tbl_key *key)
+{
+	struct nbl_index_mgt *index_mgt;
+	int bucket_size;
+	int i;
+
+	if (key->index_size > NBL_INDEX_SIZE_MAX)
+		return NULL;
+
+	index_mgt = devm_kzalloc(key->dev, sizeof(struct nbl_index_mgt), GFP_KERNEL);
+	if (!index_mgt)
+		return NULL;
+
+	index_mgt->bitmap = devm_kcalloc(key->dev, BITS_TO_LONGS(key->index_size),
+					 sizeof(long), GFP_KERNEL);
+	if (!index_mgt->bitmap)
+		goto alloc_bitmap_failed;
+
+	bucket_size = DIV_ROUND_UP(key->index_size, NBL_INDEX_HASH_DIVISOR);
+	index_mgt->key_hash = devm_kcalloc(key->dev, bucket_size,
+					   sizeof(struct hlist_head), GFP_KERNEL);
+	if (!index_mgt->key_hash)
+		goto alloc_key_hash_failed;
+
+	for (i = 0; i < bucket_size; i++)
+		INIT_HLIST_HEAD(index_mgt->key_hash + i);
+
+	
memcpy(&index_mgt->tbl_key, key, sizeof(struct nbl_index_tbl_key));
+	index_mgt->free_index_num = key->index_size;
+	index_mgt->bucket_size = bucket_size;
+
+	return index_mgt;
+
+alloc_key_hash_failed:
+	devm_kfree(key->dev, index_mgt->bitmap);
+alloc_bitmap_failed:
+	devm_kfree(key->dev, index_mgt);
+
+	return NULL;
+}
+
+static void nbl_common_free_index_node(struct nbl_index_mgt *index_mgt,
+				       struct nbl_index_entry_node *idx_node)
+{
+	int i;
+	u32 free_index;
+
+	free_index = idx_node->index - index_mgt->tbl_key.start_index;
+	for (i = 0; i < idx_node->index_num; i++)
+		clear_bit(free_index + i, index_mgt->bitmap);
+	index_mgt->free_index_num += idx_node->index_num;
+	hlist_del(&idx_node->node);
+	devm_kfree(index_mgt->tbl_key.dev, idx_node);
+}
+
+void nbl_common_remove_index_table(void *priv, struct nbl_index_tbl_del_key *key)
+{
+	struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv;
+	struct device *dev;
+	struct nbl_index_entry_node *idx_node;
+	struct hlist_node *list_node;
+	int i;
+
+	if (!index_mgt)
+		return;
+
+	dev = index_mgt->tbl_key.dev;
+	for (i = 0; i < index_mgt->bucket_size; i++) {
+		hlist_for_each_entry_safe(idx_node, list_node, index_mgt->key_hash + i, node) {
+			if (key && key->action_func)
+				key->action_func(key->action_priv, idx_node->index, idx_node->data);
+			nbl_common_free_index_node(index_mgt, idx_node);
+		}
+	}
+
+	devm_kfree(dev, index_mgt->bitmap);
+	devm_kfree(dev, index_mgt->key_hash);
+	devm_kfree(dev, index_mgt);
+}
+
+void nbl_common_scan_index_table(void *priv, struct nbl_index_tbl_scan_key *key)
+{
+	struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv;
+	struct device *dev;
+	struct nbl_index_entry_node *idx_node;
+	struct hlist_node *list_node;
+	int i;
+
+	if (!index_mgt)
+		return;
+
+	dev = index_mgt->tbl_key.dev;
+	for (i = 0; i < index_mgt->bucket_size; i++) {
+		hlist_for_each_entry_safe(idx_node, list_node, index_mgt->key_hash + i, node) {
+			if (key && key->action_func)
+				key->action_func(key->action_priv, idx_node->index, idx_node->data);
+			if (key && key->del)
+				nbl_common_free_index_node(index_mgt, idx_node);
+		}
+	}
+}
+
+static u32 nbl_common_calculate_hash_key(void *key, u32 key_size, u32 bucket_size)
+{
+	u32 i;
+	u32 value = 0;
+	u32 hash_value;
+
+	/* if the bucket size is no larger than 1, the hash value is always 0 */
+	if (bucket_size == NBL_HASH_TBL_LIST_BUCKET_SIZE)
+		return 0;
+
+	for (i = 0; i < key_size; i++)
+		value += *((u8 *)key + i);
+
+	hash_value = __hash_32(value);
+
+	return hash_value % bucket_size;
+}
+
+int nbl_common_find_available_idx(unsigned long *addr, u32 size, u32 idx_num, u32 multiple)
+{
+	u32 first_idx;
+	u32 next_idx;
+	u32 cur_idx;
+	u32 idx_num_tmp;
+
+	first_idx = find_first_zero_bit(addr, size);
+	/* only a single index is needed */
+	if (idx_num == 1)
+		return first_idx;
+
+	while (first_idx < size) {
+		if (first_idx % multiple == 0) {
+			idx_num_tmp = idx_num - 1;
+			cur_idx = first_idx;
+			while (cur_idx < size && idx_num_tmp > 0) {
+				next_idx = find_next_zero_bit(addr, size, cur_idx + 1);
+				if (next_idx - cur_idx != 1)
+					break;
+				idx_num_tmp--;
+				cur_idx = next_idx;
+			}
+
+			/* reached the tail, return error */
+			if (cur_idx >= size)
+				return size;
+
+			/* found enough available indexes, return the first one */
+			if (!idx_num_tmp)
+				return first_idx;
+
+			first_idx = first_idx + multiple;
+		} else {
+			first_idx = first_idx + 1;
+		}
+
+		first_idx = find_next_zero_bit(addr, size, first_idx);
+	}
+
+	return size;
+}
+
+/**
+ * allocate an available index
+ * supports continuous indexes (num > 1) and aligning the base index to a multiple
+ * input
+ * @key: must not be NULL;
+ * @key_size: must be > 0;
+ * @extra_key: if the requested index num > 1, extra_key must not be NULL, see
+	struct nbl_index_key_extra
+ * @data: the node carries extra data if not NULL
+ * @data_size: size of @data
+ * @output_data: optional, returns the table's data if output_data is not NULL
+ */
+int nbl_common_alloc_index(void *priv, void *key, struct nbl_index_key_extra *extra_key,
+			   void *data, u32 data_size, void **output_data)
+{
+	struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv;
+	struct nbl_index_entry_node *idx_node;
+	u32 key_node_size;
+	u32 index = U32_MAX;
+	u32 hash_value;
+	u32 base_index;
+	u32 key_size = index_mgt->tbl_key.key_size;
+	u32 idx_num = 1;
+	u32 idx_multiple = 1;
+	u32 i;
+
+	if (!index_mgt->free_index_num)
+		return index;
+
+	if (extra_key) {
+		idx_num = extra_key->index_num;
+		idx_multiple = extra_key->begin_idx_multiple;
+	}
+
+	base_index = nbl_common_find_available_idx(index_mgt->bitmap,
+						   index_mgt->tbl_key.index_size, idx_num,
+						   idx_multiple);
+	if (base_index >= index_mgt->tbl_key.index_size)
+		return index;
+
+	key_node_size = sizeof(struct nbl_index_entry_node) + key_size + data_size;
+	idx_node = devm_kzalloc(index_mgt->tbl_key.dev, key_node_size, GFP_KERNEL);
+	if (!idx_node)
+		return index;
+
+	for (i = 0; i < idx_num; i++)
+		set_bit(base_index + i, index_mgt->bitmap);
+
+	index_mgt->free_index_num -= idx_num;
+	index = base_index + index_mgt->tbl_key.start_index;
+	hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size);
+	idx_node->index = index;
+	idx_node->index_num = idx_num;
+	memcpy(idx_node->data, key, key_size);
+	if (data)
+		memcpy(idx_node->data + key_size, data, data_size);
+
+	if (output_data)
+		*output_data = idx_node->data + key_size;
+
+	hlist_add_head(&idx_node->node, index_mgt->key_hash + hash_value);
+
+	return index;
+}
+
+/**
+ * if the key already has an index allocated, return that base index;
+ * otherwise allocate one, unless extra_key->not_alloc_new_node is set
+ * supports continuous indexes (num > 1) and aligning the base index to a multiple
+ * input
+ * @extra_key: if the requested index num > 1, extra_key must not be NULL, see
+	struct nbl_index_key_extra
+ */
+int nbl_common_get_index(void *priv, void *key, struct nbl_index_key_extra *extra_key)
+{
+	struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv;
+	struct nbl_index_entry_node *idx_node;
+	u32 index = U32_MAX;
+	u32 hash_value;
+	u32 key_size = index_mgt->tbl_key.key_size;
+
+	hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size);
+	hlist_for_each_entry(idx_node, index_mgt->key_hash + hash_value, node)
+		if (!memcmp(idx_node->data, key, key_size)) {
+			index = idx_node->index;
+			goto out;
+		}
+
+	if (extra_key && extra_key->not_alloc_new_node)
+		goto out;
+
+	index = nbl_common_alloc_index(index_mgt, key, extra_key, NULL, 0, NULL);
+out:
+	return index;
+}
+
+/**
+ * if the key already has an index allocated, return that base index;
+ * otherwise allocate one, unless extra_key->not_alloc_new_node is set
+ * supports continuous indexes (num > 1) and aligning the base index to a multiple
+ * input
+ * @key: must not be NULL;
+ * @key_size: must be > 0;
+ * @extra_key: if the requested index num > 1, extra_key must not be NULL, see
+	struct nbl_index_key_extra
+ * @data: the node carries extra data if not NULL
+ * @data_size: size of @data
+ * @output_data: optional, returns the table's data if output_data is not NULL
+ */
+int nbl_common_get_index_with_data(void *priv, void *key, 
struct nbl_index_key_extra *extra_key, + void *data, u32 data_size, void **output_data) +{ + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct nbl_index_entry_node *idx_node; + u32 index = U32_MAX; + u32 hash_value; + u32 key_size = index_mgt->tbl_key.key_size; + + hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); + hlist_for_each_entry(idx_node, index_mgt->key_hash + hash_value, node) + if (!memcmp(idx_node->data, key, key_size)) { + index = idx_node->index; + if (output_data) + *output_data = idx_node->data + key_size; + goto out; + } + + if (extra_key && extra_key->not_alloc_new_node) + goto out; + + index = nbl_common_alloc_index(index_mgt, key, extra_key, data, data_size, output_data); +out: + return index; +} + +void nbl_common_free_index(void *priv, void *key) +{ + struct nbl_index_mgt *index_mgt = (struct nbl_index_mgt *)priv; + struct nbl_index_entry_node *idx_node; + u32 hash_value; + u32 key_size = index_mgt->tbl_key.key_size; + + hash_value = nbl_common_calculate_hash_key(key, key_size, index_mgt->bucket_size); + hlist_for_each_entry(idx_node, index_mgt->key_hash + hash_value, node) + if (!memcmp(idx_node->data, key, key_size)) { + nbl_common_free_index_node(index_mgt, idx_node); + return; + } +} + +/** + * alloc a hash table + * the table support multi thread + */ +void *nbl_common_init_hash_table(struct nbl_hash_tbl_key *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt; + int bucket_size; + int i; + + tbl_mgt = devm_kzalloc(key->dev, sizeof(struct nbl_hash_tbl_mgt), GFP_KERNEL); + if (!tbl_mgt) + return NULL; + + bucket_size = key->bucket_size; + tbl_mgt->hash = devm_kcalloc(key->dev, bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->hash) + goto alloc_hash_failed; + + for (i = 0; i < bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->hash + i); + + memcpy(&tbl_mgt->tbl_key, key, sizeof(struct nbl_hash_tbl_key)); + + if (key->lock_need) + mutex_init(&tbl_mgt->lock); + + return tbl_mgt; + +alloc_hash_failed: + devm_kfree(key->dev, tbl_mgt); + + return NULL; +} + +/** + * alloc a hash node, and add to hlist_head + */ +int nbl_common_alloc_hash_node(void *priv, void *key, void *data, void **out_data) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + u32 hash_value; + u32 node_size; + u16 key_size; + u16 data_size; + + node_size = sizeof(struct nbl_hash_entry_node); + hash_node = devm_kzalloc(tbl_mgt->tbl_key.dev, sizeof(struct nbl_hash_entry_node), + GFP_KERNEL); + if (!hash_node) + return -1; + + key_size = tbl_mgt->tbl_key.key_size; + hash_node->key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!hash_node->key) + goto alloc_key_failed; + + data_size = tbl_mgt->tbl_key.data_size; + hash_node->data = devm_kzalloc(tbl_mgt->tbl_key.dev, data_size, GFP_KERNEL); + if (!hash_node->data) + goto alloc_data_failed; + + memcpy(hash_node->key, key, key_size); + memcpy(hash_node->data, data, data_size); + + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_add_head(&hash_node->node, tbl_mgt->hash + hash_value); + tbl_mgt->node_num++; + if (out_data) + *out_data = hash_node->data; + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + return 0; + +alloc_data_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->key); +alloc_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + + return -1; +} + +/** + * 
get a hash node, return the data if node exist + */ +void *nbl_common_get_hash_node(void *priv, void *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_head *head; + void *data = NULL; + u32 hash_value; + u16 key_size; + + key_size = tbl_mgt->tbl_key.key_size; + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + head = tbl_mgt->hash + hash_value; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_for_each_entry(hash_node, head, node) + if (!memcmp(hash_node->key, key, key_size)) { + data = hash_node->data; + break; + } + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + return data; +} + +static void nbl_common_remove_hash_node(struct nbl_hash_tbl_mgt *tbl_mgt, + struct nbl_hash_entry_node *hash_node) +{ + hlist_del(&hash_node->node); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->key); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->data); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + tbl_mgt->node_num--; +} + +/** + * free a hash node + */ +void nbl_common_free_hash_node(void *priv, void *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_head *head; + u32 hash_value; + u16 key_size; + + key_size = tbl_mgt->tbl_key.key_size; + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + head = tbl_mgt->hash + hash_value; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_for_each_entry(hash_node, head, node) + if (!memcmp(hash_node->key, key, key_size)) + break; + + if (hash_node) + nbl_common_remove_hash_node(tbl_mgt, hash_node); + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); +} + +/* 0: the node accord with the match condition */ +static int nbl_common_match_and_done_hash_node(struct nbl_hash_tbl_mgt *tbl_mgt, + struct nbl_hash_tbl_scan_key *key, + struct nbl_hash_entry_node *hash_node) +{ + int ret = 0; + + if (key->match_func) { + ret = key->match_func(key->match_condition, hash_node->key, hash_node->data); + if (ret) + return ret; + } + + if (key->action_func) + key->action_func(key->action_priv, hash_node->key, hash_node->data); + + if (key->op_type == NBL_HASH_TBL_OP_DELETE) + nbl_common_remove_hash_node(tbl_mgt, hash_node); + + return 0; +} + +void nbl_common_scan_hash_node(void *priv, struct nbl_hash_tbl_scan_key *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_node *safe_node; + struct hlist_head *head; + u32 i; + int match_ret; + int node_num = 0; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) { + head = tbl_mgt->hash + i; + hlist_for_each_entry_safe(hash_node, safe_node, head, node) { + match_ret = nbl_common_match_and_done_hash_node(tbl_mgt, key, hash_node); + if (!match_ret) + node_num++; + } + } + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); +} + +u16 nbl_common_get_hash_node_num(void *priv) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + + return tbl_mgt->node_num; +} + +void nbl_common_remove_hash_table(void *priv, struct nbl_hash_tbl_del_key *key) +{ + struct nbl_hash_tbl_mgt *tbl_mgt = (struct nbl_hash_tbl_mgt *)priv; + struct nbl_hash_entry_node *hash_node; + struct hlist_node *safe_node; + struct hlist_head *head; + struct device *dev; + u32 
i; + + if (!priv) + return; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) { + head = tbl_mgt->hash + i; + hlist_for_each_entry_safe(hash_node, safe_node, head, node) { + if (key && key->action_func) + key->action_func(key->action_priv, hash_node->key, hash_node->data); + nbl_common_remove_hash_node(tbl_mgt, hash_node); + } + } + + devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->hash); + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + dev = tbl_mgt->tbl_key.dev; + devm_kfree(dev, tbl_mgt); +} + +/** + * alloc a hash x and y axis table + * it support x/y axis store if necessary, so it can scan by x/y axis; + * the table support multi thread + */ +void *nbl_common_init_hash_xy_table(struct nbl_hash_xy_tbl_key *key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt; + int i; + + tbl_mgt = devm_kzalloc(key->dev, sizeof(struct nbl_hash_xy_tbl_mgt), GFP_KERNEL); + if (!tbl_mgt) + return NULL; + + tbl_mgt->hash = devm_kcalloc(key->dev, key->bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->hash) + goto alloc_hash_failed; + + tbl_mgt->x_axis_hash = devm_kcalloc(key->dev, key->x_axis_bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->x_axis_hash) + goto alloc_x_axis_hash_failed; + + tbl_mgt->y_axis_hash = devm_kcalloc(key->dev, key->y_axis_bucket_size, + sizeof(struct hlist_head), GFP_KERNEL); + if (!tbl_mgt->y_axis_hash) + goto alloc_y_axis_hash_failed; + + for (i = 0; i < key->bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->hash + i); + + for (i = 0; i < key->x_axis_bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->x_axis_hash + i); + + for (i = 0; i < key->y_axis_bucket_size; i++) + INIT_HLIST_HEAD(tbl_mgt->y_axis_hash + i); + + memcpy(&tbl_mgt->tbl_key, key, sizeof(struct nbl_hash_xy_tbl_key)); + + if (key->lock_need) + mutex_init(&tbl_mgt->lock); + + return tbl_mgt; + +alloc_y_axis_hash_failed: + devm_kfree(key->dev, tbl_mgt->x_axis_hash); +alloc_x_axis_hash_failed: + devm_kfree(key->dev, tbl_mgt->hash); +alloc_hash_failed: + devm_kfree(key->dev, tbl_mgt); + + return NULL; +} + +/** + * alloc a hash x and y node, and add to hlist_head + */ +int nbl_common_alloc_hash_xy_node(void *priv, void *x_key, void *y_key, void *data) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + void *key; + u32 hash_value; + u32 x_hash_value; + u32 y_hash_value; + u32 node_size; + u16 key_size; + u16 x_key_size; + u16 y_key_size; + u16 data_size; + + node_size = sizeof(struct nbl_hash_entry_xy_node); + hash_node = devm_kzalloc(tbl_mgt->tbl_key.dev, sizeof(struct nbl_hash_entry_xy_node), + GFP_KERNEL); + if (!hash_node) + return -1; + + x_key_size = tbl_mgt->tbl_key.x_axis_key_size; + hash_node->x_axis_key = devm_kzalloc(tbl_mgt->tbl_key.dev, x_key_size, GFP_KERNEL); + if (!hash_node->x_axis_key) + goto alloc_x_key_failed; + + y_key_size = tbl_mgt->tbl_key.y_axis_key_size; + hash_node->y_axis_key = devm_kzalloc(tbl_mgt->tbl_key.dev, y_key_size, GFP_KERNEL); + if (!hash_node->y_axis_key) + goto alloc_y_key_failed; + + key_size = x_key_size + y_key_size; + key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!key) + goto alloc_key_failed; + + data_size = tbl_mgt->tbl_key.data_size; + hash_node->data = devm_kzalloc(tbl_mgt->tbl_key.dev, data_size, GFP_KERNEL); + if (!hash_node->data) + goto alloc_data_failed; + + memcpy(key, x_key, x_key_size); + memcpy(key + x_key_size, y_key, y_key_size); + 
memcpy(hash_node->x_axis_key, x_key, x_key_size); + memcpy(hash_node->y_axis_key, y_key, y_key_size); + memcpy(hash_node->data, data, data_size); + + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + x_hash_value = nbl_common_calculate_hash_key(x_key, x_key_size, + tbl_mgt->tbl_key.x_axis_bucket_size); + y_hash_value = nbl_common_calculate_hash_key(y_key, y_key_size, + tbl_mgt->tbl_key.y_axis_bucket_size); + + devm_kfree(tbl_mgt->tbl_key.dev, key); + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_add_head(&hash_node->node, tbl_mgt->hash + hash_value); + hlist_add_head(&hash_node->x_axis_node, tbl_mgt->x_axis_hash + x_hash_value); + hlist_add_head(&hash_node->y_axis_node, tbl_mgt->y_axis_hash + y_hash_value); + + tbl_mgt->node_num++; + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + return 0; + +alloc_data_failed: + devm_kfree(tbl_mgt->tbl_key.dev, key); +alloc_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->y_axis_key); +alloc_y_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->x_axis_key); +alloc_x_key_failed: + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + + return -1; +} + +/** + * get a hash node, return the data if node exist + */ +void *nbl_common_get_hash_xy_node(void *priv, void *x_key, void *y_key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + struct hlist_head *head; + void *data = NULL; + void *key; + u32 hash_value; + u16 key_size; + u16 x_key_size; + u16 y_key_size; + + x_key_size = tbl_mgt->tbl_key.x_axis_key_size; + y_key_size = tbl_mgt->tbl_key.y_axis_key_size; + key_size = x_key_size + y_key_size; + key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!key) + return NULL; + + memcpy(key, x_key, x_key_size); + memcpy(key + x_key_size, y_key, y_key_size); + hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size); + head = tbl_mgt->hash + hash_value; + + if (tbl_mgt->tbl_key.lock_need) + mutex_lock(&tbl_mgt->lock); + + hlist_for_each_entry(hash_node, head, node) + if (!memcmp(hash_node->x_axis_key, x_key, x_key_size) && + !memcmp(hash_node->y_axis_key, y_key, y_key_size)) { + data = hash_node->data; + break; + } + + if (tbl_mgt->tbl_key.lock_need) + mutex_unlock(&tbl_mgt->lock); + + devm_kfree(tbl_mgt->tbl_key.dev, key); + + return data; +} + +static void nbl_common_remove_hash_xy_node(struct nbl_hash_xy_tbl_mgt *tbl_mgt, + struct nbl_hash_entry_xy_node *hash_node) +{ + hlist_del(&hash_node->node); + hlist_del(&hash_node->x_axis_node); + hlist_del(&hash_node->y_axis_node); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->x_axis_key); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->y_axis_key); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node->data); + devm_kfree(tbl_mgt->tbl_key.dev, hash_node); + tbl_mgt->node_num--; +} + +/** + * free a hash node + */ +void nbl_common_free_hash_xy_node(void *priv, void *x_key, void *y_key) +{ + struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv; + struct nbl_hash_entry_xy_node *hash_node; + struct hlist_head *head; + void *key; + u32 hash_value; + u16 key_size; + u16 x_key_size; + u16 y_key_size; + + x_key_size = tbl_mgt->tbl_key.x_axis_key_size; + y_key_size = tbl_mgt->tbl_key.y_axis_key_size; + key_size = x_key_size + y_key_size; + key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL); + if (!key) + return; + + memcpy(key, x_key, x_key_size); + memcpy(key + x_key_size, y_key, 
+
+/**
+ * Free a hash node looked up by its x and y keys.
+ */
+void nbl_common_free_hash_xy_node(void *priv, void *x_key, void *y_key)
+{
+	struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv;
+	struct nbl_hash_entry_xy_node *hash_node;
+	struct hlist_head *head;
+	void *key;
+	u32 hash_value;
+	u16 key_size;
+	u16 x_key_size;
+	u16 y_key_size;
+
+	x_key_size = tbl_mgt->tbl_key.x_axis_key_size;
+	y_key_size = tbl_mgt->tbl_key.y_axis_key_size;
+	key_size = x_key_size + y_key_size;
+	key = devm_kzalloc(tbl_mgt->tbl_key.dev, key_size, GFP_KERNEL);
+	if (!key)
+		return;
+
+	memcpy(key, x_key, x_key_size);
+	memcpy(key + x_key_size, y_key, y_key_size);
+	hash_value = nbl_common_calculate_hash_key(key, key_size, tbl_mgt->tbl_key.bucket_size);
+	head = tbl_mgt->hash + hash_value;
+
+	if (tbl_mgt->tbl_key.lock_need)
+		mutex_lock(&tbl_mgt->lock);
+
+	hlist_for_each_entry(hash_node, head, node)
+		if (!memcmp(hash_node->x_axis_key, x_key, x_key_size) &&
+		    !memcmp(hash_node->y_axis_key, y_key, y_key_size))
+			break;
+
+	/* hash_node is NULL here if the loop finished without a match */
+	if (hash_node)
+		nbl_common_remove_hash_xy_node(tbl_mgt, hash_node);
+
+	if (tbl_mgt->tbl_key.lock_need)
+		mutex_unlock(&tbl_mgt->lock);
+
+	devm_kfree(tbl_mgt->tbl_key.dev, key);
+}
+
+/* Returns 0 if the node accords with the match condition. */
+static int nbl_common_match_and_done_hash_xy_node(struct nbl_hash_xy_tbl_mgt *tbl_mgt,
+						  struct nbl_hash_xy_tbl_scan_key *key,
+						  struct nbl_hash_entry_xy_node *hash_node)
+{
+	int ret = 0;
+
+	if (key->match_func) {
+		ret = key->match_func(key->match_condition, hash_node->x_axis_key,
+				      hash_node->y_axis_key, hash_node->data);
+		if (ret)
+			return ret;
+	}
+
+	if (key->action_func)
+		key->action_func(key->action_priv, hash_node->x_axis_key, hash_node->y_axis_key,
+				 hash_node->data);
+
+	if (key->op_type == NBL_HASH_TBL_OP_DELETE)
+		nbl_common_remove_hash_xy_node(tbl_mgt, hash_node);
+
+	return 0;
+}
+
+/**
+ * Scan by x axis, by y axis or over the whole table, and return the
+ * number of matching nodes.
+ */
+u16 nbl_common_scan_hash_xy_node(void *priv, struct nbl_hash_xy_tbl_scan_key *key)
+{
+	struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv;
+	struct nbl_hash_entry_xy_node *hash_node;
+	struct hlist_node *safe_node;
+	struct hlist_head *head;
+	int match_ret;
+	u32 i;
+	u32 hash_value;
+	u16 x_axis_key_size;
+	u16 y_axis_key_size;
+	u16 node_num = 0;
+
+	if (tbl_mgt->tbl_key.lock_need)
+		mutex_lock(&tbl_mgt->lock);
+
+	if (key->scan_type == NBL_HASH_TBL_X_AXIS_SCAN) {
+		x_axis_key_size = tbl_mgt->tbl_key.x_axis_key_size;
+		hash_value = nbl_common_calculate_hash_key(key->x_key, x_axis_key_size,
+							   tbl_mgt->tbl_key.x_axis_bucket_size);
+		head = tbl_mgt->x_axis_hash + hash_value;
+		hlist_for_each_entry_safe(hash_node, safe_node, head, x_axis_node) {
+			if (!memcmp(hash_node->x_axis_key, key->x_key, x_axis_key_size)) {
+				match_ret = nbl_common_match_and_done_hash_xy_node(tbl_mgt, key,
+										   hash_node);
+				if (!match_ret) {
+					node_num++;
+					if (key->only_query_exist)
+						break;
+				}
+			}
+		}
+	} else if (key->scan_type == NBL_HASH_TBL_Y_AXIS_SCAN) {
+		y_axis_key_size = tbl_mgt->tbl_key.y_axis_key_size;
+		hash_value = nbl_common_calculate_hash_key(key->y_key, y_axis_key_size,
+							   tbl_mgt->tbl_key.y_axis_bucket_size);
+		head = tbl_mgt->y_axis_hash + hash_value;
+		hlist_for_each_entry_safe(hash_node, safe_node, head, y_axis_node) {
+			if (!memcmp(hash_node->y_axis_key, key->y_key, y_axis_key_size)) {
+				match_ret = nbl_common_match_and_done_hash_xy_node(tbl_mgt, key,
+										   hash_node);
+				if (!match_ret) {
+					node_num++;
+					if (key->only_query_exist)
+						break;
+				}
+			}
+		}
+	} else {
+		for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) {
+			head = tbl_mgt->hash + i;
+			hlist_for_each_entry_safe(hash_node, safe_node, head, node) {
+				match_ret = nbl_common_match_and_done_hash_xy_node(tbl_mgt, key,
+										   hash_node);
+				if (!match_ret)
+					node_num++;
+			}
+		}
+	}
+
+	if (tbl_mgt->tbl_key.lock_need)
+		mutex_unlock(&tbl_mgt->lock);
+
+	return node_num;
+}
+
+u16 nbl_common_get_hash_xy_node_num(void *priv)
+{
+	struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv;
+
+	return tbl_mgt->node_num;
+}
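+
+/*
+ * Illustrative scan sketch (comment only, not driver code): delete every
+ * node on one x axis. match_func may be NULL, in which case every node in
+ * the chosen bucket whose axis key matches is acted on.
+ *
+ *	struct nbl_hash_xy_tbl_scan_key scan_key = {
+ *		.scan_type = NBL_HASH_TBL_X_AXIS_SCAN,
+ *		.x_key = &x,
+ *		.op_type = NBL_HASH_TBL_OP_DELETE,
+ *	};
+ *	deleted = nbl_common_scan_hash_xy_node(tbl, &scan_key);
+ */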
+
+void nbl_common_remove_hash_xy_table(void *priv, struct nbl_hash_xy_tbl_del_key *key)
+{
+	struct nbl_hash_xy_tbl_mgt *tbl_mgt = (struct nbl_hash_xy_tbl_mgt *)priv;
+	struct nbl_hash_entry_xy_node *hash_node;
+	struct hlist_node *safe_node;
+	struct hlist_head *head;
+	struct device *dev;
+	u32 i;
+
+	if (!priv)
+		return;
+
+	if (tbl_mgt->tbl_key.lock_need)
+		mutex_lock(&tbl_mgt->lock);
+
+	for (i = 0; i < tbl_mgt->tbl_key.bucket_size; i++) {
+		head = tbl_mgt->hash + i;
+		hlist_for_each_entry_safe(hash_node, safe_node, head, node) {
+			if (key && key->action_func)
+				key->action_func(key->action_priv, hash_node->x_axis_key,
+						 hash_node->y_axis_key, hash_node->data);
+			nbl_common_remove_hash_xy_node(tbl_mgt, hash_node);
+		}
+	}
+
+	devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->hash);
+	devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->x_axis_hash);
+	devm_kfree(tbl_mgt->tbl_key.dev, tbl_mgt->y_axis_hash);
+
+	if (tbl_mgt->tbl_key.lock_need)
+		mutex_unlock(&tbl_mgt->lock);
+
+	dev = tbl_mgt->tbl_key.dev;
+	devm_kfree(dev, tbl_mgt);
+}
+
+void nbl_flow_direct_parse_tlv_data(u8 *tlv, u32 length, handle_tlv callback, void *data)
+{
+	u32 offset = 0;
+	u16 type, len;
+	int ret;
+
+	while (offset + NBL_CHAN_FDIR_TLV_HEADER_LEN <= length) {
+		type = *(u16 *)tlv;
+		len = *(u16 *)(tlv + 2);
+		/* stop rather than read past the buffer on a bogus length field */
+		if (offset + NBL_CHAN_FDIR_TLV_HEADER_LEN + len > length)
+			break;
+		ret = callback(type, len, tlv + 4, data);
+		if (ret)
+			break;
+
+		offset += (NBL_CHAN_FDIR_TLV_HEADER_LEN + len);
+		tlv += (NBL_CHAN_FDIR_TLV_HEADER_LEN + len);
+	}
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c6e08d54f2b4e7de1c0b86019449c25c7c5bd11
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_common.h
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_COMMON_H_
+#define _NBL_COMMON_H_
+
+#include "nbl_core.h"
+
+/**
+ * The key_hash size is index_size / NBL_INDEX_HASH_DIVISOR,
+ * e.g. for an index_size of 1024 the key_hash size is 1024 / 16 = 64.
+ */
+#define NBL_INDEX_HASH_DIVISOR 16
+
+/* a plain list needs only one bucket */
+#define NBL_HASH_TBL_LIST_BUCKET_SIZE 1
+
+struct nbl_index_mgt {
+	struct nbl_index_tbl_key tbl_key;
+	unsigned long *bitmap;
+	struct hlist_head *key_hash;
+	u32 free_index_num;
+	u32 bucket_size;
+};
+
+struct nbl_index_entry_node {
+	struct hlist_node node;
+	u32 index;	/* the index allocated for this key from the index table */
+	u32 index_num;
+	u8 data[];
+};
+
+struct nbl_hash_tbl_mgt {
+	struct nbl_hash_tbl_key tbl_key;
+	struct hlist_head *hash;
+	struct mutex lock;	/* support multi thread */
+	u16 node_num;
+};
+
+struct nbl_hash_xy_tbl_mgt {
+	struct nbl_hash_xy_tbl_key tbl_key;
+	struct hlist_head *hash;
+	struct hlist_head *x_axis_hash;
+	struct hlist_head *y_axis_hash;
+	struct mutex lock;	/* support multi thread */
+	u16 node_num;
+};
+
+/* entry used when per-axis linkage is not necessary */
+struct nbl_hash_entry_node {
+	struct hlist_node node;
+	void *key;
+	void *data;
+};
+
+/* entry used when per x/y axis linkage is necessary */
+struct nbl_hash_entry_xy_node {
+	struct hlist_node node;
+	struct hlist_node x_axis_node;
+	struct hlist_node y_axis_node;
+	void *x_axis_key;
+	void *y_axis_key;
+	void *data;
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.c
new file mode 100644
index 0000000000000000000000000000000000000000..a73f24860346c753122ad8ff3f8b135f63d6dffb
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#include "nbl_event.h"
+
+static struct nbl_event_mgt *event_mgt;
+
+void nbl_event_notify(enum nbl_event_type type, void *event_data, u16 src_vsi_id, u16 board_id)
+{
+	struct nbl_event_notifier_list *notifier_list = &event_mgt->notifier_list[type];
+	struct nbl_event_notifier *notifier = NULL;
+
+	mutex_lock(&notifier_list->notifier_lock);
+
+	list_for_each_entry(notifier, &notifier_list->list, node) {
+		if (src_vsi_id != notifier->src_vsi_id || board_id != notifier->board_id)
+			continue;
+
+		mutex_lock(&notifier->callback_lock);
+		notifier->callback.callback(type, event_data, notifier->callback.callback_data);
+		mutex_unlock(&notifier->callback_lock);
+	}
+
+	mutex_unlock(&notifier_list->notifier_lock);
+}
+
+int nbl_event_register(enum nbl_event_type type, struct nbl_event_callback *callback,
+		       u16 src_vsi_id, u16 board_id)
+{
+	struct nbl_event_notifier_list *notifier_list = &event_mgt->notifier_list[type];
+	struct nbl_event_notifier *notifier = NULL;
+
+	notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+	if (!notifier)
+		return -ENOMEM;
+
+	notifier->src_vsi_id = src_vsi_id;
+	notifier->board_id = board_id;
+	notifier->callback.callback = callback->callback;
+	notifier->callback.callback_data = callback->callback_data;
+
+	mutex_init(&notifier->callback_lock);
+
+	mutex_lock(&notifier_list->notifier_lock);
+	list_add_tail(&notifier->node, &notifier_list->list);
+	mutex_unlock(&notifier_list->notifier_lock);
+
+	return 0;
+}
+
+void nbl_event_unregister(enum nbl_event_type type, struct nbl_event_callback *callback,
+			  u16 src_vsi_id, u16 board_id)
+{
+	struct nbl_event_notifier_list *notifier_list = &event_mgt->notifier_list[type];
+	struct nbl_event_notifier *notifier = NULL;
+
+	mutex_lock(&notifier_list->notifier_lock);
+
+	list_for_each_entry(notifier, &notifier_list->list, node) {
+		if (notifier->callback.callback == callback->callback &&
+		    notifier->callback.callback_data == callback->callback_data &&
+		    notifier->src_vsi_id == src_vsi_id && notifier->board_id == board_id) {
+			list_del(&notifier->node);
+			kfree(notifier);
+			break;
+		}
+	}
+
+	mutex_unlock(&notifier_list->notifier_lock);
+}
+
+int nbl_event_init(void)
+{
+	int i = 0;
+
+	event_mgt = kzalloc(sizeof(*event_mgt), GFP_KERNEL);
+	if (!event_mgt)
+		return -ENOMEM;
+
+	for (i = 0; i < NBL_EVENT_MAX; i++) {
+		INIT_LIST_HEAD(&event_mgt->notifier_list[i].list);
+		mutex_init(&event_mgt->notifier_list[i].notifier_lock);
+	}
+
+	return 0;
+}
+
+void nbl_event_remove(void)
+{
+	struct nbl_event_notifier *notifier = NULL, *notifier_safe = NULL;
+	int i = 0;
+
+	for (i = 0; i < NBL_EVENT_MAX; i++) {
+		list_for_each_entry_safe(notifier, notifier_safe,
+					 &event_mgt->notifier_list[i].list, node) {
+			list_del(&notifier->node);
+			kfree(notifier);
+		}
+	}
+
+	kfree(event_mgt);
+	event_mgt = NULL;
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a9b4e8375d2b2370e5ad196ae5b2b5c015edbd5
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_event.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_EVENT_H_
+#define _NBL_EVENT_H_
+
+#include "nbl_core.h"
+
+struct nbl_event_notifier {
+	struct list_head node;
+	struct mutex callback_lock;	/* Protect callback */
+	struct nbl_event_callback callback;
+	u16 src_vsi_id;
+	u16 board_id;
+};
+
+struct nbl_event_notifier_list {
+	struct list_head list;
+	struct mutex notifier_lock;	/* Protect list structure */
+};
+
+struct nbl_event_mgt {
+	struct nbl_event_notifier_list notifier_list[NBL_EVENT_MAX];
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_net_sysfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_net_sysfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..e42f97c4e9fa4686a8485a528112e2755d4a20e8
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_common/nbl_net_sysfs.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ + +#include "nbl_common.h" + +#ifndef NO_SYSFS_PREALLOC +#define NBL_SET_RO_ATTR(rep_attr, attr_name, attr_show) do { \ + typeof(rep_attr) _rep_attr = (rep_attr); \ + (_rep_attr)->attr.name = __stringify(attr_name); \ + (_rep_attr)->attr.mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(0444); \ + (_rep_attr)->show = attr_show; \ + (_rep_attr)->store = NULL; \ +} while (0) +#else +#define NBL_SET_RO_ATTR(rep_attr, attr_name, attr_show) do { \ + typeof(rep_attr) _rep_attr = (rep_attr); \ + (_rep_attr)->attr.name = __stringify(attr_name); \ + (_rep_attr)->attr.mode = 0444; \ + (_rep_attr)->show = attr_show; \ + (_rep_attr)->store = NULL; \ +} while (0) +#endif + +#define NBL_NET_REP_ID_LEN 4 + +static ssize_t net_rep_show(struct device *dev, + struct nbl_netdev_rep_attr *attr, char *buf) +{ + return scnprintf(buf, NBL_NET_REP_ID_LEN, "%d\n", attr->rep_id); +} + +void nbl_net_addr_rep_attr(struct nbl_netdev_rep_attr *attr, int rep_id) +{ + NBL_SET_RO_ATTR(attr, rep_id, net_rep_show); + attr->rep_id = rep_id; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h new file mode 100644 index 0000000000000000000000000000000000000000..0961c3527d7ce01d8c19b414e9b08e6e4e68e64d --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core.h @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_CORE_H_ +#define _NBL_CORE_H_ + +#include "nbl_product_base.h" +#include "nbl_def_channel.h" +#include "nbl_def_phy.h" +#include "nbl_def_resource.h" +#include "nbl_def_dispatch.h" +#include "nbl_def_service.h" +#include "nbl_def_dev.h" +#include "nbl_def_common.h" + +#define NBL_ADAPTER_TO_PDEV(adapter) ((adapter)->pdev) +#define NBL_ADAPTER_TO_DEV(adapter) (&((adapter)->pdev->dev)) +#define NBL_ADAPTER_TO_COMMON(adapter) (&((adapter)->common)) +#define NBL_ADAPTER_TO_RPDUCT_BASE_OPS(adapter) ((adapter)->product_base_ops) + +#define NBL_ADAPTER_TO_PHY_MGT(adapter) ((adapter)->core.phy_mgt) +#define NBL_ADAPTER_TO_RES_MGT(adapter) ((adapter)->core.res_mgt) +#define NBL_ADAPTER_TO_DISP_MGT(adapter) ((adapter)->core.disp_mgt) +#define NBL_ADAPTER_TO_SERV_MGT(adapter) ((adapter)->core.serv_mgt) +#define NBL_ADAPTER_TO_DEV_MGT(adapter) ((adapter)->core.dev_mgt) +#define NBL_ADAPTER_TO_CHAN_MGT(adapter) ((adapter)->core.chan_mgt) +#define NBL_ADAPTER_TO_DEBUGFS_MGT(adapter) ((adapter)->core.debugfs_mgt) + +#define NBL_ADAPTER_TO_PHY_OPS_TBL(adapter) ((adapter)->intf.phy_ops_tbl) +#define NBL_ADAPTER_TO_RES_OPS_TBL(adapter) ((adapter)->intf.resource_ops_tbl) +#define NBL_ADAPTER_TO_DISP_OPS_TBL(adapter) ((adapter)->intf.dispatch_ops_tbl) +#define NBL_ADAPTER_TO_SERV_OPS_TBL(adapter) ((adapter)->intf.service_ops_tbl) +#define NBL_ADAPTER_TO_DEV_OPS_TBL(adapter) ((adapter)->intf.dev_ops_tbl) +#define NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter) ((adapter)->intf.channel_ops_tbl) + +#define NBL_ADAPTER_TO_RES_PT_OPS(adapter) (&(NBL_ADAPTER_TO_SERV_OPS_TBL(adapter)->pt_ops)) + +#define NBL_NETDEV_PRIV_TO_ADAPTER(priv) ((priv)->adapter) + +#define NBL_NETDEV_TO_ADAPTER(netdev) \ + (NBL_NETDEV_PRIV_TO_ADAPTER((struct nbl_netdev_priv *)netdev_priv(netdev))) + +#define NBL_NETDEV_TO_SERV_MGT(netdev) \ + (NBL_ADAPTER_TO_SERV_MGT(NBL_NETDEV_PRIV_TO_ADAPTER(\ + (struct nbl_netdev_priv *)netdev_priv(netdev)))) + +#define NBL_NETDEV_TO_DEV_MGT(netdev) \ + (NBL_ADAPTER_TO_DEV_MGT(NBL_NETDEV_TO_ADAPTER(netdev))) + +#define NBL_NETDEV_TO_COMMON(netdev) \ 
+ (NBL_ADAPTER_TO_COMMON(NBL_NETDEV_PRIV_TO_ADAPTER(\ + (struct nbl_netdev_priv *)netdev_priv(netdev)))) + +#define NBL_CAP_SET_BIT(loc) (1 << (loc)) +#define NBL_CAP_TEST_BIT(val, loc) (((val) >> (loc)) & 0x1) + +#define NBL_CAP_IS_CTRL(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_CTRL_BIT) +#define NBL_CAP_IS_NET(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_NET_BIT) +#define NBL_CAP_IS_VF(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_VF_BIT) +#define NBL_CAP_SUPPORT_LAG(val) NBL_CAP_TEST_BIT(val, NBL_CAP_SUPPORT_LAG_BIT) +#define NBL_CAP_IS_NIC(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_NIC_BIT) +#define NBL_CAP_IS_USER(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_USER_BIT) +#define NBL_CAP_IS_GRC(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_GRC_BIT) +#define NBL_CAP_IS_BLK(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_BLK_BIT) +#define NBL_CAP_IS_DPU_HOST(val) ({ typeof(val) _val = (val); \ + !NBL_CAP_TEST_BIT(_val, NBL_CAP_IS_NIC_BIT) && \ + NBL_CAP_TEST_BIT(_val, NBL_CAP_DPU_IS_HOST_BIT); }) +#define NBL_CAP_IS_DPU_ECPU(val) ({ typeof(val) _val = (val); \ + !NBL_CAP_TEST_BIT(_val, NBL_CAP_IS_NIC_BIT) && \ + !NBL_CAP_TEST_BIT(_val, NBL_CAP_DPU_IS_HOST_BIT); }) +#define NBL_CAP_IS_LEONIS(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_LEONIS_BIT) +#define NBL_CAP_IS_BOOTIS(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_BOOTIS_BIT) +#define NBL_CAP_IS_VIRTIO(val) NBL_CAP_TEST_BIT(val, NBL_CAP_IS_VIRTIO_BIT) +#define NBL_CAP_IS_FACTORY_CTRL(val) NBL_CAP_TEST_BIT(val, NBL_CAP_HAS_FACTORY_CTRL_BIT) + +enum { + NBL_CAP_HAS_CTRL_BIT = 0, + NBL_CAP_HAS_NET_BIT, + NBL_CAP_IS_VF_BIT, + NBL_CAP_SUPPORT_LAG_BIT, + NBL_CAP_IS_NIC_BIT, + NBL_CAP_DPU_IS_HOST_BIT, + NBL_CAP_IS_LEONIS_BIT, + NBL_CAP_IS_BOOTIS_BIT, + NBL_CAP_IS_VIRTIO_BIT, + NBL_CAP_IS_BLK_BIT, + NBL_CAP_HAS_USER_BIT, + NBL_CAP_HAS_GRC_BIT, + NBL_CAP_HAS_FACTORY_CTRL_BIT, +}; + +enum nbl_adapter_state { + NBL_DOWN, + NBL_RESETTING, + NBL_RESET_REQUESTED, + NBL_INITING, + NBL_INIT_FAILED, + NBL_RUNNING, + NBL_TESTING, + NBL_USER, + NBL_FATAL_ERR, + NBL_STATE_NBITS +}; + +enum { + NBL_ESWITCH_NONE, + NBL_ESWITCH_LEGACY, + NBL_ESWITCH_OFFLOADS +}; + +struct nbl_interface { + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_resource_ops_tbl *resource_ops_tbl; + struct nbl_dispatch_ops_tbl *dispatch_ops_tbl; + struct nbl_service_ops_tbl *service_ops_tbl; + struct nbl_dev_ops_tbl *dev_ops_tbl; + struct nbl_utils_ops_tbl *utils_ops_tbl; + struct nbl_channel_ops_tbl *channel_ops_tbl; +}; + +struct nbl_core { + void *phy_mgt; + void *res_mgt; + void *disp_mgt; + void *serv_mgt; + void *dev_mgt; + void *chan_mgt; + void *debugfs_mgt; +}; + +struct nbl_adapter { + struct pci_dev *pdev; + struct nbl_core core; + struct nbl_interface intf; + struct nbl_common_info common; + struct nbl_product_base_ops *product_base_ops; + struct nbl_init_param init_param; + DECLARE_BITMAP(state, NBL_STATE_NBITS); +}; + +struct nbl_rep_data { + struct net_device *netdev; + struct nbl_netdev_rep_attr rep_attr; + struct u64_stats_sync rep_syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u16 rep_vsi_id; + u8 base_queue_id; + u8 rep_queue_num; +}; + +struct nbl_netdev_priv { + struct nbl_adapter *adapter; + struct nbl_rep_data *rep; + struct net_device *netdev; + u16 tx_queue_num; + u16 rx_queue_num; + u16 queue_size; + /* default traffic destination in kernel/dpdk/coexist scene */ + u16 normal_vsi; + u16 other_vsi; + u16 async_other_vsi; + u16 async_pending_vsi; + s64 last_st_time; +}; + +struct nbl_indr_dev_priv { + struct net_device *indr_dev; + struct nbl_netdev_priv *dev_priv; + struct list_head 
list; + int binder_type; +}; + +struct nbl_devlink_priv { + void *priv; + void *dev_mgt; +}; + +struct nbl_tc_insts_info { + int (*send_cmdq)(void *priv, const void *hdr, void *cmd); + void *chan_mgt; + void *tc_flow_mgt; + int locked; +}; + +struct nbl_software_tool_id_entry { + struct list_head node; + u16 bus; + u16 id; + u8 refcount; +}; + +#define NBL_ST_MAX_DEVICE_NUM 64 +struct nbl_software_tool_table { + DECLARE_BITMAP(devid, NBL_ST_MAX_DEVICE_NUM); + int major; + dev_t devno; + struct class *cls; +}; + +extern spinlock_t nbl_tc_flow_inst_lock; + +#define NBL_TC_FLOW_INST_COUNT (8) + +struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *param); +void nbl_core_remove(struct nbl_adapter *adapter); +int nbl_core_start(struct nbl_adapter *adapter, struct nbl_init_param *param); +void nbl_core_stop(struct nbl_adapter *adapter); +void nbl_tc_set_cmdq_info(int (*send_cmdq)(void *, const void *, void *), + void *priv, u8 index); +void nbl_tc_unset_cmdq_info(u8 index); +void nbl_tc_set_flow_info(void *priv, u8 index); +void *nbl_tc_get_flow_info(u8 index); +void nbl_tc_unset_flow_info(u8 index); +u8 nbl_tc_alloc_inst_id(void); +int nbl_tc_call_inst_cmdq(u8 inst_id, const void *hdr, void *cmd); + +int nbl_st_init(struct nbl_software_tool_table *st_table); +void nbl_st_remove(struct nbl_software_tool_table *st_table); +struct nbl_software_tool_table *nbl_get_st_table(void); +struct dentry *nbl_get_debugfs_root(void); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..154ef13822cd8be6f7828f4da84faa51b922ebbd --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#include "nbl_debugfs.h" + +#define SINGLE_FOPS_RO(_fops_, _open_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define SINGLE_FOPS_WO(_fops_, _open_, _write_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .write = _write_, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define COMPLETE_FOPS_RW(_fops_, _open_, _write_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .write = _write_, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +static int nbl_flow_info_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + + disp_ops->dump_flow(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m); + + return 0; +} + +static int nbl_fd_info_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + + disp_ops->dump_fd_flow(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m); + + return 0; +} + +static int nbl_mbx_txq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_txq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_MAILBOX); + + return 0; +} + +static int nbl_mbx_rxq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_rxq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_MAILBOX); + + return 0; +} + +static int nbl_adminq_txq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_txq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_ADMINQ); + + return 0; +} + +static int nbl_adminq_rxq_dma_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + chan_ops->dump_rxq(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), m, NBL_CHAN_TYPE_ADMINQ); + + return 0; +} + +static int nbl_debugfs_flow_info_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_flow_info_dump, inode->i_private); +} + +static int nbl_debugfs_fd_info_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_fd_info_dump, inode->i_private); +} + +static int nbl_debugfs_mbx_txq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_mbx_txq_dma_dump, inode->i_private); +} + +static int nbl_debugfs_mbx_rxq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_mbx_rxq_dma_dump, inode->i_private); +} + +static int nbl_debugfs_adminq_txq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_adminq_txq_dma_dump, inode->i_private); +} + +static int nbl_debugfs_adminq_rxq_dma_dump(struct inode *inode, struct file *file) +{ + return single_open(file, 
nbl_adminq_rxq_dma_dump, inode->i_private);
+}
+
+SINGLE_FOPS_RO(flow_info_fops, nbl_debugfs_flow_info_dump);
+SINGLE_FOPS_RO(fd_info_fops, nbl_debugfs_fd_info_dump);
+SINGLE_FOPS_RO(mbx_txq_fops, nbl_debugfs_mbx_txq_dma_dump);
+SINGLE_FOPS_RO(mbx_rxq_fops, nbl_debugfs_mbx_rxq_dma_dump);
+SINGLE_FOPS_RO(adminq_txq_fops, nbl_debugfs_adminq_txq_dma_dump);
+SINGLE_FOPS_RO(adminq_rxq_fops, nbl_debugfs_adminq_rxq_dma_dump);
+
+static int nbl_ring_index_dump(struct seq_file *m, void *v)
+{
+	struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private;
+
+	seq_printf(m, "Index = %d", debugfs_mgt->ring_index);
+
+	return 0;
+}
+
+static int nbl_ring_index_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nbl_ring_index_dump, inode->i_private);
+}
+
+static ssize_t nbl_ring_index_write(struct file *file, const char __user *buf,
+				    size_t count, loff_t *offp)
+{
+	struct nbl_debugfs_mgt *debugfs_mgt = file_inode(file)->i_private;
+	char buffer[8] = {0};
+	/* leave room for the terminating NUL that kstrtou16() relies on */
+	size_t size = min(count, sizeof(buffer) - 1);
+
+	if (copy_from_user(buffer, buf, size))
+		return -EFAULT;
+	if (kstrtou16(buffer, 10, &debugfs_mgt->ring_index))
+		return -EINVAL;
+
+	return size;
+}
+
+SINGLE_FOPS_WO(ring_index_fops, nbl_ring_index_open, nbl_ring_index_write);
+
+static int nbl_ring_dump(struct seq_file *m, void *v)
+{
+	struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private;
+	struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt);
+	bool is_tx = debugfs_mgt->ring_index % 2;
+	u16 ring_index = debugfs_mgt->ring_index / 2;
+
+	seq_printf(m, "Dump %s_ring_%d :\n", is_tx ? "tx" : "rx", ring_index);
+	disp_ops->dump_ring(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m, is_tx, ring_index);
+
+	return 0;
+}
+
+static int nbl_debugfs_ring_dump(struct inode *inode, struct file *file)
+{
+	return single_open(file, nbl_ring_dump, inode->i_private);
+}
+
+SINGLE_FOPS_RO(ring_fops, nbl_debugfs_ring_dump);
+
+static void nbl_serv_debugfs_setup_netops(struct nbl_debugfs_mgt *debugfs_mgt)
+{
+	debugfs_create_file("txrx_ring_index", 0644, debugfs_mgt->nbl_debugfs_root,
+			    debugfs_mgt, &ring_index_fops);
+	debugfs_create_file("txrx_ring", 0444, debugfs_mgt->nbl_debugfs_root,
+			    debugfs_mgt, &ring_fops);
+}
+
+static int nbl_ring_stats_dump(struct seq_file *m, void *v)
+{
+	struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private;
+	struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt);
+	struct nbl_queue_err_stats queue_err_stats = {0};
+	bool is_tx = debugfs_mgt->ring_index % 2;
+	u16 ring_index = debugfs_mgt->ring_index / 2;
+	int ret;
+
+	seq_printf(m, "Dump %s_ring_%d_stats\n", is_tx ?
"tx" : "rx", ring_index); + disp_ops->dump_ring_stats(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), m, is_tx, ring_index); + if (is_tx) { + ret = disp_ops->get_queue_err_stats(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + ring_index, + &queue_err_stats, true); + if (!ret) + seq_printf(m, "dvn_pkt_drop_cnt: %d\n", queue_err_stats.dvn_pkt_drop_cnt); + } else { + ret = disp_ops->get_queue_err_stats(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + ring_index, + &queue_err_stats, false); + if (!ret) + seq_printf(m, "uvn_pkt_drop_cnt: %d\n", queue_err_stats.uvn_stat_pkt_drop); + } + + return 0; +} + +static int nbl_debugfs_ring_stats_dump(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_ring_stats_dump, inode->i_private); +} + +SINGLE_FOPS_RO(ring_stats_fops, nbl_debugfs_ring_stats_dump); + +static void nbl_serv_debugfs_setup_pfops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + debugfs_create_file("txrx_ring_stats", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &ring_stats_fops); +} + +static void nbl_serv_debugfs_setup_ctrlops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + + if (chan_ops->check_queue_exist(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), + NBL_CHAN_TYPE_ADMINQ)) { + debugfs_create_file("adminq_txq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &adminq_txq_fops); + debugfs_create_file("adminq_rxq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &adminq_rxq_fops); + } + + if (disp_ops->get_product_flex_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + NBL_DUMP_FLOW_CAP)) + debugfs_create_file("flow_info", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &flow_info_fops); + + if (disp_ops->get_product_flex_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), + NBL_DUMP_FD_CAP)) + debugfs_create_file("fd_info", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &fd_info_fops); +} + +static int nbl_pmd_debug_dump(struct seq_file *m, void *v) +{ + struct nbl_debugfs_mgt *debugfs_mgt = (struct nbl_debugfs_mgt *)m->private; + + seq_printf(m, "pmd_debug = %s\n", debugfs_mgt->pmd_debug ? 
"on" : "off"); + + return 0; +} + +static int nbl_pmd_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, nbl_pmd_debug_dump, inode->i_private); +} + +static ssize_t nbl_pmd_debug_write(struct file *file, const char __user *buf, + size_t count, loff_t *offp) +{ + struct nbl_debugfs_mgt *debugfs_mgt = file_inode(file)->i_private; + struct nbl_dispatch_ops *disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt); + char buffer[4] = {0}; + size_t size = min(count, sizeof(buffer)); + + if (copy_from_user(buffer, buf, size)) + return -EFAULT; + if (kstrtobool(buffer, &debugfs_mgt->pmd_debug)) + return -EFAULT; + + disp_ops->set_pmd_debug(NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt), debugfs_mgt->pmd_debug); + return size; +} + +COMPLETE_FOPS_RW(pmd_debug_fops, nbl_pmd_debug_open, nbl_pmd_debug_write); + +static void nbl_serv_debugfs_setup_pmdops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + debugfs_create_file("pmd_debug", 0644, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &pmd_debug_fops); +} + +static void nbl_serv_debugfs_setup_commonops(struct nbl_debugfs_mgt *debugfs_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt); + + if (!chan_ops->check_queue_exist(NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + debugfs_create_file("mbx_txq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &mbx_txq_fops); + debugfs_create_file("mbx_rxq", 0444, debugfs_mgt->nbl_debugfs_root, + debugfs_mgt, &mbx_rxq_fops); +} + +void nbl_debugfs_func_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_debugfs_mgt **debugfs_mgt = + (struct nbl_debugfs_mgt **)&NBL_ADAPTER_TO_DEBUGFS_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NULL; + struct nbl_common_info *common; + struct device *dev; + const char *name; + + common = NBL_ADAPTER_TO_COMMON(adapter); + dev = NBL_ADAPTER_TO_DEV(adapter); + + *debugfs_mgt = devm_kzalloc(dev, sizeof(struct nbl_debugfs_mgt), GFP_KERNEL); + if (!*debugfs_mgt) + return; + + NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(*debugfs_mgt) = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + NBL_DEBUGFS_MGT_TO_COMMON(*debugfs_mgt) = common; + disp_ops = NBL_DEBUGFS_MGT_TO_DISP_OPS((*debugfs_mgt)); + + name = pci_name(NBL_COMMON_TO_PDEV(common)); + (*debugfs_mgt)->nbl_debugfs_root = debugfs_create_dir(name, nbl_get_debugfs_root()); + if (!(*debugfs_mgt)->nbl_debugfs_root) { + nbl_err(common, NBL_DEBUG_DEBUGFS, "nbl init debugfs failed\n"); + return; + } + + nbl_serv_debugfs_setup_commonops(*debugfs_mgt); + + if (param->caps.has_ctrl) + nbl_serv_debugfs_setup_ctrlops(*debugfs_mgt); + + if (disp_ops->get_product_fix_cap(NBL_DEBUGFS_MGT_TO_DISP_PRIV((*debugfs_mgt)), + NBL_PMD_DEBUG)) + nbl_serv_debugfs_setup_pmdops(*debugfs_mgt); + + if (param->caps.has_net) { + nbl_serv_debugfs_setup_netops(*debugfs_mgt); + if (!param->caps.is_vf) + nbl_serv_debugfs_setup_pfops(*debugfs_mgt); + } +} + +void nbl_debugfs_func_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_debugfs_mgt **debugfs_mgt = + (struct nbl_debugfs_mgt **)&NBL_ADAPTER_TO_DEBUGFS_MGT(adapter); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + + debugfs_remove_recursive((*debugfs_mgt)->nbl_debugfs_root); + (*debugfs_mgt)->nbl_debugfs_root = NULL; + + devm_kfree(dev, *debugfs_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..4fbab252ff2e7918873dd04b873d17987bb96106 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_debugfs.h @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_DEBUGFS_H_ +#define _NBL_DEBUGFS_H_ + +#include "nbl_core.h" + +#define NBL_DEBUGFS_MGT_TO_COMMON(debugfs_mgt) ((debugfs_mgt)->common) +#define NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->disp_ops_tbl) +#define NBL_DEBUGFS_MGT_TO_DISP_OPS(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt)->ops) +#define NBL_DEBUGFS_MGT_TO_DISP_PRIV(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_DISP_OPS_TBL(debugfs_mgt)->priv) +#define NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(debugfs_mgt) ((debugfs_mgt)->chan_ops_tbl) +#define NBL_DEBUGFS_MGT_TO_CHAN_OPS(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(debugfs_mgt)->ops) +#define NBL_DEBUGFS_MGT_TO_CHAN_PRIV(debugfs_mgt) \ + (NBL_DEBUGFS_MGT_TO_CHAN_OPS_TBL(debugfs_mgt)->priv) + +struct nbl_debugfs_mgt { + struct dentry *nbl_debugfs_root; + struct nbl_dispatch_ops_tbl *disp_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_common_info *common; + /* Ring fops related info */ + u16 ring_index; + u16 ring_num; + bool pmd_debug; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..954ae7e8f07b49f3e07ea261efae9cf66505247c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.c @@ -0,0 +1,5078 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan
+ */
+
+#include <linux/module.h>
+#include "nbl_dev.h"
+#include "nbl_lag.h"
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "netif debug level (0=none,...,16=all), adapter debug_mask (<-1)");
+
+static int net_msix_mask_en = 1;
+module_param(net_msix_mask_en, int, 0);
+MODULE_PARM_DESC(net_msix_mask_en, "net msix interrupt mask enable");
+
+int performance_mode = 3;
+module_param(performance_mode, int, 0);
+MODULE_PARM_DESC(performance_mode, "performance_mode");
+
+int restore_eth = 1;
+module_param(restore_eth, int, 0);
+MODULE_PARM_DESC(restore_eth, "restore_eth");
+
+int loongarch_low_version;
+module_param(loongarch_low_version, int, 0);
+MODULE_PARM_DESC(loongarch_low_version, "use fewer interrupts on low-version LoongArch");
+
+static struct nbl_dev_board_id_table board_id_table;
+
+struct nbl_dev_ops dev_ops;
+
+static int nbl_dev_clean_mailbox_schedule(struct nbl_dev_mgt *dev_mgt);
+static void nbl_dev_clean_adminq_schedule(struct nbl_task_info *task_info);
+static void nbl_dev_remove_rep_res(struct nbl_dev_mgt *dev_mgt);
+static void nbl_dev_handle_fatal_err(struct nbl_dev_mgt *dev_mgt);
+
+/* ---------- Basic functions ---------- */
+static int nbl_dev_get_port_attributes(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	return serv_ops->get_port_attributes(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+}
+
+static int nbl_dev_enable_port(struct nbl_dev_mgt *dev_mgt, bool enable)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	return serv_ops->enable_port(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), enable);
+}
+
+static void nbl_dev_init_port(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	if (restore_eth)
+		serv_ops->init_port(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+}
+
+static int nbl_dev_alloc_board_id(struct nbl_dev_board_id_table *index_table, u32 board_key)
+{
+	int i = 0;
+
+	/* reuse an entry that already holds this board key */
+	for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) {
+		if (index_table->entry[i].valid &&
+		    index_table->entry[i].board_key == board_key) {
+			index_table->entry[i].refcount++;
+			return i;
+		}
+	}
+
+	for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) {
+		if (!index_table->entry[i].valid) {
+			index_table->entry[i].board_key = board_key;
+			index_table->entry[i].refcount++;
+			index_table->entry[i].valid = true;
+			return i;
+		}
+	}
+
+	return -ENOSPC;
+}
+
+static void nbl_dev_free_board_id(struct nbl_dev_board_id_table *index_table, u32 board_key)
+{
+	int i = 0;
+
+	for (i = 0; i < NBL_DEV_BOARD_ID_MAX; i++) {
+		if (index_table->entry[i].board_key == board_key && index_table->entry[i].valid) {
+			index_table->entry[i].refcount--;
+			break;
+		}
+	}
+
+	if (i != NBL_DEV_BOARD_ID_MAX && !index_table->entry[i].refcount)
+		memset(&index_table->entry[i], 0, sizeof(index_table->entry[i]));
+}
+
+static void nbl_dev_set_netdev_priv(struct net_device *netdev, struct nbl_dev_vsi *vsi)
+{
+	struct nbl_netdev_priv *net_priv = netdev_priv(netdev);
+
+	net_priv->tx_queue_num = vsi->queue_num;
+	net_priv->rx_queue_num = vsi->queue_num;
+	net_priv->queue_size = vsi->queue_size;
+	net_priv->rep = NULL;
+	net_priv->netdev = netdev;
+	net_priv->normal_vsi = vsi->vsi_id;
+	net_priv->other_vsi = vsi->vsi_id;
+	net_priv->async_other_vsi = vsi->vsi_id;
+	net_priv->async_pending_vsi = vsi->vsi_id;
+}
+
+/* ---------- Interrupt config ---------- */
+static irqreturn_t nbl_dev_clean_mailbox(int __always_unused irq, void *data)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)data;
+
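+	/*
+	 * The hard IRQ only schedules the mailbox work; the actual ring
+	 * processing runs later in workqueue context.
+	 */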
+	nbl_dev_clean_mailbox_schedule(dev_mgt);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nbl_dev_clean_adminq(int __always_unused irq, void *data)
+{
+	struct nbl_task_info *task_info = (struct nbl_task_info *)data;
+
+	nbl_dev_clean_adminq_schedule(task_info);
+
+	return IRQ_HANDLED;
+}
+
+static __maybe_unused void nbl_dev_notify_ipsec_hard_expire(void *priv, u16 src_id, u16 msg_id,
+							    void *data, u32 data_len)
+{
+#ifdef CONFIG_TLS_DEVICE
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv;
+	struct net *net = dev_net(NBL_DEV_MGT_TO_NET_DEV(dev_mgt)->netdev);
+	struct nbl_sa_search_key *param;
+	struct xfrm_state *x;
+
+	param = (struct nbl_sa_search_key *)data;
+	x = xfrm_state_lookup(net, param->mark, &param->daddr, param->spi,
+			      IPPROTO_ESP, param->family);
+	if (x) {
+		x->km.state = XFRM_STATE_EXPIRED;
+		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
+		xfrm_state_put(x);
+	}
+#endif
+}
+
+static void nbl_dev_handle_ipsec_event(struct work_struct *work)
+{
+#ifdef CONFIG_TLS_DEVICE
+	struct nbl_task_info *task_info = container_of(work, struct nbl_task_info,
+						       ipsec_task);
+	struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt;
+	struct nbl_service_ops *serv_ops;
+
+	serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	serv_ops->handle_ipsec_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+#endif
+}
+
+static void nbl_dev_clean_ipsec_status(struct nbl_dev_mgt *dev_mgt)
+{
+#ifdef CONFIG_TLS_DEVICE
+	struct nbl_service_ops *serv_ops;
+	struct nbl_dev_ctrl *ctrl_dev;
+	struct nbl_task_info *task_info;
+
+	serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
+	task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev);
+
+	if (serv_ops->check_ipsec_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)))
+		nbl_common_queue_work(&task_info->ipsec_task, true, false);
+#endif
+}
+
+static void nbl_dev_handle_abnormal_event(struct work_struct *work)
+{
+	struct nbl_task_info *task_info = container_of(work, struct nbl_task_info,
+						       clean_abnormal_irq_task);
+	struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt;
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	serv_ops->process_abnormal_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+}
+
+static void nbl_dev_clean_abnormal_status(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
+	struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev);
+
+	nbl_common_queue_work(&task_info->clean_abnormal_irq_task, true, false);
+}
+
+static irqreturn_t nbl_dev_clean_abnormal_event(int __always_unused irq, void *data)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)data;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	nbl_dev_grc_process_abnormal_event(rdma_dev);
+
+	if (serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					   NBL_SECURITY_ACCEL_CAP))
+		nbl_dev_clean_ipsec_status(dev_mgt);
+
+	nbl_dev_clean_abnormal_status(dev_mgt);
+
+	return IRQ_HANDLED;
+}
+
+static void nbl_dev_register_common_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_irq_num irq_num = {0};
+
+	serv_ops->get_common_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &irq_num);
+	msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num = irq_num.mbx_irq_num;
+}
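+
+/*
+ * All consumers (mailbox, adminq, abnormal, net queues, virtio) share one
+ * MSI-X table; each nbl_dev_register_*_irq() helper only records how many
+ * vectors its type needs, and nbl_dev_configure_msix_map() later turns the
+ * per-type counts into base_vector_id offsets.
+ */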
+static void nbl_dev_register_net_irq(struct nbl_dev_mgt *dev_mgt, u16 queue_num)
+{
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+
+	msix_info->serv_info[NBL_MSIX_NET_TYPE].num = queue_num;
+	msix_info->serv_info[NBL_MSIX_NET_TYPE].hw_self_mask_en = net_msix_mask_en;
+}
+
+static void nbl_dev_register_virtio_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+
+	msix_info->serv_info[NBL_MSIX_VIRTIO_TYPE].num = 1;
+}
+
+static void nbl_dev_register_factory_ctrl_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_irq_num common_irq_num = {0};
+	struct nbl_ctrl_irq_num ctrl_irq_num = {0};
+
+	/* Register the mailbox irq; draco needs this. */
+	serv_ops->get_common_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &common_irq_num);
+	msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num = common_irq_num.mbx_irq_num;
+
+	serv_ops->get_ctrl_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &ctrl_irq_num);
+	msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num = ctrl_irq_num.adminq_irq_num;
+}
+
+static void nbl_dev_register_ctrl_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_ctrl_irq_num irq_num = {0};
+
+	serv_ops->get_ctrl_irq_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &irq_num);
+
+	msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num = irq_num.abnormal_irq_num;
+	msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num = irq_num.adminq_irq_num;
+}
+
+static int nbl_dev_request_net_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	struct nbl_msix_info_param param = {0};
+	int msix_num = msix_info->serv_info[NBL_MSIX_NET_TYPE].num;
+	int ret = 0;
+
+	param.msix_entries = kcalloc(msix_num, sizeof(*param.msix_entries), GFP_KERNEL);
+	if (!param.msix_entries)
+		return -ENOMEM;
+
+	param.msix_num = msix_num;
+	memcpy(param.msix_entries, msix_info->msix_entries +
+	       msix_info->serv_info[NBL_MSIX_NET_TYPE].base_vector_id,
+	       sizeof(param.msix_entries[0]) * msix_num);
+
+	ret = serv_ops->request_net_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &param);
+
+	kfree(param.msix_entries);
+	return ret;
+}
+
+static void nbl_dev_free_net_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	struct nbl_msix_info_param param = {0};
+	int msix_num = msix_info->serv_info[NBL_MSIX_NET_TYPE].num;
+
+	param.msix_entries = kcalloc(msix_num, sizeof(*param.msix_entries), GFP_KERNEL);
+	if (!param.msix_entries)
+		return;
+
+	param.msix_num = msix_num;
+	memcpy(param.msix_entries, msix_info->msix_entries +
+	       msix_info->serv_info[NBL_MSIX_NET_TYPE].base_vector_id,
+	       sizeof(param.msix_entries[0]) * msix_num);
+
+	serv_ops->free_net_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &param);
+
+	kfree(param.msix_entries);
+}
+
+static int nbl_dev_request_mailbox_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	u16 local_vector_id;
+	u32 irq_num;
+	int err;
+
+	if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num)
+		return 0;
+
+	local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id;
+	irq_num = msix_info->msix_entries[local_vector_id].vector;
+
+	snprintf(dev_common->mailbox_name, sizeof(dev_common->mailbox_name) - 1, "%s-%s",
+		 dev_name(dev), "mailbox");
+	err = devm_request_irq(dev, irq_num, nbl_dev_clean_mailbox,
+			       0, dev_common->mailbox_name, dev_mgt);
+	if (err) {
+		dev_err(dev, "Request mailbox irq handler failed err: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void nbl_dev_free_mailbox_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	u16 local_vector_id;
+	u32 irq_num;
+
+	if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num)
+		return;
+
+	local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id;
+	irq_num = msix_info->msix_entries[local_vector_id].vector;
+
+	devm_free_irq(dev, irq_num, dev_mgt);
+}
+
+static int nbl_dev_enable_mailbox_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	u16 local_vector_id;
+
+	if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num)
+		return 0;
+
+	local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id;
+	chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY,
+				  NBL_CHAN_TYPE_MAILBOX, true);
+
+	return serv_ops->enable_mailbox_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					    local_vector_id, true);
+}
+
+static int nbl_dev_disable_mailbox_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	u16 local_vector_id;
+
+	if (!msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].num)
+		return 0;
+
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  NBL_TASK_CLEAN_MAILBOX_CAP))
+		nbl_common_flush_task(&dev_common->clean_mbx_task);
+
+	local_vector_id = msix_info->serv_info[NBL_MSIX_MAILBOX_TYPE].base_vector_id;
+	chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY,
+				  NBL_CHAN_TYPE_MAILBOX, false);
+
+	return serv_ops->enable_mailbox_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					    local_vector_id, false);
+}
+
+static int nbl_dev_request_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_task_info *task_info)
+{
+	struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info =
NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + int err; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + err = devm_request_irq(dev, irq_num, nbl_dev_clean_adminq, + 0, "adminq_irq", task_info); + if (err) { + dev_err(dev, "Request adminq irq handler failed err: %d\n", err); + return err; + } + + return 0; +} + +static void nbl_dev_free_adminq_irq(struct nbl_dev_mgt *dev_mgt, struct nbl_task_info *task_info) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + devm_free_irq(dev, irq_num, task_info); +} + +static int nbl_dev_enable_adminq_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_TYPE_ADMINQ, true); + + return serv_ops->enable_adminq_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, true); +} + +static int nbl_dev_disable_adminq_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + + if (!msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ADMINDQ_TYPE].base_vector_id; + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_TYPE_ADMINQ, false); + + return serv_ops->enable_adminq_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + local_vector_id, false); +} + +static int nbl_dev_request_abnormal_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + u16 local_vector_id; + u32 irq_num; + int err; + + if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num) + return 0; + + local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id; + irq_num = msix_info->msix_entries[local_vector_id].vector; + + err = devm_request_irq(dev, irq_num, nbl_dev_clean_abnormal_event, + 0, "abnormal_irq", dev_mgt); + if (err) { + dev_err(dev, "Request abnormal_irq irq handler failed err: %d\n", err); + return err; + } + + return 0; +} + +static void nbl_dev_free_abnormal_irq(struct nbl_dev_mgt *dev_mgt) +{ + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct 
nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	u16 local_vector_id;
+	u32 irq_num;
+
+	if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num)
+		return;
+
+	local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id;
+	irq_num = msix_info->msix_entries[local_vector_id].vector;
+
+	devm_free_irq(dev, irq_num, dev_mgt);
+}
+
+static int nbl_dev_enable_abnormal_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	u16 local_vector_id;
+	int err = 0;
+
+	if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num)
+		return 0;
+
+	local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id;
+	err = serv_ops->enable_abnormal_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					    local_vector_id, true);
+
+	return err;
+}
+
+static int nbl_dev_disable_abnormal_irq(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	u16 local_vector_id;
+	int err = 0;
+
+	if (!msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].num)
+		return 0;
+
+	local_vector_id = msix_info->serv_info[NBL_MSIX_ABNORMAL_TYPE].base_vector_id;
+	err = serv_ops->enable_abnormal_irq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					    local_vector_id, false);
+
+	return err;
+}
+
+static int nbl_dev_configure_msix_map(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	int err = 0;
+	int i;
+	u16 msix_not_net_num = 0;
+
+	for (i = NBL_MSIX_NET_TYPE; i < NBL_MSIX_TYPE_MAX; i++)
+		msix_info->serv_info[i].base_vector_id = msix_info->serv_info[i - 1].base_vector_id +
+							 msix_info->serv_info[i - 1].num;
+
+	for (i = NBL_MSIX_MAILBOX_TYPE; i < NBL_MSIX_TYPE_MAX; i++) {
+		if (i == NBL_MSIX_NET_TYPE)
+			continue;
+
+		msix_not_net_num += msix_info->serv_info[i].num;
+	}
+
+	err = serv_ops->configure_msix_map(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					   msix_info->serv_info[NBL_MSIX_NET_TYPE].num,
+					   msix_not_net_num,
+					   msix_info->serv_info[NBL_MSIX_NET_TYPE].hw_self_mask_en);
+
+	return err;
+}
+
+static int nbl_dev_destroy_msix_map(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	int err = 0;
+
+	err = serv_ops->destroy_msix_map(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	return err;
+}
+
+static int nbl_dev_alloc_msix_entries(struct nbl_dev_mgt *dev_mgt, u16 num_entries)
+{
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	u16 i;
+
+	/* element size, not pointer size: sizeof(*...), not sizeof(...) */
+	msix_info->msix_entries = devm_kcalloc(NBL_DEV_MGT_TO_DEV(dev_mgt), num_entries,
+					       sizeof(*msix_info->msix_entries),
+					       GFP_KERNEL);
+	if (!msix_info->msix_entries)
+		return -ENOMEM;
+
+	for (i = 0; i < num_entries; i++)
+		msix_info->msix_entries[i].entry =
+			serv_ops->get_msix_entry_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), i);
+
+	dev_info(NBL_DEV_MGT_TO_DEV(dev_mgt), "alloc msix entry: %u-%u.\n",
msix_info->msix_entries[0].entry, msix_info->msix_entries[num_entries - 1].entry); + + return 0; +} + +static void nbl_dev_free_msix_entries(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), msix_info->msix_entries); + msix_info->msix_entries = NULL; +} + +static int nbl_dev_alloc_msix_intr(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int needed = 0; + int err; + int i; + + for (i = 0; i < NBL_MSIX_TYPE_MAX; i++) + needed += msix_info->serv_info[i].num; + + err = nbl_dev_alloc_msix_entries(dev_mgt, (u16)needed); + if (err) { + pr_err("Allocate msix entries failed\n"); + return err; + } + + err = pci_enable_msix_range(NBL_COMMON_TO_PDEV(common), msix_info->msix_entries, + needed, needed); + if (err < 0) { + pr_err("pci_enable_msix_range failed, err = %d.\n", err); + goto enable_msix_failed; + } + + return needed; + +enable_msix_failed: + nbl_dev_free_msix_entries(dev_mgt); + return err; +} + +static void nbl_dev_free_msix_intr(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + pci_disable_msix(NBL_COMMON_TO_PDEV(common)); + nbl_dev_free_msix_entries(dev_mgt); +} + +static int nbl_dev_init_interrupt_scheme(struct nbl_dev_mgt *dev_mgt) +{ + int err = 0; + + err = nbl_dev_alloc_msix_intr(dev_mgt); + if (err < 0) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "Failed to enable MSI-X vectors\n"); + return err; + } + + return 0; +} + +static void nbl_dev_clear_interrupt_scheme(struct nbl_dev_mgt *dev_mgt) +{ + nbl_dev_free_msix_intr(dev_mgt); +} + +/* ---------- Channel config ---------- */ +static int nbl_dev_setup_chan_qinfo(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + int ret = 0; + + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + return 0; + + ret = chan_ops->cfg_chan_qinfo_map_table(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + chan_type); + if (ret) + dev_err(dev, "setup chan:%d, qinfo map table failed\n", chan_type); + + return ret; +} + +static int nbl_dev_setup_chan_queue(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + int ret = 0; + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + ret = chan_ops->setup_queue(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type); + + return ret; +} + +static int nbl_dev_remove_chan_queue(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + int ret = 0; + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type)) + ret = chan_ops->teardown_queue(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type); + + return ret; +} + +static bool nbl_dev_should_chan_keepalive(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + bool ret = true; + + ret = serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_KEEP_ALIVE); + + return ret; +} + +static int nbl_dev_setup_chan_keepalive(struct nbl_dev_mgt *dev_mgt, u8 chan_type) +{ + struct nbl_service_ops 
*serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	u16 dest_func_id;
+
+	if (!nbl_dev_should_chan_keepalive(dev_mgt))
+		return 0;
+
+	if (chan_type != NBL_CHAN_TYPE_MAILBOX)
+		return -EOPNOTSUPP;
+
+	dest_func_id = serv_ops->get_function_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						 NBL_COMMON_TO_VSI_ID(common));
+
+	if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type))
+		return chan_ops->setup_keepalive(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt),
+						 dest_func_id, chan_type);
+
+	return -ENOENT;
+}
+
+static void nbl_dev_remove_chan_keepalive(struct nbl_dev_mgt *dev_mgt, u8 chan_type)
+{
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+
+	if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type))
+		chan_ops->remove_keepalive(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type);
+}
+
+static void nbl_dev_register_chan_task(struct nbl_dev_mgt *dev_mgt,
+				       u8 chan_type, struct work_struct *task)
+{
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+
+	if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type))
+		chan_ops->register_chan_task(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), chan_type, task);
+}
+
+/* ---------- Tasks config ---------- */
+static void nbl_dev_clean_mailbox_task(struct work_struct *work)
+{
+	struct nbl_dev_common *common_dev = container_of(work, struct nbl_dev_common,
+							 clean_mbx_task);
+	struct nbl_dev_mgt *dev_mgt = common_dev->dev_mgt;
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+
+	chan_ops->clean_queue_subtask(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_MAILBOX);
+}
+
+static int nbl_dev_clean_mailbox_schedule(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	bool is_ctrl = !!(NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt));
+
+	nbl_common_queue_work(&common_dev->clean_mbx_task, is_ctrl, true);
+
+	return 0;
+}
+
+static void nbl_dev_prepare_eswitch_reset(struct nbl_dev_mgt *dev_mgt)
+{
+	nbl_dev_remove_rep_res(dev_mgt);
+}
+
+static void nbl_dev_prepare_reset_task(struct work_struct *work)
+{
+	int ret;
+	enum nbl_core_reset_event event = NBL_CORE_FATAL_ERR_EVENT;
+	struct nbl_reset_task_info *task_info = container_of(work, struct nbl_reset_task_info,
+							     task);
+	struct nbl_dev_common *common_dev = container_of(task_info, struct nbl_dev_common,
+							 reset_task);
+	struct nbl_dev_mgt *dev_mgt = common_dev->dev_mgt;
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_chan_send_info chan_send;
+
+	nbl_event_notify(NBL_EVENT_RESET_EVENT, &event,
+			 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	serv_ops->netdev_stop(dev_mgt->net_dev->netdev);
+	nbl_dev_prepare_eswitch_reset(dev_mgt);
+	netif_device_detach(dev_mgt->net_dev->netdev); /* fence off ethtool access during reset */
+	nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX);
+
+	NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_ACK_RESET_EVENT, NULL,
+		      0, NULL, 0, 0);
+	/* notify the ctrl dev that the reset event has been processed */
+	ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send);
+	if (ret)
+		dev_warn(NBL_COMMON_TO_DEV(common), "failed to ack reset event, ret %d\n", ret);
+	chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL,
+				  NBL_CHAN_TYPE_MAILBOX, true);
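+	/*
+	 * The mailbox channel is flagged NBL_CHAN_ABNORMAL from this point
+	 * on, so no new messages should be issued while the reset is being
+	 * handled.
+	 */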
+
+	/* sleep briefly so that any in-flight send_msg can finish */
+	usleep_range(10, 20);
+
+	/*
+	 * A ctrl dev may shut down PHY register read/write only after it has
+	 * notified the EMP to shut the device down, so only non-ctrl devs
+	 * enter the fatal state here.
+	 */
+	if (!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt))
+		serv_ops->set_hw_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_HW_FATAL_ERR);
+}
+
+static void nbl_dev_clean_adminq_task(struct work_struct *work)
+{
+	struct nbl_task_info *task_info = container_of(work, struct nbl_task_info,
+						       clean_adminq_task);
+	struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt;
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+
+	chan_ops->clean_queue_subtask(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_TYPE_ADMINQ);
+}
+
+static void nbl_dev_clean_adminq_schedule(struct nbl_task_info *task_info)
+{
+	nbl_common_queue_work(&task_info->clean_adminq_task, true, false);
+}
+
+static void nbl_dev_fw_heartbeat_task(struct work_struct *work)
+{
+	struct nbl_task_info *task_info = container_of(work, struct nbl_task_info,
+						       fw_hb_task);
+	struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt;
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	if (task_info->fw_resetting)
+		return;
+
+	if (!serv_ops->check_fw_heartbeat(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) {
+		dev_notice(NBL_COMMON_TO_DEV(common), "FW reset detected\n");
+		task_info->fw_resetting = true;
+		chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL,
+					  NBL_CHAN_TYPE_ADMINQ, true);
+		nbl_common_queue_delayed_work(&task_info->fw_reset_task, MSEC_PER_SEC, true, false);
+	}
+}
+
+static void nbl_dev_fw_reset_task(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct nbl_task_info *task_info = container_of(delayed_work, struct nbl_task_info,
+						       fw_reset_task);
+	struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt;
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	if (serv_ops->check_fw_reset(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt))) {
+		dev_notice(NBL_COMMON_TO_DEV(common), "FW recovered\n");
+		chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL,
+					  NBL_CHAN_TYPE_ADMINQ, false);
+		nbl_dev_disable_adminq_irq(dev_mgt);
+		nbl_dev_free_adminq_irq(dev_mgt, task_info);
+
+		/* rebuild the adminq channel from scratch against the new FW */
+		nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ);
+		nbl_dev_setup_chan_qinfo(dev_mgt, NBL_CHAN_TYPE_ADMINQ);
+		nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ);
+		nbl_dev_request_adminq_irq(dev_mgt, task_info);
+		nbl_dev_enable_adminq_irq(dev_mgt);
+
+		if (NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) {
+			nbl_dev_get_port_attributes(dev_mgt);
+			nbl_dev_enable_port(dev_mgt, true);
+		}
+		task_info->fw_resetting = false;
+		return;
+	}
+
+	nbl_common_queue_delayed_work(delayed_work, MSEC_PER_SEC, true, false);
+}
+
+static void nbl_dev_offload_network_task(struct work_struct *work)
+{
+	struct nbl_task_info *task_info = container_of(work, struct nbl_task_info,
+						       offload_network_task);
+	struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt;
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	serv_ops->check_offload_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+}
+
+static void nbl_dev_adapt_desc_gother_task(struct work_struct *work)
+{
+	struct nbl_task_info *task_info = container_of(work, struct nbl_task_info,
+
adapt_desc_gother_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->adapt_desc_gother(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static void nbl_dev_recovery_abnormal_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + recovery_abnormal_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->recovery_abnormal(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static void nbl_dev_ctrl_reset_task(struct work_struct *work) +{ + struct nbl_task_info *task_info = container_of(work, struct nbl_task_info, + reset_task); + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + + nbl_dev_handle_fatal_err(dev_mgt); +} + +static void nbl_dev_ctrl_task_schedule(struct nbl_task_info *task_info) +{ + struct nbl_dev_mgt *dev_mgt = task_info->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_OFFLOAD_NETWORK_CAP)) + nbl_common_queue_work(&task_info->offload_network_task, true, true); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_HB_CAP)) + nbl_common_queue_work(&task_info->fw_hb_task, true, false); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_ADAPT_DESC_GOTHER)) + nbl_common_queue_work(&task_info->adapt_desc_gother_task, true, false); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_RECOVERY_ABNORMAL_STATUS)) + nbl_common_queue_work(&task_info->recovery_abnormal_task, true, false); +} + +static void nbl_dev_ctrl_task_timer(struct timer_list *t) +{ + struct nbl_task_info *task_info = from_timer(task_info, t, serv_timer); + + mod_timer(&task_info->serv_timer, round_jiffies(task_info->serv_timer_period + jiffies)); + nbl_dev_ctrl_task_schedule(task_info); +} + +static void nbl_dev_ctrl_task_start(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + if (!task_info->timer_setup) + return; + + mod_timer(&task_info->serv_timer, round_jiffies(jiffies + task_info->serv_timer_period)); +} + +static void nbl_dev_ctrl_task_stop(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + if (!task_info->timer_setup) + return; + + del_timer_sync(&task_info->serv_timer); + task_info->timer_setup = false; +} + +static void nbl_dev_chan_notify_flr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + u16 vfid; + u16 vsi_id; + + vfid = *(u16 *)data; + serv_ops->process_flr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vfid); + + vsi_id = serv_ops->covert_vfid_to_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vfid); + nbl_dev_grc_process_flr_event(rdma_dev, vsi_id); +} + +static void nbl_dev_ctrl_register_flr_chan_msg(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if 
(!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_PROCESS_FLR_CAP)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_ADMINQ_FLR_NOTIFY, + nbl_dev_chan_notify_flr_resp, dev_mgt); +} + +static void nbl_dev_factory_task_start(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_factory *factory_dev = NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(factory_dev); + + if (!task_info->timer_setup) + return; + + mod_timer(&task_info->serv_timer, round_jiffies(jiffies + task_info->serv_timer_period)); +} + +static void nbl_dev_factory_task_stop(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_factory *factory_dev = NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(factory_dev); + + if (!task_info->timer_setup) + return; + + del_timer_sync(&task_info->serv_timer); +} + +static int nbl_dev_setup_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + task_info->dev_mgt = dev_mgt; + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_OFFLOAD_NETWORK_CAP)) { + nbl_common_alloc_task(&task_info->offload_network_task, + nbl_dev_offload_network_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_HB_CAP)) { + nbl_common_alloc_task(&task_info->fw_hb_task, nbl_dev_fw_heartbeat_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_RESET_CAP)) { + nbl_common_alloc_delayed_task(&task_info->fw_reset_task, nbl_dev_fw_reset_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_ADMINDQ_CAP)) { + nbl_common_alloc_task(&task_info->clean_adminq_task, nbl_dev_clean_adminq_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_IPSEC_AGE_CAP)) { + nbl_common_alloc_task(&task_info->ipsec_task, nbl_dev_handle_ipsec_event); + task_info->timer_setup = true; + } + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_ADAPT_DESC_GOTHER)) { + nbl_common_alloc_task(&task_info->adapt_desc_gother_task, + nbl_dev_adapt_desc_gother_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_RECOVERY_ABNORMAL_STATUS)) { + nbl_common_alloc_task(&task_info->recovery_abnormal_task, + nbl_dev_recovery_abnormal_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_RESET_CTRL_CAP)) + nbl_common_alloc_task(&task_info->reset_task, &nbl_dev_ctrl_reset_task); + + nbl_common_alloc_task(&task_info->clean_abnormal_irq_task, + nbl_dev_handle_abnormal_event); + + if (task_info->timer_setup) { + timer_setup(&task_info->serv_timer, nbl_dev_ctrl_task_timer, 0); + task_info->serv_timer_period = HZ; + } + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_ADMINQ, &task_info->clean_adminq_task); + + return 0; +} + +static void nbl_dev_remove_ctrl_dev_task(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = 
NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev); + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_ADMINQ, NULL); + + nbl_common_release_task(&task_info->clean_abnormal_irq_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_OFFLOAD_NETWORK_CAP)) + nbl_common_release_task(&task_info->offload_network_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_RESET_CAP)) + nbl_common_release_delayed_task(&task_info->fw_reset_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_HB_CAP)) + nbl_common_release_task(&task_info->fw_hb_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_ADMINDQ_CAP)) + nbl_common_release_task(&task_info->clean_adminq_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_IPSEC_AGE_CAP)) + nbl_common_release_task(&task_info->ipsec_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_ADAPT_DESC_GOTHER)) + nbl_common_release_task(&task_info->adapt_desc_gother_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_RECOVERY_ABNORMAL_STATUS)) + nbl_common_release_task(&task_info->recovery_abnormal_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_RESET_CTRL_CAP)) + nbl_common_release_task(&task_info->reset_task); +} + +static int nbl_dev_setup_factory_dev_task(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_factory *factory_dev = NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_FACTORY_TO_TASK_INFO(factory_dev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + task_info->dev_mgt = dev_mgt; + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_alloc_task(&common_dev->clean_mbx_task, nbl_dev_clean_mailbox_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_HB_CAP)) { + nbl_common_alloc_task(&task_info->fw_hb_task, nbl_dev_fw_heartbeat_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_RESET_CAP)) { + nbl_common_alloc_delayed_task(&task_info->fw_reset_task, nbl_dev_fw_reset_task); + task_info->timer_setup = true; + } + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_ADMINDQ_CAP)) { + nbl_common_alloc_task(&task_info->clean_adminq_task, nbl_dev_clean_adminq_task); + task_info->timer_setup = true; + } + + if (task_info->timer_setup) { + timer_setup(&task_info->serv_timer, nbl_dev_ctrl_task_timer, 0); + task_info->serv_timer_period = HZ; + } + + return 0; +} + +static void nbl_dev_remove_factory_dev_task(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_factory *factory_dev = NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_task_info *task_info = NBL_DEV_FACTORY_TO_TASK_INFO(factory_dev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_FW_RESET_CAP)) + nbl_common_release_delayed_task(&task_info->fw_reset_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + 
NBL_TASK_FW_HB_CAP)) + nbl_common_release_task(&task_info->fw_hb_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_ADMINDQ_CAP)) + nbl_common_release_task(&task_info->clean_adminq_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_release_task(&common_dev->clean_mbx_task); +} + +static int nbl_dev_update_template_config(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->update_template_config(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +/* ---------- Dev init process ---------- */ +static int nbl_dev_setup_common_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_common *common_dev; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int board_id; + + common_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter), + sizeof(struct nbl_dev_common), GFP_KERNEL); + if (!common_dev) + return -ENOMEM; + common_dev->dev_mgt = dev_mgt; + + if (nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_MAILBOX)) + goto setup_chan_fail; + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_alloc_task(&common_dev->clean_mbx_task, nbl_dev_clean_mailbox_task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_RESET_CAP)) + nbl_common_alloc_task(&common_dev->reset_task.task, &nbl_dev_prepare_reset_task); + + if (param->caps.is_nic) { + board_id = serv_ops->get_board_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (board_id < 0) + goto get_board_id_fail; + NBL_COMMON_TO_BOARD_ID(common) = board_id; + } + + NBL_COMMON_TO_VSI_ID(common) = serv_ops->get_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), 0, + NBL_VSI_DATA); + + serv_ops->get_eth_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_VSI_ID(common), + &NBL_COMMON_TO_ETH_MODE(common), &NBL_COMMON_TO_ETH_ID(common), + &NBL_COMMON_TO_LOGIC_ETH_ID(common)); + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_MAILBOX, &common_dev->clean_mbx_task); + + NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = common_dev; + + nbl_dev_register_common_irq(dev_mgt); + + return 0; + +get_board_id_fail: + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_RESET_CAP)) + nbl_common_release_task(&common_dev->reset_task.task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_release_task(&common_dev->clean_mbx_task); +setup_chan_fail: + devm_kfree(NBL_ADAPTER_TO_DEV(adapter), common_dev); + return -EFAULT; +} + +static void nbl_dev_remove_common_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + + if (!common_dev) + return; + + nbl_dev_register_chan_task(dev_mgt, NBL_CHAN_TYPE_MAILBOX, NULL); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_RESET_CAP)) + nbl_common_release_task(&common_dev->reset_task.task); + + if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_TASK_CLEAN_MAILBOX_CAP)) + nbl_common_release_task(&common_dev->clean_mbx_task); + + 
nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_MAILBOX);
+
+	devm_kfree(NBL_ADAPTER_TO_DEV(adapter), common_dev);
+	NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = NULL;
+}
+
+static int nbl_dev_setup_factory_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_dev_factory *factory_dev;
+	struct nbl_dev_common *common_dev;
+	struct device *dev = NBL_ADAPTER_TO_DEV(adapter);
+	int i, ret = 0;
+	u32 board_key;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	if (param->caps.is_nic)
+		NBL_COMMON_TO_BOARD_ID(common) =
+			nbl_dev_alloc_board_id(&board_id_table, board_key);
+
+	common_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter),
+				  sizeof(struct nbl_dev_common), GFP_KERNEL);
+	if (!common_dev) {
+		ret = -ENOMEM;
+		goto alloc_common_dev_fail;
+	}
+	common_dev->dev_mgt = dev_mgt;
+	NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = common_dev;
+
+	factory_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_factory), GFP_KERNEL);
+	if (!factory_dev) {
+		ret = -ENOMEM;
+		goto alloc_factory_dev_fail;
+	}
+	NBL_DEV_FACTORY_TO_TASK_INFO(factory_dev)->adapter = adapter;
+	NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt) = factory_dev;
+
+	nbl_dev_register_factory_ctrl_irq(dev_mgt);
+
+	ret = serv_ops->init_chip_factory(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (ret) {
+		dev_err(dev, "factory dev chip_init failed\n");
+		goto chip_init_fail;
+	}
+
+	/* Register both mailbox and adminq; leonis needs the adminq and draco needs the mailbox */
+	for (i = 0; i < NBL_CHAN_TYPE_MAX; i++) {
+		ret = nbl_dev_setup_chan_qinfo(dev_mgt, i);
+		if (ret) {
+			dev_err(dev, "factory dev setup chan qinfo failed\n");
+			goto setup_chan_queue_fail;
+		}
+
+		ret = nbl_dev_setup_chan_queue(dev_mgt, i);
+		if (ret) {
+			dev_err(dev, "factory dev setup chan queue failed\n");
+			goto setup_chan_queue_fail;
+		}
+	}
+
+	ret = nbl_dev_setup_factory_dev_task(dev_mgt);
+	if (ret) {
+		dev_err(dev, "factory dev task failed\n");
+		goto setup_ctrl_dev_task_fail;
+	}
+
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP))
+		serv_ops->setup_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table());
+
+	return 0;
+
+setup_ctrl_dev_task_fail:
+setup_chan_queue_fail:
+	while (--i >= 0)
+		nbl_dev_remove_chan_queue(dev_mgt, i);
+	serv_ops->destroy_chip_factory(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+chip_init_fail:
+	devm_kfree(dev, factory_dev);
+	NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt) = NULL;
+alloc_factory_dev_fail:
+	devm_kfree(dev, common_dev);
+	NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) = NULL;
+alloc_common_dev_fail:
+	nbl_dev_free_board_id(&board_id_table, board_key);
+	return ret;
+}
+
+static bool nbl_dev_remove_factory_ctrl_dev(struct nbl_adapter *adapter)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_factory **factory_dev = &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt);
+	struct nbl_dev_common **common_dev = &NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	u32 board_key;
+	int i;
+
+	if (!*factory_dev)
+		return false;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP))
+		serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				    nbl_get_st_table());
+
+	nbl_dev_remove_factory_dev_task(dev_mgt);
+
+	for (i = 0; i < NBL_CHAN_TYPE_MAX; i++)
+		nbl_dev_remove_chan_queue(dev_mgt, i);
+
+	serv_ops->destroy_chip_factory(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+
+	devm_kfree(NBL_ADAPTER_TO_DEV(adapter), *factory_dev);
+	*factory_dev = NULL;
+	devm_kfree(NBL_ADAPTER_TO_DEV(adapter), *common_dev);
+	*common_dev = NULL;
+
+	nbl_dev_free_board_id(&board_id_table, board_key);
+
+	return true;
+}
+
+static int nbl_dev_setup_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_ctrl *ctrl_dev;
+	struct device *dev = NBL_ADAPTER_TO_DEV(adapter);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	int i, ret = 0;
+	u32 board_key;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	if (param->caps.is_nic)
+		NBL_COMMON_TO_BOARD_ID(common) =
+			nbl_dev_alloc_board_id(&board_id_table, board_key);
+
+	dev_info(dev, "board_key 0x%x alloc board id 0x%x\n",
+		 board_key, NBL_COMMON_TO_BOARD_ID(common));
+
+	ctrl_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_ctrl), GFP_KERNEL);
+	if (!ctrl_dev) {
+		ret = -ENOMEM;
+		goto alloc_fail;
+	}
+	NBL_DEV_CTRL_TO_TASK_INFO(ctrl_dev)->adapter = adapter;
+	NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = ctrl_dev;
+
+	nbl_dev_register_ctrl_irq(dev_mgt);
+
+	ret = serv_ops->init_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (ret) {
+		dev_err(dev, "ctrl dev chip_init failed\n");
+		goto chip_init_fail;
+	}
+
+	ret = serv_ops->start_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (ret) {
+		dev_err(dev, "ctrl dev start_mgt_flow failed\n");
+		goto mgt_flow_fail;
+	}
+
+	for (i = 0; i < NBL_CHAN_TYPE_MAX; i++) {
+		ret = nbl_dev_setup_chan_qinfo(dev_mgt, i);
+		if (ret) {
+			dev_err(dev, "ctrl dev setup chan qinfo failed\n");
+			goto setup_chan_q_fail;
+		}
+	}
+
+	ret = nbl_dev_setup_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ);
+	if (ret) {
+		dev_err(dev, "ctrl dev setup chan queue failed\n");
+		goto setup_chan_q_fail;
+	}
+
+	ret = nbl_dev_setup_ctrl_dev_task(dev_mgt);
+	if (ret) {
+		dev_err(dev, "ctrl dev task failed\n");
+		goto setup_ctrl_dev_task_fail;
+	}
+
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP)) {
+		ret = serv_ops->setup_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table());
+		if (ret) {
+			dev_err(dev, "ctrl dev st failed\n");
+			goto setup_ctrl_dev_st_fail;
+		}
+	}
+
+	nbl_dev_update_template_config(dev_mgt);
+
+	serv_ops->cfg_eth_bond_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true);
+	serv_ops->cfg_fd_update_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), true);
+
+	return 0;
+
+setup_ctrl_dev_st_fail:
+	nbl_dev_remove_ctrl_dev_task(dev_mgt);
+setup_ctrl_dev_task_fail:
+	nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ);
+setup_chan_q_fail:
+	serv_ops->stop_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+mgt_flow_fail:
+	serv_ops->destroy_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+chip_init_fail:
+	devm_kfree(dev, ctrl_dev);
+	NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) = NULL;
+alloc_fail:
+	nbl_dev_free_board_id(&board_id_table, board_key);
+	return ret;
+}
+
+static void nbl_dev_remove_ctrl_dev(struct nbl_adapter *adapter)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_ctrl **ctrl_dev = &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	u32 board_key;
+
+	if (!*ctrl_dev)
+		return;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	serv_ops->cfg_fd_update_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false);
+	serv_ops->cfg_eth_bond_event(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), false);
+
+	if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_RESTOOL_CAP))
+		serv_ops->remove_st(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), nbl_get_st_table());
+
+	nbl_dev_remove_chan_queue(dev_mgt, NBL_CHAN_TYPE_ADMINQ);
+	nbl_dev_remove_ctrl_dev_task(dev_mgt);
+
+	serv_ops->stop_mgt_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	serv_ops->destroy_chip(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+
+	devm_kfree(NBL_ADAPTER_TO_DEV(adapter), *ctrl_dev);
+	*ctrl_dev = NULL;
+
+	/* If the device is not a nic, no board id was allocated and this free is a no-op, so no check is needed */
+	nbl_dev_free_board_id(&board_id_table, board_key);
+}
+
+static int nbl_dev_netdev_open(struct net_device *netdev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	return serv_ops->netdev_open(netdev);
+}
+
+static int nbl_dev_rep_netdev_open(struct net_device *netdev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	return serv_ops->rep_netdev_open(netdev);
+}
+
+static int nbl_dev_netdev_stop(struct net_device *netdev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	return serv_ops->netdev_stop(netdev);
+}
+
+static int nbl_dev_rep_netdev_stop(struct net_device *netdev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	return serv_ops->rep_netdev_stop(netdev);
+}
+
+static netdev_tx_t nbl_dev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_resource_pt_ops *pt_ops = NBL_DEV_MGT_TO_RES_PT_OPS(dev_mgt);
+
+	return pt_ops->start_xmit(skb, netdev);
+}
+
+static netdev_tx_t nbl_dev_rep_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	return serv_ops->rep_start_xmit(skb, netdev);
+}
+
+static void nbl_dev_netdev_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	serv_ops->get_stats64(netdev, stats);
+}
+
+static void
+nbl_dev_netdev_rep_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	serv_ops->rep_get_stats64(netdev, stats);
+}
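+
+/*
+ * Every ndo_ and ethtool_ops hook below is a thin wrapper following the
+ * same delegation pattern: resolve the adapter from the netdev private
+ * data, look up the service-layer ops table, and forward the call
+ * unchanged, e.g.:
+ *
+ *	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+ *	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ *
+ *	return NBL_DEV_MGT_TO_SERV_OPS(dev_mgt)->the_matching_op(...);
+ *
+ * No device logic lives at this layer; product-specific behaviour stays
+ * behind struct nbl_service_ops.
+ */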
+ +static void nbl_dev_netdev_set_rx_mode(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->set_rx_mode(netdev); +} + +static void nbl_dev_netdev_rep_set_rx_mode(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->rep_set_rx_mode(netdev); +} + +static void nbl_dev_netdev_change_rx_flags(struct net_device *netdev, int flag) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->change_rx_flags(netdev, flag); +} + +static int nbl_dev_netdev_set_mac(struct net_device *netdev, void *p) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_mac(netdev, p); +} + +static int nbl_dev_netdev_rep_set_mac(struct net_device *netdev, void *p) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rep_set_mac(netdev, p); +} + +static int nbl_dev_netdev_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rx_add_vid(netdev, proto, vid); +} + +static int nbl_dev_netdev_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rx_kill_vid(netdev, proto, vid); +} + +static int nbl_dev_netdev_rep_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rep_rx_add_vid(netdev, proto, vid); +} + +static int nbl_dev_netdev_rep_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rep_rx_kill_vid(netdev, proto, vid); +} + +static int +nbl_dev_netdev_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_features(netdev, features); +} + +static netdev_features_t +nbl_dev_netdev_features_check(struct sk_buff *skb, struct net_device *netdev, + netdev_features_t features) +{ + struct nbl_adapter *adapter = 
NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->features_check(skb, netdev, features); +} + +static int +nbl_dev_netdev_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_vf_spoofchk(netdev, vf_id, ena); +} + +static void nbl_dev_netdev_tx_timeout(struct net_device *netdev, u32 txqueue) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->tx_timeout(netdev, txqueue); +} + +static int nbl_dev_netdev_bridge_setlink(struct net_device *netdev, struct nlmsghdr *nlh, + u16 flags, struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->bridge_setlink(netdev, nlh, flags, extack); +} + +static int nbl_dev_netdev_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *netdev, u32 filter_mask, int nlflags) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->bridge_getlink(skb, pid, seq, netdev, filter_mask, nlflags); +} + +static int nbl_dev_netdev_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_vf_link_state(netdev, vf_id, link_state); +} + +static int nbl_dev_netdev_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_vf_mac(netdev, vf_id, mac); +} + +static int +nbl_dev_netdev_set_vf_rate(struct net_device *netdev, int vf_id, int min_rate, int max_rate) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_vf_rate(netdev, vf_id, min_rate, max_rate); +} + +static int +nbl_dev_netdev_set_vf_vlan(struct net_device *netdev, int vf_id, u16 vlan, u8 pri, __be16 proto) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_vf_vlan(netdev, vf_id, vlan, pri, proto); +} + +static int +nbl_dev_netdev_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + 
return serv_ops->setup_tc(netdev, type, type_data); +} + +static int +nbl_dev_netdev_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rep_setup_tc(netdev, type, type_data); +} + +static int +nbl_dev_netdev_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_vf_config(netdev, vf_id, ivi); +} + +static u16 +nbl_dev_netdev_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->select_queue(netdev, skb, sb_dev); +} + +static int nbl_dev_netdev_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->change_mtu(netdev, new_mtu); +} + +static int nbl_dev_ndo_get_phys_port_name(struct net_device *netdev, char *name, size_t len) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_phys_port_name(netdev, name, len); +} + +static int +nbl_dev_ndo_get_port_parent_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_port_parent_id(netdev, ppid); +} + +static int nbl_dev_rep_get_phys_port_name(struct net_device *netdev, char *name, size_t len) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rep_get_phys_port_name(netdev, name, len); +} + +static int +nbl_dev_rep_get_port_parent_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->rep_get_port_parent_id(netdev, ppid); +} + +static const struct net_device_ops netdev_ops_leonis_rep = { + .ndo_open = nbl_dev_rep_netdev_open, + .ndo_stop = nbl_dev_rep_netdev_stop, + .ndo_start_xmit = nbl_dev_rep_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = nbl_dev_netdev_rep_get_stats64, + .ndo_set_rx_mode = nbl_dev_netdev_rep_set_rx_mode, + .ndo_set_mac_address = nbl_dev_netdev_rep_set_mac, + .ndo_vlan_rx_add_vid = nbl_dev_netdev_rep_rx_add_vid, + .ndo_vlan_rx_kill_vid = nbl_dev_netdev_rep_rx_kill_vid, + .ndo_features_check = nbl_dev_netdev_features_check, + .ndo_setup_tc = 
nbl_dev_netdev_rep_setup_tc, + .ndo_change_mtu = nbl_dev_netdev_change_mtu, + .ndo_get_phys_port_name = nbl_dev_rep_get_phys_port_name, + .ndo_get_port_parent_id = nbl_dev_rep_get_port_parent_id, +}; + +static const struct net_device_ops netdev_ops_leonis_pf = { + .ndo_open = nbl_dev_netdev_open, + .ndo_stop = nbl_dev_netdev_stop, + .ndo_start_xmit = nbl_dev_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = nbl_dev_netdev_get_stats64, + .ndo_set_rx_mode = nbl_dev_netdev_set_rx_mode, + .ndo_change_rx_flags = nbl_dev_netdev_change_rx_flags, + .ndo_set_mac_address = nbl_dev_netdev_set_mac, + .ndo_vlan_rx_add_vid = nbl_dev_netdev_rx_add_vid, + .ndo_vlan_rx_kill_vid = nbl_dev_netdev_rx_kill_vid, + .ndo_set_features = nbl_dev_netdev_set_features, + .ndo_features_check = nbl_dev_netdev_features_check, + .ndo_set_vf_spoofchk = nbl_dev_netdev_set_vf_spoofchk, + .ndo_tx_timeout = nbl_dev_netdev_tx_timeout, + .ndo_bridge_getlink = nbl_dev_netdev_bridge_getlink, + .ndo_bridge_setlink = nbl_dev_netdev_bridge_setlink, + .ndo_set_vf_link_state = nbl_dev_netdev_set_vf_link_state, + .ndo_set_vf_mac = nbl_dev_netdev_set_vf_mac, + .ndo_set_vf_rate = nbl_dev_netdev_set_vf_rate, + .ndo_get_vf_config = nbl_dev_netdev_get_vf_config, + .ndo_select_queue = nbl_dev_netdev_select_queue, + .ndo_set_vf_vlan = nbl_dev_netdev_set_vf_vlan, + .ndo_setup_tc = nbl_dev_netdev_setup_tc, + .ndo_change_mtu = nbl_dev_netdev_change_mtu, + .ndo_get_phys_port_name = nbl_dev_ndo_get_phys_port_name, + .ndo_get_port_parent_id = nbl_dev_ndo_get_port_parent_id, +}; + +static const struct net_device_ops netdev_ops_leonis_vf = { + .ndo_open = nbl_dev_netdev_open, + .ndo_stop = nbl_dev_netdev_stop, + .ndo_start_xmit = nbl_dev_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats64 = nbl_dev_netdev_get_stats64, + .ndo_set_rx_mode = nbl_dev_netdev_set_rx_mode, + .ndo_set_mac_address = nbl_dev_netdev_set_mac, + .ndo_vlan_rx_add_vid = nbl_dev_netdev_rx_add_vid, + .ndo_vlan_rx_kill_vid = nbl_dev_netdev_rx_kill_vid, + .ndo_features_check = nbl_dev_netdev_features_check, + .ndo_tx_timeout = nbl_dev_netdev_tx_timeout, + .ndo_select_queue = nbl_dev_netdev_select_queue, + .ndo_setup_tc = nbl_dev_netdev_setup_tc, + .ndo_change_mtu = nbl_dev_netdev_change_mtu, + .ndo_get_phys_port_name = nbl_dev_ndo_get_phys_port_name, +}; + +static void nbl_dev_setup_netops_leonis(void *priv, struct net_device *netdev, + struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + bool is_vf = param->caps.is_vf; + bool is_rep = param->is_rep; + + if (is_rep) { + netdev->netdev_ops = &netdev_ops_leonis_rep; + serv_ops->set_netdev_ops(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &netdev_ops_leonis_rep, false); + } else if (is_vf) { + netdev->netdev_ops = &netdev_ops_leonis_vf; + } else { + netdev->netdev_ops = &netdev_ops_leonis_pf; + serv_ops->set_netdev_ops(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &netdev_ops_leonis_pf, true); + /* set rep_ops first, cuz pf may turn on switch_dev without sriov enabled */ + serv_ops->set_netdev_ops(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &netdev_ops_leonis_rep, false); + } +} + +static void nbl_dev_remove_netops(struct net_device *netdev) +{ + netdev->netdev_ops = NULL; +} + +static void nbl_dev_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct 
nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_drvinfo(netdev, drvinfo); +} + +static int nbl_dev_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_module_eeprom(netdev, eeprom, data); +} + +static int nbl_dev_get_module_info(struct net_device *netdev, struct ethtool_modinfo *info) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_module_info(netdev, info); +} + +static int nbl_dev_get_eeprom_len(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_eeprom_length(netdev); +} + +static int nbl_dev_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_eeprom(netdev, eeprom, bytes); +} + +static void nbl_dev_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_strings(netdev, stringset, data); +} + +static int nbl_dev_get_sset_count(struct net_device *netdev, int sset) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_sset_count(netdev, sset); +} + +static void nbl_dev_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_ethtool_stats(netdev, stats, data); +} + +static void nbl_dev_get_rep_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_strings(netdev, stringset, data); +} + +static int nbl_dev_get_rep_sset_count(struct net_device *netdev, int sset) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rep_sset_count(netdev, sset); +} + +static void nbl_dev_get_rep_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = 
NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_ethtool_stats(netdev, stats, data); +} + +static void nbl_dev_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_channels(netdev, channels); +} + +static int nbl_dev_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_channels(netdev, channels); +} + +static u32 nbl_dev_get_link(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_link(netdev); +} + +static int +nbl_dev_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_ksettings(netdev, cmd); +} + +static int +nbl_dev_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_ksettings(netdev, cmd); +} + +static void nbl_dev_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_ringparam(netdev, ringparam, k_ringparam, extack); +} + +static int nbl_dev_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_ringparam(netdev, ringparam, k_ringparam, extack); +} + +static int nbl_dev_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_coalesce(netdev, ec, kernel_ec, extack); +} + +static int nbl_dev_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = 
NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_coalesce(netdev, ec, kernel_ec, extack); +} + +static int nbl_dev_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxnfc(netdev, cmd, rule_locs); +} + +static int nbl_dev_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_rxnfc(netdev, cmd); +} + +static u32 nbl_dev_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh_indir_size(netdev); +} + +static u32 nbl_dev_get_rxfh_key_size(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh_key_size(netdev); +} + +static int nbl_dev_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_rxfh(netdev, indir, key, hfunc); +} + +static u32 nbl_dev_get_msglevel(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_msglevel(netdev); +} + +static void nbl_dev_set_msglevel(struct net_device *netdev, u32 msglevel) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->set_msglevel(netdev, msglevel); +} + +static int nbl_dev_get_regs_len(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_regs_len(netdev); +} + +static void nbl_dev_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_ethtool_dump_regs(netdev, regs, p); +} + +static int nbl_dev_get_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_per_queue_coalesce(netdev, q_num, ec); +} + +static int nbl_dev_set_per_queue_coalesce(struct net_device 
*netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_per_queue_coalesce(netdev, q_num, ec); +} + +static void nbl_dev_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->self_test(netdev, eth_test, data); +} + +static u32 nbl_dev_get_priv_flags(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_priv_flags(netdev); +} + +static int nbl_dev_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_priv_flags(netdev, priv_flags); +} + +static int nbl_dev_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_pause_param(netdev, param); +} + +static void nbl_dev_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_pause_param(netdev, param); +} + +static int nbl_dev_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_fec_param(netdev, fec); +} + +static int nbl_dev_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_fec_param(netdev, fec); +} + +static int nbl_dev_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *ts_info) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->get_ts_info(netdev, ts_info); +} + +static int nbl_dev_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->set_phys_id(netdev, state); +} + +static int nbl_dev_nway_reset(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); 
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->nway_reset(netdev); +} + +static const struct ethtool_ops ethtool_ops_leonis_rep = { + .get_drvinfo = nbl_dev_get_drvinfo, + .get_strings = nbl_dev_get_rep_strings, + .get_sset_count = nbl_dev_get_rep_sset_count, + .get_ethtool_stats = nbl_dev_get_rep_ethtool_stats, + .get_link = nbl_dev_get_link, + .get_link_ksettings = nbl_dev_get_link_ksettings, + .get_ringparam = nbl_dev_get_ringparam, +}; + +static const struct ethtool_ops ethtool_ops_leonis_pf = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES | + ETHTOOL_COALESCE_TX_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_drvinfo = nbl_dev_get_drvinfo, + .get_module_eeprom = nbl_dev_get_module_eeprom, + .get_module_info = nbl_dev_get_module_info, + .get_eeprom_len = nbl_dev_get_eeprom_len, + .get_eeprom = nbl_dev_get_eeprom, + .get_strings = nbl_dev_get_strings, + .get_sset_count = nbl_dev_get_sset_count, + .get_ethtool_stats = nbl_dev_get_ethtool_stats, + .get_channels = nbl_dev_get_channels, + .set_channels = nbl_dev_set_channels, + .get_link = nbl_dev_get_link, + .get_link_ksettings = nbl_dev_get_link_ksettings, + .set_link_ksettings = nbl_dev_set_link_ksettings, + .get_ringparam = nbl_dev_get_ringparam, + .set_ringparam = nbl_dev_set_ringparam, + .get_coalesce = nbl_dev_get_coalesce, + .set_coalesce = nbl_dev_set_coalesce, + .set_rxnfc = nbl_dev_set_rxnfc, + .get_rxnfc = nbl_dev_get_rxnfc, + .get_rxfh_indir_size = nbl_dev_get_rxfh_indir_size, + .get_rxfh_key_size = nbl_dev_get_rxfh_key_size, + .get_rxfh = nbl_dev_get_rxfh, + .get_msglevel = nbl_dev_get_msglevel, + .set_msglevel = nbl_dev_set_msglevel, + .get_regs_len = nbl_dev_get_regs_len, + .get_regs = nbl_dev_get_regs, + .get_per_queue_coalesce = nbl_dev_get_per_queue_coalesce, + .set_per_queue_coalesce = nbl_dev_set_per_queue_coalesce, + .self_test = nbl_dev_self_test, + .get_priv_flags = nbl_dev_get_priv_flags, + .set_priv_flags = nbl_dev_set_priv_flags, + .set_pauseparam = nbl_dev_set_pauseparam, + .get_pauseparam = nbl_dev_get_pauseparam, + .set_fecparam = nbl_dev_set_fecparam, + .get_fecparam = nbl_dev_get_fecparam, + .get_ts_info = nbl_dev_get_ts_info, + .set_phys_id = nbl_dev_set_phys_id, + .nway_reset = nbl_dev_nway_reset, +}; + +static const struct ethtool_ops ethtool_ops_leonis_vf = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES | + ETHTOOL_COALESCE_TX_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_drvinfo = nbl_dev_get_drvinfo, + .get_strings = nbl_dev_get_strings, + .get_sset_count = nbl_dev_get_sset_count, + .get_ethtool_stats = nbl_dev_get_ethtool_stats, + .get_channels = nbl_dev_get_channels, + .set_channels = nbl_dev_set_channels, + .get_link = nbl_dev_get_link, + .get_link_ksettings = nbl_dev_get_link_ksettings, + .get_ringparam = nbl_dev_get_ringparam, + .set_ringparam = nbl_dev_set_ringparam, + .get_coalesce = nbl_dev_get_coalesce, + .set_coalesce = nbl_dev_set_coalesce, + .get_rxnfc = nbl_dev_get_rxnfc, + .get_rxfh_indir_size = nbl_dev_get_rxfh_indir_size, + .get_rxfh_key_size = nbl_dev_get_rxfh_key_size, + .get_rxfh = nbl_dev_get_rxfh, + .get_msglevel = nbl_dev_get_msglevel, + .set_msglevel = nbl_dev_set_msglevel, + .get_regs_len = nbl_dev_get_regs_len, + .get_regs = nbl_dev_get_regs, + .get_per_queue_coalesce = nbl_dev_get_per_queue_coalesce, + 
.set_per_queue_coalesce = nbl_dev_set_per_queue_coalesce, + .get_ts_info = nbl_dev_get_ts_info, +}; + +static void nbl_dev_setup_ethtool_ops_leonis(void *priv, struct net_device *netdev, + struct nbl_init_param *param) +{ + bool is_vf = param->caps.is_vf; + bool is_rep = param->is_rep; + + /* A rep netdev must keep the rep ops; do not fall through to the VF/PF tables */ + if (is_rep) + netdev->ethtool_ops = &ethtool_ops_leonis_rep; + else if (is_vf) + netdev->ethtool_ops = &ethtool_ops_leonis_vf; + else + netdev->ethtool_ops = &ethtool_ops_leonis_pf; +} + +static void nbl_dev_remove_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = NULL; +} + +#ifdef CONFIG_TLS_DEVICE +#define NBL_DEV_KTLS_OPS_TBL \ +do { \ + NBL_DEV_KTLS_OPS(tls_dev_add, serv_ops->add_tls_dev); \ + NBL_DEV_KTLS_OPS(tls_dev_del, serv_ops->del_tls_dev); \ + NBL_DEV_KTLS_OPS(tls_dev_resync, serv_ops->resync_tls_dev); \ +} while (0) + +static int nbl_dev_tls_dev_add(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->add_tls_dev(netdev, sk, direction, crypto_info, start_offload_tcp_sn); +} + +static void nbl_dev_tls_dev_del(struct net_device *netdev, struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->del_tls_dev(netdev, tls_ctx, direction); +} + +static int nbl_dev_tls_dev_resync(struct net_device *netdev, struct sock *sk, + u32 tcp_seq, u8 *rec_num, + enum tls_offload_ctx_dir direction) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->resync_tls_dev(netdev, sk, tcp_seq, rec_num, direction); +} + +static const struct tlsdev_ops ktls_ops = { + .tls_dev_add = nbl_dev_tls_dev_add, + .tls_dev_del = nbl_dev_tls_dev_del, + .tls_dev_resync = nbl_dev_tls_dev_resync, +}; + +static void nbl_dev_setup_ktls_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_init_param *param) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (param->is_rep) + return; + + if (!serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_SECURITY_ACCEL_CAP)) + return; + + netdev->hw_features |= NETIF_F_HW_TLS_RX; + netdev->hw_features |= NETIF_F_HW_TLS_TX; + netdev->tlsdev_ops = &ktls_ops; +} + +static void nbl_dev_remove_ktls_ops(struct net_device *netdev) +{ + netdev->hw_features &= ~NETIF_F_HW_TLS_RX; + netdev->hw_features &= ~NETIF_F_HW_TLS_TX; + + netdev->tlsdev_ops = NULL; +} + +#else +static void nbl_dev_setup_ktls_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_init_param *param) +{ +} + +static void nbl_dev_remove_ktls_ops(struct net_device *netdev) {} +#endif + +#ifdef CONFIG_XFRM_OFFLOAD + +static int nbl_dev_xdo_state_add(struct xfrm_state *x, struct netlink_ext_ack *extack) +{ + struct net_device *netdev = x->xso.dev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->add_xdo_dev_state(x, extack); +} + +static void nbl_dev_xdo_state_delete(struct xfrm_state *x) +{ + struct net_device *netdev = x->xso.dev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->delete_xdo_dev_state(x); +} + +static void nbl_dev_xdo_state_free(struct xfrm_state *x) +{ + struct net_device *netdev = x->xso.dev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->free_xdo_dev_state(x); +} + +static bool nbl_dev_xdo_offload_ok(struct sk_buff *skb, struct xfrm_state *x) +{ + struct net_device *netdev = x->xso.dev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->xdo_dev_offload_ok(skb, x); +} + +static void nbl_dev_xdo_dev_state_advance_esn(struct xfrm_state *x) +{ + struct net_device *netdev = x->xso.dev; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->xdo_dev_state_advance_esn(x); +} + +static const struct xfrmdev_ops xfrm_ops = { + .xdo_dev_state_add = nbl_dev_xdo_state_add, + .xdo_dev_state_delete = nbl_dev_xdo_state_delete, + .xdo_dev_state_free = nbl_dev_xdo_state_free, + .xdo_dev_offload_ok = nbl_dev_xdo_offload_ok, + .xdo_dev_state_advance_esn = nbl_dev_xdo_dev_state_advance_esn, +}; + +static void nbl_dev_setup_xfrm_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_init_param *param) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + enum nbl_flex_cap_type cap_type = NBL_SECURITY_ACCEL_CAP; + + if (param->is_rep) + return; + + if (!serv_ops->get_product_flex_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cap_type)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, + nbl_dev_notify_ipsec_hard_expire, dev_mgt); + + netdev->features |= NETIF_F_HW_ESP; + netdev->hw_enc_features |= NETIF_F_HW_ESP; + netdev->features |= NETIF_F_HW_ESP_TX_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM; + + /* gso_partial_features */ + netdev->gso_partial_features |= NETIF_F_GSO_ESP; + netdev->features |= NETIF_F_GSO_ESP; + netdev->hw_features |= NETIF_F_GSO_ESP; + netdev->hw_enc_features |= NETIF_F_GSO_ESP; + + netdev->xfrmdev_ops = &xfrm_ops; +} + +static void nbl_dev_remove_xfrm_ops(struct net_device *netdev) +{ + netdev->features &= ~NETIF_F_HW_ESP; + netdev->hw_enc_features &= ~NETIF_F_HW_ESP; + netdev->features &= ~NETIF_F_HW_ESP_TX_CSUM; + netdev->hw_enc_features &= ~NETIF_F_HW_ESP_TX_CSUM; + + /* gso_partial_features */ + netdev->gso_partial_features &= ~NETIF_F_GSO_ESP; + netdev->features &= ~NETIF_F_GSO_ESP; + netdev->hw_features &= ~NETIF_F_GSO_ESP; + netdev->hw_enc_features &= ~NETIF_F_GSO_ESP; + + netdev->xfrmdev_ops = NULL; +} +#else +static void nbl_dev_setup_xfrm_ops(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_init_param *param) +{ +} + +static void nbl_dev_remove_xfrm_ops(struct net_device *netdev) +{ +} +#endif +
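+/* Program the netdev's current MAC address into the ethernet port identified by this function's eth_id. */ +static void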
nbl_dev_set_eth_mac_addr(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, netdev->dev_addr); + serv_ops->set_eth_mac_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + mac, NBL_COMMON_TO_ETH_ID(common)); +} + +static int nbl_dev_cfg_netdev(struct net_device *netdev, struct nbl_dev_mgt *dev_mgt, + struct nbl_init_param *param, + struct nbl_register_net_result *register_result) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net_ops *net_dev_ops = NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt); + + if (param->pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + if (!param->is_rep) + netdev->watchdog_timeo = 5 * HZ; + + netdev->hw_features |= nbl_features_to_netdev_features(register_result->hw_features); + netdev->features |= nbl_features_to_netdev_features(register_result->features); + netdev->vlan_features |= netdev->features; + + SET_DEV_MIN_MTU(netdev, ETH_MIN_MTU); + SET_DEV_MAX_MTU(netdev, register_result->max_mtu); + netdev->mtu = min_t(u16, register_result->max_mtu, NBL_DEFAULT_MTU); + + if (is_valid_ether_addr(register_result->mac)) + eth_hw_addr_set(netdev, register_result->mac); + else + eth_hw_addr_random(netdev); + + ether_addr_copy(netdev->perm_addr, netdev->dev_addr); + + serv_ops->set_spoof_check_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev->perm_addr); + + netdev->needed_headroom = serv_ops->get_tx_headroom(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + net_dev_ops->setup_netdev_ops(dev_mgt, netdev, param); + net_dev_ops->setup_ethtool_ops(dev_mgt, netdev, param); + nbl_dev_setup_ktls_ops(dev_mgt, netdev, param); + nbl_dev_setup_xfrm_ops(dev_mgt, netdev, param); + + nbl_dev_set_eth_mac_addr(dev_mgt, netdev); + + return 0; +} + +static void nbl_dev_reset_netdev(struct net_device *netdev) +{ + nbl_dev_remove_ktls_ops(netdev); + nbl_dev_remove_xfrm_ops(netdev); + nbl_dev_remove_ethtool_ops(netdev); + nbl_dev_remove_netops(netdev); +} + +static int nbl_dev_register_net(struct nbl_dev_mgt *dev_mgt, + struct nbl_register_net_result *register_result) +{ + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(NBL_DEV_MGT_TO_COMMON(dev_mgt)); +#ifdef CONFIG_PCI_IOV + struct resource *res; +#endif + u16 pf_bdf; + u64 pf_bar_start; + u64 vf_bar_start, vf_bar_size; + u16 total_vfs = 0, offset, stride; + int pos; + u32 val; + struct nbl_register_net_param register_param = {0}; + int ret = 0; + + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &val); + pf_bar_start = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + 4, &val); + pf_bar_start |= ((u64)val << 32); + + register_param.pf_bar_start = pf_bar_start; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pf_bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride); + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vfs); + + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR, &val); + vf_bar_start = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, &val); + vf_bar_start |= ((u64)val << 32); + +#ifdef CONFIG_PCI_IOV + res = &pdev->resource[PCI_IOV_RESOURCES]; + 
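+		/* The PCI core sizes the IOV resource to cover the BARs of all possible VFs, so resource_size() yields the total VF BAR span passed down in register_param. */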
vf_bar_size = resource_size(res); +#else + vf_bar_size = 0; +#endif + if (total_vfs) { + register_param.pf_bdf = pf_bdf; + register_param.vf_bar_start = vf_bar_start; + register_param.vf_bar_size = vf_bar_size; + register_param.total_vfs = total_vfs; + register_param.offset = offset; + register_param.stride = stride; + } + } + + net_dev->total_vfs = total_vfs; + + ret = serv_ops->register_net(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &register_param, register_result); + + if (!register_result->tx_queue_num || !register_result->rx_queue_num) + return -EIO; + + return ret; +} + +static void nbl_dev_unregister_net(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + int ret; + + ret = serv_ops->unregister_net(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) + dev_err(dev, "unregister net failed\n"); +} + +static void nbl_dev_get_rep_feature(struct nbl_adapter *adapter, + struct nbl_register_net_result *register_result) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_feature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), register_result); +} + +static void nbl_dev_get_rep_queue_num(struct nbl_adapter *adapter, + u8 *base_queue_id, + u8 *rep_queue_num) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_queue_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + base_queue_id, rep_queue_num); +} + +/* Returns the first queue index of the allocated range, or -ENOSPC if the static queue pool is exhausted */ +static int nbl_dev_vsi_alloc_queue(struct nbl_dev_net *net_dev, u16 queue_num) +{ + struct nbl_dev_vsi_controller *vsi_ctrl = &net_dev->vsi_ctrl; + u16 queue_offset = 0; + + if (vsi_ctrl->queue_free_offset + queue_num > net_dev->total_queue_num) + return -ENOSPC; + + queue_offset = vsi_ctrl->queue_free_offset; + vsi_ctrl->queue_free_offset += queue_num; + + return queue_offset; +} + +static int nbl_dev_vsi_common_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct nbl_dev_vsi *vsi) +{ + int ret = 0; + int queue_offset; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_vsi_param vsi_param = {0}; + + queue_offset = nbl_dev_vsi_alloc_queue(NBL_DEV_MGT_TO_NET_DEV(dev_mgt), + vsi->queue_num); + if (queue_offset < 0) + return queue_offset; + + vsi->queue_offset = queue_offset; + vsi_param.index = vsi->index; + vsi_param.vsi_id = vsi->vsi_id; + vsi_param.queue_offset = vsi->queue_offset; + vsi_param.queue_num = vsi->queue_num; + + /* Tell serv & res layer the mapping from vsi to queue_id */ + ret = serv_ops->register_vsi_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &vsi_param); + return ret; +} + +static void nbl_dev_vsi_common_remove(struct nbl_dev_mgt *dev_mgt, struct nbl_dev_vsi *vsi) +{ +} + +static int nbl_dev_vsi_common_start(struct nbl_dev_mgt *dev_mgt, struct net_device *netdev, + struct nbl_dev_vsi *vsi) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + int ret; + + vsi->napi_netdev = netdev; + + ret = serv_ops->setup_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "Setup q2vsi failed\n"); + goto set_q2vsi_fail; + } + + ret = serv_ops->setup_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "Setup rss failed\n"); + goto set_rss_fail; + } + + if (vsi->use_independ_irq) { + ret = serv_ops->enable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + if (ret) { + dev_err(dev,
"Enable napis failed\n"); + goto enable_napi_fail; + } + } + + ret = serv_ops->init_tx_rate(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "init tx_rate failed\n"); + goto init_tx_rate_fail; + } + + return 0; + +init_tx_rate_fail: + serv_ops->disable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); +enable_napi_fail: + serv_ops->remove_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_rss_fail: + serv_ops->remove_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_q2vsi_fail: + return ret; +} + +static void nbl_dev_vsi_common_stop(struct nbl_dev_mgt *dev_mgt, struct nbl_dev_vsi *vsi) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (vsi->use_independ_irq) + serv_ops->disable_napis(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + serv_ops->remove_rss(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + serv_ops->remove_q2vsi(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +} + +static int nbl_dev_vsi_data_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + int ret = 0; + + ret = nbl_dev_register_net(dev_mgt, &vsi->register_result); + if (ret) + return ret; + + vsi->queue_num = vsi->register_result.tx_queue_num; + vsi->queue_size = vsi->register_result.queue_size; + + nbl_debug(common, NBL_DEBUG_VSI, "Data vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + + return 0; +} + +static int nbl_dev_vsi_data_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_data_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} + +static int nbl_dev_vsi_data_start(void *dev_priv, struct net_device *netdev, + void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + int ret; + u16 vid; + + vid = vsi->register_result.vlan_tci & VLAN_VID_MASK; + ret = serv_ops->start_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, vsi->vsi_id, vid); + if (ret) { + dev_err(dev, "Set netdev flow table failed\n"); + goto set_flow_fail; + } + + if (!NBL_COMMON_TO_VF_CAP(common)) { + ret = serv_ops->set_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + if (ret) { + dev_err(dev, "Set netdev lldp flow failed\n"); + goto set_lldp_fail; + } + vsi->feature.has_lldp = true; + } + + ret = nbl_dev_vsi_common_start(dev_mgt, netdev, vsi); + if (ret) { + dev_err(dev, "Vsi common start failed\n"); + goto common_start_fail; + } + + return 0; + +common_start_fail: + if (!NBL_COMMON_TO_VF_CAP(common)) + serv_ops->remove_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_lldp_fail: + serv_ops->stop_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_flow_fail: + return ret; +} + +static void nbl_dev_vsi_data_stop(void *dev_priv, void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + 
struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_stop(dev_mgt, vsi); + + if (!NBL_COMMON_TO_VF_CAP(common)) { + serv_ops->remove_lldp_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + vsi->feature.has_lldp = false; + } + + serv_ops->stop_net_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +} + +static int nbl_dev_vsi_data_netdev_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + vsi->netdev = netdev; + return nbl_dev_cfg_netdev(netdev, dev_mgt, param, &vsi->register_result); +} + +static void nbl_dev_vsi_data_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_reset_netdev(vsi->netdev); +} + +static int nbl_dev_vsi_ctrl_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_rep_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &vsi->queue_num, &vsi->queue_size); + + nbl_debug(common, NBL_DEBUG_VSI, "Ctrl vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + return 0; +} + +static int nbl_dev_vsi_ctrl_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_ctrl_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} + +static int nbl_dev_vsi_ctrl_start(void *dev_priv, struct net_device *netdev, + void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int ret = 0; + + ret = nbl_dev_vsi_common_start(dev_mgt, netdev, vsi); + if (ret) + goto start_fail; + + /* For the ctrl vsi, open it right after creation, since there are no ndo_open ops. */ + ret = serv_ops->vsi_open(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, + vsi->index, vsi->queue_num, 1); + if (ret) + goto open_fail; + + return ret; + +open_fail: + nbl_dev_vsi_common_stop(dev_mgt, vsi); +start_fail: + return ret; +} + +static void nbl_dev_vsi_ctrl_stop(void *dev_priv, void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->vsi_stop(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + nbl_dev_vsi_common_stop(dev_mgt, vsi); +} + +static int nbl_dev_vsi_ctrl_netdev_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_ctrl_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ +} + +static int nbl_dev_vsi_user_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->get_user_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &vsi->queue_num, &vsi->queue_size, + NBL_COMMON_TO_VSI_ID(common)); + + nbl_debug(common, NBL_DEBUG_VSI, "User vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + return 0; +} + +static int nbl_dev_vsi_user_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_user_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} + +static int nbl_dev_vsi_user_start(void *dev_priv, struct net_device *netdev, + void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_user_stop(void *dev_priv, void *vsi_data) +{ +} + +static int nbl_dev_vsi_user_netdev_build(struct nbl_dev_mgt *dev_mgt, + struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_user_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + /* nothing to do */ +} + +static int nbl_dev_vsi_xdp_register(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_XDP_CAP)) + return 0; + + serv_ops->get_xdp_queue_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &vsi->queue_num, &vsi->queue_size, + NBL_COMMON_TO_VSI_ID(common)); + + nbl_debug(common, NBL_DEBUG_VSI, "Xdp vsi register, queue_num %d, queue_size %d", + vsi->queue_num, vsi->queue_size); + return 0; +} + +static int nbl_dev_vsi_xdp_setup(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + return nbl_dev_vsi_common_setup(dev_mgt, param, vsi); +} + +static void nbl_dev_vsi_xdp_remove(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + + nbl_dev_vsi_common_remove(dev_mgt, vsi); +} +
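+/* The XDP vsi has no ndo_open of its own; it is started and stopped from the netdev state-change event (see nbl_dev_vsi_handle_netdev_event below). */ +static int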
nbl_dev_vsi_xdp_start(void *dev_priv, struct net_device *netdev, + void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int ret = 0; + + ret = nbl_dev_vsi_common_start(dev_mgt, netdev, vsi); + if (ret) + goto start_fail; + + ret = serv_ops->vsi_open(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, + vsi->index, vsi->queue_num, 1); + if (ret) + goto open_fail; + + return ret; + +open_fail: + nbl_dev_vsi_common_stop(dev_mgt, vsi); +start_fail: + return ret; +} + +static void nbl_dev_vsi_xdp_stop(void *dev_priv, void *vsi_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)dev_priv; + struct nbl_dev_vsi *vsi = (struct nbl_dev_vsi *)vsi_data; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + serv_ops->vsi_stop(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->index); + nbl_dev_vsi_common_stop(dev_mgt, vsi); +} + +static int nbl_dev_vsi_xdp_netdev_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data) +{ + return 0; +} + +static void nbl_dev_vsi_xdp_netdev_destroy(struct nbl_dev_mgt *dev_mgt, void *vsi_data) +{ + /* nothing to do */ +} + +static struct nbl_dev_vsi_tbl vsi_tbl[NBL_VSI_MAX] = { + [NBL_VSI_DATA] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_data_register, + .setup = nbl_dev_vsi_data_setup, + .remove = nbl_dev_vsi_data_remove, + .start = nbl_dev_vsi_data_start, + .stop = nbl_dev_vsi_data_stop, + .netdev_build = nbl_dev_vsi_data_netdev_build, + .netdev_destroy = nbl_dev_vsi_data_netdev_destroy, + }, + .vf_support = true, + .only_nic_support = false, + .in_kernel = true, + .use_independ_irq = true, + .static_queue = true, + }, + [NBL_VSI_CTRL] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_ctrl_register, + .setup = nbl_dev_vsi_ctrl_setup, + .remove = nbl_dev_vsi_ctrl_remove, + .start = nbl_dev_vsi_ctrl_start, + .stop = nbl_dev_vsi_ctrl_stop, + .netdev_build = nbl_dev_vsi_ctrl_netdev_build, + .netdev_destroy = nbl_dev_vsi_ctrl_netdev_destroy, + }, + .vf_support = false, + .only_nic_support = true, + .in_kernel = true, + .use_independ_irq = true, + .static_queue = true, + }, + [NBL_VSI_USER] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_user_register, + .setup = nbl_dev_vsi_user_setup, + .remove = nbl_dev_vsi_user_remove, + .start = nbl_dev_vsi_user_start, + .stop = nbl_dev_vsi_user_stop, + .netdev_build = nbl_dev_vsi_user_netdev_build, + .netdev_destroy = nbl_dev_vsi_user_netdev_destroy, + }, + .vf_support = false, + .only_nic_support = true, + .in_kernel = false, + .use_independ_irq = false, + .static_queue = false, + }, + [NBL_VSI_XDP] = { + .vsi_ops = { + .register_vsi = nbl_dev_vsi_xdp_register, + .setup = nbl_dev_vsi_xdp_setup, + .remove = nbl_dev_vsi_xdp_remove, + .start = nbl_dev_vsi_xdp_start, + .stop = nbl_dev_vsi_xdp_stop, + .netdev_build = nbl_dev_vsi_xdp_netdev_build, + .netdev_destroy = nbl_dev_vsi_xdp_netdev_destroy, + }, + .vf_support = false, + .only_nic_support = true, + .in_kernel = true, + .use_independ_irq = false, + .static_queue = false, + }, +}; + +static int nbl_dev_vsi_build(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) +{ + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_vsi *vsi = NULL; + int i; + + net_dev->vsi_ctrl.queue_num = 0; + net_dev->vsi_ctrl.queue_free_offset = 0; + + /* Build all VSIs and allocate a vsi_id for each of them */ + for (i = 0; i < NBL_VSI_MAX; i++) { + if ((param->caps.is_vf && !vsi_tbl[i].vf_support) || + (!param->caps.is_nic && vsi_tbl[i].only_nic_support)) + continue; + + vsi = devm_kzalloc(NBL_DEV_MGT_TO_DEV(dev_mgt), sizeof(*vsi), GFP_KERNEL); + if (!vsi) + goto malloc_vsi_fail; + + vsi->ops = &vsi_tbl[i].vsi_ops; + vsi->vsi_id = serv_ops->get_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), 0, i); + vsi->index = i; + vsi->in_kernel = vsi_tbl[i].in_kernel; + vsi->use_independ_irq = vsi_tbl[i].use_independ_irq; + vsi->static_queue = vsi_tbl[i].static_queue; + net_dev->vsi_ctrl.vsi_list[i] = vsi; + } + + return 0; + +malloc_vsi_fail: + while (--i >= 0) { + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->vsi_ctrl.vsi_list[i]); + net_dev->vsi_ctrl.vsi_list[i] = NULL; + } + + return -ENOMEM; +} + +static void nbl_dev_vsi_destroy(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + int i; + + for (i = 0; i < NBL_VSI_MAX; i++) + if (net_dev->vsi_ctrl.vsi_list[i]) { + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->vsi_ctrl.vsi_list[i]); + net_dev->vsi_ctrl.vsi_list[i] = NULL; + } +} + +struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt, u8 vsi_index) +{ + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi = NULL; + int i = 0; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = net_dev->vsi_ctrl.vsi_list[i]; + if (vsi && vsi->index == vsi_index) + return vsi; + } + + return NULL; +} + +static int nbl_dev_vsi_handle_switch_event(u16 type, void *event_data, void *callback_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_service_traffic_switch info = {0}; + struct nbl_event_dev_mode_switch_data *data = + (struct nbl_event_dev_mode_switch_data *)event_data; + struct nbl_dev_vsi *data_vsi = NULL, *user_vsi = NULL; + int op = data->op; + + data_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; + user_vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + + info.normal_vsi = data_vsi->vsi_id; + info.sync_other_vsi = data_vsi->vsi_id; + info.async_other_vsi = data_vsi->vsi_id; + info.has_lacp = data_vsi->feature.has_lacp; + info.has_lldp = data_vsi->feature.has_lldp; + + /* Promisc mode requested from user space must be applied on the user vsi */ + if (op == NBL_DEV_KERNEL_TO_USER || op == NBL_DEV_SET_USER_PROMISC_MODE) { + info.normal_vsi = user_vsi->vsi_id; + if (data->promosic) { + info.sync_other_vsi = user_vsi->vsi_id; + info.async_other_vsi = user_vsi->vsi_id; + info.promisc = data->promosic; + } + } + + data->ret = serv_ops->switch_traffic_default_dest(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &info); + + return 0; +} + +static int nbl_dev_vsi_handle_netdev_event(u16 type, void *event_data, void *callback_data) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev = net_dev->netdev; + bool *netdev_state = (bool *)event_data; + struct nbl_dev_vsi *vsi; + int ret; + + vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_XDP); + if (!vsi) + return 0; + + if (*netdev_state) { + ret = vsi->ops->start(dev_mgt, netdev, vsi); + if (ret) + nbl_err(common, NBL_DEBUG_VSI, "xdp-vsi start failed\n"); + } else { + vsi->ops->stop(dev_mgt, vsi); + } + + return 0; +} +
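+/* Per-product table of netdev/ethtool setup hooks, indexed by param->product_type; only the Leonis entry is populated here. */ +static struct nbl_dev_net_ops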
netdev_ops[NBL_PRODUCT_MAX] = { + { + .setup_netdev_ops = nbl_dev_setup_netops_leonis, + .setup_ethtool_ops = nbl_dev_setup_ethtool_ops_leonis, + }, +}; + +static void nbl_dev_setup_net_dev_ops(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param) +{ + NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt) = &netdev_ops[param->product_type]; +} + +static int nbl_dev_setup_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param, + struct nbl_rep_data *rep) +{ + struct nbl_event_callback callback = {0}; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_net **net_dev = &NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_dev_vsi *vsi; + int i, ret = 0; + u16 total_queue_num = 0, kernel_queue_num = 0, user_queue_num = 0; + u16 dynamic_queue_max = 0, irq_queue_num = 0; + + *net_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_net), GFP_KERNEL); + if (!*net_dev) + return -ENOMEM; + + ret = nbl_dev_vsi_build(dev_mgt, param); + if (ret) + goto vsi_build_fail; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; + + if (!vsi) + continue; + + ret = vsi->ops->register_vsi(dev_mgt, param, vsi); + if (ret) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "Vsi %d register failed", vsi->index); + goto vsi_register_fail; + } + + if (vsi->static_queue) { + total_queue_num += vsi->queue_num; + } else { + if (dynamic_queue_max < vsi->queue_num) + dynamic_queue_max = vsi->queue_num; + } + + if (vsi->use_independ_irq) + irq_queue_num += vsi->queue_num; + + if (vsi->in_kernel) + kernel_queue_num += vsi->queue_num; + else + user_queue_num += vsi->queue_num; + } + + /* Only one VSI with dynamic queues can be enabled at a time, so reserving the largest is enough. */ + total_queue_num += dynamic_queue_max; + + /* The total queue count must be set before the VSIs are set up */ + (*net_dev)->total_queue_num = total_queue_num; + (*net_dev)->kernel_queue_num = kernel_queue_num; + (*net_dev)->user_queue_num = user_queue_num; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; + + if (!vsi) + continue; + + if (!vsi->in_kernel) + continue; + + ret = vsi->ops->setup(dev_mgt, param, vsi); + if (ret) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "Vsi %d setup failed", vsi->index); + goto vsi_setup_fail; + } + } + + nbl_dev_register_net_irq(dev_mgt, irq_queue_num); + + nbl_dev_setup_net_dev_ops(dev_mgt, param); + + callback.callback = nbl_dev_vsi_handle_netdev_event; + callback.callback_data = dev_mgt; + nbl_event_register(NBL_EVENT_NETDEV_STATE_CHANGE, &callback, NBL_COMMON_TO_VSI_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + + return 0; + +vsi_setup_fail: +vsi_register_fail: + nbl_dev_vsi_destroy(dev_mgt); +vsi_build_fail: + devm_kfree(dev, *net_dev); + return ret; +} + +static void nbl_dev_remove_net_dev(struct nbl_adapter *adapter) +{ + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net **net_dev = &NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi; + int i = 0; + + if (!*net_dev) + return; + + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi = (*net_dev)->vsi_ctrl.vsi_list[i]; + + if (!vsi) + continue; + + vsi->ops->remove(dev_mgt, vsi); + } + nbl_dev_vsi_destroy(dev_mgt); + + nbl_dev_unregister_net(dev_mgt); + + devm_kfree(dev, *net_dev); + *net_dev = NULL; +} + +static int nbl_dev_setup_virtio_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_virtio *virtio_dev; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + + if (!serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_VIRTIO_CAP)) + return 0; + + virtio_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_virtio), GFP_KERNEL); + if (!virtio_dev) + return -ENOMEM; + NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt) = virtio_dev; + + nbl_dev_register_virtio_irq(dev_mgt); + virtio_dev->device_msix = 0; + + return 0; +} + +static void nbl_dev_remove_virtio_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_virtio *virtio_dev = NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt); + + if (!virtio_dev) + return; + + devm_kfree(NBL_ADAPTER_TO_DEV(adapter), virtio_dev); + NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt) = NULL; +} + +static int nbl_dev_setup_dev_mgt(struct nbl_common_info *common, struct nbl_dev_mgt **dev_mgt) +{ + *dev_mgt = devm_kzalloc(NBL_COMMON_TO_DEV(common), sizeof(struct nbl_dev_mgt), GFP_KERNEL); + if (!*dev_mgt) + return -ENOMEM; + + NBL_DEV_MGT_TO_COMMON(*dev_mgt) = common; + return 0; +} + +static void nbl_dev_remove_dev_mgt(struct nbl_common_info *common, struct nbl_dev_mgt **dev_mgt) +{ + devm_kfree(NBL_COMMON_TO_DEV(common), *dev_mgt); + *dev_mgt = NULL; +} + +static void nbl_dev_remove_ops(struct device *dev, struct nbl_dev_ops_tbl **dev_ops_tbl) +{ + devm_kfree(dev, *dev_ops_tbl); + *dev_ops_tbl = NULL; +} + +static int nbl_dev_setup_ops(struct device *dev, struct nbl_dev_ops_tbl **dev_ops_tbl, + struct nbl_adapter *adapter) +{ + *dev_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_dev_ops_tbl), GFP_KERNEL); + if (!*dev_ops_tbl) + return
-ENOMEM; + + NBL_DEV_OPS_TBL_TO_OPS(*dev_ops_tbl) = &dev_ops; + NBL_DEV_OPS_TBL_TO_PRIV(*dev_ops_tbl) = adapter; + + return 0; +} + +int nbl_dev_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); + struct nbl_service_ops_tbl *serv_ops_tbl = NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + struct nbl_channel_ops_tbl *chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + int ret = 0; + + ret = nbl_dev_setup_dev_mgt(common, dev_mgt); + if (ret) + goto setup_mgt_fail; + + NBL_DEV_MGT_TO_SERV_OPS_TBL(*dev_mgt) = serv_ops_tbl; + NBL_DEV_MGT_TO_CHAN_OPS_TBL(*dev_mgt) = chan_ops_tbl; + + /* If we have factory_dev, no need to go further */ + if (param->caps.has_factory_ctrl) + return nbl_dev_setup_factory_ctrl_dev(adapter, param); + + ret = nbl_dev_setup_common_dev(adapter, param); + if (ret) + goto setup_common_dev_fail; + + if (param->caps.has_ctrl) { + ret = nbl_dev_setup_ctrl_dev(adapter, param); + if (ret) + goto setup_ctrl_dev_fail; + } + + ret = nbl_dev_setup_net_dev(adapter, param, NULL); + if (ret) + goto setup_net_dev_fail; + + ret = nbl_dev_setup_virtio_dev(adapter, param); + if (ret) + goto setup_virtio_dev_fail; + + ret = nbl_dev_setup_rdma_dev(adapter, param); + if (ret) + goto setup_rdma_dev_fail; + + ret = nbl_dev_setup_ops(dev, dev_ops_tbl, adapter); + if (ret) + goto setup_ops_fail; + + return 0; + +setup_ops_fail: + nbl_dev_remove_rdma_dev(adapter); +setup_rdma_dev_fail: + nbl_dev_remove_virtio_dev(adapter); +setup_virtio_dev_fail: + nbl_dev_remove_net_dev(adapter); +setup_net_dev_fail: + nbl_dev_remove_ctrl_dev(adapter); +setup_ctrl_dev_fail: + nbl_dev_remove_common_dev(adapter); +setup_common_dev_fail: + nbl_dev_remove_dev_mgt(common, dev_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_dev_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dev_mgt **dev_mgt = (struct nbl_dev_mgt **)&NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_ops_tbl **dev_ops_tbl = &NBL_ADAPTER_TO_DEV_OPS_TBL(adapter); + + nbl_dev_remove_ops(dev, dev_ops_tbl); + + /* If we succeed in factory_dev remove, no need to go further */ + if (nbl_dev_remove_factory_ctrl_dev(adapter)) + return; + + nbl_dev_remove_rdma_dev(adapter); + nbl_dev_remove_virtio_dev(adapter); + nbl_dev_remove_net_dev(adapter); + nbl_dev_remove_ctrl_dev(adapter); + nbl_dev_remove_common_dev(adapter); + + nbl_dev_remove_dev_mgt(common, dev_mgt); +} + +static void nbl_dev_notify_dev_prepare_reset(struct nbl_dev_mgt *dev_mgt, + enum nbl_reset_event event) +{ + int func_num = 0; + unsigned long cur_func = 0; + unsigned long next_func = 0; + unsigned long *func_bitmap; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_chan_send_info chan_send; + + func_bitmap = devm_kcalloc(NBL_COMMON_TO_DEV(common), BITS_TO_LONGS(NBL_MAX_FUNC), + sizeof(long), GFP_KERNEL); + if (!func_bitmap) + return; + + serv_ops->get_active_func_bitmaps(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), func_bitmap, + NBL_MAX_FUNC); + 
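+	/* Broadcast the reset event to every active function in batches of NBL_DEV_BATCH_RESET_FUNC_NUM; each function acks with NBL_CHAN_MSG_ACK_RESET_EVENT, which moves its reset_status from NBL_RESET_SEND to NBL_RESET_DONE. */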
memset(dev_mgt->ctrl_dev->task_info.reset_status, 0, + sizeof(dev_mgt->ctrl_dev->task_info.reset_status)); + /* Clear the ctrl_dev's func_id from the bitmap; it is handled last */ + clear_bit(NBL_COMMON_TO_MGT_PF(common), func_bitmap); + + cur_func = NBL_COMMON_TO_MGT_PF(common); + while (1) { + next_func = find_next_bit(func_bitmap, NBL_MAX_FUNC, cur_func + 1); + if (next_func >= NBL_MAX_FUNC) + break; + + cur_func = next_func; + dev_mgt->ctrl_dev->task_info.reset_status[cur_func] = NBL_RESET_SEND; + NBL_CHAN_SEND(chan_send, cur_func, NBL_CHAN_MSG_NOTIFY_RESET_EVENT, &event, + sizeof(event), NULL, 0, 0); + chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + func_num++; + if (func_num >= NBL_DEV_BATCH_RESET_FUNC_NUM) { + usleep_range(NBL_DEV_BATCH_RESET_USEC, NBL_DEV_BATCH_RESET_USEC * 2); + func_num = 0; + } + } + + if (func_num) + usleep_range(NBL_DEV_BATCH_RESET_USEC, NBL_DEV_BATCH_RESET_USEC * 2); + + /* The ctrl dev must be processed last, because its reset task will close the mailbox */ + dev_mgt->ctrl_dev->task_info.reset_status[NBL_COMMON_TO_MGT_PF(common)] = NBL_RESET_SEND; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_NOTIFY_RESET_EVENT, + NULL, 0, NULL, 0, 0); + chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + usleep_range(NBL_DEV_BATCH_RESET_USEC, NBL_DEV_BATCH_RESET_USEC * 2); + + cur_func = NBL_COMMON_TO_MGT_PF(common); + while (1) { + if (dev_mgt->ctrl_dev->task_info.reset_status[cur_func] == NBL_RESET_SEND) + nbl_info(common, NBL_DEBUG_MAIN, "func %ld reset failed", cur_func); + + next_func = find_next_bit(func_bitmap, NBL_MAX_FUNC, cur_func + 1); + if (next_func >= NBL_MAX_FUNC) + break; + + cur_func = next_func; + } + + devm_kfree(NBL_COMMON_TO_DEV(common), func_bitmap); +} + +static void nbl_dev_handle_fatal_err(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_chan_param_notify_fw_reset_info fw_reset = {0}; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev_mgt->net_dev->netdev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_chan_send_info chan_send; + + if (test_and_set_bit(NBL_FATAL_ERR, adapter->state)) { + nbl_info(common, NBL_DEBUG_MAIN, "dev in fatal_err status already."); + return; + } + + nbl_dev_disable_abnormal_irq(dev_mgt); + nbl_dev_ctrl_task_stop(dev_mgt); + nbl_dev_notify_dev_prepare_reset(dev_mgt, NBL_HW_FATAL_ERR_EVENT); + + /* Notify the EMP to shut down the device */ + fw_reset.type = NBL_FW_HIGH_TEMP_RESET; + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_NOTIFY_FW_RESET, &fw_reset, sizeof(fw_reset), NULL, 0, 0); + chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + + chan_ops->set_queue_state(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_ABNORMAL, + NBL_CHAN_TYPE_ADMINQ, true); + serv_ops->set_hw_status(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_HW_FATAL_ERR); + nbl_info(common, NBL_DEBUG_MAIN, "dev in fatal_err status."); +} + +static struct nbl_dev_temp_alarm_info temp_alarm_info[NBL_TEMP_STATUS_MAX] = { + {LOGLEVEL_WARNING, "High temperature on sensors0 resumed.\n"}, + {LOGLEVEL_WARNING, "High temperature on sensors0 observed, security(WARNING).\n"}, + {LOGLEVEL_CRIT, "High temperature on sensors0 observed, security(CRITICAL).\n"}, + {LOGLEVEL_EMERG, "High temperature on sensors0 observed, security(EMERGENCY).\n"}, +}; + +static void nbl_dev_handle_temp_ext(struct nbl_dev_mgt *dev_mgt, u8 *data) +{ + u16 temp = (u16)*data; + struct
nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_ctrl *ctrl_dev = NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt); + enum nbl_dev_temp_status old_temp_status = ctrl_dev->temp_status; + enum nbl_dev_temp_status new_temp_status = NBL_TEMP_STATUS_NORMAL; + + /* No resume once the temperature has exceeded NBL_TEMP_EMERG_THRESHOLD, even if it + * later returns to normal, because the hardware has already shut down. + */ + if (old_temp_status == NBL_TEMP_STATUS_EMERG) + return; + + /* If the temperature is in (85, 105) and the status is not normal, do not resume, to avoid alarm oscillation */ + if (temp > NBL_TEMP_NOMAL_THRESHOLD && + temp < NBL_TEMP_WARNING_THRESHOLD && + old_temp_status > NBL_TEMP_STATUS_NORMAL) + return; + + if (temp >= NBL_TEMP_WARNING_THRESHOLD && + temp < NBL_TEMP_CRIT_THRESHOLD) + new_temp_status = NBL_TEMP_STATUS_WARNING; + else if (temp >= NBL_TEMP_CRIT_THRESHOLD && + temp < NBL_TEMP_EMERG_THRESHOLD) + new_temp_status = NBL_TEMP_STATUS_CRIT; + else if (temp >= NBL_TEMP_EMERG_THRESHOLD) + new_temp_status = NBL_TEMP_STATUS_EMERG; + + if (new_temp_status == old_temp_status) + return; + + ctrl_dev->temp_status = new_temp_status; + + /* On a falling temperature, only report once the status has resumed to normal */ + if (new_temp_status < old_temp_status && new_temp_status != NBL_TEMP_STATUS_NORMAL) + return; + + nbl_log(common, temp_alarm_info[new_temp_status].logvel, + temp_alarm_info[new_temp_status].alarm_info); + + if (new_temp_status == NBL_TEMP_STATUS_EMERG) { + ctrl_dev->task_info.reset_event = NBL_HW_FATAL_ERR_EVENT; + nbl_common_queue_work(&ctrl_dev->task_info.reset_task, false, false); + } +} + +static void nbl_dev_chan_notify_evt_alert_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + struct nbl_chan_param_emp_alert_event *alert_param = + (struct nbl_chan_param_emp_alert_event *)data; + + switch (alert_param->type) { + case NBL_EMP_EVENT_TEMP_ALERT: + nbl_dev_handle_temp_ext(dev_mgt, alert_param->data); + return; + default: + return; + } +} + +static void nbl_dev_ctrl_register_emp_ext_alert_chan_msg(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + /* Draco uses the mailbox to communicate with the EMP */ + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_ADMINQ_EXT_ALERT, + nbl_dev_chan_notify_evt_alert_resp, dev_mgt); +} + +/* ---------- Dev start process ---------- */ +static int nbl_dev_start_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + int err = 0; + + err = nbl_dev_request_abnormal_irq(dev_mgt); + if (err) + goto abnormal_request_irq_err; + + err = nbl_dev_enable_abnormal_irq(dev_mgt); + if (err) + goto enable_abnormal_irq_err; + + err = nbl_dev_request_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); + if (err) + goto request_adminq_irq_err; + + err = nbl_dev_enable_adminq_irq(dev_mgt); + if (err) + goto enable_adminq_irq_err; + + nbl_dev_ctrl_register_flr_chan_msg(dev_mgt); + nbl_dev_ctrl_register_emp_ext_alert_chan_msg(dev_mgt); + + nbl_dev_get_port_attributes(dev_mgt); + nbl_dev_init_port(dev_mgt); + nbl_dev_enable_port(dev_mgt, true); + nbl_dev_ctrl_task_start(dev_mgt); + + return 0; + +enable_adminq_irq_err: + nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); +request_adminq_irq_err: +
nbl_dev_disable_abnormal_irq(dev_mgt); +enable_abnormal_irq_err: + nbl_dev_free_abnormal_irq(dev_mgt); +abnormal_request_irq_err: + return err; +} + +static void nbl_dev_stop_ctrl_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + + if (!NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) + return; + + nbl_dev_ctrl_task_stop(dev_mgt); + nbl_dev_enable_port(dev_mgt, false); + nbl_dev_disable_adminq_irq(dev_mgt); + nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)->task_info); + nbl_dev_disable_abnormal_irq(dev_mgt); + nbl_dev_free_abnormal_irq(dev_mgt); +} + +static void nbl_dev_chan_notify_link_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct net_device *netdev = (struct net_device *)priv; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_chan_param_notify_link_state *link_info; + + link_info = (struct nbl_chan_param_notify_link_state *)data; + + serv_ops->set_netdev_carrier_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + netdev, link_info->link_state); +} + +static void nbl_dev_register_link_state_chan_msg(struct nbl_dev_mgt *dev_mgt, + struct net_device *netdev) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_NOTIFY_LINK_STATE, + nbl_dev_chan_notify_link_state_resp, netdev); +} + +static void nbl_dev_chan_notify_reset_event_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + enum nbl_reset_event event = *(enum nbl_reset_event *)data; + + dev_mgt->common_dev->reset_task.event = event; + nbl_common_queue_work(&dev_mgt->common_dev->reset_task.task, false, false); +} + +static void nbl_dev_chan_ack_reset_event_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv; + + WRITE_ONCE(dev_mgt->ctrl_dev->task_info.reset_status[src_id], NBL_RESET_DONE); +} + +static void nbl_dev_register_reset_event_chan_msg(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + if (!chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_NOTIFY_RESET_EVENT, + nbl_dev_chan_notify_reset_event_resp, dev_mgt); + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_MSG_ACK_RESET_EVENT, + nbl_dev_chan_ack_reset_event_resp, dev_mgt); +} + +static int nbl_dev_setup_rep_netdev(struct nbl_adapter *adapter, struct nbl_init_param *param, + struct nbl_rep_data *rep) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct net_device *netdev; + struct nbl_netdev_priv *net_priv; + struct nbl_register_net_result register_result = { 0 }; + u16 tx_queue_num = 1, rx_queue_num = 1; + int ret = 0; + + nbl_dev_get_rep_feature(adapter, &register_result); + + netdev = alloc_etherdev_mqs(sizeof(struct nbl_netdev_priv), tx_queue_num, rx_queue_num); + if (!netdev) { + dev_err(dev, "Alloc net device failed\n"); + ret = -ENOMEM; + goto alloc_fail; + } + + net_priv =
netdev_priv(netdev); + net_priv->adapter = adapter; + rep->netdev = netdev; + net_priv->rep = rep; + net_priv->netdev = netdev; + + SET_NETDEV_DEV(netdev, dev); + ret = nbl_dev_cfg_netdev(netdev, dev_mgt, param, &register_result); + if (ret) { + dev_err(dev, "Cfg net device failed, ret=%d\n", ret); + goto cfg_netdev_fail; + } + + netif_carrier_off(netdev); + ret = register_netdev(netdev); + if (ret) { + dev_err(dev, "Register netdev failed, ret=%d\n", ret); + goto register_netdev_fail; + } + return 0; + +register_netdev_fail: +cfg_netdev_fail: + free_netdev(netdev); + rep->netdev = NULL; +alloc_fail: + return ret; +} + +static int nbl_dev_eswitch_load_rep(struct nbl_adapter *adapter, int num_vfs) +{ + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_init_param param; + struct nbl_dev_rep *rep_dev; + int i, ret = 0; + u16 vf_base_vsi_id; + + rep_dev = devm_kzalloc(dev, sizeof(struct nbl_dev_rep), GFP_KERNEL); + if (!rep_dev) + return -ENOMEM; + + memset(&param, 0, sizeof(param)); + + NBL_DEV_MGT_TO_REP_DEV(dev_mgt) = rep_dev; + rep_dev->num_vfs = num_vfs; + param.is_rep = true; + param.pci_using_dac = NBL_COMMON_TO_PCI_USING_DAC(common); + rep_dev->rep = devm_kzalloc(dev, num_vfs * sizeof(struct nbl_rep_data), GFP_KERNEL); + if (!rep_dev->rep) + return -ENOMEM; + + vf_base_vsi_id = serv_ops->get_vf_base_vsi_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + common->mgt_pf); + ret = serv_ops->alloc_rep_data(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), num_vfs, vf_base_vsi_id); + if (ret) + return ret; + + for (i = 0; i < num_vfs; i++) { + rep_dev->rep[i].rep_vsi_id = vf_base_vsi_id + i; + ret = nbl_dev_setup_rep_netdev(adapter, &param, &rep_dev->rep[i]); + if (ret) + return ret; + nbl_dev_get_rep_queue_num(adapter, &rep_dev->rep[i].base_queue_id, + &rep_dev->rep[i].rep_queue_num); + + /* add rep_id sysfs here */ + nbl_net_addr_rep_attr(&rep_dev->rep[i].rep_attr, i); + ret = sysfs_create_file(&rep_dev->rep[i].netdev->dev.kobj, + &rep_dev->rep[i].rep_attr.attr); + if (ret) { + dev_err(dev, "nbl rep add rep_id sysfs file failed"); + return ret; + } + serv_ops->set_rep_netdev_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + &rep_dev->rep[i]); + } + + dev_info(dev, "nbl dev eswitch load rep success\n"); + return 0; +} + +static int nbl_dev_eswitch_unload_rep(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_rep *rep_dev = NBL_DEV_MGT_TO_REP_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_rep_data *rep_data = NULL; + struct device *dev; + struct net_device *netdev; + int i; + + if (!rep_dev) + return -ENODEV; + + dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + rep_data = rep_dev->rep; + if (!rep_data) { + devm_kfree(dev, rep_dev); + NBL_DEV_MGT_TO_REP_DEV(dev_mgt) = NULL; + return -ENODEV; + } + + serv_ops->unset_rep_netdev_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->free_rep_data(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + for (i = 0; i < rep_dev->num_vfs; i++) { + netdev = rep_data[i].netdev; + if (!netdev) + continue; + sysfs_remove_file(&netdev->dev.kobj, &rep_data[i].rep_attr.attr); + unregister_netdev(netdev); + nbl_dev_reset_netdev(netdev); + free_netdev(netdev); + } + devm_kfree(dev, rep_data); + devm_kfree(dev, rep_dev); + NBL_DEV_MGT_TO_REP_DEV(dev_mgt) = NULL; + + return 0; +} + +static int nbl_dev_eswitch_mode_to_devlink(u16 cur_eswitch_mode, u16 *devlink_eswitch_mode) +{ + switch (cur_eswitch_mode)
{ + case NBL_ESWITCH_LEGACY: + *devlink_eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + break; + case NBL_ESWITCH_OFFLOADS: + *devlink_eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + break; + default: + *devlink_eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + } + return 0; +} + +static int nbl_dev_eswitch_mode_from_devlink(u16 devlink_eswitch_mode, u16 *cfg_eswitch_mode) +{ + switch (devlink_eswitch_mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + *cfg_eswitch_mode = NBL_ESWITCH_LEGACY; + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + *cfg_eswitch_mode = NBL_ESWITCH_OFFLOADS; + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +int nbl_dev_destroy_rep(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + u16 eswitch_mode = 0; + int ret = 0; + + eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (eswitch_mode == NBL_ESWITCH_OFFLOADS) { + ret = nbl_dev_eswitch_unload_rep(dev_mgt); + if (ret) + return ret; + ret = serv_ops->free_rep_queue_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + } + + return ret; +} + +int nbl_dev_create_rep(void *p, int num_vfs) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev = net_dev->netdev; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + u16 eswitch_mode = 0; + int ret = 0; + + eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + dev_info(dev, "dev create rep num_vfs:%d, eswitch_mode:%d\n", num_vfs, eswitch_mode); + if (eswitch_mode == NBL_ESWITCH_OFFLOADS) { + ret = nbl_dev_eswitch_load_rep(adapter, num_vfs); + if (ret) { + nbl_dev_eswitch_unload_rep(dev_mgt); + return ret; + } + ret = serv_ops->alloc_rep_queue_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev); + } + + return ret; +} + +int nbl_dev_setup_vf_config(void *p, int num_vfs) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->setup_vf_config(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), num_vfs, false); +} + +void nbl_dev_remove_vf_config(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + return serv_ops->remove_vf_config(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static int nbl_dev_init_offload_mode(struct nbl_dev_mgt *dev_mgt, u16 vsi_id) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int ret = 0; + + ret = serv_ops->disable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + if (ret) + return ret; + serv_ops->init_acl(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + ret = serv_ops->set_upcall_rule(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id, vsi_id); + if (ret) + goto fail_set_upcall_rule; + + /* eswitch mode set, start CMDQ or add reference */ + ret = serv_ops->switchdev_init_cmdq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret < 0 || ret >= NBL_TC_FLOW_INST_COUNT) + goto fail_init_cmdq; + common->tc_inst_id = ret; + + ret = 
serv_ops->set_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) + goto fail_set_tc_flow_info; + + ret = serv_ops->get_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (ret) + goto fail_get_tc_flow_info; + + return 0; + +fail_get_tc_flow_info: + serv_ops->unset_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +fail_set_tc_flow_info: + serv_ops->switchdev_deinit_cmdq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +fail_init_cmdq: + serv_ops->unset_upcall_rule(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); +fail_set_upcall_rule: + serv_ops->uninit_acl(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->enable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + return ret; +} + +static int nbl_dev_uninit_offload_mode(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int ret = 0; + + ret = serv_ops->enable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + if (ret) + return ret; + ret = serv_ops->unset_upcall_rule(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + if (ret) + goto fail_unset_upcall_rule; + serv_ops->uninit_acl(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + return 0; + +fail_unset_upcall_rule: + serv_ops->disable_phy_flow(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), common->eth_id); + return ret; +} + +static void nbl_dev_destroy_flow_res(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + /* unset tc flow info */ + serv_ops->unset_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->get_tc_flow_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + /* stop CMDQ or reduce its reference */ + serv_ops->switchdev_deinit_cmdq(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +} + +static void nbl_dev_remove_rep_res(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_vsi *vsi = dev_mgt->net_dev->vsi_ctrl.vsi_list[NBL_VSI_CTRL]; + u16 cur_eswitch_mode = NBL_ESWITCH_NONE; + + cur_eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) { + nbl_dev_eswitch_unload_rep(dev_mgt); + serv_ops->free_rep_queue_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + nbl_dev_uninit_offload_mode(dev_mgt); + serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_ESWITCH_NONE); + nbl_dev_destroy_flow_res(dev_mgt); + vsi->ops->stop(dev_mgt, vsi); + } +} + +static int nbl_dev_start_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct net_device *netdev = net_dev->netdev; + struct nbl_netdev_priv *net_priv; + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_vsi *vsi; + struct nbl_dev_vsi *xdp_vsi; + struct nbl_event_callback callback = {0}; + struct nbl_ring_param ring_param = {0}; + u16 net_vector_id, queue_num, xdp_queue_num = 0; + int ret; + + vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_DATA); + if (!vsi) + return -EFAULT; + + queue_num = vsi->queue_num; + netdev = alloc_etherdev_mqs(sizeof(struct nbl_netdev_priv), queue_num, queue_num); + if 
(!netdev) {
+		dev_err(dev, "Alloc net device failed\n");
+		ret = -ENOMEM;
+		goto alloc_netdev_fail;
+	}
+
+	SET_NETDEV_DEV(netdev, dev);
+	net_priv = netdev_priv(netdev);
+	net_priv->adapter = adapter;
+	nbl_dev_set_netdev_priv(netdev, vsi);
+
+	net_dev->netdev = netdev;
+	common->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+	serv_ops->set_mask_en(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), net_msix_mask_en);
+
+	/* Alloc all queues.
+	 * One limitation: we currently must use the data vsi's queue_size for all queues.
+	 */
+	xdp_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_XDP);
+	if (xdp_vsi)
+		xdp_queue_num = xdp_vsi->queue_num;
+
+	ring_param.tx_ring_num = net_dev->kernel_queue_num;
+	ring_param.rx_ring_num = net_dev->kernel_queue_num;
+	ring_param.xdp_ring_offset = net_dev->kernel_queue_num - xdp_queue_num;
+	ring_param.queue_size = net_priv->queue_size;
+	ret = serv_ops->alloc_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev, &ring_param);
+	if (ret) {
+		dev_err(dev, "Alloc rings failed\n");
+		goto alloc_rings_fail;
+	}
+
+	serv_ops->cpu_affinity_init(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->queue_num);
+	ret = serv_ops->setup_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), netdev,
+					       vsi->register_result.vlan_proto,
+					       vsi->register_result.vlan_tci,
+					       vsi->register_result.rate);
+	if (ret) {
+		dev_err(dev, "setup net mgt failed\n");
+		goto setup_net_mgt_fail;
+	}
+
+	/* netdev_build must run before setup_txrx_queues: the MAC snoop check trusts the
+	 * MAC that the PF configured for a VF via "ip link", and a VF is not permitted to
+	 * change its MAC once its queues have been allocated.
+	 */
+	ret = vsi->ops->netdev_build(dev_mgt, param, netdev, vsi);
+	if (ret) {
+		dev_err(dev, "Build netdev failed, selected vsi %d\n", vsi->index);
+		goto build_netdev_fail;
+	}
+
+	net_vector_id = msix_info->serv_info[NBL_MSIX_NET_TYPE].base_vector_id;
+	ret = serv_ops->setup_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					  vsi->vsi_id, net_dev->total_queue_num, net_vector_id);
+	if (ret) {
+		dev_err(dev, "Set queue map failed\n");
+		goto set_queue_fail;
+	}
+
+	ret = nbl_init_lag(dev_mgt, param);
+	if (ret) {
+		dev_err(dev, "init bond failed\n");
+		goto enable_bond_fail;
+	}
+
+	nbl_dev_register_link_state_chan_msg(dev_mgt, netdev);
+	nbl_dev_register_reset_event_chan_msg(dev_mgt);
+
+	ret = vsi->ops->start(dev_mgt, netdev, vsi);
+	if (ret) {
+		dev_err(dev, "Start vsi failed, selected vsi %d\n", vsi->index);
+		goto start_vsi_fail;
+	}
+
+	ret = nbl_dev_request_net_irq(dev_mgt);
+	if (ret) {
+		dev_err(dev, "request irq failed\n");
+		goto request_irq_fail;
+	}
+
+	netif_carrier_off(netdev);
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(dev, "Register netdev failed\n");
+		goto register_netdev_fail;
+	}
+
+	if (!param->caps.is_vf) {
+		if (serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						  NBL_QOS_SYSFS_CAP))
+			nbl_netdev_add_sysfs(netdev, net_dev);
+		if (net_dev->total_vfs) {
+			ret = serv_ops->setup_vf_resource(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+							  net_dev->total_vfs);
+			if (ret)
+				goto setup_vf_res_fail;
+		}
+
+		callback.callback = nbl_dev_vsi_handle_switch_event;
+		callback.callback_data = dev_mgt;
+		nbl_event_register(NBL_EVENT_DEV_MODE_SWITCH, &callback,
+				   NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	}
+
+	set_bit(NBL_DOWN, adapter->state);
+
+	return 0;
+
+setup_vf_res_fail:
+	nbl_netdev_remove_sysfs(net_dev);
+	unregister_netdev(netdev);
+register_netdev_fail:
+	nbl_dev_free_net_irq(dev_mgt);
+request_irq_fail:
+	vsi->ops->stop(dev_mgt, vsi);
+start_vsi_fail:
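+	/* Each label below undoes exactly one setup step, in reverse order of the
+	 * bring-up above.
+	 */
+	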
nbl_deinit_lag(dev_mgt); +enable_bond_fail: + serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); +set_queue_fail: + vsi->ops->netdev_destroy(dev_mgt, vsi); +build_netdev_fail: + serv_ops->remove_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +setup_net_mgt_fail: + serv_ops->free_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); +alloc_rings_fail: + free_netdev(netdev); +alloc_netdev_fail: + return ret; +} + +static void nbl_dev_stop_net_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_event_callback callback = {0}; + struct nbl_event_callback netdev_callback = {0}; + struct nbl_dev_vsi *vsi; + struct net_device *netdev; + + if (!net_dev) + return; + + netdev = net_dev->netdev; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; + if (!vsi) + return; + + if (!common->is_vf) { + callback.callback = nbl_dev_vsi_handle_switch_event; + callback.callback_data = dev_mgt; + nbl_event_unregister(NBL_EVENT_DEV_MODE_SWITCH, &callback, + NBL_COMMON_TO_ETH_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + serv_ops->remove_vf_resource(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + nbl_netdev_remove_sysfs(net_dev); + } + + nbl_dev_remove_rep_res(dev_mgt); + + unregister_netdev(netdev); + + netdev_callback.callback = nbl_dev_vsi_handle_netdev_event; + netdev_callback.callback_data = dev_mgt; + nbl_event_unregister(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + vsi->ops->netdev_destroy(dev_mgt, vsi); + vsi->ops->stop(dev_mgt, vsi); + + nbl_dev_free_net_irq(dev_mgt); + + nbl_deinit_lag(dev_mgt); + + serv_ops->remove_net_resource_mgt(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + serv_ops->remove_txrx_queues(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), vsi->vsi_id); + serv_ops->free_rings(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + free_netdev(netdev); +} + +static int nbl_dev_resume_net_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev; + int ret = 0; + + if (!net_dev) + return 0; + + netdev = net_dev->netdev; + + ret = nbl_dev_request_net_irq(dev_mgt); + if (ret) + dev_err(dev, "request irq failed\n"); + + netif_device_attach(netdev); + return ret; +} + +static void nbl_dev_suspend_net_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev; + + if (!net_dev) + return; + + netdev = net_dev->netdev; + netif_device_detach(netdev); + nbl_dev_free_net_irq(dev_mgt); +} + +static int nbl_dev_get_devlink_eswitch_mode(struct devlink *devlink, u16 *mode) +{ + struct nbl_devlink_priv *priv = devlink_priv(devlink); + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv->dev_mgt; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common); + struct nbl_adapter *adapter = NULL; + u16 cur_eswitch_mode = NBL_ESWITCH_NONE; + + adapter = pci_get_drvdata(pdev); + if (!adapter) + return -EINVAL; + + 
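+	/* Report the driver's internal eswitch mode through the devlink UAPI. From
+	 * userspace this pairs with iproute2's devlink tool, e.g. (the BDF below is
+	 * illustrative only):
+	 *   devlink dev eswitch show pci/0000:01:00.0
+	 *   devlink dev eswitch set pci/0000:01:00.0 mode switchdev
+	 */
+	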
cur_eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	return nbl_dev_eswitch_mode_to_devlink(cur_eswitch_mode, mode);
+}
+
+static int nbl_dev_set_devlink_eswitch_mode(struct devlink *devlink, u16 mode,
+					    struct netlink_ext_ack *extack)
+{
+	struct nbl_devlink_priv *priv = devlink_priv(devlink);
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv->dev_mgt;
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common);
+	struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt);
+	struct nbl_adapter *adapter = NULL;
+	struct nbl_dev_vsi *vsi = dev_mgt->net_dev->vsi_ctrl.vsi_list[NBL_VSI_CTRL];
+	struct nbl_event_offload_status_data event_data = {0};
+	int num_vfs = 0;
+	u16 cfg_eswitch_mode = NBL_ESWITCH_NONE;
+	u16 cur_eswitch_mode = NBL_ESWITCH_NONE;
+	int ret = 0;
+
+	num_vfs = pci_num_vf(pdev);
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return -EINVAL;
+	ret = nbl_dev_eswitch_mode_from_devlink(mode, &cfg_eswitch_mode);
+	if (ret)
+		return ret;
+	cur_eswitch_mode = serv_ops->get_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+	if (cur_eswitch_mode == cfg_eswitch_mode)
+		return 0;
+
+	if (!vsi)
+		return -ENOENT;
+
+	if (cfg_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		ret = vsi->ops->start(dev_mgt, dev_mgt->net_dev->netdev, vsi);
+		if (ret)
+			return ret;
+
+		ret = nbl_dev_init_offload_mode(dev_mgt, vsi->vsi_id);
+		if (ret) {
+			dev_err(dev, "failed to init offload mode\n");
+			return -EBUSY;
+		}
+		serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cfg_eswitch_mode);
+		if (num_vfs) {
+			ret = nbl_dev_create_rep(adapter, num_vfs);
+			if (ret)
+				goto fail_cfg_rep;
+		}
+
+		event_data.pf_vsi_id = NBL_COMMON_TO_VSI_ID(common);
+		event_data.status = true;
+		nbl_event_notify(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_data,
+				 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	} else if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		ret = nbl_dev_uninit_offload_mode(dev_mgt);
+		if (ret) {
+			dev_err(dev, "failed to uninit offload mode\n");
+			return -EBUSY;
+		}
+		if (num_vfs) {
+			ret = nbl_dev_destroy_rep(adapter);
+			if (ret)
+				goto fail_cfg_rep;
+		}
+		serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cfg_eswitch_mode);
+
+		nbl_dev_destroy_flow_res(dev_mgt);
+
+		vsi->ops->stop(dev_mgt, vsi);
+
+		event_data.pf_vsi_id = NBL_COMMON_TO_VSI_ID(common);
+		event_data.status = false;
+		nbl_event_notify(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_data,
+				 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+	}
+	return 0;
+
+fail_cfg_rep:
+	if (cfg_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		serv_ops->set_eswitch_mode(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), cur_eswitch_mode);
+		vsi->ops->stop(dev_mgt, vsi);
+		ret = nbl_dev_uninit_offload_mode(dev_mgt);
+		if (ret)
+			dev_err(dev, "failed to uninit offload mode after rep create failed\n");
+	} else if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		ret = nbl_dev_init_offload_mode(dev_mgt, vsi->vsi_id);
+		if (ret)
+			dev_err(dev, "failed to init offload mode after rep destroy failed\n");
+	}
+	return -EBUSY;
+}
+
+/* ---------- Devlink config ---------- */
+static void nbl_dev_devlink_free(void *devlink_ptr)
+{
+	devlink_free((struct devlink *)devlink_ptr);
+}
+
+static int nbl_dev_setup_devlink(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param)
+{
+	struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt);
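+	/* VFs and virtio-type devices get no devlink instance (see the early return
+	 * below); for PFs the devlink_ops is allocated per device, so each devlink
+	 * instance carries its own eswitch callbacks.
+	 */
+	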
struct devlink *devlink; + struct devlink_ops *devlink_ops; + struct nbl_devlink_priv *priv; + int ret = 0; + + if (param->caps.is_vf || param->product_type == NBL_VIRTIO_TYPE) + return 0; + + devlink_ops = devm_kzalloc(dev, sizeof(*devlink_ops), GFP_KERNEL); + if (!devlink_ops) + return -ENOMEM; + devlink_ops->eswitch_mode_set = nbl_dev_set_devlink_eswitch_mode; + devlink_ops->eswitch_mode_get = nbl_dev_get_devlink_eswitch_mode; + + devlink = devlink_alloc(devlink_ops, sizeof(*priv), dev); + + if (!devlink) + return -ENOMEM; + + common_dev->devlink_ops = devlink_ops; + + if (devm_add_action(dev, nbl_dev_devlink_free, devlink)) { + devlink_free(devlink); + return -EFAULT; + } + priv = devlink_priv(devlink); + priv->priv = NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt); + priv->dev_mgt = dev_mgt; + + devlink_register(devlink); + + common_dev->devlink = devlink; + return ret; +} + +static void nbl_dev_remove_devlink(struct nbl_dev_mgt *dev_mgt) +{ + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + + if (common_dev->devlink) { + devlink_unregister(common_dev->devlink); + devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), common_dev->devlink_ops); + } +} + +static int nbl_dev_start_common_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + int ret = 0; + + ret = nbl_dev_configure_msix_map(dev_mgt); + if (ret) + goto config_msix_map_err; + + ret = nbl_dev_init_interrupt_scheme(dev_mgt); + if (ret) + goto init_interrupt_scheme_err; + + ret = nbl_dev_request_mailbox_irq(dev_mgt); + if (ret) + goto mailbox_request_irq_err; + + ret = nbl_dev_enable_mailbox_irq(dev_mgt); + if (ret) + goto enable_mailbox_irq_err; + + ret = nbl_dev_setup_devlink(dev_mgt, param); + if (ret) + goto setup_devlink_err; + + if (!param->caps.is_vf && + serv_ops->get_product_fix_cap(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_HWMON_TEMP_CAP)) { + ret = nbl_dev_setup_hwmon(adapter); + if (ret) + goto setup_hwmon_err; + } + + nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + return 0; + +setup_hwmon_err: + nbl_dev_remove_devlink(dev_mgt); +setup_devlink_err: + nbl_dev_disable_mailbox_irq(dev_mgt); +enable_mailbox_irq_err: + nbl_dev_free_mailbox_irq(dev_mgt); +mailbox_request_irq_err: + nbl_dev_clear_interrupt_scheme(dev_mgt); +init_interrupt_scheme_err: + nbl_dev_destroy_msix_map(dev_mgt); +config_msix_map_err: + return ret; +} + +static void nbl_dev_stop_common_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + + nbl_dev_remove_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + nbl_dev_remove_hwmon(adapter); + nbl_dev_remove_devlink(dev_mgt); + nbl_dev_free_mailbox_irq(dev_mgt); + nbl_dev_disable_mailbox_irq(dev_mgt); + nbl_dev_clear_interrupt_scheme(dev_mgt); + nbl_dev_destroy_msix_map(dev_mgt); +} + +static int nbl_dev_resume_common_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + int ret = 0; + + ret = nbl_dev_request_mailbox_irq(dev_mgt); + if (ret) + return ret; + + nbl_dev_setup_chan_keepalive(dev_mgt, NBL_CHAN_TYPE_MAILBOX); + + return 0; +} + +static void nbl_dev_suspend_common_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + + nbl_dev_remove_chan_keepalive(dev_mgt, 
NBL_CHAN_TYPE_MAILBOX);
+	nbl_dev_free_mailbox_irq(dev_mgt);
+}
+
+static int nbl_dev_start_virtio_dev(struct nbl_adapter *adapter)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_dev_common *dev_common = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt);
+	struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(dev_common);
+	struct nbl_dev_virtio *virtio_dev = NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	u16 rdma_vector_id;
+
+	if (!virtio_dev)
+		return 0;
+
+	serv_ops->configure_virtio_dev_msix(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					    virtio_dev->device_msix);
+
+	rdma_vector_id = msix_info->serv_info[NBL_MSIX_RDMA_TYPE].base_vector_id;
+	serv_ops->configure_rdma_msix_off(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), rdma_vector_id);
+
+	serv_ops->configure_virtio_dev_ready(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+
+	return 0;
+}
+
+static void nbl_dev_stop_virtio_dev(struct nbl_adapter *adapter)
+{
+	/* nothing to do */
+}
+
+static int nbl_dev_start_factory_ctrl_dev(struct nbl_adapter *adapter, struct nbl_init_param *param)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	int ret = 0;
+
+	ret = nbl_dev_configure_msix_map(dev_mgt);
+	if (ret)
+		goto config_msix_map_err;
+
+	ret = nbl_dev_init_interrupt_scheme(dev_mgt);
+	if (ret)
+		goto init_interrupt_scheme_err;
+
+	ret = nbl_dev_request_mailbox_irq(dev_mgt);
+	if (ret)
+		goto mailbox_request_irq_err;
+
+	ret = nbl_dev_enable_mailbox_irq(dev_mgt);
+	if (ret)
+		goto enable_mailbox_irq_err;
+
+	ret = nbl_dev_request_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt)->task_info);
+	if (ret)
+		goto request_adminq_irq_err;
+
+	ret = nbl_dev_enable_adminq_irq(dev_mgt);
+	if (ret)
+		goto enable_adminq_irq_err;
+
+	ret = nbl_dev_setup_devlink(dev_mgt, param);
+	if (ret)
+		goto setup_devlink_err;
+
+	nbl_dev_factory_task_start(dev_mgt);
+
+	return 0;
+
+setup_devlink_err:
+	nbl_dev_disable_adminq_irq(dev_mgt);
+enable_adminq_irq_err:
+	nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt)->task_info);
+request_adminq_irq_err:
+	nbl_dev_disable_mailbox_irq(dev_mgt);
+enable_mailbox_irq_err:
+	nbl_dev_free_mailbox_irq(dev_mgt);
+mailbox_request_irq_err:
+	nbl_dev_clear_interrupt_scheme(dev_mgt);
+init_interrupt_scheme_err:
+	nbl_dev_destroy_msix_map(dev_mgt);
+config_msix_map_err:
+	return ret;
+}
+
+static bool nbl_dev_stop_factory_ctrl_dev(struct nbl_adapter *adapter)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+
+	if (!NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt))
+		return false;
+
+	nbl_dev_remove_devlink(dev_mgt);
+
+	nbl_dev_factory_task_stop(dev_mgt);
+	nbl_dev_disable_adminq_irq(dev_mgt);
+	nbl_dev_free_adminq_irq(dev_mgt, &NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt)->task_info);
+	nbl_dev_free_mailbox_irq(dev_mgt);
+	nbl_dev_disable_mailbox_irq(dev_mgt);
+	nbl_dev_clear_interrupt_scheme(dev_mgt);
+	nbl_dev_destroy_msix_map(dev_mgt);
+
+	return true;
+}
+
+int nbl_dev_start(void *p, struct nbl_init_param *param)
+{
+	struct nbl_adapter *adapter = (struct nbl_adapter *)p;
+	int ret = 0;
+
+	/* If we have factory_dev, no need to go further */
+	if (param->caps.has_factory_ctrl)
+		return nbl_dev_start_factory_ctrl_dev(adapter, param);
+
+	ret = nbl_dev_start_common_dev(adapter, param);
+	if (ret)
+		goto start_common_dev_fail;
+
+	if (param->caps.has_ctrl) {
+		ret = nbl_dev_start_ctrl_dev(adapter, param);
+		if (ret)
+			goto start_ctrl_dev_fail;
+	}
+
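+	/* Bring-up order: common (MSI-X, mailbox) -> ctrl -> net -> virtio -> rdma ->
+	 * user; the error labels below unwind whatever has been started, in reverse.
+	 */
+	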
ret = nbl_dev_start_net_dev(adapter, param); + if (ret) + goto start_net_dev_fail; + + ret = nbl_dev_start_virtio_dev(adapter); + if (ret) + goto start_virtio_dev_fail; + + ret = nbl_dev_start_rdma_dev(adapter); + if (ret) + goto start_rdma_dev_fail; + + if (param->caps.has_user) + nbl_dev_start_user_dev(adapter); + + return 0; + +start_rdma_dev_fail: + nbl_dev_stop_virtio_dev(adapter); +start_virtio_dev_fail: + nbl_dev_stop_net_dev(adapter); +start_net_dev_fail: + nbl_dev_stop_ctrl_dev(adapter); +start_ctrl_dev_fail: + nbl_dev_stop_common_dev(adapter); +start_common_dev_fail: + return ret; +} + +void nbl_dev_stop(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + + /* If we succeed in factory_dev stop, no need to go further */ + if (nbl_dev_stop_factory_ctrl_dev(adapter)) + return; + + nbl_dev_stop_user_dev(adapter); + nbl_dev_stop_virtio_dev(adapter); + nbl_dev_stop_rdma_dev(adapter); + nbl_dev_stop_ctrl_dev(adapter); + nbl_dev_stop_net_dev(adapter); + nbl_dev_stop_common_dev(adapter); +} + +int nbl_dev_resume(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_init_param *param = &adapter->init_param; + int ret = 0; + + /* If we have factory_dev, no need to go further */ + if (param->caps.has_factory_ctrl) + return nbl_dev_start_factory_ctrl_dev(adapter, param); + + ret = nbl_dev_resume_common_dev(adapter, param); + if (ret) + goto start_common_dev_fail; + + if (param->caps.has_ctrl) { + ret = nbl_dev_start_ctrl_dev(adapter, param); + if (ret) + goto start_ctrl_dev_fail; + } + + ret = nbl_dev_resume_net_dev(adapter, param); + if (ret) + goto start_net_dev_fail; + + ret = nbl_dev_resume_rdma_dev(adapter); + if (ret) + goto start_rdma_dev_fail; + + return 0; + +start_rdma_dev_fail: + nbl_dev_stop_net_dev(adapter); +start_net_dev_fail: + nbl_dev_stop_ctrl_dev(adapter); +start_ctrl_dev_fail: + nbl_dev_stop_common_dev(adapter); +start_common_dev_fail: + return ret; +} + +int nbl_dev_suspend(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + + /* If we succeed in factory_dev stop, no need to go further */ + if (nbl_dev_stop_factory_ctrl_dev(adapter)) + return 0; + + nbl_dev_suspend_rdma_dev(adapter); + nbl_dev_stop_ctrl_dev(adapter); + nbl_dev_suspend_net_dev(adapter); + nbl_dev_suspend_common_dev(adapter); + + return 0; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..061405ea86c9753dd6eab2db5e91c1aa88d7ddf1 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev.h @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_DEV_H_ +#define _NBL_DEV_H_ + +#include "nbl_core.h" +#include "nbl_export_rdma.h" +#include "nbl_dev_user.h" +#include "nbl_sysfs.h" + +#define NBL_DEV_MGT_TO_COMMON(dev_mgt) ((dev_mgt)->common) +#define NBL_DEV_MGT_TO_DEV(dev_mgt) NBL_COMMON_TO_DEV(NBL_DEV_MGT_TO_COMMON(dev_mgt)) +#define NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt) ((dev_mgt)->common_dev) +#define NBL_DEV_MGT_TO_FACTORY_DEV(dev_mgt) ((dev_mgt)->factory_dev) +#define NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt) ((dev_mgt)->ctrl_dev) +#define NBL_DEV_MGT_TO_NET_DEV(dev_mgt) ((dev_mgt)->net_dev) +#define NBL_DEV_MGT_TO_VIRTIO_DEV(dev_mgt) ((dev_mgt)->virtio_dev) +#define NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) ((dev_mgt)->rdma_dev) +#define NBL_DEV_MGT_TO_USER_DEV(dev_mgt) ((dev_mgt)->user_dev) +#define NBL_DEV_MGT_TO_REP_DEV(dev_mgt) ((dev_mgt)->rep_dev) +#define NBL_DEV_COMMON_TO_MSIX_INFO(dev_common) (&(dev_common)->msix_info) +#define NBL_DEV_CTRL_TO_TASK_INFO(dev_ctrl) (&(dev_ctrl)->task_info) +#define NBL_DEV_FACTORY_TO_TASK_INFO(dev_factory) (&(dev_factory)->task_info) +#define NBL_DEV_MGT_TO_EMP_CONSOLE(dev_mgt) ((dev_mgt)->emp_console) +#define NBL_DEV_MGT_TO_NETDEV_OPS(dev_mgt) ((dev_mgt)->net_dev->ops) + +#define NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt) ((dev_mgt)->serv_ops_tbl) +#define NBL_DEV_MGT_TO_SERV_OPS(dev_mgt) (NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt)->ops) +#define NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt) (NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt)->priv) +#define NBL_DEV_MGT_TO_RES_PT_OPS(adapter) (&(NBL_DEV_MGT_TO_SERV_OPS_TBL(dev_mgt)->pt_ops)) +#define NBL_DEV_MGT_TO_CHAN_OPS_TBL(dev_mgt) ((dev_mgt)->chan_ops_tbl) +#define NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt) (NBL_DEV_MGT_TO_CHAN_OPS_TBL(dev_mgt)->ops) +#define NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt) (NBL_DEV_MGT_TO_CHAN_OPS_TBL(dev_mgt)->priv) + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + +#define NBL_STRING_NAME_LEN (32) +#define NBL_DEFAULT_MTU (1500) + +#define NBL_MAX_CARDS 16 + +#define NBL_KEEPALIVE_TIME_CYCLE (10 * HZ) + +#define NBL_DEV_BATCH_RESET_FUNC_NUM (32) +#define NBL_DEV_BATCH_RESET_USEC (1000000) + +enum nbl_reset_status { + NBL_RESET_INIT, + NBL_RESET_SEND, + NBL_RESET_DONE, + NBL_RESET_STATUS_MAX +}; + +struct nbl_task_info { + struct nbl_adapter *adapter; + struct nbl_dev_mgt *dev_mgt; + struct work_struct offload_network_task; + struct work_struct fw_hb_task; + struct delayed_work fw_reset_task; + struct work_struct clean_adminq_task; + struct work_struct ipsec_task; + struct work_struct adapt_desc_gother_task; + struct work_struct clean_abnormal_irq_task; + struct work_struct recovery_abnormal_task; + struct work_struct reset_task; + enum nbl_reset_event reset_event; + enum nbl_reset_status reset_status[NBL_MAX_FUNC]; + struct timer_list serv_timer; + unsigned long serv_timer_period; + + bool fw_resetting; + bool timer_setup; +}; + +struct nbl_reset_task_info { + struct work_struct task; + enum nbl_reset_event event; +}; + +enum nbl_msix_serv_type { + /* virtio_dev has a config vector_id, and the vector_id need is 0 */ + NBL_MSIX_VIRTIO_TYPE = 0, + NBL_MSIX_NET_TYPE, + NBL_MSIX_MAILBOX_TYPE, + NBL_MSIX_ABNORMAL_TYPE, + NBL_MSIX_ADMINDQ_TYPE, + NBL_MSIX_RDMA_TYPE, + NBL_MSIX_TYPE_MAX + +}; + +struct nbl_msix_serv_info { + u16 num; + u16 base_vector_id; + /* true: hw report msix, hw need to mask actively */ + bool hw_self_mask_en; +}; + +struct nbl_msix_info { + struct nbl_msix_serv_info serv_info[NBL_MSIX_TYPE_MAX]; + struct msix_entry *msix_entries; +}; + +struct 
nbl_dev_common { + struct nbl_dev_mgt *dev_mgt; + struct device *hwmon_dev; + struct nbl_msix_info msix_info; + char mailbox_name[NBL_STRING_NAME_LEN]; + // for ctrl-dev/net-dev mailbox recv msg + struct work_struct clean_mbx_task; + + struct devlink_ops *devlink_ops; + struct devlink *devlink; + struct nbl_reset_task_info reset_task; +}; + +struct nbl_dev_factory { + struct nbl_task_info task_info; +}; + +enum nbl_dev_temp_status { + NBL_TEMP_STATUS_NORMAL = 0, + NBL_TEMP_STATUS_WARNING, + NBL_TEMP_STATUS_CRIT, + NBL_TEMP_STATUS_EMERG, + NBL_TEMP_STATUS_MAX +}; + +struct nbl_dev_ctrl { + struct nbl_task_info task_info; + enum nbl_dev_temp_status temp_status; +}; + +enum nbl_dev_emp_alert_event { + NBL_EMP_EVENT_TEMP_ALERT = 1, + NBL_EMP_EVENT_MAX +}; + +enum nbl_dev_temp_threshold { + NBL_TEMP_NOMAL_THRESHOLD = 85, + NBL_TEMP_WARNING_THRESHOLD = 105, + NBL_TEMP_CRIT_THRESHOLD = 115, + NBL_TEMP_EMERG_THRESHOLD = 120, +}; + +struct nbl_dev_temp_alarm_info { + int logvel; +#define NBL_TEMP_ALARM_STR_LEN 128 + char alarm_info[NBL_TEMP_ALARM_STR_LEN]; +}; + +struct nbl_dev_vsi_controller { + u16 queue_num; + u16 queue_free_offset; + void *vsi_list[NBL_VSI_MAX]; +}; + +struct nbl_dev_net_ops { + void (*setup_netdev_ops)(void *priv, struct net_device *netdev, + struct nbl_init_param *param); + void (*setup_ethtool_ops)(void *priv, struct net_device *netdev, + struct nbl_init_param *param); +}; + +struct nbl_dev_net { + struct net_device *netdev; + struct nbl_lag_member *lag_mem; + struct nbl_dev_net_ops *ops; + u8 lag_inited; + u8 eth_id; + struct nbl_dev_vsi_controller vsi_ctrl; + u16 total_queue_num; + u16 kernel_queue_num; + u16 user_queue_num; + u16 total_vfs; + struct nbl_net_qos qos_config; +}; + +struct nbl_dev_virtio { + u8 device_msix; +}; + +struct nbl_dev_rdma_event_data { + struct list_head node; + /* Lag event will be processed async, so we need to fully store the param in case it is + * released by caller. + * + * callback_data will always be dev_mgt, which will not be released, so don't bother. 
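+	 * (See the comment in nbl_dev_rdma_handle_bond_event() for why these entries
+	 * are queued on a list instead of kept in a single slot.)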
+ */ + struct nbl_event_rdma_bond_update event_data; + void *callback_data; + u16 type; +}; + +struct nbl_dev_rdma { + struct auxiliary_device *adev; + struct auxiliary_device *grc_adev; + struct auxiliary_device *bond_adev; + + struct work_struct abnormal_event_task; + + struct work_struct lag_event_task; + struct list_head lag_event_param_list; + struct mutex lag_event_lock; /* Protect lag_event_param_list */ + + int adev_index; + u32 mem_type; + bool has_rdma; + bool has_grc; + u16 func_id; + u16 lag_id; + bool bond_registered; + bool bond_shaping_configed; + + bool is_halting; + bool pf_event_ready; +}; + +struct nbl_dev_emp_console { + struct nbl_dev_mgt *dev_mgt; + unsigned int id; + atomic_t opened; + wait_queue_head_t wait; + struct cdev cdev; + struct kfifo rx_fifo; + struct ktermios termios; +}; + +struct nbl_dev_user_iommu_group { + struct mutex dma_tree_lock; /* lock dma tree */ + struct list_head group_next; + struct kref kref; + struct rb_root dma_tree; + struct iommu_group *iommu_group; + struct device *dev; + struct vfio_device *vdev; +}; + +struct nbl_dev_user { + struct vfio_device *vdev; + struct device mdev; + struct notifier_block iommu_notifier; + struct device *dev; + struct nbl_adapter *adapter; + struct nbl_dev_user_iommu_group *group; + void *shm_msg_ring; + u64 dma_limit; + atomic_t open_cnt; + int minor; + int network_type; + bool iommu_status; + bool remap_status; + bool user_promisc_mode; +}; + +struct nbl_vfio_device { + struct vfio_device vdev; + struct nbl_dev_user *user; +}; + +#define NBL_USERDEV_TO_VFIO_DEV(user) ((user)->vdev) +#define NBL_VFIO_DEV_TO_USERDEV(vdev) (*(struct nbl_dev_user **)((vdev) + 1)) + +struct nbl_dev_rep { + struct nbl_rep_data *rep; + int num_vfs; +}; + +struct nbl_dev_mgt { + struct nbl_common_info *common; + struct nbl_service_ops_tbl *serv_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_dev_common *common_dev; + struct nbl_dev_factory *factory_dev; + struct nbl_dev_ctrl *ctrl_dev; + struct nbl_dev_net *net_dev; + struct nbl_dev_virtio *virtio_dev; + struct nbl_dev_rdma *rdma_dev; + struct nbl_dev_emp_console *emp_console; + struct nbl_dev_rep *rep_dev; + struct nbl_dev_user *user_dev; +}; + +struct nbl_dev_vsi_feature { + u16 has_lldp:1; + u16 has_lacp:1; + u16 rsv:14; +}; + +struct nbl_dev_vsi_ops { + int (*register_vsi)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data); + int (*setup)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + void *vsi_data); + void (*remove)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); + int (*start)(void *dev_priv, struct net_device *netdev, void *vsi_data); + void (*stop)(void *dev_priv, void *vsi_data); + int (*netdev_build)(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param, + struct net_device *netdev, void *vsi_data); + void (*netdev_destroy)(struct nbl_dev_mgt *dev_mgt, void *vsi_data); +}; + +struct nbl_dev_vsi { + struct nbl_dev_vsi_ops *ops; + struct net_device *netdev; + struct net_device *napi_netdev; + struct nbl_register_net_result register_result; + struct nbl_dev_vsi_feature feature; + u16 vsi_id; + u16 queue_offset; + u16 queue_num; + u16 queue_size; + u16 in_kernel; + u8 index; + bool enable; + bool use_independ_irq; + bool static_queue; +}; + +struct nbl_dev_vsi_tbl { + struct nbl_dev_vsi_ops vsi_ops; + bool vf_support; + bool only_nic_support; + u16 in_kernel; + bool use_independ_irq; + bool static_queue; +}; + +#define NBL_DEV_BOARD_ID_MAX NBL_DRIVER_DEV_MAX +struct nbl_dev_board_id_entry { + u32 board_key; /* domain 
<< 16 | bus_id */ + u8 refcount; + bool valid; +}; + +struct nbl_dev_board_id_table { + struct nbl_dev_board_id_entry entry[NBL_DEV_BOARD_ID_MAX]; +}; + +int nbl_dev_setup_rdma_dev(struct nbl_adapter *adapter, struct nbl_init_param *param); +void nbl_dev_remove_rdma_dev(struct nbl_adapter *adapter); +int nbl_dev_start_rdma_dev(struct nbl_adapter *adapter); +void nbl_dev_stop_rdma_dev(struct nbl_adapter *adapter); +int nbl_dev_resume_rdma_dev(struct nbl_adapter *adapter); +int nbl_dev_suspend_rdma_dev(struct nbl_adapter *adapter); +void nbl_dev_grc_process_abnormal_event(struct nbl_dev_rdma *rdma_dev); +void nbl_dev_grc_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id); +size_t nbl_dev_rdma_qos_cfg_store(struct nbl_dev_mgt *dev_mgt, int offset, + const char *buf, size_t count); +size_t nbl_dev_rdma_qos_cfg_show(struct nbl_dev_mgt *dev_mgt, int offset, char *buf); + +int nbl_dev_init_emp_console(struct nbl_adapter *adapter); +void nbl_dev_destroy_emp_console(struct nbl_adapter *adapter); +int nbl_dev_setup_hwmon(struct nbl_adapter *adapter); +void nbl_dev_remove_hwmon(struct nbl_adapter *adapter); +struct nbl_dev_vsi *nbl_dev_vsi_select(struct nbl_dev_mgt *dev_mgt, u8 vsi_index); + +int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev); +void nbl_netdev_remove_sysfs(struct nbl_dev_net *net_dev); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c new file mode 100644 index 0000000000000000000000000000000000000000..4e9e7b2e29292fbb024f572aa0650af0d949f9c4 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan
+ */
+
+#include "nbl_dev_rdma.h"
+
+static int nbl_dev_create_rdma_aux_dev(struct nbl_dev_mgt *dev_mgt, u8 type,
+				       struct nbl_core_dev_lag_info *lag_info);
+static void nbl_dev_destroy_rdma_aux_dev(struct nbl_dev_rdma *rdma_dev,
+					 struct auxiliary_device **adev);
+
+static int nbl_dev_rdma_bond_active_num(struct nbl_core_dev_info *cdev_info)
+{
+	int i, count = 0;
+
+	if (!cdev_info->is_lag)
+		return 0;
+
+	for (i = 0; i < NBL_RDMA_LAG_MAX_PORTS; i++)
+		if (cdev_info->lag_info.lag_mem[i].active)
+			count++;
+
+	return count;
+}
+
+static void nbl_dev_rdma_cfg_bond(struct nbl_dev_mgt *dev_mgt, struct nbl_core_dev_info *cdev_info,
+				  bool enable)
+{
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	int other_eth_id = -1, i;
+
+	/* TODO: if we need to support bond with more than two ports, need to modify here */
+	for (i = 0; i < NBL_LAG_MAX_PORTS; i++)
+		if (cdev_info->lag_info.lag_mem[i].eth_id != NBL_COMMON_TO_ETH_ID(common))
+			other_eth_id = cdev_info->lag_info.lag_mem[i].eth_id;
+
+	if (other_eth_id == -1) {
+		nbl_warn(common, NBL_DEBUG_MAIN, "Fail to find bond other eth id, rdma cfg abort");
+		return;
+	}
+
+	serv_ops->cfg_bond_shaping(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				   NBL_COMMON_TO_ETH_ID(common), enable);
+	serv_ops->cfg_bgid_back_pressure(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					 NBL_COMMON_TO_ETH_ID(common), other_eth_id, enable);
+
+	rdma_dev->bond_shaping_configed = enable;
+}
+
+static int nbl_dev_chan_grc_process_req(void *priv, u8 *req_args, u8 req_len,
+					void *resp, u16 resp_len)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_chan_rdma_resp param = {0};
+	struct nbl_chan_rdma_resp result = {0};
+	struct nbl_chan_send_info chan_send = {0};
+	struct nbl_common_info *common;
+	int ret = 0;
+
+	if (!chan_ops)
+		return 0;
+
+	memcpy(param.resp_data, req_args, req_len);
+	param.data_len = req_len;
+
+	common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GRC_PROCESS, &param, sizeof(param),
+		      &result, sizeof(result), 1);
+	ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send);
+	if (ret)
+		return ret;
+
+	resp_len = min(resp_len, result.data_len);
+	memcpy(resp, result.resp_data, resp_len);
+
+	return 0;
+}
+
+static void nbl_dev_chan_grc_process_resp(void *priv, u16 src_id, u16 msg_id,
+					  void *data, u32 data_len)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)priv;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt);
+	struct nbl_chan_rdma_resp *param;
+	struct nbl_chan_rdma_resp result = {0};
+	struct nbl_chan_ack_info chan_ack;
+	int err = NBL_CHAN_RESP_OK;
+	struct nbl_aux_dev *dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev);
+
+	param = (struct nbl_chan_rdma_resp *)data;
+
+	if (!dev_link->recv) {
+		err = NBL_CHAN_RESP_ERR;
+		NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GRC_PROCESS,
+			     msg_id, err, &result, sizeof(result));
+		chan_ops->send_ack(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_ack);
+		return;
+	}
+
+	dev_link->recv(rdma_dev->grc_adev, param->resp_data, param->data_len, &result);
+
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GRC_PROCESS,
+		     msg_id, err, &result, sizeof(result));
+	chan_ops->send_ack(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_ack);
+}
+
+static int nbl_dev_grc_process_send(struct pci_dev *pdev, u8 *req_args, u8 req_len,
+				    void *resp, u16 resp_len)
+{
+	struct nbl_adapter *adapter;
+	struct nbl_dev_mgt *dev_mgt;
+	struct nbl_chan_rdma_resp chan_resp = {0};
+	struct nbl_dev_rdma *rdma_dev;
+	struct nbl_aux_dev *dev_link;
+
+	if (!pdev || !req_args || !resp)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(pdev);
+	dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev);
+
+	if (rdma_dev->has_grc) {
+		int ret = 0;
+
+		if (dev_link->recv) {
+			dev_link->recv(rdma_dev->grc_adev, req_args, req_len, &chan_resp);
+		} else {
+			chan_resp.data_len = 1;
+			chan_resp.resp_data[0] = 1;
+			ret = -EINVAL;
+		}
+		resp_len = min(chan_resp.data_len, resp_len);
+		memcpy(resp, chan_resp.resp_data, resp_len);
+		return ret;
+	} else {
+		return nbl_dev_chan_grc_process_req(dev_mgt, req_args, req_len, resp, resp_len);
+	}
+}
+
+static void nbl_dev_grc_handle_abnormal_event(struct work_struct *work)
+{
+	struct nbl_dev_rdma *rdma_dev = container_of(work, struct nbl_dev_rdma,
+						     abnormal_event_task);
+	struct nbl_aux_dev *dev_link = NULL;
+
+	if (rdma_dev->is_halting)
+		return;
+
+	if (rdma_dev && rdma_dev->grc_adev)
+		dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev);
+	else if (rdma_dev && rdma_dev->adev)
+		dev_link = container_of(rdma_dev->adev, struct nbl_aux_dev, adev);
+	else if (rdma_dev && rdma_dev->bond_adev)
+		dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	else
+		return;
+
+	if (dev_link && dev_link->abnormal_event_process)
+		dev_link->abnormal_event_process(&dev_link->adev);
+}
+
+void nbl_dev_grc_process_abnormal_event(struct nbl_dev_rdma *rdma_dev)
+{
+	if (rdma_dev && !rdma_dev->is_halting && rdma_dev->pf_event_ready)
+		nbl_common_queue_work_rdma(&rdma_dev->abnormal_event_task, false);
+}
+
+void nbl_dev_grc_process_flr_event(struct nbl_dev_rdma *rdma_dev, u16 vsi_id)
+{
+	struct nbl_aux_dev *dev_link;
+
+	if (!rdma_dev || !rdma_dev->grc_adev)
+		return;
+
+	dev_link = container_of(rdma_dev->grc_adev, struct nbl_aux_dev, adev);
+	if (dev_link->process_flr_event)
+		dev_link->process_flr_event(rdma_dev->grc_adev, vsi_id);
+}
+
+static int nbl_dev_rdma_register_bond(struct pci_dev *pdev, bool enable)
+{
+	struct nbl_adapter *adapter;
+	struct nbl_dev_mgt *dev_mgt;
+	struct nbl_dev_rdma *rdma_dev;
+	struct nbl_aux_dev *dev_link;
+	struct nbl_common_info *common;
+	struct nbl_service_ops *serv_ops;
+
+	if (!pdev)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(pdev);
+	dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter);
+	serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+
+	if (!rdma_dev->bond_adev)
+		return -EINVAL;
+
+	dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+
+	rdma_dev->bond_registered = enable;
+
+	if (rdma_dev->bond_registered && nbl_dev_rdma_bond_active_num(dev_link->cdev_info) > 1 &&
+	    !rdma_dev->bond_shaping_configed)
+		nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, true);
+	else if (!rdma_dev->bond_registered && rdma_dev->bond_shaping_configed)
+		nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, false);
+
+	return 0;
+}
+
+static void nbl_dev_rdma_form_lag_info(struct nbl_core_dev_lag_info *lag_info,
+				       struct nbl_lag_member_list_param *list_param,
+				       struct nbl_common_info *common)
+{
+	int i;
+
+	lag_info->lag_num = list_param->lag_num;
+	lag_info->lag_id = list_param->lag_id;
+	nbl_debug(common, NBL_DEBUG_MAIN, "update lag id %u, lag num %u.",
+		  list_param->lag_id, list_param->lag_num);
+
+	for (i = 0; i < NBL_RDMA_LAG_MAX_PORTS; i++) {
+		nbl_debug(common, NBL_DEBUG_MAIN, "update lag member %u, eth_id %u, vsi_id %u, active %u.",
+			  i, list_param->member_list[i].eth_id,
+			  list_param->member_list[i].vsi_id, list_param->member_list[i].active);
+		lag_info->lag_mem[i].vsi_id = list_param->member_list[i].vsi_id;
+		lag_info->lag_mem[i].eth_id = list_param->member_list[i].eth_id;
+		lag_info->lag_mem[i].active = list_param->member_list[i].active;
+	}
+}
+
+static void nbl_dev_rdma_update_bond_member(struct nbl_dev_mgt *dev_mgt,
+					    struct nbl_lag_member_list_param *list_param)
+{
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_core_dev_lag_info lag_info = {0};
+	struct nbl_aux_dev *dev_link;
+
+	if (!rdma_dev->bond_adev) {
+		nbl_err(common, NBL_DEBUG_MAIN, "Something wrong, lag adev err");
+		return;
+	}
+
+	dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	rdma_dev->lag_id = list_param->lag_id;
+
+	nbl_dev_rdma_form_lag_info(&lag_info, list_param, common);
+
+	memcpy(&dev_link->cdev_info->lag_info, &lag_info, sizeof(lag_info));
+
+	if (dev_link->cdev_info->lag_mem_notify)
+		dev_link->cdev_info->lag_mem_notify(rdma_dev->bond_adev, &lag_info);
+
+	if (rdma_dev->bond_registered && nbl_dev_rdma_bond_active_num(dev_link->cdev_info) > 1 &&
+	    !rdma_dev->bond_shaping_configed)
+		nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, true);
+	else if (nbl_dev_rdma_bond_active_num(dev_link->cdev_info) < 2 &&
+		 rdma_dev->bond_shaping_configed)
+		nbl_dev_rdma_cfg_bond(dev_mgt, dev_link->cdev_info, false);
+}
+
+static int nbl_dev_rdma_handle_bond_event(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_dev_rdma_event_data *data = NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	memcpy(&data->event_data, event_data, sizeof(data->event_data));
+	data->type = type;
+	data->callback_data = callback_data;
+
+	/* Why do we need a list here?
+	 *
+	 * First, we have to make sure we don't lose any notification. If we try to
+	 * queue_work while a work is already being processed, we don't want to lose that
+	 * notification. e.g.
+	 *
+	 * CONTEXT_0: add_slave0 -> notify_0(lag_num=1) -> add_slave1 -> notify_1(lag_num=2)
+	 * CONTEXT_1:		| -- process notify_0 -------> |
+	 *
+	 * Then why not simply use a single variable to store it? e.g.
+	 *
+	 * CONTEXT_0: add_slave0 -> notify_0 -> add_slave1 -> notify_1
+	 * CONTEXT_1:		| -- process notify_1 -> |
+	 * VARIABLE:  | -- lag_num = 0 -- | | -- lag_num = 1 --|
+	 *
+	 * or
+	 *
+	 * CONTEXT_0: add_slave0 -> notify_0 -> add_slave1 -> notify_1
+	 * CONTEXT_1:	| process notify_0 |	| process notify_1 |
+	 * VARIABLE:  | -- lag_num = 0 -- | | -- lag_num = 1 --|
+	 *
+	 * This makes sure that we always use the latest param, which is functionally correct.
+	 *
+	 * But it would require the task function (nbl_dev_rdma_process_bond_event) to hold
+	 * the lock across its whole body, because we must guarantee that once we fetch a
+	 * param we keep using it until all processing has finished, or else we might work
+	 * with different params within a single pass.
+	 *
+	 * And that requirement cannot be fulfilled. Consider this situation:
+	 * CONTEXT_0: rtnl_lock -> add_slave0 -> notify_0 -> add_slave1 -> notify_1 -> event_lock
+	 * CONTEXT_1:		| --notify_0 -> event_lock -> ib_func -> rtnl_lock -- |
+	 *
+	 * At this moment, CONTEXT_0 holds rtnl_lock but needs event_lock, while CONTEXT_1
+	 * holds event_lock but needs rtnl_lock, thus deadlock.
+	 *
+	 * Based on all of the above, we need a list. Each time we want to queue work, we
+	 * add a new entry to the list, and each run of the task dequeues one entry. The
+	 * lock then only protects the list itself (rather than the whole aux_dev
+	 * processing), so there is no deadlock.
+	 */
+	mutex_lock(&rdma_dev->lag_event_lock);
+	/* Always add_tail and dequeue the first, to maintain notification order */
+	list_add_tail(&data->node, &rdma_dev->lag_event_param_list);
+	mutex_unlock(&rdma_dev->lag_event_lock);
+
+	if (rdma_dev && rdma_dev->pf_event_ready)
+		nbl_common_queue_work_rdma(&rdma_dev->lag_event_task, true);
+
+	return 0;
+}
+
+static int nbl_dev_rdma_handle_offload_status(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_event_offload_status_data *data =
+		(struct nbl_event_offload_status_data *)event_data;
+	struct nbl_aux_dev *dev_link;
+
+	if (!rdma_dev->bond_adev)
+		return 0;
+
+	if (data->pf_vsi_id != NBL_COMMON_TO_VSI_ID(NBL_DEV_MGT_TO_COMMON(dev_mgt)))
+		return 0;
+
+	dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	if (dev_link->cdev_info->offload_status_notify)
+		dev_link->cdev_info->offload_status_notify(rdma_dev->bond_adev, data->status);
+
+	return 0;
+}
+
+static int nbl_dev_rdma_process_adev_event(void *event_data, void *callback_data)
+{
+	struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)callback_data;
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_event_rdma_bond_update *event = (struct nbl_event_rdma_bond_update *)event_data;
+	struct nbl_lag_member_list_param *list_param = &event->param;
+	struct nbl_rdma_register_param register_param = {0};
+	struct nbl_core_dev_lag_info lag_info = {0};
+
+	switch (event->subevent) {
+	case NBL_SUBEVENT_CREATE_ADEV:
+		if (!rdma_dev->adev) {
+			serv_ops->register_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						NBL_COMMON_TO_VSI_ID(common), &register_param);
+			if (register_param.has_rdma)
+				nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_ROCE, NULL);
+		}
+		break;
+	case NBL_SUBEVENT_RELEASE_ADEV:
+		if (rdma_dev->adev) {
+			nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->adev);
+			serv_ops->unregister_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						  NBL_COMMON_TO_VSI_ID(common));
+		}
+		break;
+	case NBL_SUBEVENT_CREATE_BOND_ADEV:
+		if (!rdma_dev->bond_adev) {
+			serv_ops->register_rdma_bond(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						     list_param, &register_param);
+
+			nbl_dev_rdma_form_lag_info(&lag_info, list_param, common);
+
+			if (register_param.has_rdma) {
+				rdma_dev->lag_id = list_param->lag_id;
+				nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_BOND, &lag_info);
+			}
+		}
+		break;
+	case NBL_SUBEVENT_RELEASE_BOND_ADEV:
+		if (rdma_dev->bond_adev) {
+			nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->bond_adev);
+			serv_ops->unregister_rdma_bond(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						       rdma_dev->lag_id);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int nbl_dev_rdma_process_bond_event(struct work_struct *work)
+{
+	struct nbl_dev_rdma *rdma_dev = container_of(work, struct nbl_dev_rdma, lag_event_task);
+	struct nbl_dev_mgt *dev_mgt;
+	struct nbl_common_info *common;
+	struct nbl_lag_member_list_param *list_param;
+	struct nbl_dev_rdma_event_data *data = NULL;
+	struct nbl_event_rdma_bond_update *lag_event = NULL;
+
+	mutex_lock(&rdma_dev->lag_event_lock);
+
+	if (!nbl_list_empty(&rdma_dev->lag_event_param_list)) {
+		data = list_first_entry(&rdma_dev->lag_event_param_list,
+					struct nbl_dev_rdma_event_data, node);
+		list_del(&data->node);
+	}
+
+	mutex_unlock(&rdma_dev->lag_event_lock);
+
+	if (!data)
+		return 0;
+
+	dev_mgt = (struct nbl_dev_mgt *)data->callback_data;
+	common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	lag_event = &data->event_data;
+	list_param = &lag_event->param;
+
+	nbl_info(common, NBL_DEBUG_MAIN, "process rdma lag subevent %u.", lag_event->subevent);
+
+	switch (lag_event->subevent) {
+	case NBL_SUBEVENT_UPDATE_BOND_MEMBER:
+		nbl_dev_rdma_update_bond_member(dev_mgt, list_param);
+		break;
+	default:
+		nbl_dev_rdma_process_adev_event(lag_event, dev_mgt);
+		break;
+	}
+
+	kfree(data);
+	/* Always queue it again, because we don't know whether another param needs processing */
+	if (rdma_dev && rdma_dev->pf_event_ready)
+		nbl_common_queue_work_rdma(&rdma_dev->lag_event_task, true);
+
+	return 0;
+}
+
+static int nbl_dev_rdma_handle_reset_event(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_dev_rdma *rdma_dev = (struct nbl_dev_rdma *)callback_data;
+	enum nbl_core_reset_event event = *(enum nbl_core_reset_event *)event_data;
+	struct nbl_aux_dev *dev_link;
+	struct auxiliary_device *adev;
+
+	adev = rdma_dev->adev ? rdma_dev->adev : rdma_dev->bond_adev;
+	if (!adev)
+		return -1;
+
+	dev_link = container_of(adev, struct nbl_aux_dev, adev);
+	if (dev_link->reset_event_notify)
+		dev_link->reset_event_notify(adev, event);
+
+	if (rdma_dev->has_grc && rdma_dev->grc_adev) {
+		adev = rdma_dev->grc_adev;
+		dev_link = container_of(adev, struct nbl_aux_dev, adev);
+		if (dev_link->reset_event_notify)
+			dev_link->reset_event_notify(adev, event);
+	}
+
+	return 0;
+}
+
+size_t nbl_dev_rdma_qos_cfg_store(struct nbl_dev_mgt *dev_mgt, int offset,
+				  const char *buf, size_t count)
+{
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_aux_dev *dev_link;
+	struct auxiliary_device *adev;
+
+	if (rdma_dev->bond_adev) {
+		adev = rdma_dev->bond_adev;
+		dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	} else if (rdma_dev->adev) {
+		adev = rdma_dev->adev;
+		dev_link = container_of(adev, struct nbl_aux_dev, adev);
+	} else {
+		return -EINVAL;
+	}
+
+	if (dev_link->qos_cfg_store)
+		return dev_link->qos_cfg_store(adev, offset, buf, count);
+
+	return -EINVAL;
+}
+
+size_t nbl_dev_rdma_qos_cfg_show(struct nbl_dev_mgt *dev_mgt, int offset, char *buf)
+{
+	struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt);
+	struct nbl_aux_dev *dev_link;
+	struct auxiliary_device *adev;
+
+	if (rdma_dev->bond_adev) {
+		adev = rdma_dev->bond_adev;
+		dev_link = container_of(rdma_dev->bond_adev, struct nbl_aux_dev, adev);
+	} else if (rdma_dev->adev) {
+		adev = rdma_dev->adev;
+		dev_link = container_of(adev, struct nbl_aux_dev, adev);
+	} else {
+		return -EINVAL;
+	}
+
+	if (dev_link->qos_cfg_show)
+		return dev_link->qos_cfg_show(adev, offset, buf);
+
+	return -EINVAL;
+}
+
+static struct nbl_core_dev_info *
+nbl_dev_rdma_setup_cdev_info(struct nbl_dev_mgt *dev_mgt, u8 type,
+			     struct
nbl_core_dev_lag_info *lag_info) +{ + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(common_dev); + struct nbl_core_dev_info *cdev_info = NULL; + u16 base_vector_id = msix_info->serv_info[NBL_MSIX_RDMA_TYPE].base_vector_id; + int irq_num, i; + + cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL); + if (!cdev_info) + goto malloc_cdev_info_err; + + cdev_info->dma_dev = NBL_COMMON_TO_DMA_DEV(common); + cdev_info->pdev = NBL_COMMON_TO_PDEV(common); + cdev_info->hw_addr = serv_ops->get_hw_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NULL); + cdev_info->real_hw_addr = serv_ops->get_real_hw_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common)); + cdev_info->function_id = serv_ops->get_function_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common)); + cdev_info->netdev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt)->netdev; + + cdev_info->vsi_id = NBL_COMMON_TO_VSI_ID(common); + cdev_info->eth_mode = NBL_COMMON_TO_ETH_MODE(common); + cdev_info->eth_id = NBL_COMMON_TO_ETH_ID(common); + cdev_info->mem_type = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt)->mem_type; + + if (type == NBL_AUX_DEV_GRC) + cdev_info->rdma_cap_num = + serv_ops->get_rdma_cap_num(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + serv_ops->get_real_bdf(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_VSI_ID(common), + &cdev_info->real_bus, &cdev_info->real_dev, + &cdev_info->real_function); + + cdev_info->send = nbl_dev_grc_process_send; + + /* grc aux dev needs no interrupt */ + if (type == NBL_AUX_DEV_GRC) + goto out; + + irq_num = msix_info->serv_info[NBL_MSIX_RDMA_TYPE].num; + cdev_info->msix_entries = kcalloc(irq_num, sizeof(*cdev_info->msix_entries), GFP_KERNEL); + if (!cdev_info->msix_entries) + goto malloc_msix_entries_err; + + cdev_info->global_vector_id = kcalloc(irq_num, sizeof(*cdev_info->global_vector_id), + GFP_KERNEL); + if (!cdev_info->global_vector_id) + goto malloc_global_vector_id_err; + + for (i = 0; i < irq_num; i++) { + memcpy(&cdev_info->msix_entries[i], &msix_info->msix_entries[i + base_vector_id], + sizeof(cdev_info->msix_entries[i])); + cdev_info->global_vector_id[i] = + serv_ops->get_global_vector(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + i + base_vector_id); + } + cdev_info->msix_count = irq_num; + + if (type == NBL_AUX_DEV_BOND && lag_info) { + memcpy(&cdev_info->lag_info, lag_info, sizeof(cdev_info->lag_info)); + cdev_info->is_lag = true; + cdev_info->register_bond = nbl_dev_rdma_register_bond; + } + +out: + return cdev_info; + +malloc_global_vector_id_err: + kfree(cdev_info->msix_entries); +malloc_msix_entries_err: + kfree(cdev_info); +malloc_cdev_info_err: + return NULL; +} + +static void nbl_dev_rdma_remove_cdev_info(struct nbl_core_dev_info *cdev_info) +{ + kfree(cdev_info->msix_entries); + kfree(cdev_info->global_vector_id); + kfree(cdev_info); +} + +static void nbl_dev_adev_release(struct device *dev) +{ + struct nbl_aux_dev *dev_link; + + dev_link = container_of(dev, struct nbl_aux_dev, adev.dev); + nbl_dev_rdma_remove_cdev_info(dev_link->cdev_info); + kfree(dev_link); +} + +static int nbl_dev_create_rdma_aux_dev(struct nbl_dev_mgt *dev_mgt, u8 type, + struct nbl_core_dev_lag_info *lag_info) +{ + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + 
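	/* The adev's release callback (nbl_dev_adev_release) owns dev_link and cdev_info
+	 * once auxiliary_device_init() has succeeded, so the later error paths must not
+	 * free them directly.
+	 */
+	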
struct nbl_aux_dev *dev_link; + struct auxiliary_device *adev, **temp_adev = NULL; + int ret = 0; + + dev_link = kzalloc(sizeof(*dev_link), GFP_KERNEL); + if (!dev_link) + return -ENOMEM; + + adev = &dev_link->adev; + + adev->id = type == NBL_AUX_DEV_GRC ? NBL_COMMON_TO_BOARD_ID(common) : rdma_dev->adev_index; + adev->dev.parent = dev; + adev->dev.release = nbl_dev_adev_release; + + switch (type) { + case NBL_AUX_DEV_GRC: + rdma_dev->grc_adev = adev; + adev->name = "nbl.roce_grc"; + temp_adev = &rdma_dev->grc_adev; + break; + case NBL_AUX_DEV_ROCE: + rdma_dev->adev = adev; + adev->name = "nbl.roce"; + temp_adev = &rdma_dev->adev; + break; + case NBL_AUX_DEV_BOND: + rdma_dev->bond_adev = adev; + adev->name = "nbl.roce_bond"; + temp_adev = &rdma_dev->bond_adev; + break; + default: + goto unknown_type_err; + } + + dev_link->cdev_info = nbl_dev_rdma_setup_cdev_info(dev_mgt, type, lag_info); + if (!dev_link->cdev_info) { + ret = -ENOMEM; + goto malloc_cdev_info_err; + } + + ret = auxiliary_device_init(adev); + if (ret) { + dev_err(dev, "auxiliary_device_init failed, ret = %d\n", ret); + goto aux_dev_init_err; + } + + ret = __auxiliary_device_add(adev, "nbl"); + if (ret) { + dev_err(dev, "__auxiliary_device_add failed, ret = %d\n", ret); + goto aux_dev_add_err; + } + + dev_info(dev, "nbl plugged auxiliary device type %d OK\n", type); + return 0; + +aux_dev_add_err: + /* auxiliary_device_uninit() ends up in nbl_dev_adev_release(), which frees + * dev_link, so just return. + */ + auxiliary_device_uninit(adev); + if (temp_adev) + *temp_adev = NULL; + return ret; +aux_dev_init_err: + nbl_dev_rdma_remove_cdev_info(dev_link->cdev_info); +malloc_cdev_info_err: +unknown_type_err: + kfree(dev_link); + if (temp_adev) + *temp_adev = NULL; + return ret; +} + +static void nbl_dev_destroy_rdma_aux_dev(struct nbl_dev_rdma *rdma_dev, + struct auxiliary_device **adev) +{ + rdma_dev->is_halting = true; + + if (!adev || !*adev) + return; + + if (rdma_dev->pf_event_ready) + nbl_common_flush_task(&rdma_dev->abnormal_event_task); + + auxiliary_device_delete(*adev); + auxiliary_device_uninit(*adev); + + *adev = NULL; + + rdma_dev->is_halting = false; +} + +int nbl_dev_setup_rdma_dev(struct nbl_adapter *adapter, struct nbl_init_param *param) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev; + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct nbl_msix_info *msix_info = NBL_DEV_COMMON_TO_MSIX_INFO(common_dev); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_rdma_register_param register_param = {0}; + struct nbl_event_callback event_callback = {0}; + bool has_grc = false; + + /* This must be performed after ctrl dev setup */ + if (param->caps.has_ctrl) + serv_ops->setup_rdma_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + serv_ops->register_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common), &register_param); + + if (param->caps.has_grc) + has_grc = true; + + if (!register_param.has_rdma && !has_grc) + return 0; + + rdma_dev = devm_kzalloc(NBL_ADAPTER_TO_DEV(adapter), + sizeof(struct nbl_dev_rdma), GFP_KERNEL); + if (!rdma_dev) + return -ENOMEM; + + rdma_dev->has_rdma = register_param.has_rdma; + rdma_dev->has_grc = has_grc; + rdma_dev->mem_type = register_param.mem_type; + rdma_dev->adev_index = register_param.id; + msix_info->serv_info[NBL_MSIX_RDMA_TYPE].num += register_param.intr_num; + + if 
(!NBL_COMMON_TO_VF_CAP(common)) { + nbl_common_alloc_task(&rdma_dev->lag_event_task, + (void *)nbl_dev_rdma_process_bond_event); + INIT_LIST_HEAD(&rdma_dev->lag_event_param_list); + mutex_init(&rdma_dev->lag_event_lock); + + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_bond_event; + nbl_event_register(NBL_EVENT_RDMA_BOND_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } + + NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) = rdma_dev; + + return 0; +} + +void nbl_dev_remove_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_dev_rdma_event_data *data, *data_safe; + struct nbl_event_callback event_callback = {0}; + + if (!rdma_dev) + return; + + if (!NBL_COMMON_TO_VF_CAP(common)) { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_bond_event; + nbl_event_unregister(NBL_EVENT_RDMA_BOND_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + mutex_lock(&rdma_dev->lag_event_lock); + list_for_each_entry_safe(data, data_safe, + &rdma_dev->lag_event_param_list, node) { + list_del(&data->node); + kfree(data); + } + mutex_unlock(&rdma_dev->lag_event_lock); + + nbl_common_release_task(&rdma_dev->lag_event_task); + } + + if (rdma_dev->has_rdma) + serv_ops->unregister_rdma(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_VSI_ID(common)); + + if (NBL_DEV_MGT_TO_CTRL_DEV(dev_mgt)) + serv_ops->remove_rdma_id(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt)); + + devm_kfree(NBL_ADAPTER_TO_DEV(adapter), rdma_dev); + NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt) = NULL; +} + +int nbl_dev_start_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_event_callback event_callback = {0}; + int ret; + + if (!rdma_dev || (!rdma_dev->has_rdma && !rdma_dev->has_grc)) + return 0; + + if (!NBL_COMMON_TO_VF_CAP(common)) + nbl_common_alloc_task(&rdma_dev->abnormal_event_task, + nbl_dev_grc_handle_abnormal_event); + + if (chan_ops->check_queue_exist(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), + NBL_CHAN_TYPE_MAILBOX)) + chan_ops->register_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), NBL_CHAN_MSG_GRC_PROCESS, + nbl_dev_chan_grc_process_resp, dev_mgt); + + if (rdma_dev->has_grc) { + ret = nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_GRC, NULL); + if (ret) + return ret; + } + + if (rdma_dev->has_rdma) { + ret = nbl_dev_create_rdma_aux_dev(dev_mgt, NBL_AUX_DEV_ROCE, NULL); + if (ret) + goto create_rdma_aux_err; + } + + if (!NBL_COMMON_TO_VF_CAP(common)) { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_offload_status; + nbl_event_register(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + rdma_dev->pf_event_ready = true; + } + + event_callback.callback_data = rdma_dev; + event_callback.callback = nbl_dev_rdma_handle_reset_event; + nbl_event_register(NBL_EVENT_RESET_EVENT, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); 
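+ /* unlike the offload-status callback just above, the reset event is registered for PF and VF alike */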
+ + if (rdma_dev && rdma_dev->pf_event_ready) + nbl_common_queue_work_rdma(&rdma_dev->lag_event_task, true); + + return 0; + +create_rdma_aux_err: + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->grc_adev); + return ret; +} + +void nbl_dev_stop_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_event_callback event_callback = {0}; + + if (!rdma_dev) + return; + + if (!NBL_COMMON_TO_VF_CAP(common)) { + event_callback.callback_data = dev_mgt; + event_callback.callback = nbl_dev_rdma_handle_offload_status; + nbl_event_unregister(NBL_EVENT_OFFLOAD_STATUS_CHANGED, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + rdma_dev->pf_event_ready = false; + nbl_common_flush_task(&rdma_dev->abnormal_event_task); + nbl_common_flush_task(&rdma_dev->lag_event_task); + } + + event_callback.callback_data = rdma_dev; + event_callback.callback = nbl_dev_rdma_handle_reset_event; + nbl_event_unregister(NBL_EVENT_RESET_EVENT, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->bond_adev); + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->adev); + nbl_dev_destroy_rdma_aux_dev(rdma_dev, &rdma_dev->grc_adev); + + if (!NBL_COMMON_TO_VF_CAP(common)) + nbl_common_release_task(&rdma_dev->abnormal_event_task); +} + +int nbl_dev_resume_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + if (!rdma_dev || (!rdma_dev->has_rdma && !rdma_dev->has_grc)) + return 0; + + if (!NBL_COMMON_TO_VF_CAP(common)) + nbl_common_alloc_task(&rdma_dev->abnormal_event_task, + nbl_dev_grc_handle_abnormal_event); + + if (!NBL_COMMON_TO_VF_CAP(common)) + nbl_common_alloc_task(&rdma_dev->lag_event_task, nbl_dev_rdma_process_bond_event); + + return 0; +} + +int nbl_dev_suspend_rdma_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_rdma *rdma_dev = NBL_DEV_MGT_TO_RDMA_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + + if (!rdma_dev) + return 0; + + if (!NBL_COMMON_TO_VF_CAP(common)) + nbl_common_release_task(&rdma_dev->lag_event_task); + + if (!NBL_COMMON_TO_VF_CAP(common)) + nbl_common_release_task(&rdma_dev->abnormal_event_task); + + return 0; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h new file mode 100644 index 0000000000000000000000000000000000000000..d560fa0a2ebf1e8851db307ba321ec1740aa0d55 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_rdma.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_DEV_RDMA_H_ +#define _NBL_DEV_RDMA_H_ + +#include "nbl_dev.h" +#include "nbl_export_rdma.h" + +enum { + NBL_AUX_DEV_GRC = 0, + NBL_AUX_DEV_ROCE, + NBL_AUX_DEV_BOND, +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c new file mode 100644 index 0000000000000000000000000000000000000000..96c8e7c36b94f2df81a15321643ead1e6237dbc5 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.c @@ -0,0 +1,1513 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ +#include "nbl_dev.h" +#include "nbl_service.h" + +extern int device_driver_attach(struct device_driver *drv, struct device *dev); + +static struct nbl_userdev { + struct cdev cdev; + struct class *cls; + struct idr cidr; + dev_t cdevt; + struct mutex clock; /* lock character device */ + struct list_head glist; + struct mutex glock; /* lock iommu group list */ + bool success; +} nbl_userdev; + +struct nbl_vfio_batch { + unsigned long *pages_out; + unsigned long *pages_in; + int size; + int offset; + struct page **h_page; +}; + +struct nbl_userdev_dma { + struct rb_node node; + dma_addr_t iova; + unsigned long vaddr; + size_t size; + unsigned long pfn; + unsigned int ref_cnt; +}; + +bool nbl_dma_iommu_status(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + + if (dev->iommu_group && iommu_get_domain_for_dev(dev)) + return 1; + + return 0; +} + +bool nbl_dma_remap_status(struct pci_dev *pdev, u64 *dma_limit) +{ + struct device *dev = &pdev->dev; + struct iommu_domain *domain; + dma_addr_t dma_mask = dma_get_mask(dev); + + /* get dma_limit references iommu_dma_alloc_iova */ + *dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return 0; + + if (domain->geometry.force_aperture) + *dma_limit = min_t(u64, *dma_limit, domain->geometry.aperture_end); + + if (domain->type & IOMMU_DOMAIN_IDENTITY) + return 0; + + return 1; +} + +static char *user_cdevnode(const struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "nbl_userdev/%s", dev_name(dev)); +} + +static void nbl_user_change_kernel_network(struct nbl_dev_user *user) +{ + struct nbl_adapter *adapter = user->adapter; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_event_dev_mode_switch_data data = {0}; + struct net_device *netdev = net_dev->netdev; + + if (user->network_type == NBL_KERNEL_NETWORK) + return; + + rtnl_lock(); + clear_bit(NBL_USER, adapter->state); + + data.op = NBL_DEV_USER_TO_KERNEL; + data.promosic = user->user_promisc_mode; + nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + if (data.ret) { + netdev_err(netdev, "network changes to kernel space failed %d\n", data.ret); + goto unlock; + } + + user->network_type = NBL_KERNEL_NETWORK; + netdev_info(netdev, "network changes to kernel space\n"); + +unlock: + rtnl_unlock(); +} + +static int nbl_user_change_user_network(struct nbl_dev_user *user) +{ + struct nbl_adapter *adapter = user->adapter; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_common_info 
*common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct net_device *netdev = net_dev->netdev; + struct nbl_event_dev_mode_switch_data data = {0}; + int ret = 0; + + rtnl_lock(); + + data.op = NBL_DEV_KERNEL_TO_USER; + data.promosic = user->user_promisc_mode; + + nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + if (data.ret) { + netdev_err(netdev, "network changes to user space failed %u\n", data.ret); + goto unlock; + } + + set_bit(NBL_USER, adapter->state); + user->network_type = NBL_USER_NETWORK; + netdev_info(netdev, "network changes to user\n"); + +unlock: + rtnl_unlock(); + + return ret; +} + +static int nbl_cdev_open(struct inode *inode, struct file *filep) +{ + struct nbl_adapter *p; + struct nbl_dev_mgt *dev_mgt; + struct nbl_dev_user *user; + int opened; + + mutex_lock(&nbl_userdev.clock); + p = idr_find(&nbl_userdev.cidr, iminor(inode)); + mutex_unlock(&nbl_userdev.clock); + + if (!p) + return -ENODEV; + + if (test_bit(NBL_FATAL_ERR, p->state)) + return -EIO; + + dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(p); + user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + opened = atomic_cmpxchg(&user->open_cnt, 0, 1); + if (opened) + return -EBUSY; + + filep->private_data = p; + + return 0; +} + +static int nbl_cdev_release(struct inode *inode, struct file *filp) +{ + struct nbl_adapter *adapter = filp->private_data; + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + + chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); + nbl_user_change_kernel_network(user); + serv_ops->config_fd_flow_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_CHAN_FDIR_RULE_ISOLATE, NBL_FD_STATE_FLUSH); + atomic_set(&user->open_cnt, 0); + user->user_promisc_mode = 0; + + return 0; +} + +static void nbl_userdev_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void nbl_userdev_mmap_close(struct vm_area_struct *vma) +{ +} + +static vm_fault_t nbl_userdev_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + vm_fault_t ret = VM_FAULT_NOPAGE; + + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + ret = VM_FAULT_SIGBUS; + + return ret; +} + +static const struct vm_operations_struct nbl_userdev_mmap_ops = { + .open = nbl_userdev_mmap_open, + .close = nbl_userdev_mmap_close, + .fault = nbl_userdev_mmap_fault, +}; + +static int nbl_userdev_common_mmap(struct nbl_adapter *adapter, struct vm_area_struct *vma) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct pci_dev *pdev = adapter->pdev; + unsigned int index; + u64 phys_len, req_len, req_start, pgoff; + int ret; + + index = vma->vm_pgoff >> (NBL_DEV_USER_PCI_OFFSET_SHIFT - PAGE_SHIFT); + pgoff = vma->vm_pgoff & ((1U << (NBL_DEV_USER_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + + req_len = vma->vm_end - vma->vm_start; + req_start = pgoff << PAGE_SHIFT; + + if (index == NBL_DEV_SHM_MSG_RING_INDEX) + phys_len = NBL_USER_DEV_SHMMSGRING_SIZE; + else + phys_len = PAGE_ALIGN(pci_resource_len(pdev, 0)); + + if (req_start + req_len > phys_len) + return -EINVAL; + + if (index == NBL_DEV_SHM_MSG_RING_INDEX) { 
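+ /* the shared message ring lives in kernel memory, so remap its pages directly rather than the BAR */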
+ struct page *page = virt_to_page((void *)((unsigned long)user->shm_msg_ring + + (pgoff << PAGE_SHIFT))); + vma->vm_pgoff = pgoff; + ret = remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), + req_len, vma->vm_page_prot); + return ret; + } + + vma->vm_private_data = adapter; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = (pci_resource_start(pdev, 0) >> PAGE_SHIFT) + pgoff; + + vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_ops = &nbl_userdev_mmap_ops; + + return 0; +} + +static int nbl_cdev_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct nbl_adapter *adapter = filep->private_data; + + return nbl_userdev_common_mmap(adapter, vma); +} + +static int nbl_userdev_register_net(struct nbl_adapter *adapter, void *resp, + struct nbl_chan_send_info *chan_send) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_register_net_result *result = (struct nbl_register_net_result *)resp; + struct nbl_dev_vsi *vsi; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + + memset(result, 0, sizeof(*result)); + result->tx_queue_num = vsi->queue_num; + result->rx_queue_num = vsi->queue_num; + result->rdma_enable = 0; + result->queue_offset = vsi->queue_offset; + + chan_send->ack_len = sizeof(struct nbl_register_net_result); + + return 0; +} + +static int nbl_userdev_alloc_txrx_queues(struct nbl_adapter *adapter, void *resp, + struct nbl_chan_send_info *chan_send) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_chan_param_alloc_txrx_queues *result; + struct nbl_dev_vsi *vsi; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + result = (struct nbl_chan_param_alloc_txrx_queues *)resp; + result->queue_num = vsi->queue_num; + result->vsi_id = vsi->vsi_id; + + chan_send->ack_len = sizeof(struct nbl_chan_param_alloc_txrx_queues); + + return 0; +} + +static int nbl_userdev_get_vsi_id(struct nbl_adapter *adapter, void *resp, + struct nbl_chan_send_info *chan_send) +{ + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_chan_param_get_vsi_id *result; + struct nbl_dev_vsi *vsi; + + vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_USER]; + result = (struct nbl_chan_param_get_vsi_id *)resp; + result->vsi_id = vsi->vsi_id; + + chan_send->ack_len = sizeof(struct nbl_chan_param_get_vsi_id); + + return 0; +} + +static void nbl_userdev_translate_register_vsi2q(struct nbl_chan_send_info *chan_send) +{ + struct nbl_chan_param_register_vsi2q *param = chan_send->arg; + + param->vsi_index = NBL_VSI_USER; +} + +static void nbl_userdev_translate_clear_queues(struct nbl_chan_send_info *chan_send) +{ + chan_send->msg_type = NBL_CHAN_MSG_REMOVE_RSS; +} + +static long nbl_userdev_channel_ioctl(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_dev_user_channel_msg *msg; + void *resp; + int ret = 0; + + msg = vmalloc(sizeof(*msg)); + if (!msg) + return -ENOMEM; + + if (copy_from_user(msg, (void __user *)arg, sizeof(*msg))) { + vfree(msg); + return -EFAULT; + } + + resp = (unsigned char *)msg->data + msg->arg_len; + resp = (void *)ALIGN((u64)resp, 4); + NBL_CHAN_SEND(chan_send, 
msg->dst_id, msg->msg_type, msg->data, msg->arg_len, + resp, msg->ack_length, msg->ack); + + dev_dbg(&adapter->pdev->dev, "msg_type %u, arg_len %u, request %llx, resp %llx\n", + msg->msg_type, msg->arg_len, (u64)msg->data, (u64)resp); + + switch (msg->msg_type) { + case NBL_CHAN_MSG_REGISTER_NET: + ret = nbl_userdev_register_net(adapter, resp, &chan_send); + break; + case NBL_CHAN_MSG_ALLOC_TXRX_QUEUES: + ret = nbl_userdev_alloc_txrx_queues(adapter, resp, &chan_send); + break; + case NBL_CHAN_MSG_GET_VSI_ID: + ret = nbl_userdev_get_vsi_id(adapter, resp, &chan_send); + break; + case NBL_CHAN_MSG_ADD_MACVLAN: + WARN_ON(1); + break; + case NBL_CHAN_MSG_DEL_MACVLAN: + case NBL_CHAN_MSG_UNREGISTER_NET: + case NBL_CHAN_MSG_ADD_MULTI_RULE: + case NBL_CHAN_MSG_DEL_MULTI_RULE: + case NBL_CHAN_MSG_FREE_TXRX_QUEUES: + case NBL_CHAN_MSG_CLEAR_FLOW: + break; + case NBL_CHAN_MSG_CLEAR_QUEUE: + nbl_userdev_translate_clear_queues(&chan_send); + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + break; + case NBL_CHAN_MSG_REGISTER_VSI2Q: + nbl_userdev_translate_register_vsi2q(&chan_send); + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + break; + default: + ret = chan_ops->send_msg(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), &chan_send); + break; + } + + msg->ack_length = chan_send.ack_len; + msg->ack_err = ret; + ret = copy_to_user((void __user *)arg, msg, sizeof(*msg)) ? -EFAULT : 0; + + vfree(msg); + + return ret; +} + +static long nbl_userdev_switch_network(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + int timeout = 50; + int type; + + if (get_user(type, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), + "switch network: failed to get type from user\n"); + return -EFAULT; + } + + if (type == user->network_type) + return 0; + + while (test_bit(NBL_RESETTING, adapter->state)) { + timeout--; + if (!timeout) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), + "timeout waiting for reset before switching network type\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* TODO: resolve concurrency on adapter->state */ + if (type == NBL_USER_NETWORK) + nbl_user_change_user_network(user); + else + nbl_user_change_kernel_network(user); + + return 0; +} + +static long nbl_userdev_get_ifindex(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct net_device *netdev = net_dev->netdev; + int ifindex; + + ifindex = netdev->ifindex; + return copy_to_user((void __user *)arg, &ifindex, sizeof(ifindex)) ? -EFAULT : 0; +} + +static long nbl_userdev_clear_eventfd(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + + chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); + + return 0; +} + +static long nbl_userdev_set_listener(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + int msgtype; + + if (get_user(msgtype, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get listener msgtype failed\n"); + return -EFAULT; + } + + 
chan_ops->set_listener_msgtype(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), msgtype); + + return 0; +} + +static long nbl_userdev_set_eventfd(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_channel_ops *chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + struct eventfd_ctx *ctx; + struct fd eventfd; + int fd; + + if (get_user(fd, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get user fd failed\n"); + return -EFAULT; + } + + eventfd = fdget(fd); + if (!eventfd.file) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get eventfd failed\n"); + return -EBADF; + } + + ctx = eventfd_ctx_fileget(eventfd.file); + /* the ctx holds its own file reference, so drop ours either way */ + fdput(eventfd); + if (IS_ERR(ctx)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), "get eventfd ctx failed\n"); + return PTR_ERR(ctx); + } + + chan_ops->set_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt), user->shm_msg_ring, ctx); + + return 0; +} + +static long nbl_userdev_get_bar_size(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + size_t size = pci_resource_len(adapter->pdev, 0); + + /* get_hw_addr() reports the usable BAR size back through &size */ + serv_ops->get_hw_addr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &size); + + return copy_to_user((void __user *)arg, &size, sizeof(size)) ? -EFAULT : 0; +} + +static long nbl_userdev_get_dma_limit(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + + /** + * The Linux kernel prefers 32-bit IOVAs; once the 32-bit space is used + * up it falls back to the high address space, allocating from high + * addresses down. + * + * DPDK sets its base address at 4GB (see eal_get_baseaddr), so DPDK + * IOVAs almost never conflict with the kernel's. Like a heap and a + * stack, the kernel allocates IOVAs from high to low while DPDK + * allocates from low to high. + * + * But consider this scene: the kernel IOMMU config is passthrough, the + * nbl device has been switched to DMA mode via sysfs, and a concurrent + * DPDK process attaches the device through UIO, using PA as IOVA. That + * PA may be below 4GB, so the IOMMU mapping (iova(pa)->pa) may + * conflict with the kernel's own mappings. + * + * DPDK's remap policy is therefore: when DPDK uses a real IOVA, it + * leaves the IOVA MSB clear; when it uses PA as IOVA, it sets the IOVA + * MSB. The clean fix would be to call reserve_iova() to keep DPDK and + * the kernel consistent, but struct iommu_dma_cookie is not exported, + * so we cannot reach the iova_domain via + * iommu_domain->iova_cookie->iovad without redefining + * struct iommu_dma_cookie in driver code.
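 + * + * Illustrative example (numbers assumed, not taken from the device + * spec): with a 48-bit DMA mask and an IOMMU aperture ending at + * 0xffff_ffff_ffff, nbl_dma_remap_status() resolves dma_limit to + * min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) clamped by + * aperture_end; user space reads that bound via + * NBL_DEV_USER_GET_DMA_LIMIT and sizes its IOVA allocator below it.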
+ */ + + return copy_to_user((void __user *)arg, &user->dma_limit, sizeof(user->dma_limit)) ? -EFAULT : 0; +} + +static long nbl_userdev_set_promisc_mode(struct nbl_adapter *adapter, unsigned long arg) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_event_dev_mode_switch_data data = {0}; + int user_promisc_mode; + int ret = 0; + + if (get_user(user_promisc_mode, (unsigned long __user *)arg)) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), + "set promisc mode: failed to get mode from user\n"); + return -EFAULT; + } + + if (user_promisc_mode == user->user_promisc_mode) + return 0; + + if (user->network_type == NBL_USER_NETWORK) { + data.op = NBL_DEV_SET_USER_PROMISC_MODE; + data.promosic = user_promisc_mode; + nbl_event_notify(NBL_EVENT_DEV_MODE_SWITCH, &data, NBL_COMMON_TO_ETH_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + ret = data.ret; + if (ret) { + dev_err(NBL_ADAPTER_TO_DEV(adapter), + "user set promisc mode %u failed %d\n", user_promisc_mode, ret); + return ret; + } + } + + user->user_promisc_mode = user_promisc_mode; + return ret; +} + +static long nbl_userdev_common_ioctl(struct nbl_adapter *adapter, unsigned int cmd, + unsigned long arg) +{ + int ret = -EINVAL; + + switch (cmd) { + case NBL_DEV_USER_CHANNEL: + ret = nbl_userdev_channel_ioctl(adapter, arg); + break; + case NBL_DEV_USER_MAP_DMA: + case NBL_DEV_USER_UNMAP_DMA: + break; + case NBL_DEV_USER_SWITCH_NETWORK: + ret = nbl_userdev_switch_network(adapter, arg); + break; + case NBL_DEV_USER_GET_IFINDEX: + ret = nbl_userdev_get_ifindex(adapter, arg); + break; + case NBL_DEV_USER_SET_EVENTFD: + ret = nbl_userdev_set_eventfd(adapter, arg); + break; + case NBL_DEV_USER_CLEAR_EVENTFD: + ret = nbl_userdev_clear_eventfd(adapter, arg); + break; + case NBL_DEV_USER_SET_LISTENER: + ret = nbl_userdev_set_listener(adapter, arg); + break; + case NBL_DEV_USER_GET_BAR_SIZE: + ret = nbl_userdev_get_bar_size(adapter, arg); + break; + case NBL_DEV_USER_GET_DMA_LIMIT: + ret = nbl_userdev_get_dma_limit(adapter, arg); + break; + case NBL_DEV_USER_SET_PROMISC_MODE: + ret = nbl_userdev_set_promisc_mode(adapter, arg); + break; + default: + break; + } + + return ret; +} + +static long nbl_cdev_unlock_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + struct nbl_adapter *adapter = filep->private_data; + + return nbl_userdev_common_ioctl(adapter, cmd, arg); +} + +static ssize_t nbl_vfio_read(struct vfio_device *vdev, char __user *buf, + size_t count, loff_t *ppos) +{ + return -EFAULT; +} + +static ssize_t nbl_vfio_write(struct vfio_device *vdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + return count; +} + +#define NBL_VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(unsigned long)) + +static int nbl_vfio_batch_init(struct nbl_vfio_batch *batch) +{ + batch->offset = 0; + batch->size = 0; + + batch->pages_in = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!batch->pages_in) + return -ENOMEM; + + batch->pages_out = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!batch->pages_out) { + free_page((unsigned long)batch->pages_in); + return -ENOMEM; + } + + batch->h_page = kcalloc(NBL_VFIO_BATCH_MAX_CAPACITY, sizeof(struct page *), GFP_KERNEL); + if (!batch->h_page) { + free_page((unsigned long)batch->pages_in); + free_page((unsigned long)batch->pages_out); + return -ENOMEM; + } + + return 0; +} + +static void nbl_vfio_batch_fini(struct nbl_vfio_batch *batch) +{ + if 
(batch->pages_in) + free_page((unsigned long)batch->pages_in); + + if (batch->pages_out) + free_page((unsigned long)batch->pages_out); + + kfree(batch->h_page); +} + +static struct nbl_userdev_dma *nbl_userdev_find_dma(struct nbl_dev_user_iommu_group *group, + dma_addr_t start, size_t size) +{ + struct rb_node *node = group->dma_tree.rb_node; + + while (node) { + struct nbl_userdev_dma *dma = rb_entry(node, struct nbl_userdev_dma, node); + + if (start + size <= dma->vaddr) + node = node->rb_left; + else if (start >= dma->vaddr + dma->size) + node = node->rb_right; + else + return dma; + } + + return NULL; +} + +static void nbl_userdev_link_dma(struct nbl_dev_user_iommu_group *group, + struct nbl_userdev_dma *new) +{ + struct rb_node **link = &group->dma_tree.rb_node, *parent = NULL; + struct nbl_userdev_dma *dma; + + while (*link) { + parent = *link; + dma = rb_entry(parent, struct nbl_userdev_dma, node); + + if (new->vaddr + new->size <= dma->vaddr) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + + rb_link_node(&new->node, parent, link); + rb_insert_color(&new->node, &group->dma_tree); +} + +static void nbl_userdev_remove_dma(struct nbl_dev_user_iommu_group *group, + struct nbl_userdev_dma *dma) +{ + struct nbl_vfio_batch batch; + long npage, batch_pages; + unsigned long vaddr; + int ret, caps; + unsigned long *ppfn, pfn; + int i = 0; + + dev_dbg(group->dev, "dma remove: vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", + dma->vaddr, dma->iova, dma->size); + iommu_unmap(iommu_get_domain_for_dev(group->dev), dma->iova, dma->size); + + ret = nbl_vfio_batch_init(&batch); + if (ret) { + caps = 1; + ppfn = &pfn; + } else { + caps = NBL_VFIO_BATCH_MAX_CAPACITY; + ppfn = batch.pages_in; + } + + npage = dma->size >> PAGE_SHIFT; + vaddr = dma->vaddr; + + while (npage) { + if (npage >= caps) + batch_pages = caps; + else + batch_pages = npage; + + ppfn[0] = vaddr >> PAGE_SHIFT; + for (i = 1; i < batch_pages; i++) + ppfn[i] = ppfn[i - 1] + 1; + + vfio_unpin_pages(group->vdev, vaddr, batch_pages); + dev_dbg(group->dev, "unpin pages 0x%lx, npages %ld, ret %d\n", + ppfn[0], batch_pages, ret); + npage -= batch_pages; + vaddr += (batch_pages << PAGE_SHIFT); + } + + nbl_vfio_batch_fini(&batch); + rb_erase(&dma->node, &group->dma_tree); + kfree(dma); +} + +static long nbl_userdev_dma_map_ioctl(struct nbl_dev_user *user, unsigned long arg) +{ + struct nbl_dev_user_dma_map map; + struct nbl_adapter *adapter = user->adapter; + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + struct nbl_vfio_batch batch; + struct nbl_userdev_dma *dma; + struct page *h_page; + unsigned long minsz, pfn_base = 0, pfn; + unsigned long vaddr, vfn; + dma_addr_t iova; + u32 mask = NBL_DEV_USER_DMA_MAP_FLAG_READ | NBL_DEV_USER_DMA_MAP_FLAG_WRITE; + size_t size; + long npage, batch_pages, pinned = 0; + int i, ret = 0; + phys_addr_t phys; + + minsz = offsetofend(struct nbl_dev_user_dma_map, size); + + if (copy_from_user(&map, (void __user *)arg, minsz)) + return -EFAULT; + + if (map.argsz < minsz || map.flags & ~mask) + return -EINVAL; + + npage = map.size >> PAGE_SHIFT; + vaddr = map.vaddr; + iova = map.iova; + + if (!npage) + return ret; + mutex_lock(&user->group->dma_tree_lock); + /* rb-tree find */ + dma = nbl_userdev_find_dma(user->group, vaddr, map.size); + if (dma && dma->iova == iova && dma->size == map.size) { + vfn = vaddr >> PAGE_SHIFT; + ret = vfio_pin_pages(NBL_USERDEV_TO_VFIO_DEV(user), + vaddr, 1, IOMMU_READ | IOMMU_WRITE, &h_page); + if (ret <= 0) { + dev_err(dev, "vfio_pin_pages 
failed %d\n", ret); + goto mutext_unlock; + } + + pfn = page_to_pfn(h_page); + ret = 0; + vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, 1); + if (pfn != dma->pfn) { + dev_err(dev, "multiple dma pfn not equal, new pfn %lu, dma pfn %lu\n", + pfn, dma->pfn); + ret = -EINVAL; + goto mutext_unlock; + } + + dev_info(dev, "existing dma info, ref_cnt++\n"); + dma->ref_cnt++; + goto mutext_unlock; + } else if (dma) { + dev_info(dev, "multiple dma not equal\n"); + ret = -EINVAL; + goto mutext_unlock; + } + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) { + ret = -ENOMEM; + goto mutext_unlock; + } + + if (nbl_vfio_batch_init(&batch)) { + kfree(dma); + ret = -ENOMEM; + goto mutext_unlock; + } + + while (npage) { + if (batch.size == 0) { + if (npage >= NBL_VFIO_BATCH_MAX_CAPACITY) + batch_pages = NBL_VFIO_BATCH_MAX_CAPACITY; + else + batch_pages = npage; + batch.pages_in[0] = vaddr >> PAGE_SHIFT; + for (i = 1; i < batch_pages; i++) + batch.pages_in[i] = batch.pages_in[i - 1] + 1; + ret = vfio_pin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, batch_pages, + IOMMU_READ | IOMMU_WRITE, batch.h_page); + + dev_dbg(dev, "page %ld pages, return %d\n", batch_pages, batch.size); + if (ret <= 0) { + dev_err(dev, "pin page failed\n"); + goto unwind; + } + + for (i = 0; i < batch_pages; i++) + batch.pages_out[i] = page_to_pfn(batch.h_page[i]); + batch.offset = 0; + batch.size = ret; + if (!pfn_base) { + pfn_base = batch.pages_out[batch.offset]; + dma->pfn = batch.pages_out[batch.offset]; + } + } + + while (batch.size) { + pfn = batch.pages_out[batch.offset]; + if (pfn == (pfn_base + pinned)) { + pinned++; + vaddr += PAGE_SIZE; + batch.offset++; + batch.size--; + npage--; + continue; + } + + size = pinned << PAGE_SHIFT; + phys = pfn_base << PAGE_SHIFT; + + ret = iommu_map(iommu_get_domain_for_dev(dev), iova, phys, + size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); + if (ret) { + dev_err(dev, "iommu_map failed\n"); + goto unwind; + } + dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx, " + "size 0x%llx\n", (u64)iova, (u64)phys, (u64)size); + pfn_base = pfn; + pinned = 0; + iova += size; + } + } + + if (pinned) { + size = pinned << PAGE_SHIFT; + phys = pfn_base << PAGE_SHIFT; + ret = iommu_map(iommu_get_domain_for_dev(dev), iova, phys, + size, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL); + if (ret) { + dev_err(dev, "iommu_map failed\n"); + goto unwind; + } + dev_dbg(dev, "iommu map succeed, iova 0x%llx, phys 0x%llx, " + "size 0x%llx\n", (u64)iova, (u64)phys, (u64)size); + } + nbl_vfio_batch_fini(&batch); + + dma->iova = map.iova; + dma->size = map.size; + dma->vaddr = map.vaddr; + dma->ref_cnt = 1; + nbl_userdev_link_dma(user->group, dma); + + dev_info(dev, "dma map info: vaddr=0x%llx, iova=0x%llx, size=0x%llx\n", + (u64)map.vaddr, (u64)map.iova, (u64)map.size); + mutex_unlock(&user->group->dma_tree_lock); + + return ret; + +unwind: + if (iova > map.iova) + iommu_unmap(iommu_get_domain_for_dev(dev), map.iova, iova - map.iova); + + if (batch.size) + vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, batch.size); + + npage = (vaddr - map.vaddr) >> PAGE_SHIFT; + vaddr = map.vaddr; + + while (npage) { + if (npage >= NBL_VFIO_BATCH_MAX_CAPACITY) + batch_pages = NBL_VFIO_BATCH_MAX_CAPACITY; + else + batch_pages = npage; + + batch.pages_in[0] = vaddr >> PAGE_SHIFT; + for (i = 1; i < batch_pages; i++) + batch.pages_in[i] = batch.pages_in[i - 1] + 1; + + vfio_unpin_pages(NBL_USERDEV_TO_VFIO_DEV(user), vaddr, batch_pages); + npage -= batch_pages; + vaddr += (batch_pages << 
PAGE_SHIFT); + } + nbl_vfio_batch_fini(&batch); + +mutext_unlock: + mutex_unlock(&user->group->dma_tree_lock); + + return ret; +} + +static long nbl_userdev_dma_unmap_ioctl(struct nbl_dev_user *user, unsigned long arg) +{ + struct nbl_adapter *adapter = user->adapter; + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + struct nbl_dev_user_dma_unmap unmap; + struct nbl_userdev_dma *dma; + unsigned long minsz; + + minsz = offsetofend(struct nbl_dev_user_dma_unmap, size); + + if (copy_from_user(&unmap, (void __user *)arg, minsz)) + return -EFAULT; + + if (unmap.argsz < minsz) + return -EINVAL; + + dev_info(dev, "dma unmap info: vaddr=0x%llx, iova=0x%llx, size=0x%llx\n", + (u64)unmap.vaddr, (u64)unmap.iova, (u64)unmap.size); + + mutex_lock(&user->group->dma_tree_lock); + user->group->vdev = NBL_USERDEV_TO_VFIO_DEV(user); + dma = nbl_userdev_find_dma(user->group, unmap.vaddr, unmap.size); + /* unmap pages; the rb-tree is protected by dma_tree_lock */ + if (dma) { + if (dma->vaddr != unmap.vaddr || dma->iova != unmap.iova || dma->size != unmap.size) + dev_err(dev, "dma unmap mismatch, unmap vaddr 0x%llx, iova 0x%llx, " + "size 0x%llx, dma rbtree vaddr 0x%lx, iova 0x%llx, size 0x%lx\n", + unmap.vaddr, unmap.iova, unmap.size, + dma->vaddr, dma->iova, dma->size); + dma->ref_cnt--; + if (!dma->ref_cnt) + nbl_userdev_remove_dma(user->group, dma); + } + mutex_unlock(&user->group->dma_tree_lock); + + return 0; +} + +static long nbl_vfio_ioctl(struct vfio_device *vdev, unsigned int cmd, unsigned long arg) +{ + struct nbl_dev_user *user; + long ret; + + user = NBL_VFIO_DEV_TO_USERDEV(vdev); + switch (cmd) { + case NBL_DEV_USER_MAP_DMA: + ret = nbl_userdev_dma_map_ioctl(user, arg); + break; + case NBL_DEV_USER_UNMAP_DMA: + ret = nbl_userdev_dma_unmap_ioctl(user, arg); + break; + default: + ret = nbl_userdev_common_ioctl(user->adapter, cmd, arg); + break; + } + + return ret; +} + +static int nbl_vfio_mmap(struct vfio_device *vdev, struct vm_area_struct *vma) +{ + struct nbl_dev_user *user; + + user = NBL_VFIO_DEV_TO_USERDEV(vdev); + return nbl_userdev_common_mmap(user->adapter, vma); +} + +static void nbl_vfio_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length) +{ + struct nbl_dev_user *user = NBL_VFIO_DEV_TO_USERDEV(vdev); + struct nbl_userdev_dma *dma; + + dev_info(user->group->dev, "vdev notifier iova 0x%llx, size 0x%llx\n", + iova, length); + + mutex_lock(&user->group->dma_tree_lock); + user->group->vdev = vdev; + dma = nbl_userdev_find_dma(user->group, (dma_addr_t)iova, (size_t)length); + if (dma) + nbl_userdev_remove_dma(user->group, dma); + mutex_unlock(&user->group->dma_tree_lock); +} + +static void nbl_userdev_group_get(struct nbl_dev_user_iommu_group *group) +{ + kref_get(&group->kref); +} + +static void nbl_userdev_release_group(struct kref *kref) +{ + struct nbl_dev_user_iommu_group *group; + struct rb_node *node; + + group = container_of(kref, struct nbl_dev_user_iommu_group, kref); + list_del(&group->group_next); + mutex_unlock(&nbl_userdev.glock); + while ((node = rb_first(&group->dma_tree))) + nbl_userdev_remove_dma(group, rb_entry(node, struct nbl_userdev_dma, node)); + + iommu_group_put(group->iommu_group); + kfree(group); +} + +static void nbl_userdev_group_put(struct nbl_dev_user *user, struct nbl_dev_user_iommu_group *group) +{ + group->vdev = NBL_USERDEV_TO_VFIO_DEV(user); + + kref_put_mutex(&group->kref, nbl_userdev_release_group, &nbl_userdev.glock); +} + +static struct nbl_dev_user_iommu_group * + nbl_userdev_group_get_from_iommu(struct iommu_group *iommu_group) +{ + struct 
nbl_dev_user_iommu_group *group; + + mutex_lock(&nbl_userdev.glock); + list_for_each_entry(group, &nbl_userdev.glist, group_next) { + if (group->iommu_group == iommu_group) { + nbl_userdev_group_get(group); + mutex_unlock(&nbl_userdev.glock); + return group; + } + } + + mutex_unlock(&nbl_userdev.glock); + + return NULL; +} + +static +struct nbl_dev_user_iommu_group *nbl_userdev_create_group(struct iommu_group *iommu_group, + struct device *dev, + struct vfio_device *vdev) +{ + struct nbl_dev_user_iommu_group *group, *tmp; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + kref_init(&group->kref); + mutex_init(&group->dma_tree_lock); + group->iommu_group = iommu_group; + group->dma_tree = RB_ROOT; + group->dev = dev; + group->vdev = vdev; + + mutex_lock(&nbl_userdev.glock); + list_for_each_entry(tmp, &nbl_userdev.glist, group_next) { + if (tmp->iommu_group == iommu_group) { + nbl_userdev_group_get(tmp); + mutex_unlock(&nbl_userdev.glock); + kfree(group); + return tmp; + } + } + + list_add(&group->group_next, &nbl_userdev.glist); + mutex_unlock(&nbl_userdev.glock); + + return group; +} + +static int nbl_vfio_open(struct vfio_device *vdev) +{ + struct nbl_dev_user *user; + struct nbl_dev_user_iommu_group *group; + struct iommu_group *iommu_group; + struct nbl_adapter *adapter; + struct pci_dev *pdev; + int ret = 0, opened; + + user = NBL_VFIO_DEV_TO_USERDEV(vdev); + adapter = user->adapter; + pdev = adapter->pdev; + + if (test_bit(NBL_FATAL_ERR, adapter->state)) + return -EIO; + + opened = atomic_cmpxchg(&user->open_cnt, 0, 1); + if (opened) + return -EBUSY; + + /* add iommu group list */ + iommu_group = iommu_group_get(&pdev->dev); + if (!iommu_group) { + dev_err(&pdev->dev, "nbl vfio open failed\n"); + ret = -EINVAL; + goto clear_open_cnt; + } + + group = nbl_userdev_group_get_from_iommu(iommu_group); + if (!group) { + group = nbl_userdev_create_group(iommu_group, &pdev->dev, vdev); + if (IS_ERR(group)) { + iommu_group_put(iommu_group); + ret = PTR_ERR(group); + goto clear_open_cnt; + } + } else { + iommu_group_put(iommu_group); + } + + user->group = group; + + dev_info(&pdev->dev, "nbl vfio open\n"); + + return ret; + +clear_open_cnt: + atomic_set(&user->open_cnt, 0); + return ret; +} + +static void nbl_vfio_close(struct vfio_device *vdev) +{ + struct nbl_dev_user *user; + struct nbl_adapter *adapter; + struct pci_dev *pdev; + struct nbl_dev_mgt *dev_mgt; + struct nbl_channel_ops *chan_ops; + struct nbl_service_ops *serv_ops; + + user = NBL_VFIO_DEV_TO_USERDEV(vdev); + adapter = user->adapter; + pdev = adapter->pdev; + dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + chan_ops = NBL_DEV_MGT_TO_CHAN_OPS(dev_mgt); + serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + + if (user->group) + nbl_userdev_group_put(user, user->group); + user->group = NULL; + + chan_ops->clear_listener_info(NBL_DEV_MGT_TO_CHAN_PRIV(dev_mgt)); + nbl_user_change_kernel_network(user); + serv_ops->config_fd_flow_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_CHAN_FDIR_RULE_ISOLATE, NBL_FD_STATE_FLUSH); + atomic_set(&user->open_cnt, 0); + user->user_promisc_mode = 0; + + dev_info(&pdev->dev, "nbl vfio close\n"); +} + +static void nbl_vfio_release(struct vfio_device *vdev) +{ +} + +static int nbl_vfio_init(struct vfio_device *vdev) +{ + return 0; +} + +static struct vfio_device_ops nbl_vfio_dev_ops = { + .name = "vfio-nbl", + .open_device = nbl_vfio_open, + .close_device = nbl_vfio_close, + .init = nbl_vfio_init, + .release = nbl_vfio_release, + .read = 
nbl_vfio_read, + .write = nbl_vfio_write, + .ioctl = nbl_vfio_ioctl, + .mmap = nbl_vfio_mmap, + .dma_unmap = nbl_vfio_dma_unmap, + .bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, +}; + +static const struct file_operations nbl_cdev_fops = { + .owner = THIS_MODULE, + .open = nbl_cdev_open, + .unlocked_ioctl = nbl_cdev_unlock_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = nbl_cdev_release, + .mmap = nbl_cdev_mmap, +}; + +static int nbl_bus_probe(struct device *dev) +{ + struct nbl_dev_user *user = container_of(dev, struct nbl_dev_user, mdev); + struct nbl_vfio_device *vdev; + int ret; + + vdev = vfio_alloc_device(nbl_vfio_device, vdev, dev, &nbl_vfio_dev_ops); + if (IS_ERR(vdev)) + return PTR_ERR(vdev); + user->vdev = &vdev->vdev; + vdev->user = user; + + ret = vfio_register_emulated_iommu_dev(NBL_USERDEV_TO_VFIO_DEV(user)); + if (ret) { + dev_err(dev, "vfio register iommu failed, ret %d\n", ret); + vfio_put_device(NBL_USERDEV_TO_VFIO_DEV(user)); + } + + return ret; +} + +static void nbl_bus_remove(struct device *dev) +{ + struct nbl_dev_user *user = container_of(dev, struct nbl_dev_user, mdev); + + vfio_unregister_group_dev(NBL_USERDEV_TO_VFIO_DEV(user)); + vfio_put_device(NBL_USERDEV_TO_VFIO_DEV(user)); +} + +static int nbl_bus_match(struct device *dev, struct device_driver *drv) +{ + return 0; +} + +static struct bus_type nbl_bus_type = { + .name = "nbl_bus_type", + .probe = nbl_bus_probe, + .remove = nbl_bus_remove, + .match = nbl_bus_match, +}; + +static struct device_driver nbl_userdev_driver = { + .bus = &nbl_bus_type, + .name = "nbl_userdev", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, +}; + +static void nbl_mdev_device_release(struct device *dev) +{ + dev_info(dev, "nbl mdev device release\n"); +} + +void nbl_dev_start_user_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *cdev = NULL, *mdev; + struct pci_dev *pdev = NBL_COMMON_TO_PDEV(common); + struct nbl_dev_user *user; + struct device_driver *drv; + void *shm_msg_ring; + struct nbl_dev_vsi *user_vsi, *xdp_vsi; + u64 dma_limit; + bool iommu_status = 0, remap_status = 0; + int minor = 0, ret; + + if (!nbl_userdev.success) + return; + + if (!dev_is_dma_coherent(dev)) + return; + + /* xdp and user vsi share same queue range */ + user_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_USER); + xdp_vsi = nbl_dev_vsi_select(dev_mgt, NBL_VSI_XDP); + if (xdp_vsi) { + user_vsi->queue_offset = xdp_vsi->queue_offset; + } else { + ret = user_vsi->ops->setup(dev_mgt, &adapter->init_param, user_vsi); + if (ret) { + dev_err(NBL_DEV_MGT_TO_DEV(dev_mgt), "User-vsi setup failed"); + return; + } + } + + iommu_status = nbl_dma_iommu_status(pdev); + remap_status = nbl_dma_remap_status(pdev, &dma_limit); + /* 39bits with 3-level paging, 48bits with 4-level paging, 57bits with 5-level paging */ + WARN_ON(fls64(dma_limit) < 39); + /* iommu passthrough must keep dpdk iova pa mode */ + shm_msg_ring = kzalloc(NBL_USER_DEV_SHMMSGRING_SIZE, GFP_KERNEL); + if (!shm_msg_ring) + return; + + user = devm_kzalloc(dev, sizeof(struct nbl_dev_user), GFP_KERNEL); + if (!user) { + kfree(shm_msg_ring); + return; + } + + if (remap_status) { + /* mdev init */ + mdev = &user->mdev; + mdev->bus = &nbl_bus_type; + 
drv = &nbl_userdev_driver; + device_initialize(mdev); + mdev->parent = dev; + mdev->release = nbl_mdev_device_release; + + ret = dev_set_name(mdev, "%s", pci_name(pdev)); + if (ret) { + dev_err(dev, "mdev set name failed\n"); + goto free_dev; + } + + ret = device_add(mdev); + if (ret) { + dev_err(dev, "mdev add failed\n"); + goto free_dev; + } + dev_info(dev, "MDEV: created\n"); + + ret = device_driver_attach(drv, mdev); + if (ret) { + dev_err(dev, "driver attach failed %d\n", ret); + device_del(mdev); + put_device(mdev); + goto free_dev; + } + } else { + mutex_lock(&nbl_userdev.clock); + minor = idr_alloc(&nbl_userdev.cidr, adapter, 1, MINORMASK + 1, GFP_KERNEL); + if (minor < 0) { + dev_err(dev, "alloc userdev dev minor failed\n"); + mutex_unlock(&nbl_userdev.clock); + goto free_dev; + } + + cdev = device_create(nbl_userdev.cls, NULL, MKDEV(MAJOR(nbl_userdev.cdevt), minor), + NULL, pci_name(pdev)); + if (IS_ERR(cdev)) { + dev_err(dev, "device create failed\n"); + idr_remove(&nbl_userdev.cidr, minor); + mutex_unlock(&nbl_userdev.clock); + goto free_dev; + } + mutex_unlock(&nbl_userdev.clock); + user->dev = cdev; + user->minor = minor; + } + + user->shm_msg_ring = shm_msg_ring; + user->adapter = adapter; + user->iommu_status = iommu_status; + user->remap_status = remap_status; + user->dma_limit = dma_limit; + atomic_set(&user->open_cnt, 0); + user->network_type = NBL_KERNEL_NETWORK; + user->user_promisc_mode = 0; + + NBL_DEV_MGT_TO_USER_DEV(dev_mgt) = user; + + return; + +free_dev: + devm_kfree(dev, user); + kfree(shm_msg_ring); +} + +void nbl_dev_stop_user_dev(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_user *user = NBL_DEV_MGT_TO_USER_DEV(dev_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct device *mdev; + + if (!user) + return; + + while (atomic_read(&user->open_cnt)) { + dev_info(dev, "waiting for the userdev application to quit\n"); + msleep(2000); + } + + kfree(user->shm_msg_ring); + + if (user->remap_status) { + mdev = &user->mdev; + device_del(mdev); + put_device(mdev); + devm_kfree(dev, user); + } else if (user->dev) { + mutex_lock(&nbl_userdev.clock); + device_destroy(nbl_userdev.cls, MKDEV(MAJOR(nbl_userdev.cdevt), user->minor)); + user->dev = NULL; + mutex_unlock(&nbl_userdev.clock); + devm_kfree(dev, user); + } + + NBL_DEV_MGT_TO_USER_DEV(dev_mgt) = NULL; +} + +void nbl_dev_user_module_init(void) +{ + int ret; + + idr_init(&nbl_userdev.cidr); + mutex_init(&nbl_userdev.clock); + mutex_init(&nbl_userdev.glock); + INIT_LIST_HEAD(&nbl_userdev.glist); + + ret = bus_register(&nbl_bus_type); + if (ret) { + pr_err("nbl bus type register failed\n"); + return; + } + ret = driver_register(&nbl_userdev_driver); + if (ret) { + pr_err("nbl userdev driver register failed\n"); + bus_unregister(&nbl_bus_type); + return; + } + + nbl_userdev.cls = class_create("nbl_userdev"); + if (IS_ERR(nbl_userdev.cls)) { + pr_err("nbl_userdev class alloc failed\n"); + goto err_create_cls; + } + + nbl_userdev.cls->devnode = user_cdevnode; + + ret = alloc_chrdev_region(&nbl_userdev.cdevt, 0, MINORMASK + 1, "nbl_userdev"); + if (ret) { + pr_err("nbl_userdev alloc chrdev region failed\n"); + goto err_alloc_chrdev; + } + + cdev_init(&nbl_userdev.cdev, &nbl_cdev_fops); + ret = cdev_add(&nbl_userdev.cdev, nbl_userdev.cdevt, MINORMASK + 1); + if (ret) { + pr_err("nbl_userdev cdev add failed\n"); + goto err_cdev_add; + } + + nbl_userdev.success = 1; + 
pr_info("user_module init success\n"); + + return; + +err_cdev_add: + unregister_chrdev_region(nbl_userdev.cdevt, MINORMASK + 1); +err_alloc_chrdev: + class_destroy(nbl_userdev.cls); + nbl_userdev.cls = NULL; +err_create_cls: + driver_unregister(&nbl_userdev_driver); + bus_unregister(&nbl_bus_type); +} + +void nbl_dev_user_module_destroy(void) +{ + if (nbl_userdev.success) { + idr_destroy(&nbl_userdev.cidr); + cdev_del(&nbl_userdev.cdev); + unregister_chrdev_region(nbl_userdev.cdevt, MINORMASK + 1); + class_destroy(nbl_userdev.cls); + nbl_userdev.cls = NULL; + driver_unregister(&nbl_userdev_driver); + bus_unregister(&nbl_bus_type); + + nbl_userdev.success = 0; + } +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h new file mode 100644 index 0000000000000000000000000000000000000000..8aa5a764b09b26beb57f84b6dc811543ac9b2d02 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dev_user.h @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_DEV_USER_H_ +#define _NBL_DEV_USER_H_ + +#define NBL_DEV_USER_TYPE ('n') + +#define NBL_DEV_USER_PCI_OFFSET_SHIFT 40 +#define NBL_DEV_USER_OFFSET_TO_INDEX(off) ((off) >> NBL_DEV_USER_PCI_OFFSET_SHIFT) +#define NBL_DEV_USER_INDEX_TO_OFFSET(index) ((u64)(index) << NBL_DEV_USER_PCI_OFFSET_SHIFT) +#define NBL_DEV_SHM_MSG_RING_INDEX (6) + +/* 8192 ioctl mailbox msg */ +struct nbl_dev_user_channel_msg { + u16 msg_type; + u16 dst_id; + u32 arg_len; + u32 ack_err; + u16 ack_length; + u16 ack; + u32 data[2044]; +}; + +#define NBL_DEV_USER_CHANNEL _IO(NBL_DEV_USER_TYPE, 0) + +struct nbl_dev_user_dma_map { + u32 argsz; + u32 flags; +#define NBL_DEV_USER_DMA_MAP_FLAG_READ BIT(0) /* readable from device */ +#define NBL_DEV_USER_DMA_MAP_FLAG_WRITE BIT(1) /* writable from device */ + u64 vaddr; /* Process virtual address */ + u64 iova; /* IO virtual address */ + u64 size; /* Size of mapping (bytes) */ +}; + +#define NBL_DEV_USER_MAP_DMA _IO(NBL_DEV_USER_TYPE, 1) + +struct nbl_dev_user_dma_unmap { + u32 argsz; + u32 flags; + u64 vaddr; + u64 iova; /* IO virtual address */ + u64 size; /* Size of mapping (bytes) */ +}; + +#define NBL_DEV_USER_UNMAP_DMA _IO(NBL_DEV_USER_TYPE, 2) + +#define NBL_KERNEL_NETWORK 0 +#define NBL_USER_NETWORK 1 + +#define NBL_DEV_USER_SWITCH_NETWORK _IO(NBL_DEV_USER_TYPE, 3) + +#define NBL_DEV_USER_GET_IFINDEX _IO(NBL_DEV_USER_TYPE, 4) + +#define NBL_DEV_USER_SET_EVENTFD _IO(NBL_DEV_USER_TYPE, 5) + +#define NBL_DEV_USER_CLEAR_EVENTFD _IO(NBL_DEV_USER_TYPE, 6) + +#define NBL_DEV_USER_SET_LISTENER _IO(NBL_DEV_USER_TYPE, 7) + +#define NBL_DEV_USER_GET_BAR_SIZE _IO(NBL_DEV_USER_TYPE, 8) + +#define NBL_DEV_USER_GET_DMA_LIMIT _IO(NBL_DEV_USER_TYPE, 9) + +#define NBL_DEV_USER_SET_PROMISC_MODE _IO(NBL_DEV_USER_TYPE, 10) + +void nbl_dev_start_user_dev(struct nbl_adapter *adapter); +void nbl_dev_stop_user_dev(struct nbl_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c new file mode 100644 index 0000000000000000000000000000000000000000..b30da28d60561baea972872fda40db7fc9115189 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.c @@ -0,0 +1,9868 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#include "nbl_dispatch.h" + +static int nbl_disp_chan_add_macvlan_req(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_add_macvlan param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt || !mac) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(param.mac, mac, sizeof(param.mac)); + param.vlan = vlan; + param.vsi = vsi; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_MACVLAN, ¶m, sizeof(param), + NULL, 0, 1); + + if (chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) + return -EFAULT; + + return 0; +} + +static void nbl_disp_chan_add_macvlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_add_macvlan *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_add_macvlan *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, + param->vlan, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_MACVLAN, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_MACVLAN); +} + +static void nbl_disp_chan_del_macvlan_req(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_del_macvlan param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt || !mac) + return; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(param.mac, mac, sizeof(param.mac)); + param.vlan = vlan; + param.vsi = vsi; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_MACVLAN, ¶m, sizeof(param), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_macvlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_del_macvlan *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_del_macvlan *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->mac, param->vlan, param->vsi); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MACVLAN, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_add_multi_rule_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops 
*chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_MULTI_RULE, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_multi_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_MULTI_RULE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_del_multi_rule_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_MULTI_RULE, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_multi_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_MULTI_RULE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_multi_group_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_MULTI_GROUP, + NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_multi_group_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + if (ret) + err = NBL_CHAN_RESP_ERR; 
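+ /* Fold the resource-layer return code into the channel status and ack the sender. */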
+ + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_MULTI_GROUP, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_multi_group_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_MULTI_GROUP, + NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_multi_group_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_MULTI_GROUP, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_register_net_req(void *priv, + struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_register_net_info param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + int ret = 0; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.pf_bar_start = register_param->pf_bar_start; + param.pf_bdf = register_param->pf_bdf; + param.vf_bar_start = register_param->vf_bar_start; + param.vf_bar_size = register_param->vf_bar_size; + param.total_vfs = register_param->total_vfs; + param.offset = register_param->offset; + param.stride = register_param->stride; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REGISTER_NET, &param, sizeof(param), + (void *)register_result, sizeof(*register_result), 1); + + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return ret; +} + +static void nbl_disp_chan_register_net_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_register_net_info *param; + struct nbl_register_net_result result = {0}; + struct nbl_register_net_param register_param = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_register_net_info *)data; + + register_param.pf_bar_start = param->pf_bar_start; + register_param.pf_bdf = param->pf_bdf; + register_param.vf_bar_start = param->vf_bar_start; + register_param.vf_bar_size = param->vf_bar_size; + register_param.total_vfs = param->total_vfs; + register_param.offset = param->offset; + register_param.stride = param->stride; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id,
&register_param, &result); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_NET, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id:%d\n", + ret, NBL_CHAN_MSG_REGISTER_NET, src_id); +} + +static int nbl_disp_unregister_net(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0); +} + +static int nbl_disp_chan_unregister_net_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNREGISTER_NET, NULL, 0, NULL, 0, 1); + + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_unregister_net_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_NET, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id:%d\n", + ret, NBL_CHAN_MSG_UNREGISTER_NET, src_id); +} + +static int nbl_disp_chan_alloc_txrx_queues_req(void *priv, u16 vsi_id, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_alloc_txrx_queues param = {0}; + struct nbl_chan_param_alloc_txrx_queues result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.queue_num = queue_num; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, &param, + sizeof(param), &result, sizeof(result), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_alloc_txrx_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_alloc_txrx_queues *param; + struct nbl_chan_param_alloc_txrx_queues result = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_alloc_txrx_queues *)data; + result.queue_num = param->queue_num; + + err =
NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->queue_num); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, + msg_id, err, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_free_txrx_queues_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_FREE_TXRX_QUEUES, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_txrx_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->free_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_FREE_TXRX_QUEUES, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_register_vsi2q_req(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_vsi2q param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_index = vsi_index; + param.vsi_id = vsi_id; + param.queue_offset = queue_offset; + param.queue_num = queue_num; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REGISTER_VSI2Q, &param, + sizeof(param), NULL, 0, 1); + + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_register_vsi2q_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_vsi2q *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_register_vsi2q *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_index, param->vsi_id, + param->queue_offset, param->queue_num); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_VSI2Q, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_q2vsi_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf,
NBL_CHAN_MSG_SETUP_Q2VSI, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_q2vsi_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_Q2VSI, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_q2vsi_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_Q2VSI, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_q2vsi_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_q2vsi, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_Q2VSI, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_rss_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_RSS, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_rss_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_RSS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_rss_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = 
NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_RSS, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_rss_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_rss, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_RSS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_queue_req(void *priv, struct nbl_txrx_queue_param *queue_param, + bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(&param.queue_param, queue_param, sizeof(param.queue_param)); + param.is_tx = is_tx; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_QUEUE, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_queue *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_setup_queue *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + &param->queue_param, param->is_tx); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_QUEUE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_all_queues_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, + &vsi_id, sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_all_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_all_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + +
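+ /* res_ops->remove_all_queues returns void, so the ack always reports NBL_CHAN_RESP_OK. */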
NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_cfg_dsch_req(void *priv, u16 vsi_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_dsch param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.vld = vld; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_DSCH, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_dsch_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_dsch *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_cfg_dsch *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vld); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_DSCH, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_cqs param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.real_qps = real_qps; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SETUP_CQS, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_setup_cqs_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_setup_cqs *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_setup_cqs *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->real_qps); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SETUP_CQS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_remove_cqs_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_REMOVE_CQS, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void
nbl_disp_chan_remove_cqs_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_CQS, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_promisc_mode(void *priv, u16 vsi_id, u16 mode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_promisc_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mode); + return ret; +} + +static int nbl_disp_chan_set_promisc_mode_req(void *priv, u16 vsi_id, u16 mode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_promisc_mode param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.mode = mode; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_PROSISC_MODE, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_promisc_mode_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_promisc_mode *param = NULL; + int err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_promisc_mode *)data; + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_promisc_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->mode); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PROSISC_MODE, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_cfg_qdisc_mqprio_req(void *priv, struct nbl_tc_qidsc_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_QDISC_MQPRIO, + param, sizeof(*param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_qdisc_mqprio_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_tc_qidsc_param *param = (struct nbl_tc_qidsc_param *)data; + struct
nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_qdisc_mqprio, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_QDISC_MQPRIO, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_set_spoof_check_addr_req(void *priv, u16 vsi_id, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_spoof_check_addr param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + ether_addr_copy(param.mac, mac); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_spoof_check_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_spoof_check_addr *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_spoof_check_addr *)data; + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_spoof_check_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->mac); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_set_vf_spoof_check_req(void *priv, u16 vsi_id, int vf_id, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_vf_spoof_check param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.vf_id = vf_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_vf_spoof_check_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_vf_spoof_check *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_vf_spoof_check *)data; + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_vf_spoof_check, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, + param->vf_id, param->enable); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_base_mac_addr_req(void *priv, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_BASE_MAC_ADDR, + NULL, 0, mac, ETH_ALEN, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_base_mac_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 mac[ETH_ALEN]; + + NBL_OPS_CALL(res_ops->get_base_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BASE_MAC_ADDR, msg_id, err, + mac, ETH_ALEN); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_firmware_version_req(void *priv, char *firmware_version, u8 max_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, NULL, 0, + firmware_version, max_len, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_firmware_version_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + char firmware_version[ETHTOOL_FWVERS_LEN] = ""; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL(res_ops->get_firmware_version, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), firmware_version)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "get emp version failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, msg_id, err, + firmware_version, ETHTOOL_FWVERS_LEN); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, src_id); +} + +static int nbl_disp_get_queue_err_stats(void *priv, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_queue_err_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + 0, queue_id, queue_err_stats, is_tx)); +} + +static int nbl_disp_chan_get_queue_err_stats_req(void *priv, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, + bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_queue_err_stats param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct
nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.queue_id = queue_id; + param.is_tx = is_tx; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, &param, + sizeof(param), queue_err_stats, sizeof(*queue_err_stats), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_queue_err_stats_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_queue_err_stats *param; + struct nbl_chan_ack_info chan_ack; + struct nbl_queue_err_stats queue_err_stats = { 0 }; + int err = NBL_CHAN_RESP_OK; + int ret; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_get_queue_err_stats *)data; + + ret = NBL_OPS_CALL(res_ops->get_queue_err_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->queue_id, + &queue_err_stats, param->is_tx)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp get queue err stats_resp failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, msg_id, err, + &queue_err_stats, sizeof(queue_err_stats)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, src_id); +} + +static void nbl_disp_chan_get_coalesce_req(void *priv, u16 vector_id, + struct nbl_chan_param_get_coalesce *ec) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_COALESCE, &vector_id, sizeof(vector_id), + ec, sizeof(*ec), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_coalesce_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + struct nbl_chan_param_get_coalesce ec = { 0 }; + u16 vector_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + vector_id = *(u16 *)data; + + NBL_OPS_CALL(res_ops->get_coalesce, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + vector_id, &ec)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_COALESCE, msg_id, ret, + &ec, sizeof(ec)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_set_coalesce_req(void *priv, u16 vector_id, + u16 vector_num, u16 pnum, u16 rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_coalesce param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + +
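+ /* Pack the per-vector interrupt moderation settings into the channel message. */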
param.local_vector_id = vector_id; + param.vector_num = vector_num; + param.rx_max_coalesced_frames = pnum; + param.rx_coalesce_usecs = rate; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_COALESCE, &param, sizeof(param), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_coalesce_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_coalesce *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_coalesce *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_coalesce, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->local_vector_id, + param->vector_num, param->rx_max_coalesced_frames, + param->rx_coalesce_usecs); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_COALESCE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_rxfh_indir_size_req(void *priv, u16 vsi_id, u32 *rxfh_indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE, + &vsi_id, sizeof(vsi_id), rxfh_indir_size, sizeof(u32), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_rxfh_indir_size_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u32 rxfh_indir_size = 0; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + NBL_OPS_CALL(res_ops->get_rxfh_indir_size, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, &rxfh_indir_size)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE, msg_id, + ret, &rxfh_indir_size, sizeof(u32)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_rxfh_indir_req(void *priv, u16 vsi_id, u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_rxfh_indir param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.rxfh_indir_size = indir_size; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_RXFH_INDIR, &param, + sizeof(param), indir, indir_size * sizeof(u32), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_rxfh_indir_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct
nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_rxfh_indir *param; + struct nbl_chan_ack_info chan_ack; + u32 *indir; + int ret = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_get_rxfh_indir *)data; + + indir = kcalloc(param->rxfh_indir_size, sizeof(u32), GFP_KERNEL); + if (!indir) + return; + NBL_OPS_CALL(res_ops->get_rxfh_indir, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, indir)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_INDIR, msg_id, ret, + indir, param->rxfh_indir_size * sizeof(u32)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(indir); +} + +static void nbl_disp_chan_get_rxfh_rss_key_req(void *priv, u8 *rss_key, u32 rss_key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_RXFH_RSS_KEY, &rss_key_len, + sizeof(rss_key_len), rss_key, rss_key_len, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_rxfh_rss_key_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 *rss_key; + int ret = NBL_CHAN_RESP_OK; + u32 rss_key_len; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + rss_key_len = *(u32 *)data; + + rss_key = kzalloc(rss_key_len, GFP_KERNEL); + if (!rss_key) + return; + NBL_OPS_CALL(res_ops->get_rxfh_rss_key, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rss_key)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_RSS_KEY, msg_id, ret, + rss_key, rss_key_len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(rss_key); +} + +static void nbl_disp_chan_get_rxfh_rss_alg_sel_req(void *priv, u8 *rss_alg_sel, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, &eth_id, + sizeof(eth_id), rss_alg_sel, sizeof(u8), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_rxfh_rss_alg_sel_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 rss_alg_sel, eth_id; + int ret = NBL_CHAN_RESP_OK; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + eth_id = *(u8 *)data; + + NBL_OPS_CALL(res_ops->get_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &rss_alg_sel, eth_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, msg_id, ret, + &rss_alg_sel, sizeof(rss_alg_sel)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_phy_caps_req(void *priv, u8 eth_id, struct
nbl_phy_caps *phy_caps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PHY_CAPS, &eth_id, + sizeof(eth_id), phy_caps, sizeof(*phy_caps), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_phy_caps_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + struct nbl_phy_caps phy_caps = { 0 }; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + eth_id = *(u8 *)data; + + NBL_OPS_CALL(res_ops->get_phy_caps, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &phy_caps)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PHY_CAPS, msg_id, ret, + &phy_caps, sizeof(phy_caps)); + + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_set_sfp_state_req(void *priv, u8 eth_id, u8 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_sfp_state param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.state = state; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_SFP_STATE, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_sfp_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_sfp_state *param; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_sfp_state *)data; + + ret = NBL_OPS_CALL(res_ops->set_sfp_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->state)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "set sfp state failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_SFP_STATE, msg_id, err, NULL, 0); + + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_SET_SFP_STATE, src_id); +} + +static void nbl_disp_chan_register_rdma_req(void *priv, u16 vsi_id, + struct nbl_rdma_register_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_REGISTER_RDMA, + &vsi_id,
sizeof(vsi_id), param, sizeof(*param), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_register_rdma_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_rdma_register_param result = {0}; + struct nbl_chan_ack_info chan_ack; + u16 *vsi_id; + int ret = NBL_CHAN_RESP_OK; + + vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id, &result); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_RDMA, + msg_id, ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_unregister_rdma_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_UNREGISTER_RDMA, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_unregister_rdma_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 *vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_RDMA, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u64 nbl_disp_chan_get_real_hw_addr_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + u64 addr = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_REAL_HW_ADDR, &vsi_id, + sizeof(vsi_id), &addr, sizeof(addr), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return addr; +} + +static void nbl_disp_chan_get_real_hw_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; + u64 addr; + + vsi_id = *(u16 *)data; + addr = NBL_OPS_CALL(res_ops->get_real_hw_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REAL_HW_ADDR, msg_id, + ret, &addr, sizeof(addr)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_chan_get_function_id_req(void *priv, u16 vsi_id) +{ + 
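+ /* Resolve vsi_id to its owning function id via the management PF; func_id stays 0 if the channel send fails. */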
struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + u16 func_id = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_FUNCTION_ID, &vsi_id, + sizeof(vsi_id), &func_id, sizeof(func_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return func_id; +} + +static void nbl_disp_chan_get_function_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id, func_id; + + vsi_id = *(u16 *)data; + + func_id = NBL_OPS_CALL(res_ops->get_function_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FUNCTION_ID, msg_id, + ret, &func_id, sizeof(func_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_real_bdf_req(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_result_get_real_bdf result = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REAL_BDF, &vsi_id, + sizeof(vsi_id), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *bus = result.bus; + *dev = result.dev; + *function = result.function; +} + +static void nbl_disp_chan_get_real_bdf_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_result_get_real_bdf result = {0}; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; + + vsi_id = *(u16 *)data; + NBL_OPS_CALL(res_ops->get_real_bdf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, + &result.bus, &result.dev, &result.function)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REAL_BDF, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_get_mbx_irq_num_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int result = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MBX_IRQ_NUM, NULL, 0, + &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_mbx_irq_num_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = 
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int result, ret = NBL_CHAN_RESP_OK; + + result = NBL_OPS_CALL(res_ops->get_mbx_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MBX_IRQ_NUM, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_clear_accel_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_clear_accel_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u16 *vsi_id = (u16 *)data; + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->clear_accel_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_clear_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CLEAR_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_clear_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u16 *vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_FLOW, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_clear_queues_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CLEAR_QUEUE, &vsi_id, + sizeof(vsi_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_clear_queues_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt 
*disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u16 *vsi_id = (u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *vsi_id); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CLEAR_QUEUE, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_disable_phy_flow_req(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DISABLE_PHY_FLOW, &eth_id, + sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_disable_phy_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 *eth_id = (u8 *)data; + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->disable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp disable phy flow resp failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DISABLE_PHY_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_DISABLE_PHY_FLOW, src_id); +} + +static int nbl_disp_chan_enable_phy_flow_req(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ENABLE_PHY_FLOW, &eth_id, + sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_enable_phy_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 *eth_id = (u8 *)data; + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp enable phy flow resp failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ENABLE_PHY_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + 
dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_ENABLE_PHY_FLOW, src_id); +} + +static void nbl_disp_chan_init_acl_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_INIT_ACL, NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_init_acl_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_acl, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_ACL, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_uninit_acl_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNINIT_ACL, NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_uninit_acl_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->uninit_acl, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNINIT_ACL, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_chan_set_upcall_rule_req(void *priv, u8 eth_id, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_upcall param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.vsi_id = vsi_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_UPCALL_RULE, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_upcall_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_upcall *param; + int err = NBL_CHAN_RESP_OK; + int ret; + + param = 
(struct nbl_chan_param_set_upcall *)data; + + ret = NBL_OPS_CALL(res_ops->set_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->vsi_id)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp set upcall rule resp failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_UPCALL_RULE, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_SET_UPCALL_RULE, src_id); +} + +static int nbl_disp_chan_unset_upcall_rule_req(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNSET_UPCALL_RULE, + &eth_id, sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_unset_upcall_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 *eth_id = (u8 *)data; + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL(res_ops->unset_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id)); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp unset upcall rule resp failed with ret: %d\n", ret); + } + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNSET_UPCALL_RULE, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_UNSET_UPCALL_RULE, src_id); +} + +static void nbl_disp_chan_set_shaping_dport_vld_req(void *priv, u8 eth_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_func_vld param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.vld = vld; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, + &param, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_shaping_dport_vld_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_func_vld *param; + int err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_func_vld *)data; + + NBL_OPS_CALL(res_ops->set_shaping_dport_vld, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->vld)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, msg_id, err, NULL, 0); + 
chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_set_dport_fc_th_vld_req(void *priv, u8 eth_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_func_vld param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.vld = vld; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, + &param, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_dport_fc_th_vld_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_func_vld *param; + int err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_func_vld *)data; + + NBL_OPS_CALL(res_ops->set_dport_fc_th_vld, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->vld)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_chan_get_vsi_id_req(void *priv, u16 func_id, u16 type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vsi_id param = {0}; + struct nbl_chan_param_get_vsi_id result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.type = type; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VSI_ID, &param, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.vsi_id; +} + +static void nbl_disp_chan_get_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vsi_id *param; + struct nbl_chan_param_get_vsi_id result; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_get_vsi_id *)data; + + result.vsi_id = NBL_OPS_CALL(res_ops->get_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->type)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_ID, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_VSI_ID); +} + +static void +nbl_disp_chan_get_eth_id_req(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_eth_id param = {0}; + struct nbl_chan_param_get_eth_id result = {0}; + 
struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_ID, &param, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + *eth_mode = result.eth_mode; + *eth_id = result.eth_id; + *logic_eth_id = result.logic_eth_id; +} + +static void nbl_disp_chan_get_eth_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_eth_id *param; + struct nbl_chan_param_get_eth_id result = {0}; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_get_eth_id *)data; + + NBL_OPS_CALL(res_ops->get_eth_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, + &result.eth_mode, &result.eth_id, &result.logic_eth_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_ID, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_ETH_ID); +} + +static int nbl_disp_alloc_rings(void *priv, struct net_device *netdev, + struct nbl_ring_param *ring_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->alloc_rings, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), netdev, ring_param)); + return ret; +} + +static void nbl_disp_remove_rings(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->remove_rings, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static dma_addr_t nbl_disp_start_tx_ring(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + dma_addr_t addr = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + addr = NBL_OPS_CALL(res_ops->start_tx_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return addr; +} + +static void nbl_disp_stop_tx_ring(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->stop_tx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static dma_addr_t nbl_disp_start_rx_ring(void *priv, u8 ring_index, bool use_napi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + dma_addr_t addr = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + addr = NBL_OPS_CALL(res_ops->start_rx_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, use_napi)); + + return addr; +} + +static void nbl_disp_stop_rx_ring(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->stop_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static void nbl_disp_kick_rx_ring(void *priv, u16 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->kick_rx_ring, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); +} + +static int nbl_disp_dump_ring(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->dump_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); + return ret; +} + +static int nbl_disp_dump_ring_stats(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->dump_ring_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m, is_tx, index)); + return ret; +} + +static void nbl_disp_set_rings_xdp_prog(void *priv, void *prog) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->set_rings_xdp_prog, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), prog)); +} + +static int nbl_disp_register_xdp_rxq(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->register_xdp_rxq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return ret; +} + +static void nbl_disp_unregister_xdp_rxq(void *priv, u8 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->unregister_xdp_rxq, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static struct napi_struct *nbl_disp_get_vector_napi(void *priv, u16 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_vector_napi, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index)); +} + +static void nbl_disp_set_vector_info(void *priv, u8 *irq_enable_base, + u32 irq_data, u16 index, bool mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->set_vector_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + irq_enable_base, irq_data, index, mask_en)); +} + +static void nbl_disp_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->register_vsi_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, ring_offset, ring_num)); +} + +static void nbl_disp_get_res_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_resource_pt_ops, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pt_ops)); +} + +static int nbl_disp_register_net(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, + register_param, register_result); + return ret; +} + +static int nbl_disp_alloc_txrx_queues(void *priv, u16 vsi_id, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->alloc_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, queue_num); + return ret; +} + +static void nbl_disp_free_txrx_queues(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->free_txrx_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_vsi2q, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_index, vsi_id, + queue_offset, queue_num); +} + +static int nbl_disp_setup_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_q2vsi, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_remove_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_q2vsi, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_setup_rss(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_rss, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_remove_rss(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_rss, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, is_tx); + return ret; +} + +static void nbl_disp_remove_all_queues(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_all_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_cfg_dsch(void *priv, u16 vsi_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_dsch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vld); + return ret; +} + +static int nbl_disp_setup_cqs(void *priv, u16 vsi_id, u16 real_qps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, real_qps); + return ret; +} + +static void nbl_disp_remove_cqs(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_cqs, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static u8 *nbl_disp_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return NULL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_msix_irq_enable_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), global_vector_id, irq_data)); +} + +static int nbl_disp_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt || !mac) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); + return ret; +} + +static void nbl_disp_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt || !mac) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_macvlan, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, vlan, vsi); +} + +static int nbl_disp_add_multi_rule(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return ret; +} + +static void nbl_disp_del_multi_rule(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_multi_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); +} + +static int nbl_disp_setup_multi_group(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->setup_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + 
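+/* Reviewer note on the two dispatch paths used throughout this file: plain wrappers + * call straight into res_ops via NBL_OPS_CALL()/NBL_OPS_CALL_LOCK(), while the + * *_req()/*_resp() pairs marshal their arguments into a mailbox message (usually + * addressed to the managing PF) and acknowledge the outcome with NBL_CHAN_ACK(). + */ + 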
+static void nbl_disp_remove_multi_group(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_multi_group, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_get_net_stats(void *priv, struct nbl_stats *net_stats) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_net_stats, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), net_stats)); +} + +static void nbl_disp_get_private_stat_len(void *priv, u32 *len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_len, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), len); +} + +static void nbl_disp_get_private_stat_data(void *priv, u32 eth_id, u64 *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_private_stat_data, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, data); +} + +static void nbl_disp_get_private_stat_data_req(void *priv, u32 eth_id, u64 *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_private_stat_data param = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.data_len = data_len; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_ETH_STATS, &param, + sizeof(param), data, data_len, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_private_stat_data_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_private_stat_data *param; + struct nbl_chan_ack_info chan_ack; + u64 *recv_data; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_get_private_stat_data *)data; + recv_data = kmalloc(param->data_len, GFP_ATOMIC); + if (!recv_data) { + dev_err(dev, "Allocate memory to private_stat_data failed\n"); + return; + } + + NBL_OPS_CALL(res_ops->get_private_stat_data, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, recv_data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_STATS, msg_id, + ret, recv_data, param->data_len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(recv_data); +} + +static void nbl_disp_fill_private_stat_strings(void *priv, u8 *strings) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->fill_private_stat_strings, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), strings); +} + +static u16 nbl_disp_get_max_desc_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_max_desc_num, ()); + return ret; +} + +static u16 nbl_disp_get_min_desc_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_min_desc_num, ()); + return ret; +} + +static int nbl_disp_cfg_qdisc_mqprio(void *priv, struct nbl_tc_qidsc_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_qdisc_mqprio, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + return ret; +} + +static int nbl_disp_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_spoof_check_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, mac); + return ret; +} + +static int nbl_disp_set_vf_spoof_check(void *priv, u16 vsi_id, int vf_id, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_vf_spoof_check, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id, enable); + return ret; +} + +static void nbl_disp_get_base_mac_addr(void *priv, u8 *mac) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_base_mac_addr, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac); +} + +static u16 nbl_disp_get_tx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_tx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return ret; +} + +static u16 nbl_disp_get_rx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_rx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); + return ret; +} + +static void nbl_disp_set_tx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_tx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); +} + +static void nbl_disp_set_rx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_rx_desc_num, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, desc_num)); +} + +static void nbl_disp_cfg_txrx_vlan(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = 
NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->cfg_txrx_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vlan_tci, vlan_proto, vsi_index)); +} + +static void nbl_disp_get_rep_stats(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rep_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rep_vsi_id, rep_stats, is_tx)); +} + +static u16 nbl_disp_get_rep_index(void *priv, u16 rep_vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_rep_index, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rep_vsi_id)); +} + +static void nbl_disp_get_queue_stats(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_queue_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_id, queue_stats, is_tx)); +} + +static void nbl_disp_get_firmware_version(void *priv, char *firmware_version, u8 max_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_firmware_version, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), firmware_version)); + if (ret) + dev_err(dev, "get emp version failed with ret: %d\n", ret); +} + +static int nbl_disp_get_driver_info(void *priv, struct nbl_driver_info *driver_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_driver_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), driver_info)); +} + +static void nbl_disp_get_coalesce(void *priv, u16 vector_id, + struct nbl_chan_param_get_coalesce *ec) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_coalesce, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, ec)); +} + +static void nbl_disp_set_coalesce(void *priv, u16 vector_id, u16 vector_num, u16 pnum, u16 rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_coalesce, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, + vector_num, pnum, rate); +} + +static void nbl_disp_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_indir_size, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, rxfh_indir_size)); +} + +static void nbl_disp_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_rss_key_size, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 
rxfh_rss_key_size)); +} + +static void nbl_disp_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir, u32 indir_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_indir, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, indir)); +} + +static void nbl_disp_get_rxfh_rss_key(void *priv, u8 *rss_key, u32 key_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rxfh_rss_key, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rss_key)); +} + +static void nbl_disp_get_rxfh_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rss_alg_sel, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), alg_sel, eth_id)); +} + +static void nbl_disp_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_phy_caps, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, phy_caps)); +} + +static int nbl_disp_set_sfp_state(void *priv, u8 eth_id, u8 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->set_sfp_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, state)); + return ret; +} + +static int nbl_disp_init_chip_module(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->init_chip_module, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_queue_init(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->queue_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_vsi_init(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->vsi_init, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, num_net_msix, + num_others_msix, net_msix_mask_en); + return ret; +} + +static int nbl_disp_chan_configure_msix_map_req(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_msix_map param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.num_net_msix = num_net_msix; + param.num_others_msix = num_others_msix; + param.msix_mask_en = net_msix_mask_en; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_configure_msix_map_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_msix_map *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_cfg_msix_map *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + param->num_net_msix, param->num_others_msix, param->msix_mask_en); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP); +} + +static int nbl_disp_chan_destroy_msix_map_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DESTROY_MSIX_MAP, + NULL, 0, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_destroy_msix_map_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_MSIX_MAP, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DESTROY_MSIX_MAP); +} + +static int nbl_disp_chan_enable_mailbox_irq_req(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct 
nbl_chan_param_enable_mailbox_irq param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vector_id = vector_id; + param.enable_msix = enable_msix; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_enable_mailbox_irq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_enable_mailbox_irq *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_enable_mailbox_irq *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, + param->vector_id, param->enable_msix); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ); +} + +static u16 nbl_disp_chan_get_global_vector_req(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_global_vector param = {0}; + struct nbl_chan_param_get_global_vector result = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + if (!disp_mgt) + return -EINVAL; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.vector_id = local_vector_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, &param, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.vector_id; +} + +static void nbl_disp_chan_get_global_vector_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_global_vector *param; + struct nbl_chan_param_get_global_vector result; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_get_global_vector *)data; + + result.vector_id = NBL_OPS_CALL(res_ops->get_global_vector, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->vector_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, + msg_id, err, &result, sizeof(result)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_GET_GLOBAL_VECTOR); +} + +static int 
nbl_disp_destroy_msix_map(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_msix_map, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0); + return ret; +} + +static int nbl_disp_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_mailbox_irq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0, vector_id, enable_msix); + return ret; +} + +static int nbl_disp_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->enable_abnormal_irq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + return ret; +} + +static int nbl_disp_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->enable_adminq_irq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector_id, enable_msix)); + return ret; +} + +static u16 nbl_disp_get_global_vector(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_global_vector, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + return ret; +} + +static u16 nbl_disp_get_msix_entry_id(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_msix_entry_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_vector_id)); + return ret; +} + +static void nbl_disp_dump_flow(void *priv, struct seq_file *m) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); +} + +static u16 nbl_disp_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + if (!disp_mgt) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_vsi_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + func_id, type)); +} + +static void nbl_disp_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_eth_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, eth_mode, eth_id, logic_eth_id)); +} + +static void nbl_disp_get_rep_feature(void 
*priv, + struct nbl_register_net_result *register_result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->get_rep_feature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), register_result)); +} + +static void nbl_disp_set_eswitch_mode(void *priv, u16 eswitch_mode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->set_eswitch_mode, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eswitch_mode)); +} + +static u16 nbl_disp_get_eswitch_mode(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + u16 ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->get_eswitch_mode, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static int nbl_disp_alloc_rep_data(void *priv, int num_vfs, u16 vf_base_vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->alloc_rep_data, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), num_vfs, vf_base_vsi_id)); +} + +static void nbl_disp_free_rep_data(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->free_rep_data, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_set_rep_netdev_info(void *priv, void *rep_data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->set_rep_netdev_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rep_data)); +} + +static void nbl_disp_unset_rep_netdev_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->unset_rep_netdev_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static struct net_device *nbl_disp_get_rep_netdev_info(void *priv, u16 rep_data_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_rep_netdev_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + rep_data_index)); +} + +static int nbl_disp_enable_lag_protocol(void *priv, u16 eth_id, bool lag_en) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->enable_lag_protocol, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_en)); +} + +static int nbl_disp_chan_cfg_lag_hash_algorithm_req(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_hash_algorithm param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.lag_id = lag_id; + param.hash_type = hash_type; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, &param, sizeof(param), + NULL, 0, 1); + return 
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_hash_algorithm_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_hash_algorithm *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_lag_hash_algorithm *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_hash_algorithm, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->lag_id, param->hash_type)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM); +} + +static int nbl_disp_cfg_lag_hash_algorithm(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_hash_algorithm, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_id, hash_type)); +} + +static int nbl_disp_chan_cfg_lag_member_fwd_req(void *priv, u16 eth_id, u16 lag_id, u8 fwd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_fwd param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.lag_id = lag_id; + param.fwd = fwd; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_member_fwd_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_fwd *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_lag_member_fwd *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_member_fwd, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->lag_id, param->fwd)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD); +} + +static int nbl_disp_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_member_fwd, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_id, fwd)); +} + 
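+/* The LAG member list below is passed by value: the req side copies the caller's + * nbl_lag_member_list_param into the channel message, and the resp side hands the + * unpacked copy to res_ops->cfg_lag_member_list() before acking the result. + */ + 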
+static int nbl_disp_chan_cfg_lag_member_list_req(void *priv, + struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param chan_param = {0}; + struct nbl_chan_send_info chan_send; + + memcpy(&chan_param, param, sizeof(chan_param)); + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, &chan_param, + sizeof(chan_param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_member_list_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_lag_member_list_param *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_member_list, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST); +} + +static int nbl_disp_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_member_list, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_chan_cfg_lag_member_up_attr_req(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_up_attr param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.lag_id = lag_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, &param, sizeof(param), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_member_up_attr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_member_up_attr *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_lag_member_up_attr *)data; + + ret = NBL_OPS_CALL(res_ops->cfg_lag_member_up_attr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->lag_id, + param->enable)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send 
ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR); +} + +static int nbl_disp_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_lag_member_up_attr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, lag_id, enable)); +} + +static int nbl_disp_chan_add_lag_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LAG_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LAG_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_LAG_FLOW); +} + +static int nbl_disp_add_lag_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_chan_del_lag_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LAG_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_lag_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lag_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + *(u16 *)data); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LAG_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DEL_LAG_FLOW); +} + +static void nbl_disp_del_lag_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lag_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_chan_add_lldp_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_LLDP_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_LLDP_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_ADD_LLDP_FLOW); +} + +static int nbl_disp_add_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_chan_del_lldp_flow_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_LLDP_FLOW, &vsi_id, sizeof(vsi_id), + NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_lldp_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + *(u16 *)data); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_LLDP_FLOW, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_DEL_LLDP_FLOW); +} + +static void nbl_disp_del_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_lldp_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_chan_cfg_lag_mcc_req(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct 
nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_mcc param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.lag_id = lag_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_LAG_MCC, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_lag_mcc_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_lag_mcc *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_lag_mcc *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_lag_mcc, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->lag_id, param->enable); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LAG_MCC, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_LAG_MCC); +} + +static int nbl_disp_cfg_lag_mcc(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_lag_mcc, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, lag_id, enable); +} + +static int nbl_disp_cfg_duppkt_info(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_duppkt_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_chan_cfg_duppkt_mcc_req(void *priv, struct nbl_lag_member_list_param *mem_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param param = {0}; + struct nbl_chan_send_info chan_send; + + memcpy(¶m, mem_param, sizeof(param)); + + NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_DUPPKT_MCC, ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_duppkt_mcc_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_lag_member_list_param *)data; + + err = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_duppkt_mcc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_DUPPKT_MCC, msg_id, err, NULL, 0); + ret = 
chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_DUPPKT_MCC); +} + +static int nbl_disp_cfg_duppkt_mcc(void *priv, struct nbl_lag_member_list_param *mem_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_duppkt_mcc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mem_param); +} + +static int nbl_disp_chan_cfg_bond_shaping_req(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bond_shaping param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_CFG_BOND_SHAPING, + ¶m, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_bond_shaping_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(NBL_DISP_MGT_TO_COMMON(disp_mgt)); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bond_shaping *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_bond_shaping *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bond_shaping, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->enable); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_BOND_SHAPING, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_BOND_SHAPING); +} + +static int nbl_disp_cfg_bond_shaping(void *priv, u8 eth_id, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bond_shaping, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable); +} + +static void nbl_disp_chan_cfg_bgid_back_pressure_req(void *priv, u8 main_eth_id, u8 other_eth_id, + bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bgid_back_pressure param = {0}; + struct nbl_chan_send_info chan_send; + + param.main_eth_id = main_eth_id; + param.other_eth_id = other_eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, + ¶m, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_bgid_back_pressure_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
device *dev = NBL_COMMON_TO_DEV(NBL_DISP_MGT_TO_COMMON(disp_mgt)); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_cfg_bgid_back_pressure *param = NULL; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_param_cfg_bgid_back_pressure *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bgid_back_pressure, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->main_eth_id, + param->other_eth_id, param->enable); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE); +} + +static void nbl_disp_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, u8 other_eth_id, + bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_bgid_back_pressure, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), main_eth_id, other_eth_id, enable); +} + +static u32 nbl_disp_get_tx_headroom(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u32 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_tx_headroom, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static void nbl_disp_register_rdma(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, param); +} + +static void nbl_disp_unregister_rdma(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static u8 __iomem *nbl_disp_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u8 __iomem *addr = NULL; + + addr = NBL_OPS_CALL(res_ops->get_hw_addr, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), size)); + return addr; +} + +static u64 nbl_disp_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u64 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_real_hw_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + return ret; +} + +static u16 nbl_disp_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u16 ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_function_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); + return ret; +} + +static void nbl_disp_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + 
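+	/* Resolve the real bus/device/function numbers behind this vsi_id. */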
NBL_OPS_CALL(res_ops->get_real_bdf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, bus, dev, function)); +} + +static bool nbl_disp_check_fw_heartbeat(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + bool ret = false; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->check_fw_heartbeat, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + return ret; +} + +static bool nbl_disp_check_fw_reset(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->check_fw_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_lock(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_lock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_unlock(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_unlock, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_prepare(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_prepare, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_flash_image(void *priv, u32 module, const u8 *data, size_t len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_image, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), module, data, len)); +} + +static int nbl_disp_flash_activate(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->flash_activate, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_eth_loopback(void *priv, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + u8 eth_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; + + return NBL_OPS_CALL(res_ops->setup_loopback, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, enable)); +} + +static int nbl_disp_chan_set_eth_loopback_req(void *priv, u8 enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_eth_loopback param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_port_id = NBL_DISP_MGT_TO_COMMON(disp_mgt)->eth_id; + param.enable = enable; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_LOOPBACK, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_eth_loopback_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops 
*res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_set_eth_loopback *param; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_set_eth_loopback *)data; + ret = NBL_OPS_CALL(res_ops->setup_loopback, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_port_id, param->enable)); + if (ret) { + dev_err(dev, "setup loopback adminq failed with ret: %d\n", ret); + err = NBL_CHAN_RESP_ERR; + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_LOOPBACK, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_SET_ETH_LOOPBACK); +} + +static struct sk_buff *nbl_disp_clean_rx_lb_test(void *priv, u32 ring_index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->clean_rx_lb_test, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index)); +} + +static u32 nbl_disp_check_active_vf(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->check_active_vf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), 0)); +} + +static u32 nbl_disp_chan_check_active_vf_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct device *dev = NBL_DISP_MGT_TO_DEV(disp_mgt); + u32 active_vf_num = 0; + int ret; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CHECK_ACTIVE_VF, NULL, 0, + &active_vf_num, sizeof(active_vf_num), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + dev_err(dev, "channel check active vf send msg failed with ret: %d\n", ret); + + return active_vf_num; +} + +static void nbl_disp_chan_check_active_vf_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u32 active_vf_num; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + active_vf_num = NBL_OPS_CALL(res_ops->check_active_vf, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CHECK_ACTIVE_VF, + msg_id, err, &active_vf_num, sizeof(active_vf_num)); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_CHECK_ACTIVE_VF); +} + +static u32 nbl_disp_get_adminq_tx_buf_size(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + return chan_ops->get_adminq_tx_buf_size(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt)); +} + +static int nbl_disp_adminq_emp_console_write(void *priv, char *buf, size_t count) 
+{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_WRITE, + buf, count, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static bool nbl_disp_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + cap_type)); + return has_cap; +} + +static int nbl_disp_set_pmd_debug(void *priv, bool pmd_debug) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_pmd_debug, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pmd_debug)); +} + +static bool nbl_disp_chan_get_product_flex_cap_req(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + bool has_cap = false; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, &cap_type, + sizeof(cap_type), &has_cap, sizeof(has_cap), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return has_cap; +} + +static void nbl_disp_chan_get_product_flex_cap_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + enum nbl_flex_cap_type *cap_type = (enum nbl_flex_cap_type *)data; + struct nbl_chan_ack_info chan_ack = {0}; + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_flex_cap, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *cap_type)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, msg_id, + NBL_CHAN_RESP_OK, &has_cap, sizeof(has_cap)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static bool nbl_disp_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + bool has_cap = false; + + has_cap = NBL_OPS_CALL(res_ops->get_product_fix_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + cap_type)); + return has_cap; +} + +static int nbl_disp_alloc_ktls_tx_index(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return index; +} + +static int nbl_disp_chan_alloc_ktls_tx_index_req(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info 
*common; + int index = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, &vsi, sizeof(u16), + &index, sizeof(index), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return index; +} + +static void nbl_disp_chan_alloc_ktls_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + int index; + u16 vsi; + + vsi = *(u16 *)data; + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &index, sizeof(index)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_free_ktls_tx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ktls_tx_index_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, &index, + sizeof(index), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ktls_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + u32 index; + + index = *(u32 *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ktls_tx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_tx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, mode, salt, + key, key_len); +} + +static void nbl_disp_chan_cfg_ktls_tx_keymat_req(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + param.index = index; + param.mode = mode; + memcpy(param.salt, salt, sizeof(param.salt)); + memcpy(param.key, key, key_len); + param.key_len = key_len; + + 
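+	/* Assumption: key_len has already been bounded to sizeof(param.key) by the kTLS caller, so the memcpy above cannot overrun param.key. */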
common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, ¶m, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ktls_tx_keymat_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_keymat *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_tx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + param->mode, param->salt, param->key, param->key_len); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_alloc_ktls_rx_index(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + return index; +} + +static int nbl_disp_chan_alloc_ktls_rx_index_req(void *priv, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int index = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, &vsi, sizeof(u16), + &index, sizeof(index), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return index; +} + +static void nbl_disp_chan_alloc_ktls_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + int index; + u16 vsi; + + vsi = *(u16 *)data; + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &index, sizeof(index)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_free_ktls_rx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ktls_rx_index_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, &index, + sizeof(index), NULL, 0, 1); + 
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ktls_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack = {0}; + u32 index; + + index = *(u32 *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ktls_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ktls_rx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, mode, + salt, key, key_len); +} + +static void nbl_disp_chan_cfg_ktls_rx_keymat_req(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.mode = mode; + memcpy(param.salt, salt, sizeof(param.salt)); + memcpy(param.key, key, key_len); + param.key_len = key_len; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, ¶m, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ktls_rx_keymat_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_keymat *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_keymat *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_keymat, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + param->mode, param->salt, param->key, param->key_len); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ktls_rx_record(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_record, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, tcp_sn, rec_num, init); +} + +static void nbl_disp_chan_cfg_ktls_rx_record_req(void *priv, u32 index, + u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_record param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + 
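+	/* tcp_sn and rec_num presumably carry the TCP sequence number and TLS record number used to resynchronize the hardware RX record state; init appears to mark the first programming of a new record. */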
common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.init = init; + param.index = index; + param.tcp_sn = tcp_sn; + param.rec_num = rec_num; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, ¶m, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ktls_rx_record_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_record *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_record *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ktls_rx_record, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->index, param->tcp_sn, param->rec_num, param->init); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_add_ktls_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, data, vsi); +} + +static int nbl_disp_chan_add_ktls_rx_flow_req(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.vsi = vsi; + memcpy(param.data, data, sizeof(param.data)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, ¶m, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_ktls_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_cfg_ktls_flow *)data; + ret = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + param->data, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_del_ktls_rx_flow(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_del_ktls_rx_flow_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, ¶m, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_ktls_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ktls_flow *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ktls_flow *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ktls_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_alloc_ipsec_tx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), cfg_info); + return index; +} + +static int nbl_disp_chan_alloc_ipsec_tx_index_req(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(¶m.cfg_info, cfg_info, sizeof(param.cfg_info)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, ¶m, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.index; +} + +static void nbl_disp_chan_alloc_ipsec_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_ack_info chan_ack = {0}; + + param = (struct nbl_chan_ipsec_index *)data; + result.index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + ¶m->cfg_info); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_free_ipsec_tx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ipsec_tx_index_req(void *priv, u32 
index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_FREE_IPSEC_TX_INDEX, ¶m, + sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ipsec_tx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + + param = (struct nbl_chan_ipsec_index *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_tx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static int nbl_disp_alloc_ipsec_rx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int index = 0; + + index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), cfg_info); + return index; +} + +static int nbl_disp_chan_alloc_ipsec_rx_index_req(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(¶m.cfg_info, cfg_info, sizeof(param.cfg_info)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, ¶m, + sizeof(param), &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result.index; +} + +static void nbl_disp_chan_alloc_ipsec_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + struct nbl_chan_ipsec_index result = {0}; + struct nbl_chan_ack_info chan_ack = {0}; + + param = (struct nbl_chan_ipsec_index *)data; + result.index = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->alloc_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + ¶m->cfg_info); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, msg_id, + NBL_CHAN_RESP_OK, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_free_ipsec_rx_index(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_free_ipsec_rx_index_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + 
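+	/* As with the tx-index free path, this request is posted with the no-ack flag (final NBL_CHAN_SEND argument 0), so no resp is awaited. */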
struct nbl_chan_ipsec_index param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_FREE_IPSEC_RX_INDEX, ¶m, + sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_free_ipsec_rx_index_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_ipsec_index *param; + + param = (struct nbl_chan_ipsec_index *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->free_ipsec_rx_index, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static void nbl_disp_cfg_ipsec_tx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_tx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, sa_entry); +} + +static void nbl_disp_chan_cfg_ipsec_tx_sad_req(void *priv, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + memcpy(¶m.sa_entry, sa_entry, sizeof(param.sa_entry)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, ¶m, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ipsec_tx_sad_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ipsec_sad *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_tx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + ¶m->sa_entry); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_cfg_ipsec_rx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_rx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, sa_entry); +} + +static void nbl_disp_chan_cfg_ipsec_rx_sad_req(void *priv, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + memcpy(¶m.sa_entry, sa_entry, 
sizeof(param.sa_entry)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, ¶m, + sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_ipsec_rx_sad_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_sad *param; + struct nbl_chan_ack_info chan_ack; + + param = (struct nbl_chan_cfg_ipsec_sad *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->cfg_ipsec_rx_sad, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index, + ¶m->sa_entry); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_add_ipsec_tx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, data, vsi); +} + +static int nbl_disp_chan_add_ipsec_tx_flow_req(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.vsi = vsi; + memcpy(param.data, data, sizeof(param.data)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, ¶m, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_ipsec_tx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + ret = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->index, param->data, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_del_ipsec_tx_flow(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_del_ipsec_tx_flow_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info 
static void nbl_disp_chan_del_ipsec_tx_flow_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_IPSEC_TX_FLOW, &param, + sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_ipsec_tx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_tx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static int nbl_disp_add_ipsec_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index, data, vsi); +} + +static int nbl_disp_chan_add_ipsec_rx_flow_req(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + param.vsi = vsi; + memcpy(param.data, data, sizeof(param.data)); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_ipsec_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + ret = NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->add_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->index, param->data, param->vsi); + if (ret) + err = NBL_CHAN_RESP_ERR; + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, msg_id, err, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_del_ipsec_rx_flow(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_chan_del_ipsec_rx_flow_req(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.index = index; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_DEL_IPSEC_RX_FLOW, &param, + sizeof(param), NULL, 0, 0); + 
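/* No ack is expected here: the paired del *_resp handler applies the op without replying. */ + 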
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_ipsec_rx_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_chan_cfg_ipsec_flow *param; + + param = (struct nbl_chan_cfg_ipsec_flow *)data; + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->del_ipsec_rx_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->index); +} + +static bool nbl_disp_check_ipsec_status(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->check_ipsec_status, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static u32 nbl_disp_get_dipsec_lft_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->get_dipsec_lft_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_handle_dipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_dipsec_soft_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_handle_dipsec_hard_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_dipsec_hard_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static u32 nbl_disp_get_uipsec_lft_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->get_uipsec_lft_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_handle_uipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_uipsec_soft_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static void nbl_disp_handle_uipsec_hard_expire(void *priv, u32 index) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->handle_uipsec_hard_expire, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index); +} + +static int nbl_disp_get_mbx_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_mbx_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_adminq_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_adminq_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_get_abnormal_irq_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; 
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_abnormal_irq_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_clear_accel_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->clear_accel_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_clear_flow(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static void nbl_disp_clear_queues(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); +} + +static int nbl_disp_disable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->disable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); +} + +static int nbl_disp_enable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->enable_phy_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); +} + +static void nbl_disp_init_acl(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->init_acl, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_uninit_acl(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->uninit_acl, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_upcall_rule(void *priv, u8 eth_id, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, vsi_id)); +} + +static int nbl_disp_unset_upcall_rule(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->unset_upcall_rule, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id)); +} + +static void nbl_disp_set_shaping_dport_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_shaping_dport_vld, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, vld)); +} + +static void nbl_disp_set_dport_fc_th_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->set_dport_fc_th_vld, + 
(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, vld)); +} + +static u16 nbl_disp_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_qid)); +} + +static u16 +nbl_disp_chan_get_vsi_global_qid_req(void *priv, u16 vsi_id, u16 local_qid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_vsi_qid_info param = {0}; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.vsi_id = vsi_id; + param.local_qid = local_qid; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, &param, + sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void +nbl_disp_chan_get_vsi_global_qid_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_vsi_qid_info *param; + struct nbl_chan_ack_info chan_ack; + u16 global_qid; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_vsi_qid_info *)data; + global_qid = NBL_OPS_CALL(res_ops->get_vsi_global_queue_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->local_qid)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, + msg_id, global_qid, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_line_rate_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_rep_line_rate_info result = {0}; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_line_rate_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, + &result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINE_RATE_INFO, + msg_id, 0, &result, sizeof(struct nbl_rep_line_rate_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_register_net_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_register_net_rep *param; + struct nbl_chan_ack_info chan_ack; + struct nbl_register_net_rep_result result = {0}; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_register_net_rep *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_net_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->pf_id, + param->vf_id, &result); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_NET_REP, + msg_id, 0, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void 
+nbl_disp_chan_unregister_net_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + vsi_id = *(u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_net_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_NET_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_register_eth_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_eth_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_ETH_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_queue_cxt_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_queue_cxt *param; + struct nbl_chan_ack_info chan_ack; + u16 cxt; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_get_queue_cxt *)data; + + cxt = NBL_OPS_CALL(res_ops->get_queue_ctx, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->local_queue)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_QUEUE_CXT, + msg_id, 0, &cxt, sizeof(cxt)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_vdpaq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_vdpaq_init_info *param; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_vdpaq_init_info *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_vdpaq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->pa, param->size); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_VDPAQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_destroy_vdpaq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_vdpaq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_VDPAQ, + msg_id, 0, NULL, 0); + 
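/* The destroy op's return value is discarded; the requester is always acked with status 0. */ + 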
chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_upcall_port_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret; + u16 bdf; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL(res_ops->get_upcall_port, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &bdf)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_GET_UPCALL_PORT, + msg_id, ret, &bdf, sizeof(u16)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_board_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_board_port_info board_info = {0}; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_board_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &board_info)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_INFO, + msg_id, 0, &board_info, sizeof(board_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_cfg_log_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_cfg_log *param; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_cfg_log *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_queue_log, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, + param->qps, param->vld); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_LOG, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_unregister_eth_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_eth_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_ETH_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_register_upcall_port_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int ret; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_upcall_port, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_UPCALL_PORT, + msg_id, ret, 
NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_unregister_upcall_port_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_upcall_port, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_UPCALL_PORT, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_set_offload_status_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_offload_status, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id); +} + +static int nbl_disp_check_offload_status(void *priv, bool *is_down) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->check_offload_status, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), is_down)); +} + +static int nbl_disp_get_port_attributes(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_port_attributes, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + if (ret) + dev_err(dev, "get port attributes failed with ret: %d\n", ret); + + return ret; +} + +static int nbl_disp_update_ring_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->update_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_update_rdma_cap(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->update_rdma_cap, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static u16 nbl_disp_get_rdma_cap_num(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_rdma_cap_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_update_rdma_mem_type(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->update_rdma_mem_type, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_ring_num(void *priv, struct nbl_fw_cmd_net_ring_num_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_ring_num, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_enable_port(void *priv, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt 
= (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->enable_port, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable)); + if (ret) + dev_err(dev, "enable port failed with ret: %d\n", ret); + + return ret; +} + +static void nbl_disp_init_port(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->init_port, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_chan_recv_port_notify_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL(res_ops->recv_port_notify, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data)); +} + +static int nbl_disp_get_port_state(void *priv, u8 eth_id, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_port_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, port_state)); + return ret; +} + +static int nbl_disp_chan_get_port_state_req(void *priv, u8 eth_id, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_PORT_STATE, &eth_id, sizeof(eth_id), + port_state, sizeof(*port_state), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_port_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + struct nbl_port_state info = {0}; + int ret = 0; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = NBL_OPS_CALL(res_ops->get_port_state, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PORT_STATE, msg_id, err, + &info, sizeof(info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_port_advertising(void *priv, + struct nbl_port_advertising *port_advertising) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->set_port_advertising, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), port_advertising)); + return ret; +} + +static int nbl_disp_chan_set_port_advertising_req(void *priv, + struct nbl_port_advertising *port_advertising) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_PORT_ADVERTISING, + port_advertising, sizeof(*port_advertising), + NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_port_advertising_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_port_advertising *param; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_port_advertising *)data; + + ret = res_ops->set_port_advertising(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PORT_ADVERTISING, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_info(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_module_info(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, info); +} + +static int nbl_disp_chan_get_module_info_req(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_MODULE_INFO, &eth_id, + sizeof(eth_id), info, sizeof(*info), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_module_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + struct ethtool_modinfo info; + int ret = 0; + u8 eth_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_module_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, &info); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_INFO, msg_id, err, + &info, sizeof(info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_eeprom(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, eeprom, data); +} + +static int nbl_disp_chan_get_module_eeprom_req(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_module_eeprom param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + memcpy(&param.eeprom, eeprom, sizeof(struct ethtool_eeprom)); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_MODULE_EEPROM, &param, + sizeof(param), data, eeprom->len, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_module_eeprom_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_get_module_eeprom *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + struct ethtool_eeprom *eeprom; + u8 *recv_data; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + param = (struct nbl_chan_param_get_module_eeprom *)data; + eth_id = param->eth_id; + eeprom = &param->eeprom; + recv_data = kmalloc(eeprom->len, GFP_ATOMIC); + if (!recv_data) { + dev_err(dev, "Allocate memory to store module eeprom failed\n"); + return; + } + + ret = res_ops->get_module_eeprom(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, eeprom, recv_data); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "Get module eeprom failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_EEPROM, msg_id, err, + recv_data, eeprom->len); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_GET_MODULE_EEPROM, src_id); + kfree(recv_data); +} + +static int nbl_disp_get_link_state(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + /* if res_ops->get_link_state() is not implemented, report the eth link as up by default */ + if (res_ops->get_link_state) + ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, eth_link_info); + else + eth_link_info->link_status = 1; + + return ret; +} + +static int nbl_disp_chan_get_link_state_req(void *priv, u8 eth_id, + struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, + NBL_CHAN_MSG_GET_LINK_STATE, &eth_id, + sizeof(eth_id), eth_link_info, sizeof(*eth_link_info), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_link_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u8 eth_id; + struct nbl_eth_link_info eth_link_info = {0}; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + eth_id = *(u8 *)data; + ret = res_ops->get_link_state(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + eth_id, 
&eth_link_info); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_STATE, msg_id, err, + &eth_link_info, sizeof(eth_link_info)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_reg_dump(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, len)); +} + +static void nbl_disp_chan_get_reg_dump_req(void *priv, u32 *data, u32 len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + u32 *result = NULL; + + result = kmalloc(len, GFP_KERNEL); + if (!result) + return; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP, &len, sizeof(len), + result, len, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + memcpy(data, result, len); + kfree(result); +} + +static void nbl_disp_chan_get_reg_dump_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + u32 *result = NULL; + u32 len = 0; + + len = *(u32 *)data; + result = kmalloc(len, GFP_KERNEL); + if (!result) + return; + + NBL_OPS_CALL(res_ops->get_reg_dump, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), result, len)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP, msg_id, err, result, len); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + kfree(result); +} + +static int nbl_disp_get_reg_dump_len(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_get_reg_dump_len_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + int result = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REG_DUMP_LEN, NULL, 0, + &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_reg_dump_len_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int result = 0; + + result = NBL_OPS_CALL(res_ops->get_reg_dump_len, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REG_DUMP_LEN, msg_id, err, + &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void 
+nbl_disp_chan_init_offload_fwd_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + u16 vsi_id; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + vsi_id = *(u16 *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_offload_fwd, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_OFLD, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data, src_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_CMDQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_destroy_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->destroy_cmdq, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DESTROY_CMDQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_reset_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->reset_cmdq, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESET_CMDQ, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_offload_flow_rule_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->offload_flow_rule, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_OFFLOAD_FLOW_RULE, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_get_flow_acl_switch_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + u8 acl_enable = false; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops 
*chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_flow_acl_switch, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &acl_enable); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ACL_SWITCH, + msg_id, 0, &acl_enable, sizeof(u8)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_rep_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_rep_cfg_info *param; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_rep_cfg_info *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_rep, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->inner_type, param->outer_type, param->rep_type); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_REP, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_init_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->init_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), data); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_INIT_FLOW, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void +nbl_disp_chan_deinit_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_ack_info chan_ack; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->deinit_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEINIT_FLOW, + msg_id, 0, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_configure_virtio_dev_msix(void *priv, u16 vector) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->configure_virtio_dev_msix(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector); +} + +static void nbl_disp_configure_rdma_msix_off(void *priv, u16 vector) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->configure_rdma_msix_off(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vector); +} + +static void nbl_disp_configure_virtio_dev_ready(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->configure_virtio_dev_ready(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_eth_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, eth_id)); +} + +static int nbl_disp_chan_set_eth_mac_addr_req(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_eth_mac_addr param; + struct nbl_chan_send_info chan_send; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + memcpy(param.mac, mac, sizeof(param.mac)); + param.eth_id = eth_id; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_eth_mac_addr_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_eth_mac_addr *param; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_eth_mac_addr *)data; + + ret = NBL_OPS_CALL(res_ops->set_eth_mac_addr, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->eth_id)); + if (ret) + err = NBL_CHAN_RESP_ERR; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "channel send ack failed with ret: %d, msg_type: %d\n", + ret, NBL_CHAN_MSG_SET_ETH_MAC_ADDR); +} + +static u32 nbl_disp_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 senser_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_chip_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), type, senser_id)); +} + +static u32 nbl_disp_chan_get_chip_temperature_req(void *priv, + enum nbl_hwmon_type type, u32 senser_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_hwmon param = {0}; + struct nbl_common_info *common; + u32 chip_temperature = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.senser_id = senser_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, &param, sizeof(param), + &chip_temperature, sizeof(chip_temperature), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return chip_temperature; +} + +static void nbl_disp_chan_get_chip_temperature_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_hwmon *param = (struct nbl_chan_param_get_hwmon *)data; + int ret = NBL_CHAN_RESP_OK; 
+ u32 chip_temperature = 0; + + chip_temperature = NBL_OPS_CALL(res_ops->get_chip_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->type, param->senser_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, msg_id, + ret, &chip_temperature, sizeof(chip_temperature)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_module_temperature(void *priv, u8 eth_id, + enum nbl_hwmon_type type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_module_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, type)); +} + +static int nbl_disp_chan_get_module_temperature_req(void *priv, u8 eth_id, + enum nbl_hwmon_type type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int module_temp = 0; + struct nbl_chan_param_get_hwmon param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.senser_id = eth_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, + &param, sizeof(param), &module_temp, sizeof(module_temp), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return module_temp; +} + +static void nbl_disp_chan_get_module_temperature_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + int module_temp; + struct nbl_chan_param_get_hwmon *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_get_hwmon *)data; + module_temp = NBL_OPS_CALL(res_ops->get_module_temperature, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->senser_id, param->type)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, msg_id, + ret, &module_temp, sizeof(module_temp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnormal_info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return res_ops->process_abnormal_event(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), abnormal_info); +} + +static int nbl_disp_chan_switchdev_init_cmdq_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, + NULL, 0, &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return ret_status; +} + +static void nbl_disp_chan_switchdev_init_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_init_cmdq, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, msg_id, + ret, &ret_status, sizeof(ret_status)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_switchdev_init_cmdq(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_init_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_chan_switchdev_deinit_cmdq_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + u8 tc_inst_id; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + tc_inst_id = common->tc_inst_id; + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, + &tc_inst_id, sizeof(tc_inst_id), &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (!ret_status) + common->tc_inst_id = NBL_TC_FLOW_INST_COUNT; + return 0; +} + +static void nbl_disp_chan_switchdev_deinit_cmdq_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + u8 tc_inst_id; + + tc_inst_id = *(u8 *)data; + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_deinit_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), tc_inst_id); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, msg_id, + ret, &ret_status, sizeof(ret_status)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_switchdev_deinit_cmdq(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->switchdev_deinit_cmdq, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + common->tc_inst_id); +} + +static int nbl_disp_add_tc_flow(void *priv, struct nbl_tc_flow_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_tc_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + return ret; +} + +static int nbl_disp_del_tc_flow(void *priv, struct nbl_tc_flow_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!param) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_tc_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + return ret; +} + +static bool nbl_disp_tc_tun_encap_lookup(void *priv, + struct nbl_rule_action *rule_act, + struct 
nbl_tc_flow_param *param) +{ + bool ret = 0; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + if (!rule_act || !param) + return false; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->tc_tun_encap_lookup, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + rule_act, param); + return ret; +} + +static int nbl_disp_tc_tun_encap_del(void *priv, struct nbl_encap_key *key) +{ + int ret = 0; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + if (!key) + return -EINVAL; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->tc_tun_encap_del, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), key); + return ret; +} + +static int nbl_disp_tc_tun_encap_add(void *priv, struct nbl_rule_action *action) +{ + int ret = 0; + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + if (!action) + return -EINVAL; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->tc_tun_encap_add, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), action); + return ret; +} + +static int nbl_disp_flow_index_lookup(void *priv, struct nbl_flow_index_key key) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + int ret = 0; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flow_index_lookup, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), key); + return ret; +} + +static int nbl_disp_query_tc_stats(void *priv, struct nbl_stats_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + int ret = 0; + + if (!param) + return -EINVAL; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + ret = NBL_OPS_CALL(res_ops->query_tc_stats, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); + return ret; +} + +static int nbl_disp_set_tc_flow_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_flow_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_chan_set_tc_flow_info_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_TC_FLOW_INFO, + NULL, 0, &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return ret_status; +} + +static void nbl_disp_chan_set_tc_flow_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_tc_flow_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TC_FLOW_INFO, msg_id, + ret, &ret_status, sizeof(ret_status)); + 
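/* The ack carries the channel status (ret) and the op's own result (ret_status) separately. */ + 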
chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_unset_tc_flow_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unset_tc_flow_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static int nbl_disp_chan_unset_tc_flow_info_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + int ret_status = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, + NULL, 0, &ret_status, sizeof(ret_status), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + return 0; +} + +static void nbl_disp_chan_unset_tc_flow_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int ret_status = 0; + + ret_status = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unset_tc_flow_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, msg_id, + ret, &ret_status, sizeof(ret_status)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_tc_flow_info(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_tc_flow_info, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_adapt_desc_gother(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->adapt_desc_gother, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_flr_clear_rdma(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_rdma, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_net(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_net, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_accel(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->flr_clear_accel, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_queues(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_queues, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void 
nbl_disp_flr_clear_accel_flow(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_SPIN_LOCK(disp_mgt, res_ops->flr_clear_accel_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_flows(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_flows, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static void nbl_disp_flr_clear_interrupt(void *priv, u16 vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->flr_clear_interrupt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vf_id); +} + +static u16 nbl_disp_covert_vfid_to_vsi_id(void *priv, u16 vfid) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->covert_vfid_to_vsi_id, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vfid); +} + +static void nbl_disp_unmask_all_interrupts(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unmask_all_interrupts, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_keep_alive_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_KEEP_ALIVE, + NULL, 0, NULL, 0, 1); + + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_keep_alive_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_KEEP_ALIVE, msg_id, + 0, NULL, 0); + + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_chan_get_rep_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_REP_QUEUE_INFO, + NULL, 0, &result, sizeof(result), 1); + + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = result.queue_num; + *queue_size = result.queue_size; + } +} + +static void nbl_disp_chan_get_rep_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL(res_ops->get_rep_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, &result.queue_size)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_REP_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_rep_queue_info(void *priv, u16 *queue_num, u16 *queue_size) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_rep_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size)); +} + +static void nbl_disp_chan_get_user_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, + u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_USER_QUEUE_INFO, + &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); + + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = result.queue_num; + *queue_size = result.queue_size; + } +} + +static void nbl_disp_chan_get_user_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL(res_ops->get_user_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, + &result.queue_size, *(u16 *)data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_user_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); +} + +static int nbl_disp_ctrl_port_led(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->ctrl_port_led, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, led_ctrl, led_reg)); +} + +static int nbl_disp_chan_ctrl_port_led_req(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, + u32 *led_reg) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_ctrl_port_led param = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.led_status = led_ctrl; + 
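/* Forward the LED request to the management PF (common->mgt_pf); the paired _resp handler below applies it through res_ops->ctrl_port_led. */ + 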
NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_CTRL_PORT_LED, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_ctrl_port_led_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_ctrl_port_led *param = NULL; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_ctrl_port_led *)data; + ret = NBL_OPS_CALL(res_ops->ctrl_port_led, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->led_status, NULL)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CTRL_PORT_LED, msg_id, + ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_passthrough_fw_cmd(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->passthrough_fw_cmd, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result)); +} + +static int nbl_disp_nway_reset(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->nway_reset, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id)); +} + +static int nbl_disp_chan_nway_reset_req(void *priv, u8 eth_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_NWAY_RESET, + &eth_id, sizeof(eth_id), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_nway_reset_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + u8 *eth_id; + int ret = NBL_CHAN_RESP_OK; + + eth_id = (u8 *)data; + ret = NBL_OPS_CALL(res_ops->nway_reset, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *eth_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NWAY_RESET, msg_id, + ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_get_vf_base_vsi_id(void *priv, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id)); +} + +static u16 nbl_disp_chan_get_vf_base_vsi_id_req(void *priv, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + u16 vf_base_vsi_id = 
0; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, + NULL, 0, &vf_base_vsi_id, sizeof(vf_base_vsi_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return vf_base_vsi_id; +} + +static void nbl_disp_chan_get_vf_base_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vf_base_vsi_id; + + vf_base_vsi_id = NBL_OPS_CALL(res_ops->get_vf_base_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, msg_id, + ret, &vf_base_vsi_id, sizeof(vf_base_vsi_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_get_intr_suppress_level(void *priv, u64 pkt_rates, u16 last_level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + return NBL_OPS_CALL(res_ops->get_intr_suppress_level, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), pkt_rates, last_level)); +} + +static void nbl_disp_set_intr_suppress_level(void *priv, u16 vector_id, u16 vector_num, u16 level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), common->mgt_pf, + vector_id, vector_num, level); +} + +static void nbl_disp_chan_set_intr_suppress_level_req(void *priv, u16 vector_id, + u16 vector_num, u16 level) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_intr_suppress_level param = {0}; + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common; + + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.local_vector_id = vector_id; + param.vector_num = vector_num; + param.level = level; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, + &param, sizeof(param), NULL, 0, 0); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_intr_suppress_level_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops; + struct nbl_channel_ops *chan_ops; + struct nbl_chan_param_set_intr_suppress_level *param; + + res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + param = (struct nbl_chan_param_set_intr_suppress_level *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_intr_suppress_level, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, param->local_vector_id, + param->vector_num, param->level); +} + +static u32 nbl_disp_get_p4_version(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_version, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int 
nbl_disp_get_p4_info(void *priv, char *verify_code) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), verify_code)); +} + +static int nbl_disp_load_p4(void *priv, struct nbl_load_p4_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->load_p4, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static int nbl_disp_load_p4_default(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->load_p4_default, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_get_p4_used_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int p4_type = 0; + + NBL_CHAN_SEND(chan_send, common->mgt_pf, NBL_CHAN_MSG_GET_P4_USED, + NULL, 0, &p4_type, sizeof(p4_type), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return p4_type; +} + +static void nbl_disp_chan_get_p4_used_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + int p4_type; + + p4_type = NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_P4_USED, msg_id, + ret, &p4_type, sizeof(p4_type)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_p4_used(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_set_p4_used(void *priv, int p4_type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->set_p4_used, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), p4_type)); +} + +static int nbl_disp_chan_cfg_eth_bond_info_req(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_CFG_ETH_BOND_INFO, + param, sizeof(*param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_cfg_eth_bond_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct 
nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + ret = NBL_OPS_CALL(res_ops->cfg_eth_bond_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + (struct nbl_lag_member_list_param *)data)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_ETH_BOND_INFO, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_cfg_eth_bond_info(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->cfg_eth_bond_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param)); +} + +static int nbl_disp_chan_add_nd_upcall_flow(void *priv, u16 vsi_id, bool for_pmd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, for_pmd); +} + +static int nbl_disp_chan_add_nd_upcall_flow_req(void *priv, u16 vsi_id, bool for_pmd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = { 0 }; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_nd_upcall param = { 0 }; + + param.vsi_id = vsi_id; + param.for_pmd = for_pmd; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, + &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_add_nd_upcall_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_nd_upcall *param = + (struct nbl_chan_param_nd_upcall *)data; + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->add_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->for_pmd); + if (ret) { + err = NBL_CHAN_RESP_ERR; + dev_err(dev, "disp set nd dup rule failed with ret: %d\n", ret); + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, src_id); +} + +static void nbl_disp_chan_del_nd_upcall_flow(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); +} + +static void nbl_disp_chan_del_nd_upcall_flow_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + 
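/* No payload or result buffer is needed for this request; the _resp side simply tears down the ND upcall flow. */ + 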
NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, + NULL, 0, NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_del_nd_upcall_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(disp_mgt->common); + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int err = NBL_CHAN_RESP_OK; + int ret; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->del_nd_upcall_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, + msg_id, err, NULL, 0); + ret = chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + if (ret) + dev_err(dev, "disp chan send ack failed with ret: %d, msg_type: %d, src_id: %d\n", + ret, NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, src_id); +} + +static int nbl_disp_chan_get_board_id_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int result = -1; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_BOARD_ID, + NULL, 0, &result, sizeof(result), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return result; +} + +static void nbl_disp_chan_get_board_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK, result = -1; + + result = NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_BOARD_ID, + msg_id, ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_board_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_board_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_chan_register_rdma_bond_req(void *priv, + struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_REGISTER_RDMA_BOND, + list_param, sizeof(*list_param), register_param, sizeof(*register_param), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_register_rdma_bond_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct 
nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_lag_member_list_param *list_param = NULL; + struct nbl_rdma_register_param register_param = {0}; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + list_param = (struct nbl_lag_member_list_param *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma_bond, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + list_param, &register_param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_RDMA_BOND, + msg_id, ret, &register_param, sizeof(register_param)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_register_rdma_bond(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->register_rdma_bond, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), list_param, register_param); +} + +static void nbl_disp_chan_unregister_rdma_bond_req(void *priv, u16 lag_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, &lag_id, sizeof(lag_id), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_unregister_rdma_bond_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma_bond, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_unregister_rdma_bond(void *priv, u16 lag_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->unregister_rdma_bond, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), lag_id); +} + +static dma_addr_t nbl_disp_restore_abnormal_ring(void *priv, int ring_index, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->restore_abnormal_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type)); +} + +static int nbl_disp_restart_abnormal_ring(void *priv, int ring_index, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->restart_abnormal_ring, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ring_index, type)); +} + +static int nbl_disp_chan_restore_hw_queue_req(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_restore_hw_queue param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.vsi_id = vsi_id; + param.local_queue_id = local_queue_id; + param.dma = dma; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_RESTORE_HW_QUEUE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_restore_hw_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_restore_hw_queue *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_restore_hw_queue *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue, NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->vsi_id, param->local_queue_id, param->dma, param->type); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTORE_HW_QUEUE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_restore_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->restore_hw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, local_queue_id, dma, type); +} + +static int +nbl_disp_chan_stop_abnormal_hw_queue_req(void *priv, u16 vsi_id, u16 local_queue_id, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_stop_abnormal_hw_queue param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.vsi_id = vsi_id; + param.local_queue_id = local_queue_id; + param.type = type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void +nbl_disp_chan_stop_abnormal_hw_queue_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_stop_abnormal_hw_queue *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_stop_abnormal_hw_queue *)data; + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->stop_abnormal_hw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->local_queue_id, + param->type); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_stop_abnormal_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = 
(struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->stop_abnormal_hw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, local_queue_id, type); +} + +static int nbl_disp_stop_abnormal_sw_queue(void *priv, u16 local_queue_id, int type) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->stop_abnormal_sw_queue, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + local_queue_id, type); +} + +static u16 nbl_disp_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_local_queue_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + vsi_id, global_queue_id)); +} + +static int nbl_disp_chan_get_eth_bond_info_req(void *priv, struct nbl_bond_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_ETH_BOND_INFO, NULL, 0, param, sizeof(*param), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_eth_bond_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_bond_param result; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + memset(&result, 0, sizeof(result)); + + NBL_OPS_CALL(res_ops->get_eth_bond_info, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_ETH_BOND_INFO, + msg_id, ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_eth_bond_info(void *priv, struct nbl_bond_param *param) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_eth_bond_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param)); +} + +static void nbl_disp_cfg_eth_bond_event(void *priv, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->cfg_eth_bond_event, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable)); +} + +static int nbl_disp_set_bridge_mode(void *priv, u16 bmode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + NBL_COMMON_TO_MGT_PF(common), bmode); +} + +static int nbl_disp_chan_set_bridge_mode_req(void *priv, u16 bmode) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_BRIDGE_MODE, &bmode, sizeof(bmode), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_bridge_mode_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 *bmode; + + bmode = (u16 *)data; + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_bridge_mode, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), src_id, *bmode); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_BRIDGE_MODE, + msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_get_vf_function_id(void *priv, u16 vsi_id, int vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vf_function_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id)); +} + +static u16 nbl_disp_chan_get_vf_function_id_req(void *priv, u16 vsi_id, int vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_vf_func_id param; + struct nbl_common_info *common; + u16 func_id = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.vsi_id = vsi_id; + param.vf_id = vf_id; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_VF_FUNCTION_ID, &param, + sizeof(param), &func_id, sizeof(func_id), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return func_id; +} + +static void nbl_disp_chan_get_vf_function_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vf_func_id *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 func_id; + + param = (struct nbl_chan_param_get_vf_func_id *)data; + func_id = NBL_OPS_CALL(res_ops->get_vf_function_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vf_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, msg_id, + ret, &func_id, sizeof(func_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static u16 nbl_disp_get_vf_vsi_id(void *priv, u16 vsi_id, int vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_vf_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vf_id)); +} + +static u16 nbl_disp_chan_get_vf_vsi_id_req(void *priv, u16 vsi_id, int vf_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct 
nbl_chan_param_get_vf_vsi_id param; + struct nbl_common_info *common; + u16 vf_vsi = 0; + + common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + param.vsi_id = vsi_id; + param.vf_id = vf_id; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_VF_VSI_ID, &param, + sizeof(param), &vf_vsi, sizeof(vf_vsi), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return vf_vsi; +} + +static void nbl_disp_chan_get_vf_vsi_id_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_vf_vsi_id *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + u16 vsi_id; + + param = (struct nbl_chan_param_get_vf_vsi_id *)data; + vsi_id = NBL_OPS_CALL(res_ops->get_vf_vsi_id, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->vf_id)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_VF_VSI_ID, msg_id, + ret, &vsi_id, sizeof(vsi_id)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_register_func_mac(void *priv, u8 *mac, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->register_func_mac, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), mac, func_id)); +} + +static void nbl_disp_chan_register_func_mac_req(void *priv, u8 *mac, u16 func_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_register_func_mac param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.func_id = func_id; + ether_addr_copy(param.mac, mac); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_MAC, &param, sizeof(param), NULL, 0, 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_register_func_mac_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_func_mac *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_register_func_mac *)data; + NBL_OPS_CALL(res_ops->register_func_mac, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->mac, param->func_id)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_MAC, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_register_func_vlan(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->register_func_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, vlan_tci, + vlan_proto, should_notify)); +} + +static int nbl_disp_chan_register_func_vlan_req(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify) +{ + struct 
nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_register_vlan param; + bool result; + int ret; + + param.func_id = func_id; + param.vlan_tci = vlan_tci; + param.vlan_proto = vlan_proto; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_VLAN, &param, sizeof(param), + &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (!ret) + *should_notify = result; + + return ret; +} + +static void nbl_disp_chan_register_func_vlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_vlan *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + bool notify = false; + + param = (struct nbl_chan_param_register_vlan *)data; + ret = NBL_OPS_CALL(res_ops->register_func_vlan, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, + param->vlan_tci, param->vlan_proto, &notify)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_VLAN, + msg_id, ret, &notify, sizeof(notify)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_register_func_rate(void *priv, u16 func_id, int rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->register_func_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, rate)); +} + +static int nbl_disp_chan_register_func_rate_req(void *priv, u16 func_id, int tx_rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_tx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.func_id = func_id; + param.tx_rate = tx_rate; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_RATE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_register_func_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_tx_rate *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_tx_rate *)data; + ret = NBL_OPS_CALL(res_ops->register_func_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, param->tx_rate)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_RATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_tx_rate(void *priv, u16 func_id, int tx_rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + 
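/* Direct variant: calls straight into the local resource layer with no channel round-trip, presumably installed in the ops table when the resource layer is local to this function. */ + 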
return NBL_OPS_CALL(res_ops->set_tx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, tx_rate)); +} + +static int nbl_disp_chan_set_tx_rate_req(void *priv, u16 func_id, int tx_rate) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_tx_rate param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.func_id = func_id; + param.tx_rate = tx_rate; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_TX_RATE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_tx_rate_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_tx_rate *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_tx_rate *)data; + ret = NBL_OPS_CALL(res_ops->set_tx_rate, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->func_id, param->tx_rate)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_TX_RATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_register_func_link_forced(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->register_func_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), func_id, + link_forced, should_notify)); +} + +static int nbl_disp_chan_register_func_link_forced_req(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_register_func_link_forced param; + struct nbl_chan_param_register_func_link_forced result; + int ret = 0; + + param.func_id = func_id; + param.link_forced = link_forced; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, &param, sizeof(param), + &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; + + *should_notify = result.should_notify; + return 0; +} + +static void nbl_disp_chan_register_func_link_forced_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_register_func_link_forced *param; + struct nbl_chan_param_register_func_link_forced result = {0}; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_register_func_link_forced *)data; + ret = NBL_OPS_CALL(res_ops->register_func_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->func_id, param->link_forced, 
&result.should_notify)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, + msg_id, ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_link_forced(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id)); +} + +static int nbl_disp_chan_get_link_forced_req(void *priv, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int link_forced = 0; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_LINK_FORCED, &vsi_id, sizeof(vsi_id), + &link_forced, sizeof(link_forced), 1); + chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + + return link_forced; +} + +static void nbl_disp_chan_get_link_forced_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + ret = NBL_OPS_CALL(res_ops->get_link_forced, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), *(u16 *)data)); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_LINK_FORCED, + msg_id, NBL_CHAN_RESP_OK, &ret, sizeof(ret)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_driver_version(void *priv, char *ver, int len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_driver_version, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), ver, len)); +} + +static void nbl_disp_setup_rdma_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->setup_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static void nbl_disp_remove_rdma_id(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->remove_rdma_id, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_get_fd_flow_req(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_fd_flow param = {0}; + int ret = 0; + + param.vsi_id = vsi_id; + param.location = location; + param.rule_type = rule_type; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW, &param, + sizeof(param), cmd, NBL_CHAN_FDIR_FLOW_RULE_SIZE, 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; + + return 0; +} + +static void 
nbl_disp_chan_get_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_fd_flow *param = NULL; + struct nbl_chan_param_fdir_replace *result; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + result = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto send_ack; + } + param = (struct nbl_chan_param_get_fd_flow *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->vsi_id, param->location, + param->rule_type, result); +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW, msg_id, + ret, result, result ? sizeof(*result) + result->tlv_length : 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + kfree(result); +} + +static int nbl_disp_get_fd_flow(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, location, + rule_type, cmd); +} + +static int nbl_disp_chan_get_fd_flow_cnt_req(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_fdir_flowcnt param; + int result = 0, ret = 0; + + param.rule_type = rule_type; + param.vsi = vsi_id; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW_CNT, &param, + sizeof(param), &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; + + return result; +} + +static void nbl_disp_chan_get_fd_flow_cnt_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_fdir_flowcnt *param; + int result = 0, err = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_fdir_flowcnt *)data; + result = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->rule_type, param->vsi); + if (result < 0) { + err = result; + result = 0; + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_CNT, msg_id, + err, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_fd_flow_cnt(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_cnt, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id); +} + +static int nbl_disp_chan_get_fd_flow_all_req(void *priv, + struct 
nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_result_get_fd_flow_all *result = NULL; + + result = (struct nbl_chan_result_get_fd_flow_all *)rule_locs; + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW_ALL, param, + sizeof(*param), result, sizeof(*result), 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_get_fd_flow_all_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_fd_flow_all *param = NULL; + struct nbl_chan_result_get_fd_flow_all *result = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) { + /* ack with an empty payload rather than pointing at a failed allocation */ + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_ALL, msg_id, + -ENOMEM, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + return; + } + + param = (struct nbl_chan_param_get_fd_flow_all *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, result->rule_locs); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_ALL, msg_id, + ret, result, sizeof(*result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); + + kfree(result); +} + +static int nbl_disp_get_fd_flow_all(void *priv, struct nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_fd_flow_all, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param, rule_locs); +} + +static int nbl_disp_chan_get_fd_flow_max_req(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + int ret = 0, result = 0; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_FD_FLOW_MAX, NULL, 0, &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; + + return result; +} + +static void nbl_disp_chan_get_fd_flow_max_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + int result = 0, err = NBL_CHAN_RESP_OK; + + result = NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); + if (result < 0) { + err = result; + result = 0; + } + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_FD_FLOW_MAX, msg_id, + err, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_fd_flow_max(void *priv) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct 
nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL(res_ops->get_fd_flow_max, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt))); +} + +static int nbl_disp_chan_replace_fd_flow_req(void *priv, struct nbl_chan_param_fdir_replace *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REPLACE_FD_FLOW, info, + sizeof(struct nbl_chan_param_fdir_replace) + info->tlv_length, NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_replace_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_fdir_replace *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + param = (struct nbl_chan_param_fdir_replace *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REPLACE_FD_FLOW, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_replace_fd_flow(void *priv, struct nbl_chan_param_fdir_replace *info) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->replace_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), info); +} + +static int nbl_disp_chan_remove_fd_flow_req(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_fdir_del param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.rule_type = rule_type; + param.location = loc; + param.vsi = vsi_id; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_REMOVE_FD_FLOW, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_remove_fd_flow_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_fdir_del *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + param = (struct nbl_chan_param_fdir_del *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, + param->location, param->vsi); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_REMOVE_FD_FLOW, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_remove_fd_flow(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id) +{ + struct 
nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->remove_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, loc, vsi_id); +} + +static int nbl_disp_chan_config_fd_flow_state_req(void *priv, + enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + struct nbl_chan_param_config_fd_flow_state param = {0}; + struct nbl_chan_send_info chan_send = {0}; + + param.rule_type = rule_type; + param.vsi_id = vsi_id; + param.state = state; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CFG_FD_FLOW_STATE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_config_fd_flow_state_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_config_fd_flow_state *param = NULL; + struct nbl_chan_ack_info chan_ack; + int ret = 0; + + param = (struct nbl_chan_param_config_fd_flow_state *)data; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->rule_type, + param->vsi_id, param->state); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CFG_FD_FLOW_STATE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + return NBL_OPS_CALL_LOCK(disp_mgt, res_ops->config_fd_flow_state, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rule_type, vsi_id, state); +} + +static void nbl_disp_cfg_fd_update_event(void *priv, bool enable) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->cfg_fd_update_event, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), enable); +} + +static void nbl_disp_dump_fd_flow(void *priv, struct seq_file *m) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->dump_fd_flow, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), m); +} + +static void nbl_disp_chan_get_xdp_queue_info_req(void *priv, u16 *queue_num, u16 *queue_size, + u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_queue_info result = {0}; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, + &vsi_id, sizeof(vsi_id), &result, sizeof(result), 1); + + if (!chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send)) { + *queue_num = 
result.queue_num; + *queue_size = result.queue_size; + } +} + +static void nbl_disp_chan_get_xdp_queue_info_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_ack_info chan_ack; + struct nbl_chan_param_get_queue_info result = {0}; + int ret = NBL_CHAN_RESP_OK; + + NBL_OPS_CALL(res_ops->get_xdp_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), &result.queue_num, + &result.queue_size, *(u16 *)data)); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, msg_id, + ret, &result, sizeof(result)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static void nbl_disp_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL(res_ops->get_xdp_queue_info, + (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), queue_num, queue_size, vsi_id)); +} + +static void nbl_disp_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_hw_status, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), hw_status); +} + +static void nbl_disp_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + + NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_active_func_bitmaps, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), bitmap, max_func); +} + +static int nbl_disp_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc, + trust, dscp2prio_map); + if (ret) + return ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, pfc); + + return ret; +} + +static int nbl_disp_chan_configure_qos_req(void *priv, u8 eth_id, u8 *pfc, + u8 trust, u8 *dscp2prio_map) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_configure_qos param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + memcpy(param.pfc, pfc, NBL_MAX_PFC_PRIORITIES); + memcpy(param.dscp2prio_map, dscp2prio_map, NBL_DSCP_MAX); + param.trust = trust; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_CONFIGURE_QOS, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_configure_qos_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = 
NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_configure_qos *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_configure_qos *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->configure_qos, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->pfc, param->trust, param->dscp2prio_map); + if (ret) + goto send_ack; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_eth_pfc, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), param->eth_id, param->pfc); + +send_ack: + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_CONFIGURE_QOS, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); + + return ret; +} + +static int nbl_disp_chan_set_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_set_pfc_buffer_size param; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + + param.eth_id = eth_id; + param.prio = prio; + param.xoff = xoff; + param.xon = xon; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, &param, sizeof(param), NULL, 0, 1); + return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); +} + +static void nbl_disp_chan_set_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_set_pfc_buffer_size *param; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_set_pfc_buffer_size *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->set_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->prio, param->xoff, param->xon); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, msg_id, ret, NULL, 0); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +static int nbl_disp_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + int ret; + + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, prio, xoff, xon); + + return ret; +} + +static int +nbl_disp_chan_get_pfc_buffer_size_req(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_send_info chan_send = {0}; + struct nbl_chan_param_get_pfc_buffer_size param = {0}; + struct nbl_chan_param_get_pfc_buffer_size_resp resp; + struct nbl_common_info *common = NBL_DISP_MGT_TO_COMMON(disp_mgt); + int ret; + + 
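/* Query the managing PF for this priority's xoff/xon buffer thresholds. */ + 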
param.eth_id = eth_id; + param.prio = prio; + + NBL_CHAN_SEND(chan_send, NBL_COMMON_TO_MGT_PF(common), + NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, &param, sizeof(param), + &resp, sizeof(resp), 1); + ret = chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send); + if (ret) + return ret; + + *xoff = resp.xoff; + *xon = resp.xon; + + return 0; +} + +static void nbl_disp_chan_get_pfc_buffer_size_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv; + struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt); + struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt); + struct nbl_chan_param_get_pfc_buffer_size *param; + struct nbl_chan_param_get_pfc_buffer_size_resp resp; + struct nbl_chan_ack_info chan_ack; + int ret = NBL_CHAN_RESP_OK; + + param = (struct nbl_chan_param_get_pfc_buffer_size *)data; + ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->get_pfc_buffer_size, + NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), + param->eth_id, param->prio, &resp.xoff, &resp.xon); + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, msg_id, ret, + &resp, sizeof(resp)); + chan_ops->send_ack(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_ack); +} + +/* NBL_DISP_SET_OPS(disp_op_name, res_func, ctrl_lvl, msg_type, msg_req, msg_resp) + * ctrl_lvl defines when this disp_op should go directly to res_op instead of sending a channel msg. + * + * Use X-macros to reduce code duplication in channel_op and disp_op setup/remove: + * each user redefines NBL_DISP_SET_OPS and then expands NBL_DISP_OPS_TBL. + */ +#define NBL_DISP_OPS_TBL \ +do { \ + NBL_DISP_SET_OPS(init_chip_module, nbl_disp_init_chip_module, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_resource_pt_ops, nbl_disp_get_res_pt_ops, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(queue_init, nbl_disp_queue_init, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(vsi_init, nbl_disp_vsi_init, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(configure_msix_map, nbl_disp_configure_msix_map, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, \ + nbl_disp_chan_configure_msix_map_req, \ + nbl_disp_chan_configure_msix_map_resp); \ + NBL_DISP_SET_OPS(destroy_msix_map, nbl_disp_destroy_msix_map, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DESTROY_MSIX_MAP, \ + nbl_disp_chan_destroy_msix_map_req, \ + nbl_disp_chan_destroy_msix_map_resp); \ + NBL_DISP_SET_OPS(enable_mailbox_irq, nbl_disp_enable_mailbox_irq, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, \ + nbl_disp_chan_enable_mailbox_irq_req, \ + nbl_disp_chan_enable_mailbox_irq_resp); \ + NBL_DISP_SET_OPS(enable_abnormal_irq, nbl_disp_enable_abnormal_irq, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(enable_adminq_irq, nbl_disp_enable_adminq_irq, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_global_vector, nbl_disp_get_global_vector, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_GLOBAL_VECTOR, \ + nbl_disp_chan_get_global_vector_req, \ + nbl_disp_chan_get_global_vector_resp); \ + NBL_DISP_SET_OPS(get_msix_entry_id, nbl_disp_get_msix_entry_id, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(alloc_rings, nbl_disp_alloc_rings, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(remove_rings, nbl_disp_remove_rings, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(start_tx_ring, nbl_disp_start_tx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(stop_tx_ring, nbl_disp_stop_tx_ring, \ + 
NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(start_rx_ring, nbl_disp_start_rx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(stop_rx_ring, nbl_disp_stop_rx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(kick_rx_ring, nbl_disp_kick_rx_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(dump_ring, nbl_disp_dump_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(dump_ring_stats, nbl_disp_dump_ring_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_rings_xdp_prog, nbl_disp_set_rings_xdp_prog, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_xdp_rxq, nbl_disp_register_xdp_rxq, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(unregister_xdp_rxq, nbl_disp_unregister_xdp_rxq, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_vector_napi, nbl_disp_get_vector_napi, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_vector_info, nbl_disp_set_vector_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_vsi_ring, nbl_disp_register_vsi_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_net, nbl_disp_register_net, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_NET, \ + nbl_disp_chan_register_net_req, nbl_disp_chan_register_net_resp); \ + NBL_DISP_SET_OPS(unregister_net, nbl_disp_unregister_net, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNREGISTER_NET, \ + nbl_disp_chan_unregister_net_req, nbl_disp_chan_unregister_net_resp); \ + NBL_DISP_SET_OPS(alloc_txrx_queues, nbl_disp_alloc_txrx_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, \ + nbl_disp_chan_alloc_txrx_queues_req, \ + nbl_disp_chan_alloc_txrx_queues_resp); \ + NBL_DISP_SET_OPS(free_txrx_queues, nbl_disp_free_txrx_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_TXRX_QUEUES, \ + nbl_disp_chan_free_txrx_queues_req, \ + nbl_disp_chan_free_txrx_queues_resp); \ + NBL_DISP_SET_OPS(register_vsi2q, nbl_disp_register_vsi2q, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_VSI2Q, \ + nbl_disp_chan_register_vsi2q_req, \ + nbl_disp_chan_register_vsi2q_resp); \ + NBL_DISP_SET_OPS(setup_q2vsi, nbl_disp_setup_q2vsi, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_Q2VSI, \ + nbl_disp_chan_setup_q2vsi_req, \ + nbl_disp_chan_setup_q2vsi_resp); \ + NBL_DISP_SET_OPS(remove_q2vsi, nbl_disp_remove_q2vsi, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_Q2VSI, \ + nbl_disp_chan_remove_q2vsi_req, \ + nbl_disp_chan_remove_q2vsi_resp); \ + NBL_DISP_SET_OPS(setup_rss, nbl_disp_setup_rss, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_RSS, \ + nbl_disp_chan_setup_rss_req, \ + nbl_disp_chan_setup_rss_resp); \ + NBL_DISP_SET_OPS(remove_rss, nbl_disp_remove_rss, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_RSS, \ + nbl_disp_chan_remove_rss_req, \ + nbl_disp_chan_remove_rss_resp); \ + NBL_DISP_SET_OPS(setup_queue, nbl_disp_setup_queue, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_QUEUE, \ + nbl_disp_chan_setup_queue_req, nbl_disp_chan_setup_queue_resp); \ + NBL_DISP_SET_OPS(remove_all_queues, nbl_disp_remove_all_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_ALL_QUEUES, \ + nbl_disp_chan_remove_all_queues_req, \ + nbl_disp_chan_remove_all_queues_resp); \ + NBL_DISP_SET_OPS(cfg_dsch, nbl_disp_cfg_dsch, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_DSCH, \ + nbl_disp_chan_cfg_dsch_req, nbl_disp_chan_cfg_dsch_resp); \ + NBL_DISP_SET_OPS(setup_cqs, 
nbl_disp_setup_cqs, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_CQS, \ + nbl_disp_chan_setup_cqs_req, nbl_disp_chan_setup_cqs_resp); \ + NBL_DISP_SET_OPS(remove_cqs, nbl_disp_remove_cqs, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_CQS, \ + nbl_disp_chan_remove_cqs_req, nbl_disp_chan_remove_cqs_resp); \ + NBL_DISP_SET_OPS(cfg_qdisc_mqprio, nbl_disp_cfg_qdisc_mqprio, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_QDISC_MQPRIO, \ + nbl_disp_chan_cfg_qdisc_mqprio_req, \ + nbl_disp_chan_cfg_qdisc_mqprio_resp); \ + NBL_DISP_SET_OPS(get_msix_irq_enable_info, nbl_disp_get_msix_irq_enable_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(add_macvlan, nbl_disp_add_macvlan, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_MACVLAN, \ + nbl_disp_chan_add_macvlan_req, nbl_disp_chan_add_macvlan_resp); \ + NBL_DISP_SET_OPS(del_macvlan, nbl_disp_del_macvlan, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_MACVLAN, \ + nbl_disp_chan_del_macvlan_req, nbl_disp_chan_del_macvlan_resp); \ + NBL_DISP_SET_OPS(add_multi_rule, nbl_disp_add_multi_rule, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_MULTI_RULE, \ + nbl_disp_chan_add_multi_rule_req, nbl_disp_chan_add_multi_rule_resp); \ + NBL_DISP_SET_OPS(del_multi_rule, nbl_disp_del_multi_rule, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_MULTI_RULE, \ + nbl_disp_chan_del_multi_rule_req, nbl_disp_chan_del_multi_rule_resp); \ + NBL_DISP_SET_OPS(setup_multi_group, nbl_disp_setup_multi_group, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_MULTI_GROUP, \ + nbl_disp_chan_setup_multi_group_req, \ + nbl_disp_chan_setup_multi_group_resp); \ + NBL_DISP_SET_OPS(remove_multi_group, nbl_disp_remove_multi_group, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_MULTI_GROUP, \ + nbl_disp_chan_remove_multi_group_req, \ + nbl_disp_chan_remove_multi_group_resp); \ + NBL_DISP_SET_OPS(dump_flow, nbl_disp_dump_flow, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_vsi_id, nbl_disp_get_vsi_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VSI_ID, \ + nbl_disp_chan_get_vsi_id_req, nbl_disp_chan_get_vsi_id_resp); \ + NBL_DISP_SET_OPS(get_eth_id, nbl_disp_get_eth_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_ID, \ + nbl_disp_chan_get_eth_id_req, nbl_disp_chan_get_eth_id_resp); \ + NBL_DISP_SET_OPS(enable_lag_protocol, nbl_disp_enable_lag_protocol, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(cfg_lag_hash_algorithm, nbl_disp_cfg_lag_hash_algorithm, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, \ + nbl_disp_chan_cfg_lag_hash_algorithm_req, \ + nbl_disp_chan_cfg_lag_hash_algorithm_resp); \ + NBL_DISP_SET_OPS(cfg_lag_member_fwd, nbl_disp_cfg_lag_member_fwd, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, \ + nbl_disp_chan_cfg_lag_member_fwd_req, \ + nbl_disp_chan_cfg_lag_member_fwd_resp); \ + NBL_DISP_SET_OPS(cfg_lag_member_list, nbl_disp_cfg_lag_member_list, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, \ + nbl_disp_chan_cfg_lag_member_list_req, \ + nbl_disp_chan_cfg_lag_member_list_resp); \ + NBL_DISP_SET_OPS(cfg_lag_member_up_attr, nbl_disp_cfg_lag_member_up_attr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, \ + nbl_disp_chan_cfg_lag_member_up_attr_req, \ + nbl_disp_chan_cfg_lag_member_up_attr_resp); \ + NBL_DISP_SET_OPS(add_lldp_flow, nbl_disp_add_lldp_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_LLDP_FLOW, \ + nbl_disp_chan_add_lldp_flow_req, nbl_disp_chan_add_lldp_flow_resp); \ + NBL_DISP_SET_OPS(del_lldp_flow, nbl_disp_del_lldp_flow, \ + NBL_DISP_CTRL_LVL_MGT, 
NBL_CHAN_MSG_DEL_LLDP_FLOW, \ + nbl_disp_chan_del_lldp_flow_req, nbl_disp_chan_del_lldp_flow_resp); \ + NBL_DISP_SET_OPS(add_lag_flow, nbl_disp_add_lag_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_LAG_FLOW, \ + nbl_disp_chan_add_lag_flow_req, nbl_disp_chan_add_lag_flow_resp); \ + NBL_DISP_SET_OPS(del_lag_flow, nbl_disp_del_lag_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_LAG_FLOW, \ + nbl_disp_chan_del_lag_flow_req, nbl_disp_chan_del_lag_flow_resp); \ + NBL_DISP_SET_OPS(cfg_duppkt_info, nbl_disp_cfg_duppkt_info, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(cfg_duppkt_mcc, nbl_disp_cfg_duppkt_mcc, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_DUPPKT_MCC, \ + nbl_disp_chan_cfg_duppkt_mcc_req, nbl_disp_chan_cfg_duppkt_mcc_resp); \ + NBL_DISP_SET_OPS(cfg_lag_mcc, nbl_disp_cfg_lag_mcc, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_LAG_MCC, \ + nbl_disp_chan_cfg_lag_mcc_req, nbl_disp_chan_cfg_lag_mcc_resp); \ + NBL_DISP_SET_OPS(cfg_bond_shaping, nbl_disp_cfg_bond_shaping, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_BOND_SHAPING, \ + nbl_disp_chan_cfg_bond_shaping_req, \ + nbl_disp_chan_cfg_bond_shaping_resp); \ + NBL_DISP_SET_OPS(cfg_bgid_back_pressure, nbl_disp_cfg_bgid_back_pressure, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, \ + nbl_disp_chan_cfg_bgid_back_pressure_req, \ + nbl_disp_chan_cfg_bgid_back_pressure_resp); \ + NBL_DISP_SET_OPS(set_promisc_mode, nbl_disp_set_promisc_mode, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PROSISC_MODE, \ + nbl_disp_chan_set_promisc_mode_req, \ + nbl_disp_chan_set_promisc_mode_resp); \ + NBL_DISP_SET_OPS(set_spoof_check_addr, nbl_disp_set_spoof_check_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, \ + nbl_disp_chan_set_spoof_check_addr_req, \ + nbl_disp_chan_set_spoof_check_addr_resp); \ + NBL_DISP_SET_OPS(set_vf_spoof_check, nbl_disp_set_vf_spoof_check, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, \ + nbl_disp_chan_set_vf_spoof_check_req, \ + nbl_disp_chan_set_vf_spoof_check_resp); \ + NBL_DISP_SET_OPS(get_base_mac_addr, nbl_disp_get_base_mac_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_BASE_MAC_ADDR, \ + nbl_disp_chan_get_base_mac_addr_req, \ + nbl_disp_chan_get_base_mac_addr_resp); \ + NBL_DISP_SET_OPS(get_tx_headroom, nbl_disp_get_tx_headroom, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_rep_feature, nbl_disp_get_rep_feature, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_eswitch_mode, nbl_disp_set_eswitch_mode, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_eswitch_mode, nbl_disp_get_eswitch_mode, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(alloc_rep_data, nbl_disp_alloc_rep_data, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(free_rep_data, nbl_disp_free_rep_data, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_rep_netdev_info, nbl_disp_set_rep_netdev_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(unset_rep_netdev_info, nbl_disp_unset_rep_netdev_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_rep_netdev_info, nbl_disp_get_rep_netdev_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_rep_stats, nbl_disp_get_rep_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_rep_index, nbl_disp_get_rep_index, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_firmware_version, 
nbl_disp_get_firmware_version, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FIRMWARE_VERSION, \ + nbl_disp_chan_get_firmware_version_req, \ + nbl_disp_chan_get_firmware_version_resp); \ + NBL_DISP_SET_OPS(get_driver_info, nbl_disp_get_driver_info, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_queue_stats, nbl_disp_get_queue_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_queue_err_stats, nbl_disp_get_queue_err_stats, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, \ + nbl_disp_chan_get_queue_err_stats_req, \ + nbl_disp_chan_get_queue_err_stats_resp); \ + NBL_DISP_SET_OPS(get_net_stats, nbl_disp_get_net_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_private_stat_len, nbl_disp_get_private_stat_len, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_private_stat_data, nbl_disp_get_private_stat_data, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_STATS, \ + nbl_disp_get_private_stat_data_req, \ + nbl_disp_chan_get_private_stat_data_resp); \ + NBL_DISP_SET_OPS(fill_private_stat_strings, nbl_disp_fill_private_stat_strings, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_max_desc_num, nbl_disp_get_max_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_min_desc_num, nbl_disp_get_min_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_tx_desc_num, nbl_disp_get_tx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_rx_desc_num, nbl_disp_get_rx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_tx_desc_num, nbl_disp_set_tx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_rx_desc_num, nbl_disp_set_rx_desc_num, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_eth_loopback, nbl_disp_set_eth_loopback, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_ETH_LOOPBACK, \ + nbl_disp_chan_set_eth_loopback_req, \ + nbl_disp_chan_set_eth_loopback_resp); \ + NBL_DISP_SET_OPS(clean_rx_lb_test, nbl_disp_clean_rx_lb_test, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_coalesce, nbl_disp_get_coalesce, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_COALESCE, \ + nbl_disp_chan_get_coalesce_req, \ + nbl_disp_chan_get_coalesce_resp); \ + NBL_DISP_SET_OPS(set_coalesce, nbl_disp_set_coalesce, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_COALESCE, \ + nbl_disp_chan_set_coalesce_req, \ + nbl_disp_chan_set_coalesce_resp); \ + NBL_DISP_SET_OPS(get_intr_suppress_level, nbl_disp_get_intr_suppress_level, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_intr_suppress_level, nbl_disp_set_intr_suppress_level, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, \ + nbl_disp_chan_set_intr_suppress_level_req, \ + nbl_disp_chan_set_intr_suppress_level_resp); \ + NBL_DISP_SET_OPS(get_rxfh_indir_size, nbl_disp_get_rxfh_indir_size, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE, \ + nbl_disp_chan_get_rxfh_indir_size_req, \ + nbl_disp_chan_get_rxfh_indir_size_resp); \ + NBL_DISP_SET_OPS(get_rxfh_rss_key_size, nbl_disp_get_rxfh_rss_key_size, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_rxfh_indir, nbl_disp_get_rxfh_indir, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_INDIR, \ + nbl_disp_chan_get_rxfh_indir_req, nbl_disp_chan_get_rxfh_indir_resp); \ + NBL_DISP_SET_OPS(get_rxfh_rss_key, nbl_disp_get_rxfh_rss_key, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_KEY, 
\ + nbl_disp_chan_get_rxfh_rss_key_req, \ + nbl_disp_chan_get_rxfh_rss_key_resp); \ + NBL_DISP_SET_OPS(get_rxfh_rss_alg_sel, nbl_disp_get_rxfh_rss_alg_sel, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, \ + nbl_disp_chan_get_rxfh_rss_alg_sel_req, \ + nbl_disp_chan_get_rxfh_rss_alg_sel_resp); \ + NBL_DISP_SET_OPS(cfg_txrx_vlan, nbl_disp_cfg_txrx_vlan, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(setup_rdma_id, nbl_disp_setup_rdma_id, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(remove_rdma_id, nbl_disp_remove_rdma_id, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(register_rdma, nbl_disp_register_rdma, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_RDMA, \ + nbl_disp_chan_register_rdma_req, nbl_disp_chan_register_rdma_resp); \ + NBL_DISP_SET_OPS(unregister_rdma, nbl_disp_unregister_rdma, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNREGISTER_RDMA, \ + nbl_disp_chan_unregister_rdma_req, nbl_disp_chan_unregister_rdma_resp);\ + NBL_DISP_SET_OPS(register_rdma_bond, nbl_disp_register_rdma_bond, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_RDMA_BOND, \ + nbl_disp_chan_register_rdma_bond_req, \ + nbl_disp_chan_register_rdma_bond_resp); \ + NBL_DISP_SET_OPS(unregister_rdma_bond, nbl_disp_unregister_rdma_bond, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, \ + nbl_disp_chan_unregister_rdma_bond_req, \ + nbl_disp_chan_unregister_rdma_bond_resp); \ + NBL_DISP_SET_OPS(get_hw_addr, nbl_disp_get_hw_addr, \ + NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_real_hw_addr, nbl_disp_get_real_hw_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REAL_HW_ADDR, \ + nbl_disp_chan_get_real_hw_addr_req, \ + nbl_disp_chan_get_real_hw_addr_resp); \ + NBL_DISP_SET_OPS(get_function_id, nbl_disp_get_function_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FUNCTION_ID, \ + nbl_disp_chan_get_function_id_req, nbl_disp_chan_get_function_id_resp);\ + NBL_DISP_SET_OPS(get_real_bdf, nbl_disp_get_real_bdf, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REAL_BDF, \ + nbl_disp_chan_get_real_bdf_req, nbl_disp_chan_get_real_bdf_resp); \ + NBL_DISP_SET_OPS(check_fw_heartbeat, nbl_disp_check_fw_heartbeat, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(check_fw_reset, nbl_disp_check_fw_reset, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_lock, nbl_disp_flash_lock, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_unlock, nbl_disp_flash_unlock, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_prepare, nbl_disp_flash_prepare, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_image, nbl_disp_flash_image, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(flash_activate, nbl_disp_flash_activate, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_phy_caps, nbl_disp_get_phy_caps, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PHY_CAPS, \ + nbl_disp_chan_get_phy_caps_req, \ + nbl_disp_chan_get_phy_caps_resp); \ + NBL_DISP_SET_OPS(set_sfp_state, nbl_disp_set_sfp_state, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_SFP_STATE, \ + nbl_disp_chan_set_sfp_state_req, \ + nbl_disp_chan_set_sfp_state_resp); \ + NBL_DISP_SET_OPS(passthrough_fw_cmd, nbl_disp_passthrough_fw_cmd, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(check_active_vf, nbl_disp_check_active_vf, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CHECK_ACTIVE_VF, \ + nbl_disp_chan_check_active_vf_req, \ + 
nbl_disp_chan_check_active_vf_resp); \ + NBL_DISP_SET_OPS(get_adminq_tx_buf_size, nbl_disp_get_adminq_tx_buf_size, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(emp_console_write, nbl_disp_adminq_emp_console_write, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_product_flex_cap, nbl_disp_get_product_flex_cap, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, \ + nbl_disp_chan_get_product_flex_cap_req, \ + nbl_disp_chan_get_product_flex_cap_resp); \ + NBL_DISP_SET_OPS(get_product_fix_cap, nbl_disp_get_product_fix_cap, \ + NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(alloc_ktls_tx_index, nbl_disp_alloc_ktls_tx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, \ + nbl_disp_chan_alloc_ktls_tx_index_req, \ + nbl_disp_chan_alloc_ktls_tx_index_resp); \ + NBL_DISP_SET_OPS(free_ktls_tx_index, nbl_disp_free_ktls_tx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, \ + nbl_disp_chan_free_ktls_tx_index_req, \ + nbl_disp_chan_free_ktls_tx_index_resp); \ + NBL_DISP_SET_OPS(cfg_ktls_tx_keymat, nbl_disp_cfg_ktls_tx_keymat, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, \ + nbl_disp_chan_cfg_ktls_tx_keymat_req, \ + nbl_disp_chan_cfg_ktls_tx_keymat_resp); \ + NBL_DISP_SET_OPS(alloc_ktls_rx_index, nbl_disp_alloc_ktls_rx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, \ + nbl_disp_chan_alloc_ktls_rx_index_req, \ + nbl_disp_chan_alloc_ktls_rx_index_resp); \ + NBL_DISP_SET_OPS(free_ktls_rx_index, nbl_disp_free_ktls_rx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, \ + nbl_disp_chan_free_ktls_rx_index_req, \ + nbl_disp_chan_free_ktls_rx_index_resp); \ + NBL_DISP_SET_OPS(cfg_ktls_rx_keymat, nbl_disp_cfg_ktls_rx_keymat, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, \ + nbl_disp_chan_cfg_ktls_rx_keymat_req, \ + nbl_disp_chan_cfg_ktls_rx_keymat_resp); \ + NBL_DISP_SET_OPS(cfg_ktls_rx_record, nbl_disp_cfg_ktls_rx_record, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, \ + nbl_disp_chan_cfg_ktls_rx_record_req, \ + nbl_disp_chan_cfg_ktls_rx_record_resp); \ + NBL_DISP_SET_OPS(add_ktls_rx_flow, nbl_disp_add_ktls_rx_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, \ + nbl_disp_chan_add_ktls_rx_flow_req, \ + nbl_disp_chan_add_ktls_rx_flow_resp); \ + NBL_DISP_SET_OPS(del_ktls_rx_flow, nbl_disp_del_ktls_rx_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, \ + nbl_disp_chan_del_ktls_rx_flow_req, \ + nbl_disp_chan_del_ktls_rx_flow_resp); \ + NBL_DISP_SET_OPS(alloc_ipsec_tx_index, nbl_disp_alloc_ipsec_tx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, \ + nbl_disp_chan_alloc_ipsec_tx_index_req, \ + nbl_disp_chan_alloc_ipsec_tx_index_resp); \ + NBL_DISP_SET_OPS(free_ipsec_tx_index, nbl_disp_free_ipsec_tx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_IPSEC_TX_INDEX, \ + nbl_disp_chan_free_ipsec_tx_index_req, \ + nbl_disp_chan_free_ipsec_tx_index_resp); \ + NBL_DISP_SET_OPS(alloc_ipsec_rx_index, nbl_disp_alloc_ipsec_rx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, \ + nbl_disp_chan_alloc_ipsec_rx_index_req, \ + nbl_disp_chan_alloc_ipsec_rx_index_resp); \ + NBL_DISP_SET_OPS(free_ipsec_rx_index, nbl_disp_free_ipsec_rx_index, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_FREE_IPSEC_RX_INDEX, \ + nbl_disp_chan_free_ipsec_rx_index_req, \ + nbl_disp_chan_free_ipsec_rx_index_resp); \ + NBL_DISP_SET_OPS(cfg_ipsec_tx_sad, nbl_disp_cfg_ipsec_tx_sad, \ + NBL_DISP_CTRL_LVL_MGT, 
NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, \ + nbl_disp_chan_cfg_ipsec_tx_sad_req, \ + nbl_disp_chan_cfg_ipsec_tx_sad_resp); \ + NBL_DISP_SET_OPS(cfg_ipsec_rx_sad, nbl_disp_cfg_ipsec_rx_sad, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, \ + nbl_disp_chan_cfg_ipsec_rx_sad_req, \ + nbl_disp_chan_cfg_ipsec_rx_sad_resp); \ + NBL_DISP_SET_OPS(add_ipsec_tx_flow, nbl_disp_add_ipsec_tx_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, \ + nbl_disp_chan_add_ipsec_tx_flow_req, \ + nbl_disp_chan_add_ipsec_tx_flow_resp); \ + NBL_DISP_SET_OPS(del_ipsec_tx_flow, nbl_disp_del_ipsec_tx_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_IPSEC_TX_FLOW, \ + nbl_disp_chan_del_ipsec_tx_flow_req, \ + nbl_disp_chan_del_ipsec_tx_flow_resp); \ + NBL_DISP_SET_OPS(add_ipsec_rx_flow, nbl_disp_add_ipsec_rx_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, \ + nbl_disp_chan_add_ipsec_rx_flow_req, \ + nbl_disp_chan_add_ipsec_rx_flow_resp); \ + NBL_DISP_SET_OPS(del_ipsec_rx_flow, nbl_disp_del_ipsec_rx_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DEL_IPSEC_RX_FLOW, \ + nbl_disp_chan_del_ipsec_rx_flow_req, \ + nbl_disp_chan_del_ipsec_rx_flow_resp); \ + NBL_DISP_SET_OPS(check_ipsec_status, nbl_disp_check_ipsec_status, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_dipsec_lft_info, nbl_disp_get_dipsec_lft_info, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(handle_dipsec_soft_expire, nbl_disp_handle_dipsec_soft_expire, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(handle_dipsec_hard_expire, nbl_disp_handle_dipsec_hard_expire, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_uipsec_lft_info, nbl_disp_get_uipsec_lft_info, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(handle_uipsec_soft_expire, nbl_disp_handle_uipsec_soft_expire, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(handle_uipsec_hard_expire, nbl_disp_handle_uipsec_hard_expire, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(get_mbx_irq_num, nbl_disp_get_mbx_irq_num, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MBX_IRQ_NUM, \ + nbl_disp_chan_get_mbx_irq_num_req, \ + nbl_disp_chan_get_mbx_irq_num_resp); \ + NBL_DISP_SET_OPS(get_adminq_irq_num, nbl_disp_get_adminq_irq_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_abnormal_irq_num, nbl_disp_get_abnormal_irq_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(clear_accel_flow, nbl_disp_clear_accel_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, \ + nbl_disp_chan_clear_accel_flow_req, \ + nbl_disp_chan_clear_accel_flow_resp); \ + NBL_DISP_SET_OPS(clear_flow, nbl_disp_clear_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_FLOW, \ + nbl_disp_chan_clear_flow_req, nbl_disp_chan_clear_flow_resp); \ + NBL_DISP_SET_OPS(clear_queues, nbl_disp_clear_queues, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CLEAR_QUEUE, \ + nbl_disp_chan_clear_queues_req, nbl_disp_chan_clear_queues_resp); \ + NBL_DISP_SET_OPS(disable_phy_flow, nbl_disp_disable_phy_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_DISABLE_PHY_FLOW, \ + nbl_disp_chan_disable_phy_flow_req, \ + nbl_disp_chan_disable_phy_flow_resp); \ + NBL_DISP_SET_OPS(enable_phy_flow, nbl_disp_enable_phy_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ENABLE_PHY_FLOW, \ + nbl_disp_chan_enable_phy_flow_req, \ + nbl_disp_chan_enable_phy_flow_resp); \ + NBL_DISP_SET_OPS(init_acl, nbl_disp_init_acl, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_INIT_ACL, \ + 
nbl_disp_chan_init_acl_req, \ + nbl_disp_chan_init_acl_resp); \ + NBL_DISP_SET_OPS(uninit_acl, nbl_disp_uninit_acl, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNINIT_ACL, \ + nbl_disp_chan_uninit_acl_req, \ + nbl_disp_chan_uninit_acl_resp); \ + NBL_DISP_SET_OPS(set_upcall_rule, nbl_disp_set_upcall_rule, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_UPCALL_RULE, \ + nbl_disp_chan_set_upcall_rule_req, \ + nbl_disp_chan_set_upcall_rule_resp); \ + NBL_DISP_SET_OPS(unset_upcall_rule, nbl_disp_unset_upcall_rule, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNSET_UPCALL_RULE, \ + nbl_disp_chan_unset_upcall_rule_req, \ + nbl_disp_chan_unset_upcall_rule_resp); \ + NBL_DISP_SET_OPS(set_shaping_dport_vld, nbl_disp_set_shaping_dport_vld, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, \ + nbl_disp_chan_set_shaping_dport_vld_req, \ + nbl_disp_chan_set_shaping_dport_vld_resp); \ + NBL_DISP_SET_OPS(set_dport_fc_th_vld, nbl_disp_set_dport_fc_th_vld, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, \ + nbl_disp_chan_set_dport_fc_th_vld_req, \ + nbl_disp_chan_set_dport_fc_th_vld_resp); \ + NBL_DISP_SET_OPS(check_offload_status, nbl_disp_check_offload_status, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_reg_dump, nbl_disp_get_reg_dump, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REG_DUMP, \ + nbl_disp_chan_get_reg_dump_req, \ + nbl_disp_chan_get_reg_dump_resp); \ + NBL_DISP_SET_OPS(get_reg_dump_len, nbl_disp_get_reg_dump_len, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REG_DUMP_LEN, \ + nbl_disp_chan_get_reg_dump_len_req, \ + nbl_disp_chan_get_reg_dump_len_resp); \ + NBL_DISP_SET_OPS(get_p4_info, nbl_disp_get_p4_info, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(load_p4, nbl_disp_load_p4, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(load_p4_default, nbl_disp_load_p4_default, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_p4_used, nbl_disp_get_p4_used, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_P4_USED, \ + nbl_disp_chan_get_p4_used_req, nbl_disp_chan_get_p4_used_resp); \ + NBL_DISP_SET_OPS(set_p4_used, nbl_disp_set_p4_used, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_p4_version, nbl_disp_get_p4_version, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_board_id, nbl_disp_get_board_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_BOARD_ID, \ + nbl_disp_chan_get_board_id_req, nbl_disp_chan_get_board_id_resp); \ + NBL_DISP_SET_OPS(restore_abnormal_ring, nbl_disp_restore_abnormal_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(restart_abnormal_ring, nbl_disp_restart_abnormal_ring, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(restore_hw_queue, nbl_disp_restore_hw_queue, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_RESTORE_HW_QUEUE, \ + nbl_disp_chan_restore_hw_queue_req, \ + nbl_disp_chan_restore_hw_queue_resp); \ + NBL_DISP_SET_OPS(stop_abnormal_hw_queue, nbl_disp_stop_abnormal_hw_queue, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, \ + nbl_disp_chan_stop_abnormal_hw_queue_req, \ + nbl_disp_chan_stop_abnormal_hw_queue_resp); \ + NBL_DISP_SET_OPS(stop_abnormal_sw_queue, nbl_disp_stop_abnormal_sw_queue, \ + NBL_DISP_CTRL_LVL_NET, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_local_queue_id, nbl_disp_get_local_queue_id, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_REGISTER_NET_REP, NULL, \ + nbl_disp_chan_register_net_rep_resp); \ + 
NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_UNREGISTER_NET_REP, NULL, \ + nbl_disp_chan_unregister_net_rep_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_REGISTER_ETH_REP, NULL, \ + nbl_disp_chan_register_eth_rep_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_UNREGISTER_ETH_REP, NULL, \ + nbl_disp_chan_unregister_eth_rep_resp); \ + NBL_DISP_SET_OPS(get_vsi_global_queue_id, nbl_disp_get_vsi_global_qid, \ + NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, \ + nbl_disp_chan_get_vsi_global_qid_req, \ + nbl_disp_chan_get_vsi_global_qid_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_LINE_RATE_INFO, \ + NULL, nbl_disp_chan_get_line_rate_info_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_REGISTER_UPCALL_PORT, NULL, \ + nbl_disp_chan_register_upcall_port_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_UNREGISTER_UPCALL_PORT, NULL, \ + nbl_disp_chan_unregister_upcall_port_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_SET_OFFLOAD_STATUS, NULL, \ + nbl_disp_chan_set_offload_status_resp); \ + NBL_DISP_SET_OPS(get_port_attributes, nbl_disp_get_port_attributes, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(update_ring_num, nbl_disp_update_ring_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(update_rdma_cap, nbl_disp_update_rdma_cap, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(get_rdma_cap_num, nbl_disp_get_rdma_cap_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(update_rdma_mem_type, nbl_disp_update_rdma_mem_type, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(set_ring_num, nbl_disp_set_ring_num, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(enable_port, nbl_disp_enable_port, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(init_port, nbl_disp_init_port, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(dummy_func, NULL, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_ADMINQ_PORT_NOTIFY, \ + NULL, \ + nbl_disp_chan_recv_port_notify_resp); \ + NBL_DISP_SET_OPS(get_port_state, nbl_disp_get_port_state, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PORT_STATE, \ + nbl_disp_chan_get_port_state_req, \ + nbl_disp_chan_get_port_state_resp); \ + NBL_DISP_SET_OPS(set_port_advertising, nbl_disp_set_port_advertising, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PORT_ADVERTISING, \ + nbl_disp_chan_set_port_advertising_req, \ + nbl_disp_chan_set_port_advertising_resp); \ + NBL_DISP_SET_OPS(get_module_info, nbl_disp_get_module_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MODULE_INFO, \ + nbl_disp_chan_get_module_info_req, \ + nbl_disp_chan_get_module_info_resp); \ + NBL_DISP_SET_OPS(get_module_eeprom, nbl_disp_get_module_eeprom, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MODULE_EEPROM, \ + nbl_disp_chan_get_module_eeprom_req, \ + nbl_disp_chan_get_module_eeprom_resp); \ + NBL_DISP_SET_OPS(get_link_state, nbl_disp_get_link_state, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_STATE, \ + nbl_disp_chan_get_link_state_req, \ + nbl_disp_chan_get_link_state_resp); \ + NBL_DISP_SET_OPS(cfg_eth_bond_event, nbl_disp_cfg_eth_bond_event, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_INIT_OFLD, NULL, \ + 
nbl_disp_chan_init_offload_fwd_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_INIT_CMDQ, NULL, \ + nbl_disp_chan_init_cmdq_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_DESTROY_CMDQ, NULL, \ + nbl_disp_chan_destroy_cmdq_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_RESET_CMDQ, NULL, \ + nbl_disp_chan_reset_cmdq_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_OFFLOAD_FLOW_RULE, NULL, \ + nbl_disp_chan_offload_flow_rule_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_ACL_SWITCH, NULL, \ + nbl_disp_chan_get_flow_acl_switch_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_INIT_REP, NULL, \ + nbl_disp_chan_init_rep_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_INIT_FLOW, NULL, \ + nbl_disp_chan_init_flow_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_DEINIT_FLOW, NULL, \ + nbl_disp_chan_deinit_flow_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_QUEUE_CXT, NULL, \ + nbl_disp_chan_get_queue_cxt_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_CFG_LOG, NULL, \ + nbl_disp_chan_cfg_log_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_INIT_VDPAQ, NULL, \ + nbl_disp_chan_init_vdpaq_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_DESTROY_VDPAQ, NULL, \ + nbl_disp_chan_destroy_vdpaq_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_GET_UPCALL_PORT, NULL, \ + nbl_disp_chan_get_upcall_port_resp); \ + NBL_DISP_SET_OPS(configure_virtio_dev_msix, nbl_disp_configure_virtio_dev_msix, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(configure_rdma_msix_off, nbl_disp_configure_rdma_msix_off, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(configure_virtio_dev_ready, nbl_disp_configure_virtio_dev_ready, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_eth_mac_addr, nbl_disp_set_eth_mac_addr, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_ETH_MAC_ADDR, \ + nbl_disp_chan_set_eth_mac_addr_req, \ + nbl_disp_chan_set_eth_mac_addr_resp); \ + NBL_DISP_SET_OPS(get_chip_temperature, nbl_disp_get_chip_temperature, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, \ + nbl_disp_chan_get_chip_temperature_req, \ + nbl_disp_chan_get_chip_temperature_resp); \ + NBL_DISP_SET_OPS(get_module_temperature, nbl_disp_get_module_temperature, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, \ + nbl_disp_chan_get_module_temperature_req, \ + nbl_disp_chan_get_module_temperature_resp); \ + NBL_DISP_SET_OPS(process_abnormal_event, nbl_disp_process_abnormal_event, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(switchdev_init_cmdq, nbl_disp_switchdev_init_cmdq, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, \ + nbl_disp_chan_switchdev_init_cmdq_req, \ + nbl_disp_chan_switchdev_init_cmdq_resp); \ + NBL_DISP_SET_OPS(switchdev_deinit_cmdq, nbl_disp_switchdev_deinit_cmdq, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, \ + nbl_disp_chan_switchdev_deinit_cmdq_req, \ + nbl_disp_chan_switchdev_deinit_cmdq_resp); \ + NBL_DISP_SET_OPS(add_tc_flow, nbl_disp_add_tc_flow, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + 
NBL_DISP_SET_OPS(del_tc_flow, nbl_disp_del_tc_flow, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(tc_tun_encap_lookup, nbl_disp_tc_tun_encap_lookup, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(tc_tun_encap_del, nbl_disp_tc_tun_encap_del, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(tc_tun_encap_add, nbl_disp_tc_tun_encap_add, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flow_index_lookup, nbl_disp_flow_index_lookup, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(set_tc_flow_info, nbl_disp_set_tc_flow_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_TC_FLOW_INFO, \ + nbl_disp_chan_set_tc_flow_info_req, \ + nbl_disp_chan_set_tc_flow_info_resp); \ + NBL_DISP_SET_OPS(unset_tc_flow_info, nbl_disp_unset_tc_flow_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, \ + nbl_disp_chan_unset_tc_flow_info_req, \ + nbl_disp_chan_unset_tc_flow_info_resp); \ + NBL_DISP_SET_OPS(get_tc_flow_info, nbl_disp_get_tc_flow_info, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(query_tc_stats, nbl_disp_query_tc_stats, \ + NBL_DISP_CTRL_LVL_NET, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(adapt_desc_gother, nbl_disp_adapt_desc_gother, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_net, nbl_disp_flr_clear_net, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_accel, nbl_disp_flr_clear_accel, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_queues, nbl_disp_flr_clear_queues, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_accel_flow, nbl_disp_flr_clear_accel_flow, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_flows, nbl_disp_flr_clear_flows, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_interrupt, nbl_disp_flr_clear_interrupt, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(flr_clear_rdma, nbl_disp_flr_clear_rdma, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(covert_vfid_to_vsi_id, nbl_disp_covert_vfid_to_vsi_id, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(unmask_all_interrupts, nbl_disp_unmask_all_interrupts, \ + NBL_DISP_CTRL_LVL_MGT, -1, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(keep_alive, nbl_disp_keep_alive_req, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_KEEP_ALIVE, \ + nbl_disp_keep_alive_req, \ + nbl_disp_chan_keep_alive_resp); \ + NBL_DISP_SET_OPS(ctrl_port_led, nbl_disp_ctrl_port_led, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CTRL_PORT_LED, \ + nbl_disp_chan_ctrl_port_led_req, nbl_disp_chan_ctrl_port_led_resp); \ + NBL_DISP_SET_OPS(nway_reset, nbl_disp_nway_reset, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_NWAY_RESET, \ + nbl_disp_chan_nway_reset_req, nbl_disp_chan_nway_reset_resp); \ + NBL_DISP_SET_OPS(get_rep_queue_info, nbl_disp_get_rep_queue_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_REP_QUEUE_INFO, \ + nbl_disp_chan_get_rep_queue_info_req, \ + nbl_disp_chan_get_rep_queue_info_resp); \ + NBL_DISP_SET_OPS(get_user_queue_info, nbl_disp_get_user_queue_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_USER_QUEUE_INFO, \ + nbl_disp_chan_get_user_queue_info_req, \ + nbl_disp_chan_get_user_queue_info_resp); \ + NBL_DISP_SET_OPS(dummy_func, NULL, NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_GET_BOARD_INFO, NULL, \ + nbl_disp_chan_get_board_info_resp); \ + NBL_DISP_SET_OPS(get_vf_base_vsi_id, nbl_disp_get_vf_base_vsi_id, 
\ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, \ + nbl_disp_chan_get_vf_base_vsi_id_req, \ + nbl_disp_chan_get_vf_base_vsi_id_resp); \ + NBL_DISP_SET_OPS(cfg_eth_bond_info, nbl_disp_cfg_eth_bond_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_ETH_BOND_INFO, \ + nbl_disp_chan_cfg_eth_bond_info_req, \ + nbl_disp_chan_cfg_eth_bond_info_resp); \ + NBL_DISP_SET_OPS(get_eth_bond_info, nbl_disp_get_eth_bond_info, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_BOND_INFO, \ + nbl_disp_chan_get_eth_bond_info_req, \ + nbl_disp_chan_get_eth_bond_info_resp); \ + NBL_DISP_SET_OPS(add_nd_upcall_flow, nbl_disp_chan_add_nd_upcall_flow, \ + NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, \ + nbl_disp_chan_add_nd_upcall_flow_req, \ + nbl_disp_chan_add_nd_upcall_flow_resp); \ + NBL_DISP_SET_OPS(del_nd_upcall_flow, nbl_disp_chan_del_nd_upcall_flow, \ + NBL_DISP_CTRL_LVL_MGT, \ + NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, \ + nbl_disp_chan_del_nd_upcall_flow_req, \ + nbl_disp_chan_del_nd_upcall_flow_resp); \ + NBL_DISP_SET_OPS(set_bridge_mode, nbl_disp_set_bridge_mode, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_BRIDGE_MODE, \ + nbl_disp_chan_set_bridge_mode_req, \ + nbl_disp_chan_set_bridge_mode_resp); \ + NBL_DISP_SET_OPS(get_vf_function_id, nbl_disp_get_vf_function_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_FUNCTION_ID, \ + nbl_disp_chan_get_vf_function_id_req, \ + nbl_disp_chan_get_vf_function_id_resp); \ + NBL_DISP_SET_OPS(get_vf_vsi_id, nbl_disp_get_vf_vsi_id, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_VF_VSI_ID, \ + nbl_disp_chan_get_vf_vsi_id_req, \ + nbl_disp_chan_get_vf_vsi_id_resp); \ + NBL_DISP_SET_OPS(set_pmd_debug, nbl_disp_set_pmd_debug, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PMD_DEBUG, \ + NULL, NULL); \ + NBL_DISP_SET_OPS(register_func_mac, nbl_disp_register_func_mac, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_MAC, \ + nbl_disp_chan_register_func_mac_req, \ + nbl_disp_chan_register_func_mac_resp); \ + NBL_DISP_SET_OPS(set_tx_rate, nbl_disp_set_tx_rate, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_TX_RATE, \ + nbl_disp_chan_set_tx_rate_req, nbl_disp_chan_set_tx_rate_resp); \ + NBL_DISP_SET_OPS(register_func_link_forced, nbl_disp_register_func_link_forced, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, \ + nbl_disp_chan_register_func_link_forced_req, \ + nbl_disp_chan_register_func_link_forced_resp); \ + NBL_DISP_SET_OPS(get_link_forced, nbl_disp_get_link_forced, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_LINK_FORCED, \ + nbl_disp_chan_get_link_forced_req, nbl_disp_chan_get_link_forced_resp);\ + NBL_DISP_SET_OPS(get_driver_version, nbl_disp_get_driver_version, \ + NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \ + NBL_DISP_SET_OPS(register_func_vlan, nbl_disp_register_func_vlan, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_VLAN, \ + nbl_disp_chan_register_func_vlan_req, \ + nbl_disp_chan_register_func_vlan_resp); \ + NBL_DISP_SET_OPS(register_func_rate, nbl_disp_register_func_rate, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REGISTER_FUNC_RATE, \ + nbl_disp_chan_register_func_rate_req, \ + nbl_disp_chan_register_func_rate_resp); \ + NBL_DISP_SET_OPS(get_fd_flow, nbl_disp_get_fd_flow, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW, \ + nbl_disp_chan_get_fd_flow_req, nbl_disp_chan_get_fd_flow_resp); \ + NBL_DISP_SET_OPS(get_fd_flow_cnt, nbl_disp_get_fd_flow_cnt, \ + NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW_CNT, \ + nbl_disp_chan_get_fd_flow_cnt_req, nbl_disp_chan_get_fd_flow_cnt_resp);\ + NBL_DISP_SET_OPS(get_fd_flow_all, 
nbl_disp_get_fd_flow_all, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW_ALL, \
+		nbl_disp_chan_get_fd_flow_all_req, nbl_disp_chan_get_fd_flow_all_resp);\
+	NBL_DISP_SET_OPS(get_fd_flow_max, nbl_disp_get_fd_flow_max, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_FD_FLOW_MAX, \
+		nbl_disp_chan_get_fd_flow_max_req, nbl_disp_chan_get_fd_flow_max_resp);\
+	NBL_DISP_SET_OPS(replace_fd_flow, nbl_disp_replace_fd_flow, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REPLACE_FD_FLOW, \
+		nbl_disp_chan_replace_fd_flow_req, nbl_disp_chan_replace_fd_flow_resp);\
+	NBL_DISP_SET_OPS(remove_fd_flow, nbl_disp_remove_fd_flow, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_FD_FLOW, \
+		nbl_disp_chan_remove_fd_flow_req, nbl_disp_chan_remove_fd_flow_resp); \
+	NBL_DISP_SET_OPS(config_fd_flow_state, nbl_disp_config_fd_flow_state, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_FD_FLOW_STATE, \
+		nbl_disp_chan_config_fd_flow_state_req, \
+		nbl_disp_chan_config_fd_flow_state_resp); \
+	NBL_DISP_SET_OPS(cfg_fd_update_event, nbl_disp_cfg_fd_update_event, \
+		NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(dump_fd_flow, nbl_disp_dump_fd_flow, \
+		NBL_DISP_CTRL_LVL_MGT, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_xdp_queue_info, nbl_disp_get_xdp_queue_info, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, \
+		nbl_disp_chan_get_xdp_queue_info_req, \
+		nbl_disp_chan_get_xdp_queue_info_resp); \
+	NBL_DISP_SET_OPS(set_hw_status, nbl_disp_set_hw_status, \
+		NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(get_active_func_bitmaps, nbl_disp_get_active_func_bitmaps, \
+		NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+	NBL_DISP_SET_OPS(configure_qos, nbl_disp_configure_qos, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CONFIGURE_QOS, \
+		nbl_disp_chan_configure_qos_req, \
+		nbl_disp_chan_configure_qos_resp); \
+	NBL_DISP_SET_OPS(get_pfc_buffer_size, nbl_disp_get_pfc_buffer_size, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, \
+		nbl_disp_chan_get_pfc_buffer_size_req, \
+		nbl_disp_chan_get_pfc_buffer_size_resp); \
+	NBL_DISP_SET_OPS(set_pfc_buffer_size, nbl_disp_set_pfc_buffer_size, \
+		NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, \
+		nbl_disp_chan_set_pfc_buffer_size_req, \
+		nbl_disp_chan_set_pfc_buffer_size_resp); \
+} while (0)
+
+/* Structure starts here; adding an op should not require modifying anything below. */
+static int nbl_disp_setup_msg(struct nbl_dispatch_mgt *disp_mgt)
+{
+	struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+	int ret = 0;
+
+	if (!chan_ops->check_queue_exist(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt),
+					 NBL_CHAN_TYPE_MAILBOX))
+		return 0;
+
+	mutex_init(&disp_mgt->ops_muxtex_lock);
+	spin_lock_init(&disp_mgt->ops_spin_lock);
+	disp_mgt->ops_lock_required = true;
+
+#define NBL_DISP_SET_OPS(disp_op, res_func, ctrl_lvl, msg_type, msg_req, msg_resp) \
+do { \
+	typeof(msg_type) _msg_type = (msg_type); \
+	if (_msg_type >= 0) \
+		ret += chan_ops->register_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), \
+					      _msg_type, msg_resp, disp_mgt); \
+} while (0)
+	NBL_DISP_OPS_TBL;
+#undef NBL_DISP_SET_OPS
+
+	return ret;
+}
+
+/* Ctrl lvl means that if a certain level is set, then all disp_ops declared with this lvl
+ * will go directly to res_ops, rather than sending a channel msg, and vice versa.
+ */
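+/* Illustration, derived from the table above rather than from any single call
+ * site: with NBL_DISP_CTRL_LVL_MGT set (nbl_disp_init() below sets it when
+ * param->caps.has_ctrl is true), get_eth_bond_info resolves to
+ * nbl_disp_get_eth_bond_info and is served locally through res_ops; without
+ * it, the same op resolves to nbl_disp_chan_get_eth_bond_info_req, which
+ * sends NBL_CHAN_MSG_GET_ETH_BOND_INFO over the mailbox to the function that
+ * registered nbl_disp_chan_get_eth_bond_info_resp in nbl_disp_setup_msg().
+ */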
+static int nbl_disp_setup_ctrl_lvl(struct nbl_dispatch_mgt *disp_mgt, u32 lvl)
+{
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_DISP_MGT_TO_DISP_OPS(disp_mgt);
+
+	set_bit(lvl, disp_mgt->ctrl_lvl);
+
+#define NBL_DISP_SET_OPS(disp_op, res_func, ctrl, msg_type, msg_req, msg_resp) \
+do { \
+	disp_ops->NBL_NAME(disp_op) = test_bit(ctrl, disp_mgt->ctrl_lvl) ? res_func : msg_req; \
+} while (0)
+	NBL_DISP_OPS_TBL;
+#undef NBL_DISP_SET_OPS
+
+	return 0;
+}
+
+static int nbl_disp_setup_disp_mgt(struct nbl_common_info *common,
+				   struct nbl_dispatch_mgt **disp_mgt)
+{
+	struct device *dev;
+
+	dev = NBL_COMMON_TO_DEV(common);
+	*disp_mgt = devm_kzalloc(dev, sizeof(struct nbl_dispatch_mgt), GFP_KERNEL);
+	if (!*disp_mgt)
+		return -ENOMEM;
+
+	NBL_DISP_MGT_TO_COMMON(*disp_mgt) = common;
+	return 0;
+}
+
+static void nbl_disp_remove_disp_mgt(struct nbl_common_info *common,
+				     struct nbl_dispatch_mgt **disp_mgt)
+{
+	struct device *dev;
+
+	dev = NBL_COMMON_TO_DEV(common);
+	devm_kfree(dev, *disp_mgt);
+	*disp_mgt = NULL;
+}
+
+static void nbl_disp_remove_ops(struct device *dev, struct nbl_dispatch_ops_tbl **disp_ops_tbl)
+{
+	devm_kfree(dev, NBL_DISP_OPS_TBL_TO_OPS(*disp_ops_tbl));
+	devm_kfree(dev, *disp_ops_tbl);
+	*disp_ops_tbl = NULL;
+}
+
+static int nbl_disp_setup_ops(struct device *dev, struct nbl_dispatch_ops_tbl **disp_ops_tbl,
+			      struct nbl_dispatch_mgt *disp_mgt)
+{
+	struct nbl_dispatch_ops *disp_ops;
+
+	*disp_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_dispatch_ops_tbl), GFP_KERNEL);
+	if (!*disp_ops_tbl)
+		return -ENOMEM;
+
+	disp_ops = devm_kzalloc(dev, sizeof(struct nbl_dispatch_ops), GFP_KERNEL);
+	if (!disp_ops)
+		return -ENOMEM;
+
+	NBL_DISP_OPS_TBL_TO_OPS(*disp_ops_tbl) = disp_ops;
+	NBL_DISP_OPS_TBL_TO_PRIV(*disp_ops_tbl) = disp_mgt;
+
+	return 0;
+}
+
+int nbl_disp_init(void *p, struct nbl_init_param *param)
+{
+	struct nbl_adapter *adapter = (struct nbl_adapter *)p;
+	struct device *dev = NBL_ADAPTER_TO_DEV(adapter);
+	struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+	struct nbl_dispatch_mgt **disp_mgt =
+		(struct nbl_dispatch_mgt **)&NBL_ADAPTER_TO_DISP_MGT(adapter);
+	struct nbl_dispatch_ops_tbl **disp_ops_tbl = &NBL_ADAPTER_TO_DISP_OPS_TBL(adapter);
+	struct nbl_resource_ops_tbl *res_ops_tbl = NBL_ADAPTER_TO_RES_OPS_TBL(adapter);
+	struct nbl_channel_ops_tbl *chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter);
+	int ret = 0;
+
+	ret = nbl_disp_setup_disp_mgt(common, disp_mgt);
+	if (ret)
+		goto setup_mgt_fail;
+
+	ret = nbl_disp_setup_ops(dev, disp_ops_tbl, *disp_mgt);
+	if (ret)
+		goto setup_ops_fail;
+
+	NBL_DISP_MGT_TO_RES_OPS_TBL(*disp_mgt) = res_ops_tbl;
+	NBL_DISP_MGT_TO_CHAN_OPS_TBL(*disp_mgt) = chan_ops_tbl;
+	NBL_DISP_MGT_TO_DISP_OPS_TBL(*disp_mgt) = *disp_ops_tbl;
+
+	ret = nbl_disp_setup_msg(*disp_mgt);
+	if (ret)
+		goto setup_msg_fail;
+
+	if (param->caps.has_ctrl || param->caps.has_factory_ctrl) {
+		ret = nbl_disp_setup_ctrl_lvl(*disp_mgt, NBL_DISP_CTRL_LVL_MGT);
+		if (ret)
+			goto setup_msg_fail;
+	}
+
+	if (param->caps.has_net || param->caps.has_factory_ctrl) {
+		ret = nbl_disp_setup_ctrl_lvl(*disp_mgt, NBL_DISP_CTRL_LVL_NET);
+		if (ret)
+			goto setup_msg_fail;
+	}
+
+	ret = nbl_disp_setup_ctrl_lvl(*disp_mgt, NBL_DISP_CTRL_LVL_ALWAYS);
+	if (ret)
+		goto setup_msg_fail;
+
+	return 0;
+
+setup_msg_fail:
+	nbl_disp_remove_ops(dev, disp_ops_tbl);
+setup_ops_fail:
+	nbl_disp_remove_disp_mgt(common, disp_mgt);
+setup_mgt_fail:
+	return ret;
+}
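+
+/* Lifecycle note (descriptive, mirrors the code above): nbl_disp_init()
+ * allocates disp_mgt and the exported ops table, wires in the resource and
+ * channel ops tables, registers the mailbox responders in
+ * nbl_disp_setup_msg(), and finally raises the ctrl levels this function
+ * owns; each nbl_disp_setup_ctrl_lvl() call re-resolves the whole ops table.
+ * Failures unwind in reverse order through the setup_*_fail labels.
+ */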
+void nbl_disp_remove(void *p)
+{
+	struct nbl_adapter *adapter = (struct nbl_adapter *)p;
+	struct device *dev;
+	struct nbl_common_info *common;
+	struct nbl_dispatch_mgt **disp_mgt;
+	struct nbl_dispatch_ops_tbl **disp_ops_tbl;
+
+	if (!adapter)
+		return;
+
+	dev = NBL_ADAPTER_TO_DEV(adapter);
+	common = NBL_ADAPTER_TO_COMMON(adapter);
+	disp_mgt = (struct nbl_dispatch_mgt **)&NBL_ADAPTER_TO_DISP_MGT(adapter);
+	disp_ops_tbl = &NBL_ADAPTER_TO_DISP_OPS_TBL(adapter);
+
+	nbl_disp_remove_ops(dev, disp_ops_tbl);
+
+	nbl_disp_remove_disp_mgt(common, disp_mgt);
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..70d1d30c35fd0b1c4de7cb62a7ce33b1c0876f2b
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_dispatch.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_DISPATCH_H_
+#define _NBL_DISPATCH_H_
+
+#include "nbl_core.h"
+
+#define NBL_DISP_MGT_TO_COMMON(disp_mgt) ((disp_mgt)->common)
+#define NBL_DISP_MGT_TO_DEV(disp_mgt) NBL_COMMON_TO_DEV(NBL_DISP_MGT_TO_COMMON(disp_mgt))
+
+#define NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt) ((disp_mgt)->res_ops_tbl)
+#define NBL_DISP_MGT_TO_RES_OPS(disp_mgt) (NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt)->ops)
+#define NBL_DISP_MGT_TO_RES_PRIV(disp_mgt) (NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt)->priv)
+#define NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt) ((disp_mgt)->chan_ops_tbl)
+#define NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt) (NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt)->ops)
+#define NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt) (NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt)->priv)
+#define NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt) ((disp_mgt)->disp_ops_tbl)
+#define NBL_DISP_MGT_TO_DISP_OPS(disp_mgt) (NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt)->ops)
+#define NBL_DISP_MGT_TO_DISP_PRIV(disp_mgt) (NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt)->priv)
+
+#define NBL_OPS_CALL_LOCK(disp_mgt, func, ...) \
+({ \
+	typeof(disp_mgt) _disp_mgt = (disp_mgt); \
+	typeof(func) _func = (func); \
+	u64 ret = 0; \
+	\
+	if (_disp_mgt->ops_lock_required) \
+		mutex_lock(&_disp_mgt->ops_muxtex_lock); \
+	__builtin_choose_expr( \
+		/* Check if the func has void return value */ \
+		__builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \
+		(!_func) ? 0 : _func(__VA_ARGS__), \
+		ret = __builtin_choose_expr( \
+			__builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \
+			0, \
+			(!_func) ? 0 : _func(__VA_ARGS__) \
+		) \
+	); \
+	\
+	if (_disp_mgt->ops_lock_required) \
+		mutex_unlock(&_disp_mgt->ops_muxtex_lock); \
+	\
+	(typeof(_func(__VA_ARGS__))) ret; \
+})
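+
+/* Usage sketch for the helper above (illustrative only; some_op and arg are
+ * placeholders, not names from this driver):
+ *
+ *	ret = NBL_OPS_CALL_LOCK(disp_mgt, res_ops->some_op,
+ *				NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), arg);
+ *
+ * This takes ops_muxtex_lock only when ops_lock_required is set, yields 0
+ * when the op pointer is NULL, and casts the u64 accumulator back to the
+ * op's own return type. NBL_OPS_CALL_SPIN_LOCK below is the same pattern
+ * under the spinlock, for callers that cannot sleep.
+ */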
+#define NBL_OPS_CALL_SPIN_LOCK(disp_mgt, func, ...) \
+({ \
+	typeof(disp_mgt) _disp_mgt = (disp_mgt); \
+	typeof(func) _func = (func); \
+	u64 ret = 0; \
+	\
+	if (_disp_mgt->ops_lock_required) \
+		spin_lock(&_disp_mgt->ops_spin_lock); \
+	\
+	__builtin_choose_expr( \
+		/* Check if the func has void return value */ \
+		__builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \
+		(!_func) ? 0 : _func(__VA_ARGS__), \
+		ret = __builtin_choose_expr( \
+			__builtin_types_compatible_p(typeof(_func(__VA_ARGS__)), void), \
+			0, \
+			(!_func) ? 0 : _func(__VA_ARGS__) \
+		) \
+	); \
+	\
+	if (_disp_mgt->ops_lock_required) \
+		spin_unlock(&_disp_mgt->ops_spin_lock); \
+	\
+	(typeof(_func(__VA_ARGS__))) ret; \
+})
+
+struct nbl_dispatch_mgt {
+	struct nbl_common_info *common;
+	struct nbl_resource_ops_tbl *res_ops_tbl;
+	struct nbl_channel_ops_tbl *chan_ops_tbl;
+	struct nbl_dispatch_ops_tbl *disp_ops_tbl;
+	DECLARE_BITMAP(ctrl_lvl, NBL_DISP_CTRL_LVL_MAX);
+	/* used when the caller is not in interrupt context */
+	struct mutex ops_muxtex_lock;
+	/* used when the caller is in interrupt context or otherwise cannot sleep */
+	spinlock_t ops_spin_lock;
+	bool ops_lock_required;
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c
new file mode 100644
index 0000000000000000000000000000000000000000..4eb2e539aa7cc36c9543f82064ec38f59efa918e
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.c
@@ -0,0 +1,3127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#include "nbl_ethtool.h"
+
+enum NBL_STATS_TYPE {
+	NBL_NETDEV_STATS,
+	NBL_ETH_STATS,
+	NBL_STATS,
+	NBL_PRIV_STATS,
+	NBL_STATS_TYPE_MAX
+};
+
+struct nbl_ethtool_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+static const char nbl_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test (offline)",
+	"EEPROM test (offline)",
+	"Interrupt test (offline)",
+	"Loopback test (offline)",
+	"Link test (on/offline)",
+};
+
+enum nbl_ethtool_test_id {
+	NBL_ETH_TEST_REG = 0,
+	NBL_ETH_TEST_EEPROM,
+	NBL_ETH_TEST_INTR,
+	NBL_ETH_TEST_LOOP,
+	NBL_ETH_TEST_LINK,
+	NBL_ETH_TEST_MAX
+};
+
+#define NBL_TEST_LEN (sizeof(nbl_gstrings_test) / ETH_GSTRING_LEN)
+
+#define NBL_NETDEV_STAT(_name, stat_m, stat_n) { \
+	.stat_string = _name, \
+	.type = NBL_NETDEV_STATS, \
+	.sizeof_stat = sizeof_field(struct rtnl_link_stats64, stat_m), \
+	.stat_offset = offsetof(struct rtnl_link_stats64, stat_n) \
+}
+
+#define NBL_STAT(_name, stat_m, stat_n) { \
+	.stat_string = _name, \
+	.type = NBL_STATS, \
+	.sizeof_stat = sizeof_field(struct nbl_stats, stat_m), \
+	.stat_offset = offsetof(struct nbl_stats, stat_n) \
+}
+
+#define NBL_PRIV_STAT(_name, stat_m, stat_n) { \
+	.stat_string = _name, \
+	.type = NBL_PRIV_STATS, \
+	.sizeof_stat = sizeof_field(struct nbl_priv_stats, stat_m), \
+	.stat_offset = offsetof(struct nbl_priv_stats, stat_n) \
+}
+
+static const struct nbl_ethtool_stats nbl_gstrings_stats[] = {
+	NBL_NETDEV_STAT("rx_packets", rx_packets, rx_packets),
+	NBL_NETDEV_STAT("tx_packets", tx_packets, tx_packets),
+	NBL_NETDEV_STAT("rx_bytes", rx_bytes, rx_bytes),
+	NBL_NETDEV_STAT("tx_bytes", tx_bytes, tx_bytes),
+	NBL_STAT("tx_multicast", tx_multicast_packets, tx_multicast_packets),
+	NBL_STAT("tx_unicast", tx_unicast_packets, tx_unicast_packets),
+	NBL_STAT("rx_multicast", rx_multicast_packets, rx_multicast_packets),
+	NBL_STAT("rx_unicast", rx_unicast_packets, rx_unicast_packets),
+	NBL_NETDEV_STAT("rx_errors", rx_errors, rx_errors),
+	NBL_NETDEV_STAT("tx_errors", tx_errors, tx_errors),
+	NBL_NETDEV_STAT("rx_dropped", rx_dropped, rx_dropped),
+	NBL_NETDEV_STAT("tx_dropped", tx_dropped, tx_dropped),
+	NBL_NETDEV_STAT("eth_multicast", multicast, multicast),
+	NBL_NETDEV_STAT("collisions", collisions, collisions),
+	NBL_NETDEV_STAT("rx_over_errors", rx_over_errors, rx_over_errors),
+	NBL_NETDEV_STAT("rx_crc_errors", rx_crc_errors, rx_crc_errors),
+
NBL_NETDEV_STAT("rx_frame_errors", rx_frame_errors, rx_frame_errors), + NBL_NETDEV_STAT("rx_fifo_errors", rx_fifo_errors, rx_fifo_errors), + NBL_NETDEV_STAT("rx_missed_errors", rx_missed_errors, rx_missed_errors), + NBL_NETDEV_STAT("tx_aborted_errors", tx_aborted_errors, tx_aborted_errors), + NBL_NETDEV_STAT("tx_carrier_errors", tx_carrier_errors, tx_carrier_errors), + NBL_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors, tx_fifo_errors), + NBL_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors, tx_heartbeat_errors), + + NBL_STAT("tso_packets", tso_packets, tso_packets), + NBL_STAT("tso_bytes", tso_bytes, tso_bytes), + NBL_STAT("tx_csum_packets", tx_csum_packets, tx_csum_packets), + NBL_STAT("rx_csum_packets", rx_csum_packets, rx_csum_packets), + NBL_STAT("rx_csum_errors", rx_csum_errors, rx_csum_errors), + NBL_STAT("tx_busy", tx_busy, tx_busy), + NBL_STAT("tx_dma_busy", tx_dma_busy, tx_dma_busy), + NBL_STAT("tx_skb_free", tx_skb_free, tx_skb_free), + NBL_STAT("tx_desc_addr_err_cnt", tx_desc_addr_err_cnt, tx_desc_addr_err_cnt), + NBL_STAT("tx_desc_len_err_cnt", tx_desc_len_err_cnt, tx_desc_len_err_cnt), + NBL_STAT("rx_desc_addr_err_cnt", rx_desc_addr_err_cnt, rx_desc_addr_err_cnt), + NBL_STAT("rx_alloc_buf_err_cnt", rx_alloc_buf_err_cnt, rx_alloc_buf_err_cnt), + NBL_STAT("rx_cache_reuse", rx_cache_reuse, rx_cache_reuse), + NBL_STAT("rx_cache_full", rx_cache_full, rx_cache_full), + NBL_STAT("rx_cache_empty", rx_cache_empty, rx_cache_empty), + NBL_STAT("rx_cache_busy", rx_cache_busy, rx_cache_busy), + NBL_STAT("rx_cache_waive", rx_cache_waive, rx_cache_waive), +#ifdef CONFIG_TLS_DEVICE + NBL_STAT("tls_encrypted_packets", tls_encrypted_packets, tls_encrypted_packets), + NBL_STAT("tls_encrypted_bytes", tls_encrypted_bytes, tls_encrypted_bytes), + NBL_STAT("tls_ooo_packets", tls_ooo_packets, tls_ooo_packets), + NBL_STAT("tls_decrypted_packets", tls_decrypted_packets, tls_decrypted_packets), + NBL_STAT("tls_resync_req_num", tls_resync_req_num, tls_resync_req_num), +#endif +}; + +#define NBL_GLOBAL_STATS_LEN ARRAY_SIZE(nbl_gstrings_stats) + +struct nbl_priv_flags_info { + u8 supported_by_capability; + u8 supported_modify; + enum nbl_fix_cap_type capability_type; + char flag_name[ETH_GSTRING_LEN]; +}; + +static const struct nbl_priv_flags_info nbl_gstrings_priv_flags[NBL_ADAPTER_FLAGS_MAX] = { + {1, 0, NBL_P4_CAP, "P4-default"}, + {0, 1, 0, "link-down-on-close"}, + {1, 1, NBL_ETH_SUPPORT_NRZ_RS_FEC_544, "nrz-rs-fec-544"}, +}; + +#define NBL_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(nbl_gstrings_priv_flags) + +static void nbl_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_netdev_priv *priv; + struct nbl_driver_info driver_info; + char firmware_version[ETHTOOL_FWVERS_LEN] = {' '}; + + memset(&driver_info, 0, sizeof(driver_info)); + + priv = netdev_priv(netdev); + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + disp_ops->get_firmware_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + firmware_version, ETHTOOL_FWVERS_LEN); + if (disp_ops->get_driver_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &driver_info)) + strscpy(drvinfo->version, driver_info.driver_version, sizeof(drvinfo->version)); + else + strscpy(drvinfo->version, NBL_DRIVER_VERSION, sizeof(drvinfo->version)); + strscpy(drvinfo->fw_version, firmware_version, sizeof(drvinfo->fw_version)); + if (!priv->rep) { + 
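+		/* PF and VF netdevs fill in the PCI device as bus_info; a
+		 * representor netdev (priv->rep) only reports
+		 * NBL_REP_DRIVER_NAME, in the else branch below.
+		 */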
strscpy(drvinfo->driver, NBL_DRIVER_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + } else { + strscpy(drvinfo->driver, NBL_REP_DRIVER_NAME, sizeof(drvinfo->driver)); + } + + drvinfo->regdump_len = 0; +} + +static void nbl_stats_fill_strings(struct net_device *netdev, u8 *data) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; + char *p = (char *)data; + unsigned int i; + u16 xdp_ring_num = 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; + + for (i = 0; i < NBL_GLOBAL_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_gstrings_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < vsi_info->active_ring_num; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_descs", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_tx_timeout_cnt", i); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < vsi_info->active_ring_num; i++) { + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_descs", i); + p += ETH_GSTRING_LEN; + } + + if (xdp_vsi_info) + xdp_ring_num = xdp_vsi_info->ring_num; + + for (i = 0; i < xdp_ring_num; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_xdp_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_xdp_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_xdp_descs", i); + p += ETH_GSTRING_LEN; + } + + if (!common->is_vf) + disp_ops->fill_private_stat_strings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), p); +} + +static void nbl_priv_flags_fill_strings(struct net_device *netdev, u8 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_gstrings_priv_flags[i].flag_name); + p += ETH_GSTRING_LEN; + } +} + +static void nbl_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, nbl_gstrings_test, NBL_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + nbl_stats_fill_strings(netdev, data); + break; + case ETH_SS_PRIV_FLAGS: + nbl_priv_flags_fill_strings(netdev, data); + break; + default: + break; + } +} + +static int nbl_sset_fill_count(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = 
NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; + u32 total_queues = 0, private_len = 0, extra_per_queue_entry = 0; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; + + total_queues = vsi_info->active_ring_num * 2; + if (!common->is_vf) + disp_ops->get_private_stat_len(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &private_len); + + /* For tx_timeout */ + extra_per_queue_entry = vsi_info->active_ring_num; + + /* xdp queue stat */ + if (xdp_vsi_info) + total_queues += xdp_vsi_info->ring_num; + + return NBL_GLOBAL_STATS_LEN + total_queues * + (sizeof(struct nbl_queue_stats) / sizeof(u64)) + + extra_per_queue_entry + private_len; +} + +static int nbl_sset_fill_priv_flags_count(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + unsigned int i; + int count = 0; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + count++; + } + + return count; +} + +static int nbl_get_sset_count(struct net_device *netdev, int sset) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + switch (sset) { + case ETH_SS_TEST: + if (NBL_COMMON_TO_VF_CAP(common)) + return -EOPNOTSUPP; + else + return NBL_TEST_LEN; + case ETH_SS_STATS: + return nbl_sset_fill_count(netdev); + case ETH_SS_PRIV_FLAGS: + if (NBL_COMMON_TO_VF_CAP(common)) + return -EOPNOTSUPP; + else + return nbl_sset_fill_priv_flags_count(netdev); + default: + return -EOPNOTSUPP; + } +} + +static void nbl_serv_adjust_interrpt_param(struct nbl_service_mgt *serv_mgt, bool ethtool) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + struct net_device *netdev; + struct nbl_netdev_priv *net_priv; + struct nbl_serv_ring_vsi_info *vsi_info; + u64 last_tx_packets; + u64 last_rx_packets; + u64 last_get_stats_jiffies, time_diff; + u64 tx_packets, rx_packets; + u64 tx_rates, rx_rates, pkt_rates; + u16 local_vector_id, vector_num; + u16 intr_suppress_level; + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + netdev = net_resource_mgt->netdev; + net_priv = netdev_priv(netdev); + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + last_tx_packets = net_resource_mgt->stats.tx_packets; + last_rx_packets = net_resource_mgt->stats.rx_packets; + last_get_stats_jiffies = net_resource_mgt->get_stats_jiffies; + time_diff = jiffies - last_get_stats_jiffies; + disp_ops->get_net_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &net_resource_mgt->stats); + /* ethtool -S don't adaptive interrupt suppression param */ + if (!vsi_info->itr_dynamic || ethtool || !time_diff) + return; + + tx_packets = net_resource_mgt->stats.tx_packets; + rx_packets = net_resource_mgt->stats.rx_packets; + + net_resource_mgt->get_stats_jiffies = jiffies; + tx_rates = 
(tx_packets - last_tx_packets) / time_diff * HZ; + rx_rates = (rx_packets - last_rx_packets) / time_diff * HZ; + pkt_rates = max_t(u64, tx_rates, rx_rates); + + intr_suppress_level = + disp_ops->get_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), pkt_rates, + ring_mgt->vectors->intr_suppress_level); + if (intr_suppress_level != ring_mgt->vectors->intr_suppress_level) { + local_vector_id = ring_mgt->vectors[vsi_info->ring_offset].local_vector_id; + vector_num = vsi_info->ring_num; + disp_ops->set_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_vector_id, vector_num, + intr_suppress_level); + ring_mgt->vectors->intr_suppress_level = intr_suppress_level; + } +} + +void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct net_device *netdev; + struct nbl_netdev_priv *net_priv; + struct nbl_adapter *adapter; + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + netdev = net_resource_mgt->netdev; + net_priv = netdev_priv(netdev); + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + + if (!test_bit(NBL_RUNNING, adapter->state) || + test_bit(NBL_RESETTING, adapter->state)) + return; + + nbl_serv_adjust_interrpt_param(serv_mgt, ethtool); + netdev->stats.tx_packets = net_resource_mgt->stats.tx_packets; + netdev->stats.tx_bytes = net_resource_mgt->stats.tx_bytes; + + netdev->stats.rx_packets = net_resource_mgt->stats.rx_packets; + netdev->stats.rx_bytes = net_resource_mgt->stats.rx_bytes; + + /* net_device_stats */ + netdev->stats.rx_errors = 0; + netdev->stats.tx_errors = 0; + netdev->stats.rx_dropped = 0; + netdev->stats.tx_dropped = 0; + netdev->stats.multicast = 0; + netdev->stats.rx_length_errors = 0; +} + +static void +nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct rtnl_link_stats64 temp_stats; + struct rtnl_link_stats64 *net_stats; + struct nbl_stats *nbl_stats; + struct nbl_queue_stats queue_stats = { 0 }; + struct nbl_queue_err_stats queue_err_stats = { 0 }; + struct nbl_serv_ring_vsi_info *vsi_info, *xdp_vsi_info; + u32 private_len = 0; + char *p = NULL; + int i, j, k; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + xdp_vsi_info = &ring_mgt->vsi_info[NBL_VSI_XDP]; + + nbl_serv_update_stats(serv_mgt, true); + net_stats = dev_get_stats(netdev, &temp_stats); + nbl_stats = (struct nbl_stats *)((char *)net_resource_mgt + + offsetof(struct nbl_serv_net_resource_mgt, stats)); + + i = NBL_GLOBAL_STATS_LEN; + for (j = 0; j < vsi_info->active_ring_num; j++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, &queue_stats, true); + disp_ops->get_queue_err_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, &queue_err_stats, true); + data[i] = queue_stats.packets; + data[i + 1] = queue_stats.bytes; + data[i + 2] = queue_stats.descs; + data[i + 3] = ring_mgt->tx_rings[vsi_info->ring_offset + j].tx_timeout_count; + i += 4; + } + + for (j = 0; j < vsi_info->active_ring_num; j++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, &queue_stats, false); + disp_ops->get_queue_err_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + j, 
&queue_err_stats, false); + data[i] = queue_stats.packets; + data[i + 1] = queue_stats.bytes; + data[i + 2] = queue_stats.descs; + i += 3; + } + + for (j = 0; j < xdp_vsi_info->ring_num; j++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + ring_mgt->xdp_ring_offset + j, &queue_stats, true); + data[i] = queue_stats.packets; + data[i + 1] = queue_stats.bytes; + data[i + 2] = queue_stats.descs; + i += 3; + } + + for (k = 0; k < NBL_GLOBAL_STATS_LEN; k++) { + switch (nbl_gstrings_stats[k].type) { + case NBL_NETDEV_STATS: + p = (char *)net_stats + nbl_gstrings_stats[k].stat_offset; + break; + case NBL_STATS: + p = (char *)nbl_stats + nbl_gstrings_stats[k].stat_offset; + break; + default: + data[k] = 0; + continue; + } + data[k] = (nbl_gstrings_stats[k].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + if (!common->is_vf) { + disp_ops->get_private_stat_len(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &private_len); + disp_ops->get_private_stat_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, &data[i], + private_len * sizeof(u64)); + } +} + +static int nbl_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int err; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + err = disp_ops->get_module_eeprom(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), eeprom, data); + + return err; +} + +static int nbl_get_module_info(struct net_device *netdev, struct ethtool_modinfo *info) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int err; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + err = disp_ops->get_module_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), info); + + if (err) + err = -EIO; + + return err; +} + +static int nbl_get_eeprom_length(struct net_device *netdev) +{ + return NBL_EEPROM_LENGTH; +} + +static int nbl_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EINVAL; +} + +static void nbl_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + channels->max_combined = vsi_info->ring_num; + channels->combined_count = vsi_info->active_ring_num; + channels->max_rx = 0; + channels->max_tx = 0; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->max_other = 0; +} + +static int nbl_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(netdev); + struct nbl_serv_ring_vsi_info *vsi_info; + u16 queue_pairs = channels->combined_count; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (tc_mgt->num_tc) { + netdev_info(netdev, "Cannot set channels since mqprio is enabled.\n"); + return -EINVAL; + } + + /* We 
don't support separate rx/tx channels.
+	 * We don't allow setting 'other' channels.
+	 */
+	if (channels->rx_count || channels->tx_count || channels->other_count)
+		return -EINVAL;
+
+	if (queue_pairs > vsi_info->ring_num || queue_pairs == 0)
+		return -EINVAL;
+
+	vsi_info->active_ring_num = queue_pairs;
+
+	nbl_serv_cpu_affinity_init(serv_mgt, queue_pairs);
+	netif_set_real_num_tx_queues(netdev, queue_pairs);
+	netif_set_real_num_rx_queues(netdev, queue_pairs);
+
+	disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+			    NBL_COMMON_TO_VSI_ID(common), queue_pairs);
+
+	return 0;
+}
+
+static u32 nbl_get_link(struct net_device *netdev)
+{
+	return netif_carrier_ok(netdev) ? 1 : 0;
+}
+
+static void nbl_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_map)
+{
+	if (modes & BIT(NBL_PORT_CAP_AUTONEG))
+		__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, ethtool_modes_map);
+
+	if (modes & BIT(NBL_PORT_CAP_FEC_NONE))
+		__set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_FEC_RS))
+		__set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_FEC_BASER))
+		__set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, ethtool_modes_map);
+
+	if ((modes & BIT(NBL_PORT_CAP_RX_PAUSE)) && (modes & BIT(NBL_PORT_CAP_TX_PAUSE))) {
+		__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, ethtool_modes_map);
+	} else if ((modes & BIT(NBL_PORT_CAP_RX_PAUSE)) && !(modes & BIT(NBL_PORT_CAP_TX_PAUSE))) {
+		__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, ethtool_modes_map);
+		__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, ethtool_modes_map);
+	} else if (!(modes & BIT(NBL_PORT_CAP_RX_PAUSE)) && (modes & BIT(NBL_PORT_CAP_TX_PAUSE))) {
+		__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, ethtool_modes_map);
+	}
+
+	if (modes & BIT(NBL_PORT_CAP_1000BASE_T)) {
+		__set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ethtool_modes_map);
+		__set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ethtool_modes_map);
+	}
+	if (modes & BIT(NBL_PORT_CAP_1000BASE_X))
+		__set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_10GBASE_T))
+		__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_10GBASE_KR))
+		__set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_10GBASE_SR))
+		__set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_25GBASE_KR))
+		__set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_25GBASE_SR))
+		__set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_25GBASE_CR))
+		__set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_KR2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_SR2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_CR2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50G_AUI2))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_KR_PAM4))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50GBASE_SR_PAM4))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, ethtool_modes_map);
+	if (modes & BIT(NBL_PORT_CAP_50G_AUI_PAM4))
+		__set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_50GBASE_CR_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_KR4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_SR4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_CR4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100G_AUI4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100G_CAUI4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, ethtool_modes_map); + if (modes & BIT(NBL_PORT_CAP_100G_AUI2_PAM4)) + __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, ethtool_modes_map); +} + +static int nbl_serv_get_port_state(struct nbl_service_mgt *serv_mgt, + struct nbl_port_state *port_state) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + int ret; + + ret = disp_ops->get_port_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_ETH_ID(serv_mgt->common), port_state); + + if (port_state->module_repluged) + net_resource_mgt->configured_fec = 0; + + return ret; +} + +static int nbl_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_port_state port_state = {0}; + u32 advertising_speed = 0; + int ret = 0; + + if (test_bit(NBL_FATAL_ERR, adapter->state)) + return -EIO; + + ret = nbl_serv_get_port_state(serv_mgt, &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + } else { + cmd->base.autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (port_state.link_state) { + cmd->base.speed = port_state.link_speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + advertising_speed = net_resource_mgt->configured_speed ? 
+				    net_resource_mgt->configured_speed : cmd->base.speed;
+
+		switch (port_state.port_type) {
+		case NBL_PORT_TYPE_UNKNOWN:
+			cmd->base.port = PORT_OTHER;
+			break;
+		case NBL_PORT_TYPE_FIBRE:
+			__set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, cmd->link_modes.advertising);
+			cmd->base.port = PORT_FIBRE;
+			break;
+		case NBL_PORT_TYPE_COPPER:
+			__set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, cmd->link_modes.advertising);
+			cmd->base.port = PORT_DA;
+			break;
+		default:
+			cmd->base.port = PORT_OTHER;
+		}
+	}
+
+	if (!cmd->base.autoneg) {
+		port_state.port_advertising &= ~NBL_PORT_CAP_SPEED_MASK;
+		switch (advertising_speed) {
+		case SPEED_1000:
+			port_state.port_advertising |= NBL_PORT_CAP_SPEED_1G_MASK;
+			break;
+		case SPEED_10000:
+			port_state.port_advertising |= NBL_PORT_CAP_SPEED_10G_MASK;
+			break;
+		case SPEED_25000:
+			port_state.port_advertising |= NBL_PORT_CAP_SPEED_25G_MASK;
+			break;
+		case SPEED_50000:
+			port_state.port_advertising |= NBL_PORT_CAP_SPEED_50G_MASK;
+			break;
+		case SPEED_100000:
+			port_state.port_advertising |= NBL_PORT_CAP_SPEED_100G_MASK;
+			break;
+		default:
+			break;
+		}
+	}
+
+	nbl_link_modes_to_ethtool(port_state.port_caps, cmd->link_modes.supported);
+	nbl_link_modes_to_ethtool(port_state.port_advertising, cmd->link_modes.advertising);
+	nbl_link_modes_to_ethtool(port_state.port_lp_advertising, cmd->link_modes.lp_advertising);
+
+	__set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, cmd->link_modes.supported);
+	__set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, cmd->link_modes.supported);
+	return 0;
+}
+
+static u32 nbl_conver_portrate_to_speed(u8 port_rate)
+{
+	switch (port_rate) {
+	case NBL_PORT_MAX_RATE_1G:
+		return SPEED_1000;
+	case NBL_PORT_MAX_RATE_10G:
+		return SPEED_10000;
+	case NBL_PORT_MAX_RATE_25G:
+		return SPEED_25000;
+	case NBL_PORT_MAX_RATE_100G:
+	case NBL_PORT_MAX_RATE_100G_PAM4:
+		return SPEED_100000;
+	default:
+		/* default to 25G */
+		return SPEED_25000;
+	}
+}
+
+static u32 nbl_conver_fw_rate_to_speed(u8 fw_port_max_speed)
+{
+	switch (fw_port_max_speed) {
+	case NBL_FW_PORT_SPEED_10G:
+		return SPEED_10000;
+	case NBL_FW_PORT_SPEED_25G:
+		return SPEED_25000;
+	case NBL_FW_PORT_SPEED_50G:
+		return SPEED_50000;
+	case NBL_FW_PORT_SPEED_100G:
+		return SPEED_100000;
+	default:
+		/* default to 25G */
+		return SPEED_25000;
+	}
+}
+
+static int nbl_set_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	struct nbl_phy_caps *phy_caps;
+	struct nbl_port_state port_state = {0};
+	struct nbl_port_advertising port_advertising = {0};
+	u32 autoneg = 0;
+	u32 speed, fw_speed, module_speed, max_speed;
+	u64 speed_advert = 0;
+	u8 active_fec = 0;
+	int ret = 0;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	phy_caps = &net_resource_mgt->phy_caps;
+
+	ret = nbl_serv_get_port_state(serv_mgt, &port_state);
+	if (ret) {
+		netdev_err(netdev, "Get port_state failed %d\n", ret);
+		return -EIO;
+	}
+
+	if (!port_state.module_inplace) {
+		netdev_err(netdev, "Optical module is not in place\n");
+		return -EINVAL;
+	}
+
+	if (cmd->base.autoneg) {
+		if (!(port_state.port_caps & BIT(NBL_PORT_CAP_AUTONEG))) {
+			netdev_err(netdev, "autoneg is not supported\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	if (cmd->base.duplex == DUPLEX_HALF) {
+		netdev_err(netdev, "half duplex is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
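+	/* Summary of what follows (descriptive only): a request matching the
+	 * live link state returns early; the requested speed is clamped to
+	 * the lower of the firmware and module limits; FEC then comes from
+	 * the user-configured value on autoneg/FEC-autoneg capable ports, or
+	 * from per-speed defaults; the result is applied through
+	 * disp_ops->set_port_advertising().
+	 */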
+	autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ?
+			AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	if (cmd->base.autoneg == autoneg && cmd->base.speed == port_state.link_speed &&
+	    port_state.link_state) {
+		netdev_info(netdev, "ethernet configuration is unchanged\n");
+		return 0;
+	}
+
+	if (autoneg == AUTONEG_ENABLE && cmd->base.autoneg == autoneg) {
+		netdev_err(netdev, "changing the configuration is not supported while autoneg is enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	speed = cmd->base.speed;
+	fw_speed = nbl_conver_fw_rate_to_speed(port_state.fw_port_max_speed);
+	module_speed = nbl_conver_portrate_to_speed(port_state.port_max_rate);
+	max_speed = fw_speed > module_speed ? module_speed : fw_speed;
+	if (speed == SPEED_UNKNOWN || cmd->base.autoneg)
+		speed = max_speed;
+
+	if (speed > max_speed) {
+		netdev_err(netdev, "speed %d is not supported\n", cmd->base.speed);
+		return -EINVAL;
+	}
+
+	speed_advert = nbl_speed_to_link_mode(speed, cmd->base.autoneg);
+	speed_advert &= port_state.port_caps;
+	if (!speed_advert) {
+		netdev_err(netdev, "speed %d is not supported\n", cmd->base.speed);
+		return -EINVAL;
+	}
+
+	if (cmd->base.autoneg || port_state.port_caps & BIT(NBL_PORT_CAP_FEC_AUTONEG)) {
+		switch (net_resource_mgt->configured_fec) {
+		case ETHTOOL_FEC_OFF:
+			active_fec = NBL_PORT_FEC_OFF;
+			break;
+		case ETHTOOL_FEC_BASER:
+			active_fec = NBL_PORT_FEC_BASER;
+			break;
+		case ETHTOOL_FEC_RS:
+			active_fec = NBL_PORT_FEC_RS;
+			break;
+		default:
+			active_fec = NBL_PORT_FEC_AUTO;
+		}
+	} else {
+		/* when changing speed, pick an appropriate default fec mode */
+		switch (speed) {
+		case SPEED_1000:
+			active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE;
+			net_resource_mgt->configured_fec = ETHTOOL_FEC_OFF;
+			break;
+		case SPEED_10000:
+			active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE;
+			net_resource_mgt->configured_fec = ETHTOOL_FEC_OFF;
+			break;
+		case SPEED_25000:
+			active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE;
+			net_resource_mgt->configured_fec = ETHTOOL_FEC_RS;
+			break;
+		case SPEED_50000:
+		case SPEED_100000:
+			active_fec = NBL_ETH_100G_DEFAULT_FEC_MODE;
+			net_resource_mgt->configured_fec = ETHTOOL_FEC_RS;
+			break;
+		default:
+			active_fec = NBL_PORT_FEC_RS;
+			net_resource_mgt->configured_fec = ETHTOOL_FEC_RS;
+		}
+	}
+
+	port_advertising.eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common);
+	port_advertising.speed_advert = speed_advert;
+	port_advertising.autoneg = cmd->base.autoneg;
+	port_advertising.active_fec = active_fec;
+
+	/* update speed */
+	ret = disp_ops->set_port_advertising(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					     &port_advertising);
+	if (ret) {
+		netdev_err(netdev, "set autoneg %d speed %d failed %d\n",
+			   cmd->base.autoneg, cmd->base.speed, ret);
+		return -EIO;
+	}
+
+	net_resource_mgt->configured_speed = speed;
+
+	return 0;
+}
+
+static void nbl_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam,
+			      struct kernel_ethtool_ringparam *k_ringparam,
+			      struct netlink_ext_ack *extack)
+{
+	struct nbl_netdev_priv *priv = netdev_priv(netdev);
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_dispatch_mgt *disp_mgt = NBL_ADAPTER_TO_DISP_MGT(adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops;
+	u16 max_desc_num;
+
+	if (!priv->rep) {
+		max_desc_num = disp_ops->get_max_desc_num(disp_mgt);
+		ringparam->tx_max_pending = max_desc_num;
+		ringparam->rx_max_pending = max_desc_num;
+		ringparam->tx_pending = disp_ops->get_tx_desc_num(disp_mgt, 0);
+		ringparam->rx_pending = disp_ops->get_rx_desc_num(disp_mgt, 0);
+	}
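+	/* For a representor netdev the ring sizes are the fixed
+	 * NBL_REP_QUEUE_MGT_DESC_MAX/NBL_REP_QUEUE_MGT_DESC_NUM values in the
+	 * else branch below, rather than values queried from disp_ops.
+	 */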
else { + ringparam->tx_max_pending = NBL_REP_QUEUE_MGT_DESC_MAX; + ringparam->rx_max_pending = NBL_REP_QUEUE_MGT_DESC_MAX; + ringparam->tx_pending = NBL_REP_QUEUE_MGT_DESC_NUM; + ringparam->rx_pending = NBL_REP_QUEUE_MGT_DESC_NUM; + } +} + +static int nbl_check_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + u16 max_desc_num, u16 min_desc_num) +{ + /* check if tx_pending is out of range or power of 2 */ + if (ringparam->tx_pending > max_desc_num || + ringparam->tx_pending < min_desc_num) { + netdev_err(netdev, "Tx descriptors requested: %d, out of range[%d-%d]\n", + ringparam->tx_pending, min_desc_num, max_desc_num); + return -EINVAL; + } + if (ringparam->tx_pending & (ringparam->tx_pending - 1)) { + netdev_err(netdev, "Tx descriptors requested: %d is not power of 2\n", + ringparam->tx_pending); + return -EINVAL; + } + + /* check if rx_pending is out of range or power of 2 */ + if (ringparam->rx_pending > max_desc_num || + ringparam->rx_pending < min_desc_num) { + netdev_err(netdev, "Rx descriptors requested: %d, out of range[%d-%d]\n", + ringparam->rx_pending, min_desc_num, max_desc_num); + return -EINVAL; + } + if (ringparam->rx_pending & (ringparam->rx_pending - 1)) { + netdev_err(netdev, "Rx descriptors requested: %d is not power of 2\n", + ringparam->rx_pending); + return -EINVAL; + } + + if (ringparam->rx_jumbo_pending || ringparam->rx_mini_pending) { + netdev_err(netdev, "rx_jumbo_pending or rx_mini_pending is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int nbl_pre_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dispatch_mgt *disp_mgt = NBL_ADAPTER_TO_DISP_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + int timeout = 50; + + if (ringparam->rx_pending == disp_ops->get_rx_desc_num(disp_mgt, 0) && + ringparam->tx_pending == disp_ops->get_tx_desc_num(disp_mgt, 0)) { + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); + return 0; + } + + while (test_and_set_bit(NBL_RESETTING, adapter->state)) { + timeout--; + if (!timeout) { + netdev_err(netdev, "Timeout while resetting in set ringparam\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* configure params later */ + return 1; +} + +static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_dispatch_mgt *disp_mgt = NBL_ADAPTER_TO_DISP_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + u16 max_desc_num, min_desc_num; + u16 new_tx_count, new_rx_count; + u16 old_tx_count, old_rx_count; + int was_running; + int i; + int err; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + max_desc_num = disp_ops->get_max_desc_num(disp_mgt); + min_desc_num = disp_ops->get_min_desc_num(disp_mgt); + err = nbl_check_set_ringparam(netdev, ringparam, max_desc_num, min_desc_num); + if (err < 0) + return err; + + err = nbl_pre_set_ringparam(netdev, ringparam); + /* if either error occur or nothing to change, return */ + if (err <= 0) + return err; + + old_tx_count = 
ring_mgt->tx_desc_num; + old_rx_count = ring_mgt->rx_desc_num; + new_tx_count = ringparam->tx_pending; + new_rx_count = ringparam->rx_pending; + + netdev_info(netdev, "set tx_desc_num:%d, rx_desc_num:%d\n", new_tx_count, new_rx_count); + + was_running = netif_running(netdev); + + if (was_running) { + err = nbl_serv_netdev_stop(netdev); + if (err && err != -EBUSY) { + netdev_err(netdev, "Netdev stop failed while setting ringparam\n"); + clear_bit(NBL_RESETTING, adapter->state); + return err; + } + } + + ring_mgt->tx_desc_num = new_tx_count; + ring_mgt->rx_desc_num = new_rx_count; + + for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) + disp_ops->set_tx_desc_num(disp_mgt, i, new_tx_count); + + for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) + disp_ops->set_rx_desc_num(disp_mgt, i, new_rx_count); + + if (was_running) { + err = nbl_serv_netdev_open(netdev); + if (err) { + netdev_err(netdev, "Netdev open failed after setting ringparam\n"); + clear_bit(NBL_RESETTING, adapter->state); + ring_mgt->tx_desc_num = old_tx_count; + ring_mgt->rx_desc_num = old_rx_count; + return err; + } + } + + clear_bit(NBL_RESETTING, adapter->state); + + return 0; +} + +static int nbl_fd_translate_cls_rule(u16 type, u16 length, u8 *val, void *data) +{ + struct ethtool_rxnfc *cmd = (struct ethtool_rxnfc *)(data); + struct ethtool_rx_flow_spec *fs = &cmd->fs; + u64 udf_val, udf_mask; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + u16 ring, vf, vsi; + + switch (type) { + case NBL_CHAN_FDIR_KEY_SRC_MAC: + ether_addr_copy(fs->h_u.ether_spec.h_source, val); + ether_addr_copy(fs->m_u.ether_spec.h_source, val + 6); + break; + case NBL_CHAN_FDIR_KEY_DST_MAC: + if (flow_type == ETHER_FLOW) { + ether_addr_copy(fs->h_u.ether_spec.h_dest, val); + ether_addr_copy(fs->m_u.ether_spec.h_dest, val + 6); + } else { + ether_addr_copy(fs->h_ext.h_dest, val); + ether_addr_copy(fs->m_ext.h_dest, val + 6); + fs->flow_type |= FLOW_MAC_EXT; + } + break; + case NBL_CHAN_FDIR_KEY_PROTO: + if (flow_type == ETHER_FLOW) { + fs->h_u.ether_spec.h_proto = *(u16 *)val; + fs->m_u.ether_spec.h_proto = *(u16 *)(val + 2); + } + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv4: + if (flow_type == IPV4_USER_FLOW) { + fs->h_u.usr_ip4_spec.ip4src = *(u32 *)val; + fs->m_u.usr_ip4_spec.ip4src = *(u32 *)(val + 4); + } else { + fs->h_u.tcp_ip4_spec.ip4src = *(u32 *)val; + fs->m_u.tcp_ip4_spec.ip4src = *(u32 *)(val + 4); + } + break; + case NBL_CHAN_FDIR_KEY_DST_IPv4: + if (flow_type == IPV4_USER_FLOW) { + fs->h_u.usr_ip4_spec.ip4dst = *(u32 *)val; + fs->m_u.usr_ip4_spec.ip4dst = *(u32 *)(val + 4); + } else { + fs->h_u.tcp_ip4_spec.ip4dst = *(u32 *)val; + fs->m_u.tcp_ip4_spec.ip4dst = *(u32 *)(val + 4); + } + break; + case NBL_CHAN_FDIR_KEY_L4PROTO: + if (flow_type == IPV4_USER_FLOW) { + fs->h_u.usr_ip4_spec.proto = *(u8 *)val; + fs->m_u.usr_ip4_spec.proto = *(u8 *)(val + 1); + } else if (flow_type == IPV6_USER_FLOW) { + fs->h_u.usr_ip6_spec.l4_proto = *(u8 *)val; + fs->m_u.usr_ip6_spec.l4_proto = *(u8 *)(val + 1); + } + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv6: + if (flow_type == IPV6_USER_FLOW) { + memcpy(&fs->h_u.usr_ip6_spec.ip6src, val, + sizeof(fs->h_u.usr_ip6_spec.ip6src)); + memcpy(&fs->m_u.usr_ip6_spec.ip6src, val + 16, + sizeof(fs->m_u.usr_ip6_spec.ip6src)); + } else { + memcpy(&fs->h_u.tcp_ip6_spec.ip6src, val, + sizeof(fs->h_u.tcp_ip6_spec.ip6src)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6src, val + 16, + sizeof(fs->m_u.tcp_ip6_spec.ip6src)); + } + break; + case 
NBL_CHAN_FDIR_KEY_DST_IPv6: + if (flow_type == IPV6_USER_FLOW) { + memcpy(&fs->h_u.usr_ip6_spec.ip6dst, val, + sizeof(fs->h_u.usr_ip6_spec.ip6dst)); + memcpy(&fs->m_u.usr_ip6_spec.ip6dst, val + 16, + sizeof(fs->m_u.usr_ip6_spec.ip6dst)); + } else { + memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, val, + sizeof(fs->h_u.tcp_ip6_spec.ip6dst)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, val + 16, + sizeof(fs->m_u.tcp_ip6_spec.ip6dst)); + } + break; + case NBL_CHAN_FDIR_KEY_SPORT: + if (flow_type == TCP_V4_FLOW || flow_type == UDP_V4_FLOW) { + fs->h_u.tcp_ip4_spec.psrc = *(u16 *)val; + fs->m_u.tcp_ip4_spec.psrc = *(u16 *)(val + 2); + } else if (flow_type == TCP_V6_FLOW || flow_type == UDP_V6_FLOW) { + fs->h_u.tcp_ip6_spec.psrc = *(u16 *)val; + fs->m_u.tcp_ip6_spec.psrc = *(u16 *)(val + 2); + } + break; + case NBL_CHAN_FDIR_KEY_DPORT: + if (flow_type == TCP_V4_FLOW || flow_type == UDP_V4_FLOW) { + fs->h_u.tcp_ip4_spec.pdst = *(u16 *)val; + fs->m_u.tcp_ip4_spec.pdst = *(u16 *)(val + 2); + } else if (flow_type == TCP_V6_FLOW || flow_type == UDP_V6_FLOW) { + fs->h_u.tcp_ip6_spec.pdst = *(u16 *)val; + fs->m_u.tcp_ip6_spec.pdst = *(u16 *)(val + 2); + } + break; + case NBL_CHAN_FDIR_KEY_UDF: + udf_val = cpu_to_be64p((u64 *)val); + udf_mask = cpu_to_be64p((u64 *)(val + 8)); + + memcpy(fs->h_ext.data, &udf_val, sizeof(udf_val)); + memcpy(fs->m_ext.data, &udf_mask, sizeof(udf_mask)); + fs->flow_type |= FLOW_EXT; + break; + case NBL_CHAN_FDIR_ACTION_QUEUE: + ring = *(u16 *)val; + vf = *(u16 *)(val + 2); + fs->ring_cookie = (u64)ring | (u64)vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + break; + case NBL_CHAN_FDIR_ACTION_VSI: + vsi = *(u16 *)(val + 4); + if (vsi == 0xFFFF) + fs->ring_cookie = RX_CLS_FLOW_DISC; + break; + default: + break; + } + + return 0; +} + +static void nbl_fd_flow_type_translate(enum nbl_chan_fdir_flow_type flow_type, + struct ethtool_rxnfc *cmd) +{ + switch (flow_type) { + case NBL_CHAN_FDIR_FLOW_FULL: + case NBL_CHAN_FDIR_FLOW_ETHER: + cmd->fs.flow_type = ETHER_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_IPv4: + cmd->fs.flow_type = IPV4_USER_FLOW; + cmd->fs.h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + cmd->fs.m_u.usr_ip4_spec.ip_ver = 0xFF; + break; + case NBL_CHAN_FDIR_FLOW_IPv6: + cmd->fs.flow_type = IPV6_USER_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_TCP_IPv4: + cmd->fs.flow_type = TCP_V4_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_TCP_IPv6: + cmd->fs.flow_type = TCP_V6_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_UDP_IPv4: + cmd->fs.flow_type = UDP_V4_FLOW; + break; + case NBL_CHAN_FDIR_FLOW_UDP_IPv6: + cmd->fs.flow_type = UDP_V6_FLOW; + break; + default: + break; + } +} + +static int nbl_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + struct nbl_chan_param_fdir_replace *info; + struct nbl_chan_param_get_fd_flow_all param; + u32 *locs_tmp = NULL; + int ret = 0, start = 0, num = 0, total_num = 0, i; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = vsi_info->active_ring_num; + break; + case ETHTOOL_GRXCLSRLCNT: + ret = disp_ops->get_fd_flow_cnt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, + NBL_COMMON_TO_VSI_ID(common)); + if (ret < 0) + return ret; + + cmd->rule_cnt 
= ret; + return 0; + case ETHTOOL_GRXCLSRULE: + info = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); + if (!info) + return -ENOMEM; + ret = disp_ops->get_fd_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), + cmd->fs.location, + NBL_CHAN_FDIR_RULE_NORMAL, + info); + if (!ret) { + nbl_fd_flow_type_translate(info->flow_type, cmd); + cmd->fs.location = info->location; + nbl_flow_direct_parse_tlv_data(info->tlv, info->tlv_length, + nbl_fd_translate_cls_rule, cmd); + } + kfree(info); + break; + case ETHTOOL_GRXCLSRLALL: + total_num = cmd->rule_cnt; + + locs_tmp = kcalloc(NBL_CHAN_GET_FD_LOCS_MAX, sizeof(*locs_tmp), GFP_KERNEL); + if (!locs_tmp) + return -ENOMEM; + + while (total_num > 0) { + num = total_num > NBL_CHAN_GET_FD_LOCS_MAX ? NBL_CHAN_GET_FD_LOCS_MAX + : total_num; + param.rule_type = NBL_CHAN_FDIR_RULE_NORMAL; + param.start = start; + param.num = num; + param.vsi_id = NBL_COMMON_TO_VSI_ID(common); + ret = disp_ops->get_fd_flow_all(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &param, locs_tmp); + if (ret) { + kfree(locs_tmp); + return ret; + } + + for (i = 0; i < num; i++) + rule_locs[start + i] = locs_tmp[i]; + + start += num; + total_num -= num; + } + + cmd->data = disp_ops->get_fd_flow_max(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + kfree(locs_tmp); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int nbl_format_flow_ext_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + u64 udf_value = be64_to_cpup((__force __be64 *)fs->h_ext.data); + u64 udf_mask = be64_to_cpup((__force __be64 *)fs->m_ext.data); + u8 *tlv_start = info->tlv + *offset; + u16 tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 16; + + if (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci) + return -EINVAL; + + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_UDF; + *(u16 *)(tlv_start + 2) = 16; + memcpy(tlv_start + 4, &udf_value, sizeof(udf_value)); + memcpy(tlv_start + 12, &udf_mask, sizeof(udf_mask)); + *offset += tlv_length; + + return 0; +} + +static int nbl_format_flow_mac_ext_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + u8 *tlv_start = info->tlv + *offset; + u16 tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 2 * ETH_ALEN; + + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_MAC; + *(u16 *)(tlv_start + 2) = 2 * ETH_ALEN; + ether_addr_copy(tlv_start + 4, fs->h_ext.h_dest); + ether_addr_copy(tlv_start + 10, fs->m_ext.h_dest); + *offset += tlv_length; + + return 0; +} + +static int nbl_format_ether_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethhdr *ether_spec = &fs->h_u.ether_spec; + struct ethhdr *ether_mask = &fs->m_u.ether_spec; + u8 *tlv_start; + u16 tlv_length; + bool valid = 0; + + if (!is_zero_ether_addr(ether_mask->h_dest)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 2 * ETH_ALEN; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_MAC; + *(u16 *)(tlv_start + 2) = 2 * ETH_ALEN; + ether_addr_copy(tlv_start + 4, ether_spec->h_dest); + ether_addr_copy(tlv_start + 10, ether_mask->h_dest); + *offset += tlv_length; + valid = 1; + } + + if (!is_zero_ether_addr(ether_mask->h_source)) { + tlv_start = info->tlv + *offset; + tlv_length = 
NBL_CHAN_FDIR_TLV_HEADER_LEN + 2 * ETH_ALEN; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_MAC; + *(u16 *)(tlv_start + 2) = 2 * ETH_ALEN; + ether_addr_copy(tlv_start + 4, ether_spec->h_source); + ether_addr_copy(tlv_start + 10, ether_mask->h_source); + *offset += tlv_length; + valid = 1; + } + + if (ether_mask->h_proto) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = ether_spec->h_proto; + *(u16 *)(tlv_start + 6) = ether_mask->h_proto; + *offset += tlv_length; + valid = 1; + } + + if (!valid) + return -EINVAL; + + return 0; +} + +static int nbl_format_ipv4_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_usrip4_spec *usr_ip4_spec = &fs->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *usr_ip4_mask = &fs->m_u.usr_ip4_spec; + u8 *tlv_start; + u16 tlv_length; + + if (usr_ip4_mask->ip4src) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = usr_ip4_spec->ip4src; + *(u32 *)(tlv_start + 8) = usr_ip4_mask->ip4src; + *offset += tlv_length; + } + + if (usr_ip4_mask->ip4dst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = usr_ip4_spec->ip4dst; + *(u32 *)(tlv_start + 8) = usr_ip4_mask->ip4dst; + *offset += tlv_length; + } + + if (usr_ip4_mask->proto) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = usr_ip4_spec->proto; + *(u8 *)(tlv_start + 5) = usr_ip4_mask->proto; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IP); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + return 0; +} + +static int nbl_format_ipv6_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_usrip6_spec *usr_ip6_spec = &fs->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *usr_ip6_mask = &fs->m_u.usr_ip6_spec; + u8 *tlv_start; + u16 tlv_length; + + if (!ipv6_addr_any((struct in6_addr *)usr_ip6_mask->ip6src)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, usr_ip6_spec->ip6src, sizeof(usr_ip6_spec->ip6src)); + memcpy(tlv_start + 20, usr_ip6_mask->ip6src, sizeof(usr_ip6_mask->ip6src)); + *offset += tlv_length; + } + + if 
(!ipv6_addr_any((struct in6_addr *)usr_ip6_mask->ip6dst)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, usr_ip6_spec->ip6dst, sizeof(usr_ip6_spec->ip6dst)); + memcpy(tlv_start + 20, usr_ip6_mask->ip6dst, sizeof(usr_ip6_mask->ip6dst)); + *offset += tlv_length; + } + + if (usr_ip6_mask->l4_proto) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = usr_ip6_spec->l4_proto; + *(u8 *)(tlv_start + 5) = usr_ip6_mask->l4_proto; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IPV6); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + return 0; +} + +static int nbl_format_tcpv4_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip4_spec *tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *tcp_ip4_mask = &fs->m_u.tcp_ip4_spec; + u8 *tlv_start; + u16 tlv_length; + + if (tcp_ip4_mask->ip4src) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = tcp_ip4_spec->ip4src; + *(u32 *)(tlv_start + 8) = tcp_ip4_mask->ip4src; + *offset += tlv_length; + } + + if (tcp_ip4_mask->ip4dst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = tcp_ip4_spec->ip4dst; + *(u32 *)(tlv_start + 8) = tcp_ip4_mask->ip4dst; + *offset += tlv_length; + } + + if (tcp_ip4_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip4_spec->psrc; + *(u16 *)(tlv_start + 6) = tcp_ip4_mask->psrc; + *offset += tlv_length; + } + + if (tcp_ip4_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip4_spec->pdst; + *(u16 *)(tlv_start + 6) = tcp_ip4_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_TCP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start 
= info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IP); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static int nbl_format_tcpv6_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip6_spec *tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *tcp_ip6_mask = &fs->m_u.tcp_ip6_spec; + u8 *tlv_start; + u16 tlv_length; + + if (!ipv6_addr_any((struct in6_addr *)tcp_ip6_mask->ip6src)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, tcp_ip6_spec->ip6src, sizeof(tcp_ip6_spec->ip6src)); + memcpy(tlv_start + 20, tcp_ip6_mask->ip6src, sizeof(tcp_ip6_mask->ip6src)); + *offset += tlv_length; + } + + if (!ipv6_addr_any((struct in6_addr *)tcp_ip6_mask->ip6dst)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, tcp_ip6_spec->ip6dst, sizeof(tcp_ip6_spec->ip6dst)); + memcpy(tlv_start + 20, tcp_ip6_mask->ip6dst, sizeof(tcp_ip6_mask->ip6dst)); + *offset += tlv_length; + } + + if (tcp_ip6_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip6_spec->psrc; + *(u16 *)(tlv_start + 6) = tcp_ip6_mask->psrc; + *offset += tlv_length; + } + + if (tcp_ip6_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = tcp_ip6_spec->pdst; + *(u16 *)(tlv_start + 6) = tcp_ip6_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_TCP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IPV6); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static int nbl_format_udpv4_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip4_spec *udp_ip4_spec = &fs->h_u.udp_ip4_spec; + struct ethtool_tcpip4_spec *udp_ip4_mask = &fs->m_u.udp_ip4_spec; + u8 *tlv_start; + u16 tlv_length; + + if (udp_ip4_mask->ip4src) { + tlv_start = info->tlv + *offset; + 
tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = udp_ip4_spec->ip4src; + *(u32 *)(tlv_start + 8) = udp_ip4_mask->ip4src; + *offset += tlv_length; + } + + if (udp_ip4_mask->ip4dst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv4; + *(u16 *)(tlv_start + 2) = 8; + *(u32 *)(tlv_start + 4) = udp_ip4_spec->ip4dst; + *(u32 *)(tlv_start + 8) = udp_ip4_mask->ip4dst; + *offset += tlv_length; + } + + if (udp_ip4_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip4_spec->psrc; + *(u16 *)(tlv_start + 6) = udp_ip4_mask->psrc; + *offset += tlv_length; + } + + if (udp_ip4_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip4_spec->pdst; + *(u16 *)(tlv_start + 6) = udp_ip4_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_UDP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IP); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static int nbl_format_udpv6_flow_rule(struct ethtool_rx_flow_spec *fs, + struct nbl_chan_param_fdir_replace *info, + int *offset) +{ + struct ethtool_tcpip6_spec *udp_ip6_spec = &fs->h_u.udp_ip6_spec; + struct ethtool_tcpip6_spec *udp_ip6_mask = &fs->m_u.udp_ip6_spec; + u8 *tlv_start; + u16 tlv_length; + + if (!ipv6_addr_any((struct in6_addr *)udp_ip6_mask->ip6src)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SRC_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, udp_ip6_spec->ip6src, sizeof(udp_ip6_spec->ip6src)); + memcpy(tlv_start + 20, udp_ip6_mask->ip6src, sizeof(udp_ip6_mask->ip6src)); + *offset += tlv_length; + } + + if (!ipv6_addr_any((struct in6_addr *)udp_ip6_mask->ip6dst)) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 32; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DST_IPv6; + *(u16 *)(tlv_start + 2) = 32; + memcpy(tlv_start + 4, udp_ip6_spec->ip6dst, sizeof(udp_ip6_spec->ip6dst)); + memcpy(tlv_start + 20, udp_ip6_mask->ip6dst, sizeof(udp_ip6_mask->ip6dst)); + *offset += 
tlv_length; + } + + if (udp_ip6_mask->psrc) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_SPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip6_spec->psrc; + *(u16 *)(tlv_start + 6) = udp_ip6_mask->psrc; + *offset += tlv_length; + } + + if (udp_ip6_mask->pdst) { + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_DPORT; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = udp_ip6_spec->pdst; + *(u16 *)(tlv_start + 6) = udp_ip6_mask->pdst; + *offset += tlv_length; + } + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_L4PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u8 *)(tlv_start + 4) = IPPROTO_UDP; + *(u8 *)(tlv_start + 5) = 0xFF; + *offset += tlv_length; + + tlv_start = info->tlv + *offset; + tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 4; + if (*offset > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_KEY_PROTO; + *(u16 *)(tlv_start + 2) = 4; + *(u16 *)(tlv_start + 4) = htons(ETH_P_IPV6); + *(u16 *)(tlv_start + 6) = 0xFFFF; + *offset += tlv_length; + + return 0; +} + +static struct nbl_chan_param_fdir_replace *nbl_format_fdir_rule(struct ethtool_rx_flow_spec *fs) +{ + struct nbl_chan_param_fdir_replace *info; + int ret = 0, offset = 0; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + + info = kzalloc(NBL_CHAN_FDIR_FLOW_RULE_SIZE, GFP_KERNEL); + if (!info) + return NULL; + + if (fs->flow_type & FLOW_RSS) { + ret = -EINVAL; + goto check_failed; + } + + if (fs->flow_type & FLOW_EXT) { + ret = nbl_format_flow_ext_rule(fs, info, &offset); + if (ret) + goto check_failed; + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ret = nbl_format_flow_mac_ext_rule(fs, info, &offset); + if (ret) + goto check_failed; + } + + switch (flow_type) { + case ETHER_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_ETHER; + ret = nbl_format_ether_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case IPV4_USER_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_IPv4; + ret = nbl_format_ipv4_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case IPV6_USER_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_IPv6; + ret = nbl_format_ipv6_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case TCP_V4_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_TCP_IPv4; + ret = nbl_format_tcpv4_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case TCP_V6_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_TCP_IPv6; + ret = nbl_format_tcpv6_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case UDP_V4_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_UDP_IPv4; + ret = nbl_format_udpv4_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + case UDP_V6_FLOW: + info->flow_type = NBL_CHAN_FDIR_FLOW_UDP_IPv6; + ret = nbl_format_udpv6_flow_rule(fs, info, &offset); + if (ret) + goto check_failed; + break; + default: + ret = -EOPNOTSUPP; + goto check_failed; + } + + info->rule_type = NBL_CHAN_FDIR_RULE_NORMAL; + info->order = 1; + info->tlv_length = offset; + info->base_length = 
sizeof(*info); + info->location = fs->location; + return info; + +check_failed: + kfree(info); + return NULL; +} + +static int nbl_format_fdir_action(struct nbl_chan_param_fdir_replace *info, + u16 ring, u16 vf_id, u16 dport, u16 global_queue_id) +{ + u8 *tlv_start; + u16 tlv_length = NBL_CHAN_FDIR_TLV_HEADER_LEN + 8; + + if (info->tlv_length > (NBL_CHAN_FDIR_FLOW_TLV_SIZE - tlv_length)) + return -EINVAL; + + tlv_start = info->tlv + info->tlv_length; + if (dport != 0xFFFF) + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_ACTION_QUEUE; + else + *(u16 *)(tlv_start) = NBL_CHAN_FDIR_ACTION_VSI; + + *(u16 *)(tlv_start + 2) = 8; + *(u16 *)(tlv_start + 4) = info->ring = ring; + *(u16 *)(tlv_start + 6) = info->vf = vf_id; + *(u16 *)(tlv_start + 8) = info->dport = dport; + *(u16 *)(tlv_start + 10) = info->global_queue_id = global_queue_id; + + info->tlv_length += tlv_length; + return 0; +} + +static int nbl_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + struct nbl_chan_param_fdir_replace *info; + u64 ring_cookie = cmd->fs.ring_cookie; + int ret = -EOPNOTSUPP; + u32 ring = 0; + u16 vf = 0; + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + u16 global_queue_id = NBL_INVALID_QUEUE_ID, dport = 0xFFFF; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (ring_cookie == RX_CLS_FLOW_WAKE) + return -EINVAL; + + if (ring_cookie != RX_CLS_FLOW_DISC) { + dport = vsi_id; + ring = ethtool_get_flow_spec_ring(cmd->fs.ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(cmd->fs.ring_cookie); + + if (vf == 0 && (ring < vsi_info->ring_offset || + ring >= vsi_info->ring_offset + vsi_info->active_ring_num)) + return -EINVAL; + + /* vf = real_vf_idx + 1, 0 means direct to rx queue. 
*/ + if (vf > net_resource_mgt->total_vfs) + return -EINVAL; + + if (vf) + dport = disp_ops->get_vf_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, vf - 1); + global_queue_id = disp_ops->get_vsi_global_queue_id + (NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), dport, ring); + } + + info = nbl_format_fdir_rule(&cmd->fs); + if (!info) + return -EINVAL; + + info->vsi = vsi_id; + ret = nbl_format_fdir_action(info, ring, vf, dport, global_queue_id); + if (ret) { + kfree(info); + return ret; + } + ret = disp_ops->replace_fd_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info); + kfree(info); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = disp_ops->remove_fd_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, + cmd->fs.location, vsi_id); + break; + default: + break; + } + + return ret; +} + +static u32 nbl_get_rxfh_indir_size(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 rxfh_indir_size = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + + return rxfh_indir_size; +} + +static u32 nbl_get_rxfh_key_size(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + u32 rxfh_rss_key_size = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_rxfh_rss_key_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &rxfh_rss_key_size); + + return rxfh_rss_key_size; +} + +static int nbl_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 rxfh_key_size = 0; + u32 rxfh_indir_size = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + disp_ops->get_rxfh_rss_key_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &rxfh_key_size); + disp_ops->get_rxfh_indir_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), &rxfh_indir_size); + + if (indir) + disp_ops->get_rxfh_indir(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), indir, rxfh_indir_size); + if (key) + disp_ops->get_rxfh_rss_key(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), key, rxfh_key_size); + if (hfunc) + disp_ops->get_rxfh_rss_alg_sel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + hfunc, NBL_COMMON_TO_ETH_ID(serv_mgt->common)); + + return 0; +} + +static u32 nbl_get_msglevel(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + u32 debug_lvl = common->debug_lvl; + + if (debug_lvl) + netdev_dbg(netdev, "nbl debug_lvl: 0x%08X\n", debug_lvl); + + return common->msg_enable; +} + +static void nbl_set_msglevel(struct net_device *netdev, u32 msglevel) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + + if (NBL_DEBUG_USER & msglevel) + common->debug_lvl = msglevel; + else + common->msg_enable = msglevel; +} + +static int nbl_get_regs_len(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return 
disp_ops->get_reg_dump_len(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_get_ethtool_dump_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_reg_dump(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), p, regs->len); +} + +static int nbl_get_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + u16 local_vector_id, configured_usecs; + struct nbl_chan_param_get_coalesce coalesce_param = {0}; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (q_num >= vsi_info->ring_offset + vsi_info->ring_num) { + netdev_err(netdev, "q_num %d is too large\n", q_num); + return -EINVAL; + } + + local_vector_id = ring_mgt->vectors[q_num + vsi_info->ring_offset].local_vector_id; + configured_usecs = ring_mgt->vectors[q_num + vsi_info->ring_offset].intr_rate_usecs; + disp_ops->get_coalesce(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_vector_id, &coalesce_param); + + NBL_SET_INTR_COALESCE(ec, coalesce_param.tx_coalesce_usecs, + coalesce_param.tx_max_coalesced_frames, + coalesce_param.rx_coalesce_usecs, + coalesce_param.rx_max_coalesced_frames); + + if (vsi_info->itr_dynamic) { + ec->use_adaptive_tx_coalesce = 1; + ec->use_adaptive_rx_coalesce = 1; + } else { + if (configured_usecs) { + ec->tx_coalesce_usecs = configured_usecs; + ec->rx_coalesce_usecs = configured_usecs; + } + } + return 0; +} + +static int __nbl_set_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + struct ethtool_coalesce ec_local = {0}; + u16 local_vector_id, pnum, rate; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (q_num >= vsi_info->ring_offset + vsi_info->ring_num) { + netdev_err(netdev, "q_num %d is too large\n", q_num); + return -EINVAL; + } + + if (ec->rx_max_coalesced_frames > U16_MAX) { + netdev_err(netdev, "rx_frames %d out of range: [0 - %d]\n", + ec->rx_max_coalesced_frames, U16_MAX); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs > U16_MAX) { + netdev_err(netdev, "rx_usecs %d out of range: [0 - %d]\n", + ec->rx_coalesce_usecs, U16_MAX); + return -EINVAL; + } + + if (ec->tx_max_coalesced_frames != ec->rx_max_coalesced_frames || + ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) { + netdev_err(netdev, "tx and rx share the same interrupt, " + "rx params must equal tx params\n"); + return -EINVAL; + } + + if (ec->use_adaptive_tx_coalesce != ec->use_adaptive_rx_coalesce) { + netdev_err(netdev, "rx and tx adaptive must be configured to the same value\n"); + return -EINVAL; + } + + if (vsi_info->itr_dynamic) { + nbl_get_per_queue_coalesce(netdev, q_num, &ec_local); + if (ec_local.rx_coalesce_usecs != ec->rx_coalesce_usecs || + ec_local.rx_max_coalesced_frames != ec->rx_max_coalesced_frames) { + netdev_err(netdev, + "interrupt throttling cannot be changed while adaptive is enabled\n"); + return -EINVAL; + } + return 0; + } + + local_vector_id = 
ring_mgt->vectors[q_num + vsi_info->ring_offset].local_vector_id; + pnum = (u16)ec->tx_max_coalesced_frames; + rate = (u16)ec->tx_coalesce_usecs; + ring_mgt->vectors[q_num + vsi_info->ring_offset].intr_rate_usecs = rate; + + disp_ops->set_coalesce(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), local_vector_id, + 1, pnum, rate); + return 0; +} + +static int nbl_set_per_queue_coalesce(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (vsi_info->itr_dynamic != (!!ec->use_adaptive_rx_coalesce)) { + netdev_err(netdev, "modifying interrupt adaptive mode per queue is not supported\n"); + return -EINVAL; + } + + return __nbl_set_per_queue_coalesce(netdev, q_num, ec); +} + +static int nbl_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + u32 q_num = 0; + + return nbl_get_per_queue_coalesce(netdev, q_num, ec); +} + +static int nbl_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + struct ethtool_coalesce ec_local = {0}; + u16 local_vector_id; + u16 intr_suppress_level; + u16 q_num; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (ec->rx_max_coalesced_frames > U16_MAX) { + netdev_err(netdev, "rx_frames %d out of range: [0 - %d]\n", + ec->rx_max_coalesced_frames, U16_MAX); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs > U16_MAX) { + netdev_err(netdev, "rx_usecs %d out of range: [0 - %d]\n", + ec->rx_coalesce_usecs, U16_MAX); + return -EINVAL; + } + + if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames) { + netdev_err(netdev, "rx_frames and tx_frames must be configured to the same value\n"); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs != ec->tx_coalesce_usecs) { + netdev_err(netdev, "rx_usecs and tx_usecs must be configured to the same value\n"); + return -EINVAL; + } + + if (ec->use_adaptive_tx_coalesce != ec->use_adaptive_rx_coalesce) { + netdev_err(netdev, "rx and tx adaptive must be configured to the same value\n"); + return -EINVAL; + } + + if (vsi_info->itr_dynamic && ec->use_adaptive_rx_coalesce) { + nbl_get_per_queue_coalesce(netdev, 0, &ec_local); + if (ec_local.rx_coalesce_usecs != ec->rx_coalesce_usecs || + ec_local.rx_max_coalesced_frames != ec->rx_max_coalesced_frames) { + netdev_err(netdev, + "interrupt throttling cannot be changed while adaptive is enabled\n"); + return -EINVAL; + } + } + + if (ec->use_adaptive_rx_coalesce) { + vsi_info->itr_dynamic = true; + local_vector_id = ring_mgt->vectors[vsi_info->ring_offset].local_vector_id; + intr_suppress_level = ring_mgt->vectors->intr_suppress_level; + disp_ops->set_intr_suppress_level(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + local_vector_id, vsi_info->ring_num, + intr_suppress_level); + } else { + vsi_info->itr_dynamic = false; + for (q_num = 0; q_num < vsi_info->ring_num; q_num++) + __nbl_set_per_queue_coalesce(netdev, + vsi_info->ring_offset + q_num, + ec); + } + + return 0; +} + +static u64 nbl_link_test(struct net_device 
*netdev) +{ + bool link_up; + + /* TODO: will get this from emp in a later version */ + link_up = 0; + + return link_up; +} + +static int nbl_loopback_setup_rings(struct nbl_adapter *adapter, struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + + return nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, 1, 0); +} + +static void nbl_loopback_free_rings(struct nbl_adapter *adapter, struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); +} + +static void nbl_loopback_create_skb(struct sk_buff *skb, u32 size) +{ + if (!skb) + return; + + memset(skb->data, NBL_SELF_TEST_PADDING_DATA_1, size); + size >>= 1; + memset(&skb->data[size], NBL_SELF_TEST_PADDING_DATA_2, size); + skb->data[size + NBL_SELF_TEST_POS_2] = NBL_SELF_TEST_BYTE_1; + skb->data[size + NBL_SELF_TEST_POS_3] = NBL_SELF_TEST_BYTE_2; +} + +static s32 nbl_loopback_check_skb(struct sk_buff *skb, u32 size) +{ + size >>= 1; + + if (skb->data[NBL_SELF_TEST_POS_1] != NBL_SELF_TEST_PADDING_DATA_1 || + skb->data[size + NBL_SELF_TEST_POS_2] != NBL_SELF_TEST_BYTE_1 || + skb->data[size + NBL_SELF_TEST_POS_3] != NBL_SELF_TEST_BYTE_2) + return -1; + + return 0; +} + +static s32 nbl_loopback_run_test(struct net_device *netdev) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_resource_pt_ops *pt_ops = NBL_ADAPTER_TO_RES_PT_OPS(adapter); + struct sk_buff *skb_tx[NBL_SELF_TEST_PKT_NUM] = {NULL}, *skb_rx; + u32 size = NBL_SELF_TEST_BUFF_SIZE; + u32 count; + u32 tx_count = 0; + s32 result = 0; + int i; + + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) { + skb_tx[i] = alloc_skb(size, GFP_KERNEL); + if (!skb_tx[i]) + goto alloc_skb_failed; + + nbl_loopback_create_skb(skb_tx[i], size); + skb_put(skb_tx[i], size); + skb_tx[i]->queue_mapping = 0; + } + + count = min_t(u16, serv_mgt->ring_mgt.tx_desc_num, NBL_SELF_TEST_PKT_NUM); + count = min_t(u16, serv_mgt->ring_mgt.rx_desc_num, count); + + for (i = 0; i < count; i++) { + skb_get(skb_tx[i]); + if (pt_ops->self_test_xmit(skb_tx[i], netdev) != NETDEV_TX_OK) + netdev_err(netdev, "Failed to tx lb skb %p", skb_tx[i]); + else + tx_count++; + } + + if (tx_count < count) { + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) + kfree_skb(skb_tx[i]); + result |= BIT(NBL_LB_ERR_TX_FAIL); + return result; + } + + /* Wait for the packets to loop back on rx */ + msleep(1000); + + for (i = 0; i < tx_count; i++) { + skb_rx = NULL; + skb_rx = disp_ops->clean_rx_lb_test(NBL_ADAPTER_TO_DISP_MGT(adapter), 0); + if (!skb_rx) { + netdev_err(netdev, "Failed to rx lb skb, should rx %d but failed on %d", + tx_count, i); + break; + } + if (nbl_loopback_check_skb(skb_rx, size)) { + netdev_err(netdev, "Failed to check lb skb %d(%p)", i, skb_rx); + kfree_skb(skb_rx); + break; + } + kfree_skb(skb_rx); + } + + if (i != tx_count) + result |= BIT(NBL_LB_ERR_RX_FAIL); + + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) + kfree_skb(skb_tx[i]); + + return result; + +alloc_skb_failed: + for (i = 0; i < NBL_SELF_TEST_PKT_NUM; i++) { + if (skb_tx[i]) + kfree_skb(skb_tx[i]); + } + result |= BIT(NBL_LB_ERR_SKB_ALLOC); + return result; +} + +static u64 nbl_loopback_test(struct net_device *netdev) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + 
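/* Orientation note: the loopback self-test below shrinks the data path to a single queue pair, enables ethernet loopback in hardware, transmits a known byte pattern via nbl_loopback_run_test() and then restores the original queue configuration. */ +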
struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = &serv_mgt->ring_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter)->ops; + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + u8 origin_num_txq, origin_num_rxq, origin_active_q; + u64 result = 0; + + /* In loopback test, we only need one queue */ + origin_num_txq = ring_mgt->tx_ring_num; + origin_num_rxq = ring_mgt->rx_ring_num; + origin_active_q = vsi_info->active_ring_num; + ring_mgt->tx_ring_num = NBL_SELF_TEST_Q_NUM; + ring_mgt->rx_ring_num = NBL_SELF_TEST_Q_NUM; + + if (nbl_loopback_setup_rings(adapter, netdev)) { + netdev_err(netdev, "Failed to set up rings"); + result |= BIT(NBL_LB_ERR_RING_SETUP); + goto lb_setup_rings_failed; + } + + if (disp_ops->set_eth_loopback(NBL_ADAPTER_TO_DISP_MGT(adapter), NBL_ETH_LB_ON)) { + netdev_err(netdev, "Failed to set lb on"); + result |= BIT(NBL_LB_ERR_LB_MODE_SETUP); + goto set_eth_lb_failed; + } + + result |= nbl_loopback_run_test(netdev); + + if (disp_ops->set_eth_loopback(NBL_ADAPTER_TO_DISP_MGT(adapter), NBL_ETH_LB_OFF)) { + netdev_err(netdev, "Failed to set lb off"); + result |= BIT(NBL_LB_ERR_LB_MODE_SETUP); + goto set_eth_lb_failed; + } + +set_eth_lb_failed: + nbl_loopback_free_rings(adapter, netdev); +lb_setup_rings_failed: + ring_mgt->tx_ring_num = origin_num_txq; + ring_mgt->rx_ring_num = origin_num_rxq; + vsi_info->active_ring_num = origin_active_q; + + return result; +} + +static u32 nbl_mailbox_check_active_vf(struct nbl_adapter *adapter) +{ + struct nbl_dispatch_ops_tbl *disp_ops_tbl = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + + return disp_ops_tbl->ops->check_active_vf(NBL_ADAPTER_TO_DISP_MGT(adapter)); +} + +static void nbl_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + bool if_running = netif_running(netdev); + u32 active_vf; + s64 cur_time = 0; + int ret; + + cur_time = ktime_get_real_seconds(); + /* testing too frequently will cause failures */ + if (cur_time - priv->last_st_time < NBL_SELF_TEST_TIME_GAP) { + /* pass by default */ + netdev_info(netdev, "Self-test run too frequently, pass by default!"); + data[NBL_ETH_TEST_REG] = 0; + data[NBL_ETH_TEST_EEPROM] = 0; + data[NBL_ETH_TEST_INTR] = 0; + data[NBL_ETH_TEST_LOOP] = 0; + data[NBL_ETH_TEST_LINK] = 0; + return; + } + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + active_vf = nbl_mailbox_check_active_vf(adapter); + + if (active_vf) { + netdev_err(netdev, "Cannot perform offline test when VFs are active"); + data[NBL_ETH_TEST_REG] = 1; + data[NBL_ETH_TEST_EEPROM] = 1; + data[NBL_ETH_TEST_INTR] = 1; + data[NBL_ETH_TEST_LOOP] = 1; + data[NBL_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + /* If the interface is running, take it offline */ + if (if_running) { + ret = nbl_serv_netdev_stop(netdev); + if (ret) { + netdev_err(netdev, "Could not stop device %s, err %d\n", + pci_name(adapter->pdev), ret); + goto netdev_stop_failed; + } + } + + set_bit(NBL_TESTING, adapter->state); + + data[NBL_ETH_TEST_LINK] = nbl_link_test(netdev); + data[NBL_ETH_TEST_EEPROM] = 0; + data[NBL_ETH_TEST_INTR] = 0; + data[NBL_ETH_TEST_LOOP] = nbl_loopback_test(netdev); + data[NBL_ETH_TEST_REG] = 0; + + if (data[NBL_ETH_TEST_LINK] || + data[NBL_ETH_TEST_EEPROM] || + data[NBL_ETH_TEST_INTR] || + data[NBL_ETH_TEST_LOOP] || + data[NBL_ETH_TEST_REG]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + 
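/* Offline tests complete: clear the testing flag and, if the interface was running before the test, bring it back up. */ +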
clear_bit(NBL_TESTING, adapter->state); + if (if_running) { + ret = nbl_serv_netdev_open(netdev); + if (ret) { + netdev_err(netdev, "Could not open device %s, err %d\n", + pci_name(adapter->pdev), ret); + } + } + } else { + /* Online test */ + data[NBL_ETH_TEST_LINK] = nbl_link_test(netdev); + + if (data[NBL_ETH_TEST_LINK]) + eth_test->flags |= ETH_TEST_FL_FAILED; + /* Only tested when offline; pass by default */ + data[NBL_ETH_TEST_EEPROM] = 0; + data[NBL_ETH_TEST_INTR] = 0; + data[NBL_ETH_TEST_LOOP] = 0; + data[NBL_ETH_TEST_REG] = 0; + } + +netdev_stop_failed: + priv->last_st_time = ktime_get_real_seconds(); +} + +static u32 nbl_get_priv_flags(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u32 ret_flags = 0; + unsigned int i; + int count = 0; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + + if (test_bit(i, serv_mgt->flags)) + ret_flags |= BIT(count); + count++; + } + + netdev_dbg(netdev, "get priv flag: 0x%08x, mgt flags: 0x%08x.\n", + ret_flags, *(u32 *)serv_mgt->flags); + + return ret_flags; +} + +static int nbl_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + unsigned int i; + int count = 0; + u32 new_flags = 0; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + enum nbl_fix_cap_type capability_type = nbl_gstrings_priv_flags[i].capability_type; + + if (nbl_gstrings_priv_flags[i].supported_by_capability) { + if (!disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + capability_type)) + continue; + } + + if (!nbl_gstrings_priv_flags[i].supported_modify && + (!((priv_flags & BIT(count))) != !test_bit(i, serv_mgt->flags))) { + netdev_err(netdev, "set priv flag: 0x%08x, flag %s does not support modification\n", + priv_flags, nbl_gstrings_priv_flags[i].flag_name); + return -EOPNOTSUPP; + } + + if (priv_flags & BIT(count)) + new_flags |= BIT(i); + count++; + } + *serv_mgt->flags = new_flags; + + netdev_dbg(netdev, "set priv flag: 0x%08x, mgt flags: 0x%08x.\n", + priv_flags, *(u32 *)serv_mgt->flags); + + return 0; +} + +static int nbl_set_pause_param(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_phy_caps *phy_caps; + struct nbl_port_state port_state = {0}; + struct nbl_port_advertising port_advertising = {0}; + u32 autoneg = 0; + /* cannot default to 0, since 0 means the pause setting does not change */ + u8 active_fc = NBL_PORT_TXRX_PAUSE_OFF; + int ret = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + phy_caps = &net_resource_mgt->phy_caps; + + ret = nbl_serv_get_port_state(serv_mgt, &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not in place\n"); + return -EINVAL; + } + + autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (param->autoneg == AUTONEG_ENABLE) { + netdev_info(netdev, "pause autoneg is not supported\n"); + return -EOPNOTSUPP; + } + + /* check if the pause mode is changed */ + if (param->rx_pause == !!(port_state.active_fc & NBL_PORT_RX_PAUSE) && + param->tx_pause == !!(port_state.active_fc & NBL_PORT_TX_PAUSE)) { + netdev_info(netdev, "pause param is not changed\n"); + return 0; + } + + if (param->rx_pause) + active_fc |= NBL_PORT_RX_PAUSE; + + if (param->tx_pause) + active_fc |= NBL_PORT_TX_PAUSE; + + port_advertising.eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + port_advertising.active_fc = active_fc; + port_advertising.autoneg = autoneg; + + /* update pause mode */ + ret = disp_ops->set_port_advertising(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &port_advertising); + if (ret) { + netdev_err(netdev, "pause mode set failed %d\n", ret); + return ret; + } + + return 0; +} + +static void nbl_get_pause_param(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_port_state port_state = {0}; + int ret = 0; + + ret = nbl_serv_get_port_state(serv_mgt, &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return; + } + + param->autoneg = AUTONEG_DISABLE; + param->rx_pause = !!(port_state.active_fc & NBL_PORT_RX_PAUSE); + param->tx_pause = !!(port_state.active_fc & NBL_PORT_TX_PAUSE); +} + +static int nbl_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fec) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_port_state port_state = {0}; + struct nbl_port_advertising port_advertising = {0}; + u32 fec_mode = fec->fec; + u8 active_fec = 0; + u8 autoneg; + int ret = 0; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + ret = nbl_serv_get_port_state(serv_mgt, &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not in place\n"); + return -EINVAL; + } + + if (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) { + netdev_err(netdev, "setting fec mode is not supported when autoneg is enabled\n"); + return -EOPNOTSUPP; + } + + autoneg = ((port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) || + (port_state.port_caps & BIT(NBL_PORT_CAP_FEC_AUTONEG))) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* check if the fec mode is supported */ + if (fec_mode == ETHTOOL_FEC_OFF) { + active_fec = NBL_PORT_FEC_OFF; + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_OFF))) { + netdev_err(netdev, "unsupported fec mode off\n"); + return -EOPNOTSUPP; + } + } + if (fec_mode == ETHTOOL_FEC_RS) { + active_fec = NBL_PORT_FEC_RS; + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_RS))) { + netdev_err(netdev, "unsupported fec mode RS\n"); + return -EOPNOTSUPP; + } + } + if (fec_mode == ETHTOOL_FEC_BASER) { + active_fec = NBL_PORT_FEC_BASER; + if (!(port_state.port_caps & BIT(NBL_PORT_CAP_FEC_BASER))) { + netdev_err(netdev, "unsupported fec mode BaseR\n"); + return -EOPNOTSUPP; + } + } + if (fec_mode == ETHTOOL_FEC_AUTO) { + active_fec = NBL_PORT_FEC_AUTO; + if (!autoneg) { + netdev_err(netdev, "unsupported fec mode auto\n"); + return -EOPNOTSUPP; + } + } + + if (fec_mode == net_resource_mgt->configured_fec) { + netdev_err(netdev, "fec mode is not changed\n"); + return 0; + } + + if (fec_mode == ETHTOOL_FEC_RS) { + if (port_state.link_speed == 10000) { + netdev_err(netdev, "10G speed cannot use fec RS, only fec baseR can be set\n"); + return -EINVAL; + } + } + + port_advertising.eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + port_advertising.active_fec = active_fec; + port_advertising.autoneg = (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* update fec mode */ + ret = disp_ops->set_port_advertising(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + &port_advertising); + if (ret) { + netdev_err(netdev, "fec mode set failed %d\n", ret); + return ret; + } + + net_resource_mgt->configured_fec = fec_mode; + + return 0; +} + +static int nbl_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_port_state port_state = {0}; + u32 fec = 0; + u32 active_fec = 0; + u8 autoneg = 0; + int ret = 0; + + ret = nbl_serv_get_port_state(serv_mgt, &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not in place\n"); + return -EINVAL; + } + + autoneg = ((port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) || + (port_state.port_caps & BIT(NBL_PORT_CAP_FEC_AUTONEG))) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (port_state.active_fec == NBL_PORT_FEC_OFF) + active_fec = ETHTOOL_FEC_OFF; + if (port_state.active_fec == NBL_PORT_FEC_RS) + active_fec = ETHTOOL_FEC_RS; + if (port_state.active_fec == NBL_PORT_FEC_BASER) + active_fec = ETHTOOL_FEC_BASER; + + if (net_resource_mgt->configured_fec) + fec = net_resource_mgt->configured_fec; + else if (autoneg) + fec = ETHTOOL_FEC_AUTO; + else + fec = active_fec; + + if (port_state.port_advertising & BIT(NBL_PORT_CAP_AUTONEG)) + fec = ETHTOOL_FEC_AUTO; + + fecparam->fec = fec; + fecparam->active_fec = active_fec; + + return 0; +} + +static int nbl_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + static u32 led_ctrl_reg; + enum nbl_led_reg_ctrl led_ctrl_op; + u8 eth_id; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + led_ctrl_op = NBL_LED_REG_ACTIVE; + break; + case ETHTOOL_ID_ON: + led_ctrl_op = NBL_LED_REG_ON; + break; + case ETHTOOL_ID_OFF: + led_ctrl_op = NBL_LED_REG_OFF; + break; + case ETHTOOL_ID_INACTIVE: + led_ctrl_op = NBL_LED_REG_INACTIVE; + break; + default: + return 0; + } + return disp_ops->ctrl_port_led(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, led_ctrl_op, &led_ctrl_reg); +} + +static int nbl_nway_reset(struct net_device *netdev) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_port_state port_state = {0}; + int ret; + u8 eth_id; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + eth_id = NBL_COMMON_TO_ETH_ID(serv_mgt->common); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + ret = nbl_serv_get_port_state(serv_mgt, &port_state); + if (ret) { + netdev_err(netdev, "Get port_state failed %d\n", ret); + return -EIO; + } + + if (!port_state.module_inplace) { + netdev_err(netdev, "Optical module is not in place\n"); + return -EOPNOTSUPP; + } + + net_resource_mgt->configured_fec = 0; + net_resource_mgt->configured_speed = + nbl_conver_portrate_to_speed(port_state.port_max_rate); + + return disp_ops->nway_reset(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +static void nbl_rep_stats_fill_strings(struct net_device *netdev, u8 *data) +{ + char *p = (char *)data; + + snprintf(p, ETH_GSTRING_LEN, "tx_packets"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_bytes"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_packets"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_bytes"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_dropped"); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_dropped"); + p += ETH_GSTRING_LEN; +} + +static void nbl_rep_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + nbl_rep_stats_fill_strings(netdev, data); +} + +static int nbl_rep_get_sset_count(struct net_device *netdev, int sset) +{ + u32 total_queues = 0; + + if (sset == ETH_SS_STATS) { + total_queues = NBL_REP_PER_VSI_QUEUE_NUM * 2; + return total_queues * (sizeof(struct nbl_rep_stats) / sizeof(u64)); + } else { + return -EOPNOTSUPP; + } +} + +static void +nbl_rep_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + 
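/* Values are written in the order declared by nbl_rep_stats_fill_strings(); judging by that ordering, the boolean passed to get_rep_stats() selects tx (true) vs rx (false) counters. */ +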
struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_rep_stats rep_stats = {0}; + int i = 0; + + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->rep->rep_vsi_id, &rep_stats, true); + data[i++] = rep_stats.packets; + data[i++] = rep_stats.bytes; + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->rep->rep_vsi_id, &rep_stats, false); + data[i++] = rep_stats.packets; + data[i++] = rep_stats.bytes; + nbl_serv_get_rep_drop_stats(serv_mgt, priv->rep->rep_vsi_id, &rep_stats); + data[i] = rep_stats.dropped; +} + +/* NBL_SERV_ETHTOOL_OPS_TBL(ops_name, func) + * + * Use X macros to reduce the setup and removal code. + */ +#define NBL_SERV_ETHTOOL_OPS_TBL \ +do { \ + NBL_SERV_SET_ETHTOOL_OPS(get_drvinfo, nbl_get_drvinfo); \ + NBL_SERV_SET_ETHTOOL_OPS(get_strings, nbl_get_strings); \ + NBL_SERV_SET_ETHTOOL_OPS(get_sset_count, nbl_get_sset_count); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ethtool_stats, nbl_get_ethtool_stats); \ + NBL_SERV_SET_ETHTOOL_OPS(get_module_eeprom, nbl_get_module_eeprom); \ + NBL_SERV_SET_ETHTOOL_OPS(get_module_info, nbl_get_module_info); \ + NBL_SERV_SET_ETHTOOL_OPS(get_eeprom_length, nbl_get_eeprom_length); \ + NBL_SERV_SET_ETHTOOL_OPS(get_eeprom, nbl_get_eeprom); \ + NBL_SERV_SET_ETHTOOL_OPS(get_channels, nbl_get_channels); \ + NBL_SERV_SET_ETHTOOL_OPS(set_channels, nbl_set_channels); \ + NBL_SERV_SET_ETHTOOL_OPS(get_link, nbl_get_link); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ksettings, nbl_get_ksettings); \ + NBL_SERV_SET_ETHTOOL_OPS(set_ksettings, nbl_set_ksettings); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ringparam, nbl_get_ringparam); \ + NBL_SERV_SET_ETHTOOL_OPS(set_ringparam, nbl_set_ringparam); \ + NBL_SERV_SET_ETHTOOL_OPS(get_coalesce, nbl_get_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(set_coalesce, nbl_set_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxnfc, nbl_get_rxnfc); \ + NBL_SERV_SET_ETHTOOL_OPS(set_rxnfc, nbl_set_rxnfc); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_indir_size, nbl_get_rxfh_indir_size); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxfh_key_size, nbl_get_rxfh_key_size); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rxfh, nbl_get_rxfh); \ + NBL_SERV_SET_ETHTOOL_OPS(get_msglevel, nbl_get_msglevel); \ + NBL_SERV_SET_ETHTOOL_OPS(set_msglevel, nbl_set_msglevel); \ + NBL_SERV_SET_ETHTOOL_OPS(get_regs_len, nbl_get_regs_len); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ethtool_dump_regs, nbl_get_ethtool_dump_regs); \ + NBL_SERV_SET_ETHTOOL_OPS(get_per_queue_coalesce, nbl_get_per_queue_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(set_per_queue_coalesce, nbl_set_per_queue_coalesce); \ + NBL_SERV_SET_ETHTOOL_OPS(self_test, nbl_self_test); \ + NBL_SERV_SET_ETHTOOL_OPS(get_priv_flags, nbl_get_priv_flags); \ + NBL_SERV_SET_ETHTOOL_OPS(set_priv_flags, nbl_set_priv_flags); \ + NBL_SERV_SET_ETHTOOL_OPS(set_pause_param, nbl_set_pause_param); \ + NBL_SERV_SET_ETHTOOL_OPS(get_pause_param, nbl_get_pause_param); \ + NBL_SERV_SET_ETHTOOL_OPS(set_fec_param, nbl_set_fec_param); \ + NBL_SERV_SET_ETHTOOL_OPS(get_fec_param, nbl_get_fec_param); \ + NBL_SERV_SET_ETHTOOL_OPS(get_ts_info, ethtool_op_get_ts_info); \ + NBL_SERV_SET_ETHTOOL_OPS(set_phys_id, nbl_set_phys_id); \ + NBL_SERV_SET_ETHTOOL_OPS(nway_reset, nbl_nway_reset); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rep_strings, nbl_rep_get_strings); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rep_sset_count, nbl_rep_get_sset_count); \ + NBL_SERV_SET_ETHTOOL_OPS(get_rep_ethtool_stats, nbl_rep_get_ethtool_stats); \ +} while (0) + +void nbl_serv_setup_ethtool_ops(struct nbl_service_ops 
*serv_ops) +{ +#define NBL_SERV_SET_ETHTOOL_OPS(name, func) do {serv_ops->NBL_NAME(name) = func; ; } while (0) + NBL_SERV_ETHTOOL_OPS_TBL; +#undef NBL_SERV_SET_ETHTOOL_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..aa17c7aa08d58b0d1f264b4d134edbcce9733eda --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ethtool.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_ETHTOOL_H_ +#define _NBL_ETHTOOL_H_ + +#include "nbl_service.h" + +#define NBL_SELF_TEST_TIME_GAP 5 /* 5 seconds */ +#define NBL_SELF_TEST_BUFF_SIZE 128 +#define NBL_SELF_TEST_PADDING_DATA_1 0xFF +#define NBL_SELF_TEST_PADDING_DATA_2 0xA5 +#define NBL_SELF_TEST_POS_1 3 +#define NBL_SELF_TEST_POS_2 10 +#define NBL_SELF_TEST_POS_3 12 +#define NBL_SELF_TEST_BYTE_1 0xBE +#define NBL_SELF_TEST_BYTE_2 0xAF +#define NBL_SELF_TEST_PKT_NUM 32 + +#define NBL_SELF_TEST_Q_NUM 1 + +enum nbl_eth_lb_enable { + NBL_ETH_LB_OFF, + NBL_ETH_LB_ON, +}; + +enum nbl_ethtool_lb_test_err_code { + NBL_LB_ERR_NON = 0, + NBL_LB_ERR_RING_SETUP, + NBL_LB_ERR_LB_MODE_SETUP, + NBL_LB_ERR_SKB_ALLOC, + NBL_LB_ERR_TX_FAIL, + NBL_LB_ERR_RX_FAIL +}; + +void nbl_serv_update_stats(struct nbl_service_mgt *serv_mgt, bool ethtool); +void nbl_serv_setup_ethtool_ops(struct nbl_service_ops *serv_ops_tbl); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c new file mode 100644 index 0000000000000000000000000000000000000000..c47bad5765737bef8bdfe6b164b1c7f26f27edb4 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.c @@ -0,0 +1,135 @@ +#include +#include +#include +#include +#include "nbl_hwmon.h" + +static const char * const nbl_hwmon_sensor_name[] = { + "Sensor0", + "Module0", + "Module1", + "Module2", + "Module3", +}; + +static umode_t nbl_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + return NBL_HWMON_VISIBLE; +} + +static int nbl_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct nbl_adapter *adapter = dev_get_drvdata(dev); + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + enum nbl_hwmon_type hwmon_type; + u32 temp; + + switch (channel) { + case NBL_HWMON_CHIP_SENSOR: + switch (attr) { + case hwmon_temp_input: + hwmon_type = NBL_HWMON_TEMP_INPUT; + break; + case hwmon_temp_max: + hwmon_type = NBL_HWMON_TEMP_MAX; + break; + case hwmon_temp_crit: + hwmon_type = NBL_HWMON_TEMP_CRIT; + break; + case hwmon_temp_highest: + hwmon_type = NBL_HWMON_TEMP_HIGHEST; + break; + default: + return -EOPNOTSUPP; + } + temp = serv_ops->get_chip_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + hwmon_type, channel); + *val = temp; + return 0; + case NBL_HWMON_LIGHT_MODULE: + switch (attr) { + case hwmon_temp_input: + hwmon_type = NBL_HWMON_TEMP_INPUT; + break; + case hwmon_temp_max: + hwmon_type = NBL_HWMON_TEMP_MAX; + break; + case hwmon_temp_crit: + hwmon_type = NBL_HWMON_TEMP_CRIT; + break; + default: + return -EOPNOTSUPP; + } + temp = 
serv_ops->get_module_temperature(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, hwmon_type); + *val = temp; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static __maybe_unused int nbl_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct nbl_adapter *adapter = dev_get_drvdata(dev); + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 func_id = NBL_COMMON_TO_PCI_FUNC_ID(common); + + switch (channel) { + case NBL_HWMON_CHIP_SENSOR: + *str = nbl_hwmon_sensor_name[channel]; + return 0; + case NBL_HWMON_LIGHT_MODULE: + *str = nbl_hwmon_sensor_name[channel + func_id]; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_channel_info *nbl_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_HIGHEST | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_LABEL), + NULL +}; + +static const struct hwmon_ops nbl_hwmon_ops = { + .is_visible = nbl_hwmon_is_visible, + .read = nbl_hwmon_read, + .read_string = nbl_hwmon_read_string, +}; + +static const struct hwmon_chip_info nbl_hwmon_chip_info = { + .ops = &nbl_hwmon_ops, + .info = nbl_hwmon_info, +}; + +int nbl_dev_setup_hwmon(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + struct device *dev = NBL_DEV_MGT_TO_DEV(dev_mgt); + + common_dev->hwmon_dev = hwmon_device_register_with_info(dev, "nbl", adapter, + &nbl_hwmon_chip_info, NULL); + + return PTR_ERR_OR_ZERO(common_dev->hwmon_dev); +} + +void nbl_dev_remove_hwmon(struct nbl_adapter *adapter) +{ + struct nbl_dev_mgt *dev_mgt = (struct nbl_dev_mgt *)NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_dev_common *common_dev = NBL_DEV_MGT_TO_COMMON_DEV(dev_mgt); + + if (common_dev->hwmon_dev) + hwmon_device_unregister(common_dev->hwmon_dev); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h new file mode 100644 index 0000000000000000000000000000000000000000..5affd6cf993bc5fbd945647a7057438ff4b1b1bb --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_hwmon.h @@ -0,0 +1,12 @@ + +#ifndef _NBL_HWMON_H +#define _NBL_HWMON_H + +#include "nbl_dev.h" + +#define NBL_HWMON_TEMP_OFF 16 +#define NBL_HWMON_VISIBLE 0444 +#define NBL_HWMON_CHIP_SENSOR 0 +#define NBL_HWMON_LIGHT_MODULE 1 + +#endif /*_NBL_HWMON_H*/ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c new file mode 100644 index 0000000000000000000000000000000000000000..bdfd10e1c1ac6b3bfdd113d579b01af058802e20 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.c @@ -0,0 +1,577 @@ +#include "nbl_ipsec.h" +#ifdef CONFIG_TLS_DEVICE +static int nbl_validate_xfrm_state(struct net_device *netdev, struct xfrm_state *x) +{ + if (x->id.proto != IPPROTO_ESP) { + netdev_err(netdev, "Only ESP xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (x->props.aalgo != SADB_AALG_NONE) { + netdev_err(netdev, "Cannot offload authenticated xfrm states\n"); + return -EINVAL; + } + + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV8 && + x->props.ealgo != SADB_X_EALG_AES_GCM_ICV12 && + x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) { + netdev_err(netdev, 
"Only aes-gcm/sm4 xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (x->props.family != AF_INET && x->props.family != AF_INET6) { + netdev_err(netdev, "Only IPv4/6 xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) { + netdev_err(netdev, "Only transport and tunnel xfrm state may be offloaded\n"); + return -EINVAL; + } + + if (!x->aead) { + netdev_err(netdev, "Cannot offload xfrm state without aead\n"); + return -EINVAL; + } + + if (x->aead->alg_key_len != NBL_IPSEC_AES_128_ALG_LEN && + x->aead->alg_key_len != NBL_IPSEC_AES_256_ALG_LEN) { + netdev_err(netdev, "Cannot offload xfrm key length other than 128/256 bit\n"); + return -EINVAL; + } + + if (x->aead->alg_icv_len != NBL_IPSEC_ICV_LEN_64 && + x->aead->alg_icv_len != NBL_IPSEC_ICV_LEN_96 && + x->aead->alg_icv_len != NBL_IPSEC_ICV_LEN_128) { + netdev_err(netdev, "Cannot offload xfrm icv length other than 64/96/128 bit\n"); + return -EINVAL; + } + + if (x->replay_esn && x->replay_esn->replay_window && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_32 && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_64 && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_128 && + x->replay_esn->replay_window != NBL_IPSEC_WINDOW_256) { + netdev_err(netdev, + "Cannot offload xfrm replay_window other than 32/64/128/256 bit\n"); + return -EINVAL; + } + + if (!(x->props.flags & XFRM_STATE_ESN) && x->props.replay_window && + x->props.replay_window != NBL_IPSEC_WINDOW_32 && + x->props.replay_window != NBL_IPSEC_WINDOW_64 && + x->props.replay_window != NBL_IPSEC_WINDOW_128 && + x->props.replay_window != NBL_IPSEC_WINDOW_256) { + netdev_err(netdev, + "Cannot offload xfrm replay_window other than 32/64/128/256 bit\n"); + return -EINVAL; + } + + if (!x->geniv) { + netdev_err(netdev, "Cannot offload xfrm state without geniv\n"); + return -EINVAL; + } + + if (strcmp(x->geniv, "seqiv")) { + netdev_err(netdev, "Cannot offload xfrm state with geniv other than seqiv\n"); + return -EINVAL; + } + + if ((x->lft.hard_byte_limit != XFRM_INF || x->lft.soft_byte_limit != XFRM_INF) && + (x->lft.hard_packet_limit != XFRM_INF || x->lft.soft_packet_limit != XFRM_INF)) { + netdev_err(netdev, + "Offloaded xfrm state does not support both byte & packet limits\n"); + return -EINVAL; + } + + if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit && + x->lft.soft_byte_limit != XFRM_INF) { + netdev_err(netdev, "Hard byte limit must be greater than soft limit\n"); + return -EINVAL; + } + + if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit && + x->lft.soft_packet_limit != XFRM_INF) { + netdev_err(netdev, "Hard packet limit must be greater than soft limit\n"); + return -EINVAL; + } + + return 0; +} + +static void nbl_ipsec_update_esn_state(struct xfrm_state *x, struct nbl_ipsec_esn_state *esn_state) +{ + bool esn = !!(x->props.flags & XFRM_STATE_ESN); + bool inbound = !!(x->xso.flags & XFRM_OFFLOAD_INBOUND); + u32 bottom = 0; + + if (!esn) { + esn_state->enable = 0; + if (!inbound) { + esn_state->sn = x->replay.oseq + 1; + esn_state->wrap_en = (x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP); + return; + } + + esn_state->sn = x->replay.seq + 1; + if (x->props.replay_window) { + esn_state->window_en = 1; + esn_state->option = ilog2(x->props.replay_window / NBL_IPSEC_WINDOW_32); + } + return; + } + + esn_state->enable = 1; + if (!inbound) { + esn_state->sn = x->replay_esn->oseq + 1; + esn_state->esn = x->replay_esn->oseq_hi; + return; + } + + if (x->replay_esn->seq >= 
x->replay_esn->replay_window) + bottom = x->replay_esn->seq - x->replay_esn->replay_window + 1; + + if (x->replay_esn->seq < NBL_IPSEC_REPLAY_MID_SEQ) + esn_state->overlap = 1; + + esn_state->sn = x->replay_esn->seq + 1; + esn_state->esn = xfrm_replay_seqhi(x, htonl(bottom)); + if (x->replay_esn->replay_window) { + esn_state->window_en = 1; + esn_state->option = ilog2(x->replay_esn->replay_window / NBL_IPSEC_WINDOW_32); + } +} + +static void nbl_ipsec_init_cfg_info(struct xfrm_state *x, struct nbl_ipsec_cfg_info *cfg_info) +{ + cfg_info->sa_key.family = x->props.family; + cfg_info->sa_key.mark = x->mark.v & x->mark.m; + cfg_info->sa_key.spi = x->id.spi; + cfg_info->vld = true; + memcpy(&cfg_info->sa_key.daddr, x->id.daddr.a6, sizeof(x->id.daddr.a6)); + + if (x->lft.hard_byte_limit != XFRM_INF) { + cfg_info->limit_type = NBL_IPSEC_LIFETIME_BYTE; + cfg_info->hard_limit = x->lft.hard_byte_limit; + if (x->lft.soft_byte_limit != XFRM_INF) + cfg_info->soft_limit = x->lft.soft_byte_limit; + } + + if (x->lft.hard_packet_limit != XFRM_INF) { + cfg_info->limit_type = NBL_IPSEC_LIFETIME_PACKET; + cfg_info->hard_limit = x->lft.hard_packet_limit; + if (x->lft.soft_packet_limit != XFRM_INF) + cfg_info->soft_limit = x->lft.soft_packet_limit; + } + + if (cfg_info->hard_limit == 0) + return; + if (cfg_info->soft_limit == 0) + cfg_info->soft_limit = NBL_GET_SOFT_BY_HARD(cfg_info->hard_limit); + + cfg_info->limit_enable = 1; + cfg_info->hard_round = cfg_info->hard_limit >> NBL_IPSEC_LIFETIME_ROUND; + cfg_info->hard_remain = cfg_info->hard_limit & NBL_IPSEC_LIFETIME_REMAIN; + cfg_info->soft_round = cfg_info->soft_limit >> NBL_IPSEC_LIFETIME_ROUND; + cfg_info->soft_remain = cfg_info->soft_limit & NBL_IPSEC_LIFETIME_REMAIN; + + if (cfg_info->hard_round <= 1) { + cfg_info->lft_cnt = cfg_info->hard_limit; + cfg_info->lft_diff = cfg_info->hard_limit - cfg_info->soft_limit; + cfg_info->hard_round = 0; + cfg_info->soft_round = 0; + } else { + cfg_info->lft_cnt = (1 << NBL_IPSEC_LIFETIME_ROUND) + cfg_info->soft_remain; + cfg_info->lft_diff = (1 << NBL_IPSEC_LIFETIME_ROUND); + } +} + +static void nbl_ipsec_build_accel_xfrm_attrs(struct xfrm_state *x, + struct nbl_accel_esp_xfrm_attrs *attrs) +{ + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; + struct aead_geniv_ctx *geniv_ctx; + unsigned int key_len, icv_len; + int i; + u8 key[NBL_IPSEC_KEY_LEN_TOTAL] = {0}; + __be32 salt; + + /* key */ + key_len = NBL_GET_KEYLEN_BY_ALG(x->aead->alg_key_len); + for (i = 0; i < key_len; i++) + key[key_len - i - 1] = x->aead->alg_key[i]; + memcpy(aes_gcm->aes_key, key, key_len); + if (strncmp(x->aead->alg_name, "rfc4106(gcm(aes))", sizeof(x->aead->alg_name)) == 0) { + if (key_len == NBL_IPSEC_AES128_KEY_LEN) + aes_gcm->crypto_type = NBL_IPSEC_AES_GCM_128; + else + aes_gcm->crypto_type = NBL_IPSEC_AES_GCM_256; + } else { + aes_gcm->crypto_type = NBL_IPSEC_SM4_GCM; + } + + /* salt and seq_iv */ + geniv_ctx = crypto_aead_ctx(x->data); + memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, sizeof(u64)); + memcpy(&salt, x->aead->alg_key + key_len, sizeof(u32)); + aes_gcm->salt = be32_to_cpu(salt); + + /* icv len */ + icv_len = x->aead->alg_icv_len; + if (icv_len == NBL_IPSEC_ICV_LEN_64) + aes_gcm->icv_len = NBL_IPSEC_ICV_64_TYPE; + else if (icv_len == NBL_IPSEC_ICV_LEN_96) + aes_gcm->icv_len = NBL_IPSEC_ICV_96_TYPE; + else + aes_gcm->icv_len = NBL_IPSEC_ICV_128_TYPE; + + /* tunnel mode */ + attrs->tunnel_mode = x->props.mode; + /* spi */ + attrs->spi = be32_to_cpu(x->id.spi); + + /* nat traversal */ + if (x->encap) { + attrs->nat_flag = 1; + 
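/* x->encap is set when NAT traversal is in use; these are the
+		 * ESP-in-UDP encapsulation ports (RFC 3948).
+		 */
+		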
attrs->sport = be16_to_cpu(x->encap->encap_sport); + attrs->dport = be16_to_cpu(x->encap->encap_dport); + } + + /* source, destination ips */ + memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr)); + memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr)); + attrs->is_ipv6 = (x->props.family != AF_INET); +} + +static void nbl_ipsec_free_tx_index(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + netdev_info(netdev, "nbl ipsec egress free index %u\n", index); + disp_ops->free_ipsec_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static void nbl_ipsec_free_rx_index(struct net_device *netdev, u32 index) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + netdev_info(netdev, "nbl ipsec ingress free index %u\n", index); + disp_ops->free_ipsec_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index); +} + +static int nbl_ipsec_alloc_tx_index(struct net_device *netdev, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_common_info *common; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + cfg_info->vsi = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->alloc_ipsec_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), cfg_info); +} + +static int nbl_ipsec_alloc_rx_index(struct net_device *netdev, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_common_info *common; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + cfg_info->vsi = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->alloc_ipsec_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), cfg_info); +} + +static void nbl_ipsec_cfg_tx_sad(struct net_device *netdev, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_ipsec_tx_sad(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, sa_entry); +} + +static void nbl_ipsec_cfg_rx_sad(struct net_device *netdev, u32 index, + struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_ipsec_rx_sad(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, sa_entry); +} + +static int nbl_ipsec_add_rx_flow(struct net_device *netdev, u32 index, + struct nbl_accel_esp_xfrm_attrs *attrs) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 data[NBL_IPSEC_SPI_DIP__LEN] = {0}; + u32 dip[NBL_IPSEC_FLOW_IP_LEN] = {0}; + int i; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + memcpy(data, &attrs->spi, sizeof(attrs->spi)); + if (attrs->is_ipv6) { + for (i = 0; i < NBL_IPSEC_FLOW_IP_LEN; i++) + dip[i] = ntohl(attrs->daddr.a6[NBL_IPSEC_FLOW_IP_LEN - 1 - i]); + } else { + dip[0] = ntohl(attrs->daddr.a4); + } + 
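/* rx flow key layout: data[0] holds the SPI, data[1..4] the destination
+	 * IP (for IPv6 the 32-bit words are stored in reverse order above).
+	 */
+	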
memcpy(data + 1, dip, sizeof(dip));
+
+	return disp_ops->add_ipsec_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, data,
+					   NBL_COMMON_TO_VSI_ID(common));
+}
+
+static int nbl_ipsec_add_tx_flow(struct net_device *netdev, u32 index, struct xfrm_selector *sel)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	struct nbl_common_info *common;
+	u32 data[NBL_IPSEC_FLOW_TOTAL_LEN] = {0};
+	u32 sip[NBL_IPSEC_FLOW_IP_LEN] = {0};
+	u32 dip[NBL_IPSEC_FLOW_IP_LEN] = {0};
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+
+	switch (sel->family) {
+	case AF_INET:
+		data[0] = AF_INET;
+		data[NBL_IPSEC_FLOW_SIP_OFF] = ntohl(sel->saddr.a4);
+		data[NBL_IPSEC_FLOW_DIP_OFF] = ntohl(sel->daddr.a4);
+		break;
+	case AF_INET6:
+		data[0] = AF_INET6;
+		be32_to_cpu_array(sip, sel->saddr.a6, NBL_IPSEC_FLOW_IP_LEN);
+		be32_to_cpu_array(dip, sel->daddr.a6, NBL_IPSEC_FLOW_IP_LEN);
+		memcpy(data + NBL_IPSEC_FLOW_SIP_OFF, sip, sizeof(sip));
+		memcpy(data + NBL_IPSEC_FLOW_DIP_OFF, dip, sizeof(dip));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return disp_ops->add_ipsec_tx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, data,
+					   NBL_COMMON_TO_VSI_ID(common));
+}
+
+static int nbl_xfrm_add_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
+{
+	struct nbl_ipsec_sa_entry *sa_entry;
+	struct net_device *netdev = x->xso.dev;
+	int index;
+	int ret = 0;
+
+	if (nbl_validate_xfrm_state(netdev, x))
+		return -EINVAL;
+
+	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+	if (!sa_entry)
+		return -ENOMEM;
+
+	nbl_ipsec_init_cfg_info(x, &sa_entry->cfg_info);
+	nbl_ipsec_update_esn_state(x, &sa_entry->esn_state);
+	nbl_ipsec_build_accel_xfrm_attrs(x, &sa_entry->attrs);
+
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+		index = nbl_ipsec_alloc_rx_index(netdev, &sa_entry->cfg_info);
+		if (index < 0) {
+			netdev_err(netdev, "Not enough rx session resources\n");
+			kfree(sa_entry);
+			return -ENOSPC;
+		}
+		netdev_info(netdev, "nbl ipsec ingress index %d\n", index);
+
+		ret = nbl_ipsec_add_rx_flow(netdev, index, &sa_entry->attrs);
+		if (ret) {
+			netdev_err(netdev, "Not enough rx flow resources for %d\n", index);
+			nbl_ipsec_free_rx_index(netdev, index);
+			kfree(sa_entry);
+			return -ENOSPC;
+		}
+		nbl_ipsec_cfg_rx_sad(netdev, index, sa_entry);
+	} else {
+		index = nbl_ipsec_alloc_tx_index(netdev, &sa_entry->cfg_info);
+		if (index < 0) {
+			netdev_err(netdev, "Not enough tx session resources\n");
+			kfree(sa_entry);
+			return -ENOSPC;
+		}
+		netdev_info(netdev, "nbl ipsec egress index %d\n", index);
+
+		ret = nbl_ipsec_add_tx_flow(netdev, index, &x->sel);
+		if (ret) {
+			netdev_err(netdev, "Not enough tx flow resources for %d\n", index);
+			nbl_ipsec_free_tx_index(netdev, index);
+			kfree(sa_entry);
+			return -ENOSPC;
+		}
+		nbl_ipsec_cfg_tx_sad(netdev, index, sa_entry);
+	}
+
+	sa_entry->index = (u32)index;
+	x->xso.offload_handle = (unsigned long)sa_entry;
+
+	return 0;
+}
+
+static void nbl_ipsec_del_tx_flow(struct net_device *netdev, u32 index)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->del_ipsec_tx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index);
+}
+
+static void nbl_ipsec_del_rx_flow(struct net_device *netdev, u32 index)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = 
NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->del_ipsec_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index);
+}
+
+static void nbl_xfrm_del_state(struct xfrm_state *x)
+{
+	struct nbl_ipsec_sa_entry *sa_entry = (struct nbl_ipsec_sa_entry *)x->xso.offload_handle;
+	struct net_device *netdev = x->xso.dev;
+
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+		nbl_ipsec_del_rx_flow(netdev, sa_entry->index);
+	else
+		nbl_ipsec_del_tx_flow(netdev, sa_entry->index);
+}
+
+static void nbl_xfrm_free_state(struct xfrm_state *x)
+{
+	struct nbl_ipsec_sa_entry *sa_entry = (struct nbl_ipsec_sa_entry *)x->xso.offload_handle;
+	struct net_device *netdev = x->xso.dev;
+
+	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+		nbl_ipsec_free_rx_index(netdev, sa_entry->index);
+	else
+		nbl_ipsec_free_tx_index(netdev, sa_entry->index);
+
+	kfree(sa_entry);
+}
+
+static bool nbl_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+#define NBL_IP_HEADER_LEN 5
+	if (x->props.family == AF_INET) {
+		if (ip_hdr(skb)->ihl != NBL_IP_HEADER_LEN)
+			return false;
+	} else {
+		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+			return false;
+	}
+
+	return true;
+}
+
+static void nbl_xfrm_advance_esn_state(struct xfrm_state *x)
+{
+	/* nothing to do */
+}
+
+static bool nbl_check_ipsec_status(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	return disp_ops->check_ipsec_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static void nbl_handle_dipsec_lft_event(struct nbl_service_mgt *serv_mgt)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	union nbl_ipsec_lft_info lft_info = {0};
+
+	lft_info.data = disp_ops->get_dipsec_lft_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (lft_info.soft_vld)
+		disp_ops->handle_dipsec_soft_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						    lft_info.soft_sad_index);
+
+	if (lft_info.hard_vld)
+		disp_ops->handle_dipsec_hard_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						    lft_info.hard_sad_index);
+}
+
+static void nbl_handle_uipsec_lft_event(struct nbl_service_mgt *serv_mgt)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	union nbl_ipsec_lft_info lft_info = {0};
+
+	lft_info.data = disp_ops->get_uipsec_lft_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (lft_info.soft_vld)
+		disp_ops->handle_uipsec_soft_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						    lft_info.soft_sad_index);
+
+	if (lft_info.hard_vld)
+		disp_ops->handle_uipsec_hard_expire(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						    lft_info.hard_sad_index);
+}
+
+static void nbl_handle_ipsec_event(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+
+	nbl_handle_dipsec_lft_event(serv_mgt);
+	nbl_handle_uipsec_lft_event(serv_mgt);
+}
+
+#define NBL_SERV_XFRM_OPS_TBL \
+do { \
+	NBL_SERV_SET_XFRM_OPS(add_xdo_dev_state, nbl_xfrm_add_state); \
+	NBL_SERV_SET_XFRM_OPS(delete_xdo_dev_state, nbl_xfrm_del_state); \
+	NBL_SERV_SET_XFRM_OPS(free_xdo_dev_state, nbl_xfrm_free_state); \
+	NBL_SERV_SET_XFRM_OPS(xdo_dev_offload_ok, nbl_offload_ok); \
+	NBL_SERV_SET_XFRM_OPS(xdo_dev_state_advance_esn, nbl_xfrm_advance_esn_state); \
+	NBL_SERV_SET_XFRM_OPS(check_ipsec_status, nbl_check_ipsec_status); \
+	NBL_SERV_SET_XFRM_OPS(handle_ipsec_event, nbl_handle_ipsec_event); \
+} while (0)
+
+void nbl_serv_setup_xfrm_ops(struct nbl_service_ops *serv_ops)
+{
+#define NBL_SERV_SET_XFRM_OPS(name, func) do { serv_ops->NBL_NAME(name) = func; } while (0)
+	
NBL_SERV_XFRM_OPS_TBL;
+#undef NBL_SERV_SET_XFRM_OPS
+}
+
+#else
+void nbl_serv_setup_xfrm_ops(struct nbl_service_ops *serv_ops)
+{
+}
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h
new file mode 100644
index 0000000000000000000000000000000000000000..747dcc057bdbfcff71a3f7d67da5e5da92ff02d6
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ipsec.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_IPSEC_H
+#define _NBL_IPSEC_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "nbl_service.h"
+#ifdef CONFIG_TLS_DEVICE
+#include <net/xfrm.h>
+#include <net/ipv6.h>
+#include <linux/pfkeyv2.h>
+#include <linux/log2.h>
+#include <crypto/internal/geniv.h>
+
+#define NBL_IPSEC_AES_128_ALG_LEN (128 + 32)
+#define NBL_IPSEC_AES_256_ALG_LEN (256 + 32)
+
+#define NBL_IPSEC_ICV_LEN_64 64
+#define NBL_IPSEC_ICV_LEN_96 96
+#define NBL_IPSEC_ICV_LEN_128 128
+
+#define NBL_IPSEC_WINDOW_32 32
+#define NBL_IPSEC_WINDOW_64 64
+#define NBL_IPSEC_WINDOW_128 128
+#define NBL_IPSEC_WINDOW_256 256
+
+#define NBL_IPSEC_LIFETIME_BYTE 0
+#define NBL_IPSEC_LIFETIME_PACKET 1
+#define NBL_IPSEC_LIFETIME_ROUND 31
+#define NBL_IPSEC_LIFETIME_REMAIN (0x7fffffff)
+#define NBL_IPSEC_REPLAY_MID_SEQ (0x80000000L)
+#define NBL_GET_SOFT_BY_HARD(hard) (((hard) >> 2) * 3)
+
+#define NBL_GET_KEYLEN_BY_ALG(alg_key_len) ((((alg_key_len) + 7) / 8) - 4)
+#define NBL_IPSEC_KEY_LEN_TOTAL 32
+#define NBL_IPSEC_AES128_KEY_LEN 16
+#define NBL_IPSEC_AES_GCM_128 0
+#define NBL_IPSEC_AES_GCM_256 1
+#define NBL_IPSEC_SM4_GCM 2
+
+#define NBL_IPSEC_ICV_64_TYPE 0
+#define NBL_IPSEC_ICV_96_TYPE 1
+#define NBL_IPSEC_ICV_128_TYPE 2
+
+#define NBL_IPSEC_SPI_DIP__LEN 5
+#define NBL_IPSEC_FLOW_TOTAL_LEN 12
+#define NBL_IPSEC_FLOW_IP_LEN 4
+#define NBL_IPSEC_FLOW_SIP_OFF 1
+#define NBL_IPSEC_FLOW_DIP_OFF 5
+
+#ifndef XFRM_SA_XFLAG_OSEQ_MAY_WRAP
+#define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2
+#endif
+#endif
+
+void nbl_serv_setup_xfrm_ops(struct nbl_service_ops *serv_ops_tbl);
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c
new file mode 100644
index 0000000000000000000000000000000000000000..7f8f005d170fa9e8d41cd053f81cc0fb3e5814a7
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.c
@@ -0,0 +1,391 @@
+#include "nbl_ktls.h"
+#ifdef CONFIG_TLS_DEVICE
+
+static void nbl_ktls_free_tx_index(struct net_device *netdev, u32 index)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->free_ktls_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index);
+}
+
+static void nbl_ktls_free_rx_index(struct net_device *netdev, u32 index)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->free_ktls_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index);
+}
+
+static int nbl_ktls_alloc_tx_index(struct net_device *netdev)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	struct nbl_common_info *common;
+	u16 vsi;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	vsi = NBL_COMMON_TO_VSI_ID(common);
+
+	return disp_ops->alloc_ktls_tx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi);
+}
+
+static int 
nbl_ktls_alloc_rx_index(struct net_device *netdev)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	struct nbl_common_info *common;
+	u16 vsi;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	vsi = NBL_COMMON_TO_VSI_ID(common);
+
+	return disp_ops->alloc_ktls_rx_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi);
+}
+
+static void nbl_ktls_cfg_tx_keymat(struct net_device *netdev, u32 index,
+				   struct tls_crypto_info *crypto_info,
+				   struct nbl_ktls_offload_context_tx *priv_tx)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_128;
+	struct tls12_crypto_info_aes_gcm_256 *crypto_info_aes_256;
+	struct tls12_crypto_info_sm4_gcm *crypto_info_sm4;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	switch (crypto_info->cipher_type) {
+	case TLS_CIPHER_AES_GCM_128:
+		crypto_info_aes_128 = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+		disp_ops->cfg_ktls_tx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index,
+					     NBL_KTLS_AES_GCM_128,
+					     crypto_info_aes_128->salt,
+					     crypto_info_aes_128->key,
+					     TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+		memcpy(priv_tx->iv, crypto_info_aes_128->iv, NBL_KTLS_IV_LEN);
+		memcpy(priv_tx->rec_num, crypto_info_aes_128->rec_seq, NBL_KTLS_REC_LEN);
+		break;
+	case TLS_CIPHER_AES_GCM_256:
+		crypto_info_aes_256 = (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+
+		disp_ops->cfg_ktls_tx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index,
+					     NBL_KTLS_AES_GCM_256,
+					     crypto_info_aes_256->salt,
+					     crypto_info_aes_256->key,
+					     TLS_CIPHER_AES_GCM_256_KEY_SIZE);
+		memcpy(priv_tx->iv, crypto_info_aes_256->iv, NBL_KTLS_IV_LEN);
+		memcpy(priv_tx->rec_num, crypto_info_aes_256->rec_seq, NBL_KTLS_REC_LEN);
+		break;
+	case TLS_CIPHER_SM4_GCM:
+		crypto_info_sm4 = (struct tls12_crypto_info_sm4_gcm *)crypto_info;
+
+		disp_ops->cfg_ktls_tx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index,
+					     NBL_KTLS_SM4_GCM,
+					     crypto_info_sm4->salt,
+					     crypto_info_sm4->key,
+					     TLS_CIPHER_SM4_GCM_KEY_SIZE);
+		memcpy(priv_tx->iv, crypto_info_sm4->iv, NBL_KTLS_IV_LEN);
+		memcpy(priv_tx->rec_num, crypto_info_sm4->rec_seq, NBL_KTLS_REC_LEN);
+		break;
+	}
+}
+
+static int nbl_ktls_add_tx(struct net_device *netdev, struct sock *sk,
+			   struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
+{
+	struct tls_context *tls_ctx;
+	struct nbl_ktls_offload_context_tx *priv_tx;
+	struct nbl_ktls_offload_context_tx **ctx;
+	int index;
+
+	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+	if (!priv_tx)
+		return -ENOMEM;
+
+	/* get unused index */
+	index = nbl_ktls_alloc_tx_index(netdev);
+	if (index < 0) {
+		netdev_err(netdev, "Not enough tx session resources\n");
+		kfree(priv_tx);
+		return -ENOSPC;
+	}
+
+	netdev_info(netdev, "nbl ktls egress index %d, start seq %u\n",
+		    index, start_offload_tcp_sn);
+	nbl_ktls_cfg_tx_keymat(netdev, index, crypto_info, priv_tx);
+
+	priv_tx->index = (u32)index;
+	priv_tx->expected_tcp = start_offload_tcp_sn;
+	tls_ctx = tls_get_ctx(sk);
+	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
+	priv_tx->ctx_post_pending = true;
+	ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+	*ctx = priv_tx;
+
+	return 0;
+}
+
+static void nbl_ktls_cfg_rx_keymat(struct net_device *netdev, u32 index,
+				   struct tls_crypto_info *crypto_info,
+				   struct nbl_ktls_offload_context_rx *priv_rx)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+ struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_128; + struct tls12_crypto_info_aes_gcm_256 *crypto_info_aes_256; + struct tls12_crypto_info_sm4_gcm *crypto_info_sm4; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: + crypto_info_aes_128 = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + + disp_ops->cfg_ktls_rx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_AES_GCM_128, + crypto_info_aes_128->salt, + crypto_info_aes_128->key, + TLS_CIPHER_AES_GCM_128_KEY_SIZE); + memcpy(priv_rx->rec_num, crypto_info_aes_128->rec_seq, NBL_KTLS_REC_LEN); + break; + case TLS_CIPHER_AES_GCM_256: + crypto_info_aes_256 = (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; + + disp_ops->cfg_ktls_rx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_AES_GCM_256, + crypto_info_aes_256->salt, + crypto_info_aes_256->key, + TLS_CIPHER_AES_GCM_256_KEY_SIZE); + memcpy(priv_rx->rec_num, crypto_info_aes_256->rec_seq, NBL_KTLS_REC_LEN); + break; + case TLS_CIPHER_SM4_GCM: + crypto_info_sm4 = (struct tls12_crypto_info_sm4_gcm *)crypto_info; + + disp_ops->cfg_ktls_rx_keymat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + NBL_KTLS_SM4_GCM, + crypto_info_sm4->salt, + crypto_info_sm4->key, + TLS_CIPHER_SM4_GCM_KEY_SIZE); + memcpy(priv_rx->rec_num, crypto_info_sm4->rec_seq, NBL_KTLS_REC_LEN); + break; + } +} + +static void nbl_ktls_cfg_rx_record(struct net_device *netdev, u32 index, + u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + netdev_info(netdev, "nbl ktls cfg index %u, tcp_seq %u, rec_num %llu, init %u.\n", + index, tcp_sn, rec_num, init); + disp_ops->cfg_ktls_rx_record(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, + tcp_sn, rec_num, init); +} + +static int nbl_ktls_add_rx_flow(struct net_device *netdev, u32 index, struct sock *sk) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + u32 data[NBL_KTLS_FLOW_TOTAL_LEN] = {0}; + u32 sip[NBL_KTLS_FLOW_IP_LEN] = {0}; + u32 dip[NBL_KTLS_FLOW_IP_LEN] = {0}; + + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + switch (sk->sk_family) { + case AF_INET: + data[NBL_KTLS_FLOW_TYPE_OFF] = AF_INET; + data[NBL_KTLS_FLOW_SIP_OFF] = ntohl(inet_sk(sk)->inet_daddr); + data[NBL_KTLS_FLOW_DIP_OFF] = ntohl(inet_sk(sk)->inet_rcv_saddr); + break; + case AF_INET6: + data[NBL_KTLS_FLOW_TYPE_OFF] = AF_INET6; + be32_to_cpu_array(sip, sk->sk_v6_daddr.s6_addr32, NBL_KTLS_FLOW_IP_LEN); + be32_to_cpu_array(dip, inet6_sk(sk)->saddr.s6_addr32, NBL_KTLS_FLOW_IP_LEN); + memcpy(data + NBL_KTLS_FLOW_SIP_OFF, sip, sizeof(sip)); + memcpy(data + NBL_KTLS_FLOW_DIP_OFF, dip, sizeof(dip)); + break; + default: + return -EINVAL; + } + + data[NBL_KTLS_FLOW_DPORT_OFF] = ntohs(inet_sk(sk)->inet_dport); + data[NBL_KTLS_FLOW_SPORT_OFF] = ntohs(inet_sk(sk)->inet_sport); + + return disp_ops->add_ktls_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index, data, + NBL_COMMON_TO_VSI_ID(common)); +} + +static int nbl_ktls_add_rx(struct net_device *netdev, struct sock *sk, + struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn) +{ + struct nbl_ktls_offload_context_rx *priv_rx; + struct nbl_ktls_offload_context_rx **ctx; + struct tls_context *tls_ctx; + int index; 
+	u64 rec_num;
+	int ret = 0;
+
+	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
+	if (!priv_rx)
+		return -ENOMEM;
+
+	/* get unused index */
+	index = nbl_ktls_alloc_rx_index(netdev);
+	if (index < 0) {
+		netdev_err(netdev, "Not enough rx session resources\n");
+		kfree(priv_rx);
+		return -ENOSPC;
+	}
+
+	netdev_info(netdev, "nbl ktls ingress index %d, expected seq %u\n",
+		    index, start_offload_tcp_sn);
+	ret = nbl_ktls_add_rx_flow(netdev, index, sk);
+	if (ret) {
+		netdev_err(netdev, "Not enough rx flow resources for %d\n", index);
+		nbl_ktls_free_rx_index(netdev, index);
+		kfree(priv_rx);
+		return -ENOSPC;
+	}
+	nbl_ktls_cfg_rx_keymat(netdev, index, crypto_info, priv_rx);
+	rec_num = be64_to_cpu(*(__be64 *)priv_rx->rec_num) - 1;
+	nbl_ktls_cfg_rx_record(netdev, index, start_offload_tcp_sn, rec_num, true);
+
+	priv_rx->index = (u32)index;
+	tls_ctx = tls_get_ctx(sk);
+	ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
+	*ctx = priv_rx;
+	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ);
+
+	return 0;
+}
+
+static int nbl_ktls_add(struct net_device *netdev, struct sock *sk,
+			enum tls_offload_ctx_dir direction,
+			struct tls_crypto_info *crypto_info,
+			u32 start_offload_tcp_sn)
+{
+	int err = 0;
+
+	if (crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128 &&
+	    crypto_info->cipher_type != TLS_CIPHER_SM4_GCM &&
+	    crypto_info->cipher_type != TLS_CIPHER_AES_GCM_256) {
+		netdev_info(netdev, "Unsupported cipher type %u\n", crypto_info->cipher_type);
+		return -EOPNOTSUPP;
+	}
+
+	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+		err = nbl_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn);
+	else
+		err = nbl_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn);
+
+	return err;
+}
+
+static void nbl_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
+{
+	struct nbl_ktls_offload_context_tx **ctx =
+		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+	struct nbl_ktls_offload_context_tx *priv_tx = *ctx;
+
+	netdev_info(netdev, "nbl ktls egress free index %u\n", priv_tx->index);
+	nbl_ktls_free_tx_index(netdev, priv_tx->index);
+	kfree(priv_tx);
+}
+
+static void nbl_ktls_del_rx_flow(struct net_device *netdev, u32 index)
+{
+	struct nbl_service_mgt *serv_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+
+	serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->del_ktls_rx_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), index);
+}
+
+static void nbl_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+{
+	struct nbl_ktls_offload_context_rx **ctx =
+		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
+	struct nbl_ktls_offload_context_rx *priv_rx = *ctx;
+
+	netdev_info(netdev, "nbl ktls ingress free index %u\n", priv_rx->index);
+	nbl_ktls_free_rx_index(netdev, priv_rx->index);
+	nbl_ktls_del_rx_flow(netdev, priv_rx->index);
+	kfree(priv_rx);
+}
+
+static void nbl_ktls_del(struct net_device *netdev, struct tls_context *tls_ctx,
+			 enum tls_offload_ctx_dir direction)
+{
+	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+		nbl_ktls_del_tx(netdev, tls_ctx);
+	else
+		nbl_ktls_del_rx(netdev, tls_ctx);
+}
+
+static int nbl_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
+			      u32 tcp_seq, u8 *rec_num)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct nbl_ktls_offload_context_rx **ctx =
+		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
+	struct nbl_ktls_offload_context_rx *priv = *ctx;
+
+	nbl_ktls_cfg_rx_record(netdev, priv->index, priv->tcp_seq,
+			       be64_to_cpu(*(__be64 *)rec_num), false);
+
+	
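/* init == false: refresh the already-initialized record state at the
+	 * resync point rather than setting it up from scratch.
+	 */
+	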
return 0;
+}
+
+static int nbl_ktls_resync(struct net_device *netdev, struct sock *sk,
+			   u32 tcp_seq, u8 *rec_num,
+			   enum tls_offload_ctx_dir direction)
+{
+	if (direction != TLS_OFFLOAD_CTX_DIR_RX)
+		return -EINVAL;
+
+	return nbl_ktls_rx_resync(netdev, sk, tcp_seq, rec_num);
+}
+
+#define NBL_SERV_KTLS_OPS_TBL \
+do { \
+	NBL_SERV_SET_KTLS_OPS(add_tls_dev, nbl_ktls_add); \
+	NBL_SERV_SET_KTLS_OPS(del_tls_dev, nbl_ktls_del); \
+	NBL_SERV_SET_KTLS_OPS(resync_tls_dev, nbl_ktls_resync); \
+} while (0)
+
+void nbl_serv_setup_ktls_ops(struct nbl_service_ops *serv_ops)
+{
+#define NBL_SERV_SET_KTLS_OPS(name, func) do { serv_ops->NBL_NAME(name) = func; } while (0)
+	NBL_SERV_KTLS_OPS_TBL;
+#undef NBL_SERV_SET_KTLS_OPS
+}
+
+#else
+
+void nbl_serv_setup_ktls_ops(struct nbl_service_ops *serv_ops) {}
+
+#endif /* CONFIG_TLS_DEVICE */
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h
new file mode 100644
index 0000000000000000000000000000000000000000..bed7435be17b3eb13f21f7ab186e36359b3abb8c
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_ktls.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_KTLS_H
+#define _NBL_KTLS_H
+
+#include "nbl_service.h"
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <net/sock.h>
+#include <net/inet_sock.h>
+#ifdef CONFIG_TLS_DEVICE
+#include <net/tls.h>
+
+#define NBL_KTLS_AES_GCM_128 0
+#define NBL_KTLS_AES_GCM_256 1
+#define NBL_KTLS_SM4_GCM 2
+#define NBL_KTLS_FLOW_TYPE_OFF 0
+#define NBL_KTLS_FLOW_SIP_OFF 1
+#define NBL_KTLS_FLOW_DIP_OFF 5
+#define NBL_KTLS_FLOW_DPORT_OFF 9
+#define NBL_KTLS_FLOW_SPORT_OFF 10
+#define NBL_KTLS_FLOW_IP_LEN 4
+#define NBL_KTLS_FLOW_TOTAL_LEN 12
+
+#endif /* CONFIG_TLS_DEVICE */
+
+void nbl_serv_setup_ktls_ops(struct nbl_service_ops *serv_ops_tbl);
+
+#endif /* _NBL_KTLS_H */
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c
new file mode 100644
index 0000000000000000000000000000000000000000..a67c256b0ab9d780e5bc22c301a0ed99c60935e5
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 nebula-matrix Limited.
+ * Author: Bennie Yan + */ + +#include "nbl_lag.h" +#include "nbl_dev.h" + +struct list_head lag_resource_head; +/* mutex for lag resource */ +struct mutex nbl_lag_mutex; + +static inline void init_lag_instance(struct nbl_lag_instance *lag_info, struct net_device *bond_dev) +{ + lag_info->bond_netdev = bond_dev; + INIT_LIST_HEAD(&lag_info->mem_list_head); + lag_info->linkup = 0; + lag_info->lag_enable = 0; + lag_info->lag_id = NBL_INVALID_LAG_ID; + memset(&lag_info->lag_upper_info, 0, sizeof(lag_info->lag_upper_info)); +} + +static struct nbl_lag_instance *find_lag_by_lagid(u32 board_key, u8 lag_id) +{ + struct nbl_lag_resource *find_resource = NULL; + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_tmp, *lag_info = NULL; + + if (!nbl_lag_id_valid(lag_id)) + goto ret; + + /* find the lag resource by the bus id, identify a card */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + if (lag_resource_tmp->board_key == board_key) { + find_resource = lag_resource_tmp; + break; + } + } + + if (!find_resource) + goto ret; + + /* find the lag instance by lag_id */ + list_for_each_entry(lag_tmp, &find_resource->lag_instance_head, instance_node) { + if (lag_tmp->lag_id == lag_id) { + lag_info = lag_tmp; + break; + } + } + +ret: + return lag_info; +} + +static struct nbl_lag_instance *find_lag_by_bonddev(u32 board_key, struct net_device *bond_dev) +{ + struct nbl_lag_resource *find_resource = NULL; + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_tmp, *lag_info = NULL; + + if (!bond_dev) + goto ret; + + /* find the lag resource by the bus id, identify a card */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + if (lag_resource_tmp->board_key == board_key) { + find_resource = lag_resource_tmp; + break; + } + } + + if (!find_resource) + goto ret; + + /* find the lag instance by bonddev */ + list_for_each_entry(lag_tmp, &find_resource->lag_instance_head, instance_node) { + if (lag_tmp->bond_netdev == bond_dev) { + lag_info = lag_tmp; + break; + } + } + +ret: + return lag_info; +} + +static struct nbl_lag_instance *alloc_lag_instance(u32 board_key, struct net_device *bond_dev, + struct nbl_lag_resource **find_resource) +{ + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_tmp, *lag_info = NULL; + + /* find the lag resource by the bus id, identify a card */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + if (lag_resource_tmp->board_key == board_key) { + *find_resource = lag_resource_tmp; + break; + } + } + + if (!(*find_resource)) + goto ret; + + /* find the lag instance by bond_dev */ + list_for_each_entry(lag_tmp, &(*find_resource)->lag_instance_head, instance_node) { + /* mark the idle lag instance */ + if (!lag_info && !lag_tmp->bond_netdev) + lag_info = lag_tmp; + if (lag_tmp->bond_netdev == bond_dev) { + lag_info = lag_tmp; + break; + } + } + /* if not found and no idle lag instance, then alloc a new lag instance */ + if (!lag_info) { + lag_info = kzalloc(sizeof(*lag_info), GFP_KERNEL); + if (!lag_info) + goto ret; + + init_lag_instance(lag_info, bond_dev); + list_add_tail(&lag_info->instance_node, &(*find_resource)->lag_instance_head); + } + +ret: + return lag_info; +} + +static void nbl_display_lag_info(struct nbl_dev_mgt *dev_mgt, u8 lag_id) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct net_device *current_netdev; + const char *member_name, *upper_name; + struct nbl_lag_member *mem_tmp; + struct nbl_lag_instance 
*lag_info = NULL; + u32 board_key; + + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + lag_info = find_lag_by_lagid(board_key, lag_id); + + if (!lag_info) + return; + + current_netdev = net_dev->netdev; + upper_name = lag_info->bond_netdev ? netdev_name(lag_info->bond_netdev) : "unset"; + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s: enabled is %u, lag_id is %u.\n", + upper_name, lag_info->lag_enable, lag_info->lag_id); + + if (lag_info && lag_info->lag_enable) { + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s: tx_type is %d, hash_type is %d.\n", upper_name, + lag_info->lag_upper_info.tx_type, + lag_info->lag_upper_info.hash_type); + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + member_name = current_netdev ? + netdev_name(current_netdev) : "unset"; + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "%s(%s): lag_id: %d, eth_id: %d, bonded: %d, linkup: %d, tx_enabled: %d.\n", + upper_name, member_name, mem_tmp->lag_id, + mem_tmp->logic_eth_id, mem_tmp->bonded, + mem_tmp->lower_state.link_up, + mem_tmp->lower_state.tx_enabled); + } + } +} + +static void nbl_lag_create_bond_adev(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_instance *lag_info) +{ + struct nbl_event_rdma_bond_update event_data; + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_lag_member *mem_tmp, *notify_mem = NULL; + int mem_num = 0; + int i = 0; + + memset(&event_data, 0, sizeof(event_data)); + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + event_data.param.member_list[mem_num].vsi_id = mem_tmp->vsi_id; + event_data.param.member_list[mem_num].eth_id = mem_tmp->eth_id; + mem_num++; + if (!notify_mem || notify_mem->eth_id > mem_tmp->eth_id) + notify_mem = mem_tmp; + } + + if (!notify_mem) { + nbl_err(common, NBL_DEBUG_MAIN, + "notify to create the bond adev failed, member count %u.\n", mem_num); + return; + } + + for (i = 0; i < mem_num; i++) { + event_data.subevent = NBL_SUBEVENT_RELEASE_ADEV; + /* Notify the dev to release the rdma adev first. */ + nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, + event_data.param.member_list[i].vsi_id, + NBL_COMMON_TO_BOARD_ID(common)); + } + + /* Notify the rdma dev to create the bond adev. 
*/
+	event_data.subevent = NBL_SUBEVENT_CREATE_BOND_ADEV;
+	event_data.param.bond_netdev = lag_info->bond_netdev;
+	event_data.param.lag_id = lag_info->lag_id;
+	event_data.param.lag_num = mem_num;
+
+	nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, notify_mem->vsi_id,
+			 NBL_COMMON_TO_BOARD_ID(common));
+
+	notify_mem->is_bond_adev = true;
+}
+
+static void nbl_lag_member_recover_adev(struct nbl_dev_mgt *dev_mgt,
+					struct nbl_lag_instance *lag_info,
+					struct nbl_lag_member *lag_mem)
+{
+	struct nbl_event_rdma_bond_update event_data;
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_lag_member *mem_tmp, *adev_mem = NULL;
+	int i = 0, has_self = 0, mem_num = 0;
+
+	memset(&event_data, 0, sizeof(event_data));
+
+	list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node)
+		if (mem_tmp == lag_mem)
+			has_self = 1;
+
+	if (!has_self)
+		return;
+
+	list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) {
+		event_data.param.member_list[mem_num].vsi_id = mem_tmp->vsi_id;
+		event_data.param.member_list[mem_num].eth_id = mem_tmp->eth_id;
+		mem_num++;
+
+		if (mem_tmp->is_bond_adev)
+			adev_mem = mem_tmp;
+	}
+
+	/* If we cannot find a member with an adev, there is nothing to do. */
+	if (!adev_mem)
+		return;
+
+	/* Notify the rdma dev to delete the bond adev. */
+	event_data.subevent = NBL_SUBEVENT_RELEASE_BOND_ADEV;
+	event_data.param.bond_netdev = lag_info->bond_netdev;
+	event_data.param.lag_id = lag_info->lag_id;
+	event_data.param.lag_num = mem_num;
+
+	nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, adev_mem->vsi_id,
+			 NBL_COMMON_TO_BOARD_ID(common));
+
+	for (i = 0; i < mem_num; i++) {
+		event_data.subevent = NBL_SUBEVENT_CREATE_ADEV;
+		/* Notify the dev to restore the rdma adev. */
+		nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data,
+				 event_data.param.member_list[i].vsi_id,
+				 NBL_COMMON_TO_BOARD_ID(common));
+	}
+
+	adev_mem->is_bond_adev = false;
+}
+
+static void update_lag_member_list(struct nbl_dev_mgt *dev_mgt,
+				   u8 lag_id,
+				   struct nbl_lag_instance *lag_info,
+				   struct nbl_lag_member *lag_mem)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	struct nbl_lag_member *mem_tmp;
+	struct nbl_event_rdma_bond_update event_data;
+	struct nbl_lag_member_list_param mem_list_param = {0};
+	u16 mem_id, tx_enabled_id = U16_MAX;
+	u8 fwd;
+
+	list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) {
+		if (nbl_lag_mem_is_active(mem_tmp))
+			tx_enabled_id = mem_tmp->eth_id;
+	}
+
+	memset(&event_data, 0, sizeof(event_data));
+	mem_id = 0;
+	list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) {
+		if (mem_id < NBL_LAG_MAX_PORTS) {
+			/* The member list is mainly for dup-arp/nd cfg.
+			 * If we only use port_list, which only contains the active eth_id,
+			 * the following problem will occur:
+			 * 1. Add pf0 & pf1 to the bond
+			 * 2. pf0 up, pf0 cfg member_list; right now only pf0 is active, so
+			 *    port_list contains only eth0
+			 * 3. pf1 up, pf1 cfg member_list; now both pf0 and pf1 are up, so
+			 *    port_list contains eth0 & eth1
+			 * In this case, pf1 knows that it should dup-arp to two ports, but
+			 * pf0 is unaware, so if the kernel uses pf0 to send pkts, they
+			 * cannot be duplicated.
+ */ + mem_list_param.member_list[mem_id].eth_id = mem_tmp->eth_id; + mem_list_param.member_list[mem_id].vsi_id = mem_tmp->vsi_id; + + if (nbl_lag_mem_is_active(mem_tmp)) { + mem_list_param.port_list[mem_id] = mem_tmp->eth_id; + mem_list_param.member_list[mem_id].active = true; + } else if (tx_enabled_id < U16_MAX) { + mem_list_param.port_list[mem_id] = tx_enabled_id; + } + } + mem_id++; + } + mem_list_param.lag_num = mem_id; + if (lag_info->lag_upper_info.tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && mem_id > 1) + mem_list_param.duppkt_enable = true; + + if (tx_enabled_id < U16_MAX) + for ( ; mem_id < NBL_LAG_MAX_PORTS; ) + mem_list_param.port_list[mem_id++] = tx_enabled_id; + + mem_list_param.lag_id = lag_id; + serv_ops->cfg_lag_member_list(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), &mem_list_param); + + serv_ops->cfg_lag_member_up_attr(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + lag_mem->eth_id, lag_id, lag_mem->bonded ? true : false); + if (!lag_mem->bonded) { + fwd = NBL_LAG_MEM_FWD_DROP; + serv_ops->cfg_lag_member_fwd(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + lag_mem->eth_id, lag_id, fwd); + } + + mem_list_param.bond_netdev = lag_info->bond_netdev; + memcpy(&event_data.param, &mem_list_param, sizeof(event_data.param)); + event_data.subevent = NBL_SUBEVENT_UPDATE_BOND_MEMBER; + + /* Make sure only notify the dev who has been created the rdma bond adev to update the + * bond member list info. + */ + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) + if (mem_tmp->is_bond_adev) + nbl_event_notify(NBL_EVENT_RDMA_BOND_UPDATE, &event_data, mem_tmp->vsi_id, + NBL_COMMON_TO_BOARD_ID(common)); +} + +static void nbl_update_lag_cfg(struct nbl_lag_member *lag_mem, u8 lag_id, u32 flag) +{ + struct nbl_dev_mgt *dev_mgt = NBL_NETDEV_TO_DEV_MGT(lag_mem->netdev); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_dev_net *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt); + struct nbl_dev_vsi *vsi = net_dev->vsi_ctrl.vsi_list[NBL_VSI_DATA]; + u16 eth_id; + u8 fwd; + const char *upper_name; + struct nbl_lag_instance *lag_info = NULL; + bool sfp_tx_enable, lag_enable; + u32 board_key; + + eth_id = lag_mem->eth_id; + board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 | + dev_mgt->common->pdev->bus->number; + + if (flag & NBL_LAG_UPDATE_LACP_PKT) { + lag_enable = lag_mem->bonded ? true : false; + serv_ops->enable_lag_protocol(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, lag_enable); + vsi->feature.has_lacp = lag_enable; + + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "%s lag protocol for lag_id: %d.\n", + lag_enable ? "enable" : "disable", lag_id); + } + + if (flag & NBL_LAG_UPDATE_SFP_TX) { + if (lag_mem->bonded) + sfp_tx_enable = lag_mem->lower_state.link_up; + else + sfp_tx_enable = true; + serv_ops->set_sfp_state(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), lag_mem->netdev, + (u8)eth_id, sfp_tx_enable, true); + } + + if (!nbl_lag_id_valid(lag_id)) { + nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "lag_id: %d is invalid, flag: 0x%08x.\n", lag_id, flag); + return; + } + + lag_info = find_lag_by_lagid(board_key, lag_id); + + if (!lag_info) + return; + + upper_name = lag_info->bond_netdev ? netdev_name(lag_info->bond_netdev) : "unset"; + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s: lag_id: %d, eth_id: %u, enabled: %d, linkup: %s, flag: 0x%08x.\n", + upper_name, lag_id, lag_mem->logic_eth_id, lag_info->lag_enable, + lag_info->linkup ? 
"up" : "down", flag); + + if (flag & NBL_LAG_UPDATE_HASH) + serv_ops->cfg_lag_hash_algorithm(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), eth_id, + lag_id, lag_info->lag_upper_info.hash_type); + + if (flag & NBL_LAG_UPDATE_LINK) { + if (lag_mem->bonded) { + fwd = NBL_LAG_MEM_FWD_DROP; + if (lag_info->linkup) + fwd = nbl_lag_mem_is_active(lag_mem) ? + NBL_LAG_MEM_FWD_NORMAL : NBL_LAG_MEM_FWD_DROP; + serv_ops->cfg_lag_member_fwd(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + eth_id, lag_id, fwd); + } + } + + if (flag & NBL_LAG_UPDATE_MEMBER) + update_lag_member_list(dev_mgt, lag_id, lag_info, lag_mem); +} + +static int del_lag_member(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_instance *lag_info, + struct netdev_notifier_changeupper_info *info) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + u8 mem_count = 0; + struct nbl_lag_member *mem_tmp = NULL; + + lag_mem = net_dev->lag_mem; + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + mem_count++; + if (lag_mem == mem_tmp) + break; + } + + if (nbl_list_entry_is_head(mem_tmp, &lag_info->mem_list_head, mem_list_node)) + return -ENOENT; + + if (mem_count == 0 || mem_count > NBL_LAG_MAX_PORTS) { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "lag member device has been deleted.\n"); + return -1; + } + + lag_mem->bonded = 0; + lag_mem->lag_id = NBL_INVALID_LAG_ID; + memset(&lag_mem->lower_state, 0, sizeof(lag_mem->lower_state)); + list_del(&lag_mem->mem_list_node); + + return 0; +} + +static int add_lag_member(struct nbl_dev_mgt *dev_mgt, + struct nbl_lag_instance *lag_info, + u8 lag_id, + struct netdev_notifier_changeupper_info *info) +{ + struct nbl_dev_net *net_dev = dev_mgt->net_dev; + struct nbl_lag_member *lag_mem; + u8 mem_count = 0; + struct netdev_lag_upper_info *upper_info; + struct nbl_lag_member *mem_tmp = NULL; + + lag_mem = net_dev->lag_mem; + upper_info = info->upper_info; + + list_for_each_entry(mem_tmp, &lag_info->mem_list_head, mem_list_node) { + mem_count++; + if (lag_mem == mem_tmp) + return 0; + } + + if (mem_count < NBL_LAG_MAX_PORTS) { + lag_mem->bonded = 1; + lag_mem->lag_id = lag_id; + list_add_tail(&lag_mem->mem_list_node, &lag_info->mem_list_head); + } else { + nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "no available lag member resource.\n"); + return -1; + } + return 0; +} + +static bool is_lag_can_offload(struct nbl_dev_mgt *dev_mgt, + const struct nbl_lag_instance *lag_info) +{ + struct nbl_lag_resource *lag_resource_tmp; + struct nbl_lag_instance *lag_info_tmp; + u32 count = 0; + + if (!(lag_info->lag_upper_info.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || + lag_info->lag_upper_info.tx_type == NETDEV_LAG_TX_TYPE_HASH)) { + nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, + "bond dev %s tx_type %d is not allowed.\n", + netdev_name(lag_info->bond_netdev), lag_info->lag_upper_info.tx_type); + return false; + } + + /* if the lag instance's all lag members only belong to one card, this lag can offload */ + list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) { + list_for_each_entry(lag_info_tmp, + &lag_resource_tmp->lag_instance_head, instance_node) { + if (lag_info_tmp->bond_netdev == lag_info->bond_netdev && + !list_empty(&lag_info_tmp->mem_list_head)) + count++; + } + } + + return (count == 1) ? 
+static int enable_lag_instance(struct nbl_lag_resource *lag_resource,
+			       struct nbl_lag_instance *lag_info)
+{
+	u8 lag_id;
+	struct nbl_lag_member *lag_mem;
+
+	if (lag_info->lag_enable)
+		return 0;
+
+	/* enable the lag instance: allocate a lag id, then update every member's lag id */
+	lag_id = find_first_zero_bit(lag_resource->lag_id_bitmap, NBL_LAG_MAX_NUM);
+	if (!nbl_lag_id_valid(lag_id))
+		return -ENOSPC;
+
+	set_bit(lag_id, lag_resource->lag_id_bitmap);
+
+	list_for_each_entry(lag_mem, &lag_info->mem_list_head, mem_list_node)
+		lag_mem->lag_id = lag_id;
+
+	lag_info->lag_id = lag_id;
+	lag_info->lag_enable = 1;
+	return 0;
+}
+
+static void disable_lag_instance(struct nbl_lag_resource *lag_resource,
+				 struct nbl_lag_instance *lag_info)
+{
+	u8 lag_id;
+
+	/* reclaim the lag id, then disable and re-init the lag instance.
+	 * Do not free the lag instance, so it can be reused later if needed;
+	 * all lag instance resources are freed in the lag deinit function.
+	 */
+	lag_id = lag_info->lag_id;
+	clear_bit(lag_id, lag_resource->lag_id_bitmap);
+
+	init_lag_instance(lag_info, NULL);
+}
+
+static void nbl_lag_changeupper_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag)
+{
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_lag_member *lag_mem, *mem_tmp;
+	struct nbl_lag_resource *lag_resource = NULL;
+	struct netdev_notifier_changeupper_info *info;
+	struct netdev_lag_upper_info *upper_info;
+	struct net_device *netdev;
+	struct nbl_lag_instance *lag_info;
+	const char *upper_name, *device_name;
+	struct net_device *current_netdev;
+	u8 lag_id = NBL_INVALID_LAG_ID;
+	u32 board_key;
+	int ret;
+
+	info = ptr;
+	netdev = netdev_notifier_info_to_dev(ptr);
+
+	lag_mem = net_dev->lag_mem;
+	current_netdev = net_dev->netdev;
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+
+	/* not for this netdev */
+	if (netdev != current_netdev)
+		return;
+
+	device_name = netdev ? netdev_name(netdev) : "unset";
+
+	if (!info->upper_dev) {
+		nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			"changeupper(%s) event received, but upper dev is null\n", device_name);
+		return;
+	}
+
+	upper_info = info->upper_info;
+	upper_name = netdev_name(info->upper_dev);
+
+	nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		  "changeupper(%s) event bond %s, linking: %d, master: %d, tx_type: %d, hash_type: %d.\n",
+		  device_name,
+		  upper_name, info->linking, info->master,
+		  upper_info ? upper_info->tx_type : 0,
+		  upper_info ? upper_info->hash_type : 0);
+
+	if (!netif_is_lag_master(info->upper_dev)) {
+		nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			  "changeupper(%s) event received, but not master.\n", device_name);
+		return;
+	}
+
+	lag_info = alloc_lag_instance(board_key, info->upper_dev, &lag_resource);
+	if (!lag_info || !lag_resource) {
+		nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			  "changeupper(%s) event received, but no lag resource exists for board_key 0x%x.\n",
+			  device_name, board_key);
+		return;
+	}
+
+	lag_id = lag_info->lag_id;
+	if (info->linking) {
+		ret = add_lag_member(dev_mgt, lag_info, lag_id, info);
+		if (!ret) {
+			/* update the lag info when the first device bonds to this lag */
+			if (nbl_list_is_first(&lag_mem->mem_list_node, &lag_info->mem_list_head)) {
+				lag_info->bond_netdev = info->upper_dev;
+				lag_info->linkup = (lag_info->bond_netdev->flags & IFF_UP) ? 1 : 0;
+				lag_info->lag_upper_info.tx_type = upper_info->tx_type;
+				lag_info->lag_upper_info.hash_type = upper_info->hash_type;
+			} else if (is_lag_can_offload(dev_mgt, lag_info)) {
+				/* if the lag becomes offloadable once a second device bonds
+				 * to it, enable the lag instance and assign it a lag id,
+				 * then update the offload configuration.
+				 */
+				if (enable_lag_instance(lag_resource, lag_info))
+					nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+						"enable lag failed, lag-bitmap: %lx.\n",
+						lag_resource->lag_id_bitmap[0]);
+				else
+					nbl_lag_create_bond_adev(dev_mgt, lag_info);
+			}
+			if (lag_info->lag_enable) {
+				*flag |= NBL_LAG_UPDATE_HASH | NBL_LAG_UPDATE_MEMBER |
+					 NBL_LAG_UPDATE_LINK;
+				list_for_each_entry(mem_tmp, &lag_info->mem_list_head,
+						    mem_list_node)
+					nbl_update_lag_cfg(mem_tmp, mem_tmp->lag_id, *flag);
+			}
+			*flag = NBL_LAG_UPDATE_LACP_PKT;
+			serv_ops->set_lag_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+					       lag_info->bond_netdev, lag_info->lag_id);
+		}
+	} else {
+		nbl_lag_member_recover_adev(dev_mgt, lag_info, lag_mem);
+
+		ret = del_lag_member(dev_mgt, lag_info, info);
+		if (!ret) {
+			/* update the offload configuration if the lag is enabled. Once all
+			 * members have unbonded, disable and re-init this lag instance and
+			 * reclaim its lag id.
+			 */
+			if (lag_info->lag_enable) {
+				*flag |= NBL_LAG_UPDATE_MEMBER;
+				nbl_update_lag_cfg(lag_mem, lag_id, *flag);
+			}
+			if (list_empty(&lag_info->mem_list_head))
+				disable_lag_instance(lag_resource, lag_info);
+			*flag = NBL_LAG_UPDATE_LACP_PKT | NBL_LAG_UPDATE_SFP_TX;
+			serv_ops->unset_lag_info(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt));
+		}
+	}
+}
+
+static void nbl_lag_changelower_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag)
+{
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_lag_member *lag_mem;
+	struct netdev_notifier_changelowerstate_info *info;
+	struct netdev_lag_lower_state_info *lower_stat_info;
+	struct net_device *netdev;
+	const char *device_name;
+	struct net_device *current_netdev;
+	struct nbl_lag_instance *lag_info;
+	u8 lag_id;
+	u32 board_key;
+
+	info = ptr;
+	netdev = netdev_notifier_info_to_dev(ptr);
+	lower_stat_info = info->lower_state_info;
+	if (!lower_stat_info)
+		return;
+
+	device_name = netdev ? netdev_name(netdev) : "unset";
+
+	lag_mem = net_dev->lag_mem;
+	current_netdev = net_dev->netdev;
+
+	/* not for this netdev */
+	if (netdev != current_netdev)
+		return;
+
+	nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		 "changelower(%s) event link_up: %d, tx_enabled: %d.\n",
+		 device_name,
+		 lower_stat_info->link_up,
+		 lower_stat_info->tx_enabled);
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	if (lag_mem->bonded) {
+		lag_mem->lower_state.link_up = lower_stat_info->link_up;
+		lag_mem->lower_state.tx_enabled = lower_stat_info->tx_enabled;
+		lag_id = lag_mem->lag_id;
+		lag_info = find_lag_by_lagid(board_key, lag_id);
+		if (!lag_info) {
+			nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+				  "changelower(%s) event received, but no lag resource exists for board_key 0x%x.\n",
+				  device_name, board_key);
+			return;
+		}
+
+		if (lag_info->lag_enable)
+			*flag |= NBL_LAG_UPDATE_MEMBER | NBL_LAG_UPDATE_LINK;
+	}
+}
+
+static void nbl_lag_info_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag)
+{
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_lag_member *lag_mem;
+	struct net_device *netdev;
+	struct netdev_notifier_bonding_info *info;
+	struct netdev_bonding_info *bonding_info;
+	const char *lag_mem_name;
+	struct net_device *current_netdev;
+	struct nbl_lag_instance *lag_info;
+	u8 lag_id;
+	u32 board_key;
+
+	info = ptr;
+	netdev = netdev_notifier_info_to_dev(ptr);
+	bonding_info = &info->bonding_info;
+	lag_mem = net_dev->lag_mem;
+	current_netdev = net_dev->netdev;
+
+	if (!current_netdev || netdev != current_netdev)
+		return;
+
+	lag_mem_name = netdev_name(current_netdev);
+
+	nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		  "bondinfo(%s) event, bond_mode: %d, num_slaves: %d, miimon: %d.\n",
+		  lag_mem_name, bonding_info->master.bond_mode,
+		  bonding_info->master.num_slaves,
+		  bonding_info->master.miimon);
+
+	nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		 "bondinfo(%s) event, slave_id: %d, slave_name: %s, link: %d, state: %d, failure_count: %d.\n",
+		 lag_mem_name, bonding_info->slave.slave_id,
+		 bonding_info->slave.slave_name, bonding_info->slave.link,
+		 bonding_info->slave.state, bonding_info->slave.link_failure_count);
+
+	if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
+		nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			 "Bonding event received, but mode is not active-backup.\n");
+		return;
+	}
+
+	if (bonding_info->slave.state == BOND_STATE_BACKUP) {
+		if (lag_mem->bonded) {
+			lag_mem->lower_state.tx_enabled = 0;
+			board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+				    dev_mgt->common->pdev->bus->number;
+			lag_id = lag_mem->lag_id;
+			lag_info = find_lag_by_lagid(board_key, lag_id);
+			if (!lag_info) {
+				nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+					  "bondinfo(%s) event received, but no lag resource exists for board_key 0x%x.\n",
+					  lag_mem_name, board_key);
+				return;
+			}
+			if (lag_info->lag_enable)
+				*flag |= NBL_LAG_UPDATE_MEMBER | NBL_LAG_UPDATE_LINK;
+		}
+	}
+}
+
+static void nbl_lag_updown_event(struct nbl_dev_mgt *dev_mgt, void *ptr, bool is_up, u32 *flag)
+{
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_lag_member *lag_mem;
+	struct net_device *event_netdev = NULL;
+	struct nbl_lag_instance *lag_info = NULL;
+	const char *device_name;
+	u8 linkup;
+	u32 board_key;
+
+	event_netdev = netdev_notifier_info_to_dev(ptr);
+	device_name = netdev_name(event_netdev);
+	lag_mem = net_dev->lag_mem;
+	if (!lag_mem->bonded)
+		return;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	lag_info = find_lag_by_bonddev(board_key, event_netdev);
+
+	if (!(lag_info || net_dev->netdev == event_netdev))
+		return;
+
+	linkup = is_up ? 1 : 0;
+	nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		 "netdev%s(%s) event received.\n", linkup ? "up" : "down", device_name);
+
+	/* bond dev up/down event */
+	if (lag_info) {
+		lag_info->linkup = linkup;
+		/* if the lag link changes, update the member's fwd type */
+		if (lag_info->lag_enable) {
+			*flag |= NBL_LAG_UPDATE_LINK;
+			if (linkup)
+				*flag |= NBL_LAG_UPDATE_MEMBER;
+		}
+	} else { /* lag member dev up/down event */
+		lag_mem->lower_state.link_up = linkup;
+		*flag |= NBL_LAG_UPDATE_SFP_TX;
+	}
+}
+
+static void nbl_lag_change_event(struct nbl_dev_mgt *dev_mgt, void *ptr, u32 *flag)
+{
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_lag_member *lag_mem;
+	struct net_device *lag_netdev = NULL;
+	struct bonding *bond;
+	enum netdev_lag_hash new_hash;
+	struct nbl_lag_instance *lag_info = NULL;
+	const char *device_name;
+	u32 board_key;
+
+	lag_netdev = netdev_notifier_info_to_dev(ptr);
+
+	device_name = lag_netdev ? netdev_name(lag_netdev) : "unset";
+	lag_mem = net_dev->lag_mem;
+	if (!lag_mem->bonded)
+		return;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+
+	lag_info = find_lag_by_bonddev(board_key, lag_netdev);
+
+	if (!lag_info)
+		return;
+
+	bond = netdev_priv(lag_netdev);
+
+	switch (bond->params.xmit_policy) {
+	case BOND_XMIT_POLICY_LAYER2:
+		new_hash = NETDEV_LAG_HASH_L2;
+		break;
+	case BOND_XMIT_POLICY_LAYER34:
+		new_hash = NETDEV_LAG_HASH_L34;
+		break;
+	case BOND_XMIT_POLICY_LAYER23:
+		new_hash = NETDEV_LAG_HASH_L23;
+		break;
+	case BOND_XMIT_POLICY_ENCAP23:
+		new_hash = NETDEV_LAG_HASH_E23;
+		break;
+	case BOND_XMIT_POLICY_ENCAP34:
+		new_hash = NETDEV_LAG_HASH_E34;
+		break;
+	case BOND_XMIT_POLICY_VLAN_SRCMAC:
+		new_hash = NETDEV_LAG_HASH_VLAN_SRCMAC;
+		break;
+	default:
+		new_hash = NETDEV_LAG_HASH_UNKNOWN;
+		break;
+	}
+
+	nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		 "netdevchange(%s) event received, old hash: %d, new hash: %d.\n",
+		 device_name, lag_info->lag_upper_info.hash_type, new_hash);
+
+	if (lag_info->lag_upper_info.hash_type != new_hash) {
+		lag_info->lag_upper_info.hash_type = new_hash;
+		if (lag_info->lag_enable)
+			*flag |= NBL_LAG_UPDATE_HASH;
+	}
+}
+
+static int
+nbl_lag_event_handler(struct notifier_block *notify_blk, unsigned long event, void *ptr)
+{
+	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	struct nbl_lag_member *lag_mem;
+	struct nbl_dev_mgt *dev_mgt;
+	u32 update_flag = 0;
+	u8 lag_id = NBL_INVALID_LAG_ID;
+
+	lag_mem = container_of(notify_blk, struct nbl_lag_member, notify_block);
+
+	dev_mgt = (struct nbl_dev_mgt *)NBL_NETDEV_TO_DEV_MGT(lag_mem->netdev);
+
+	nbl_debug(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, "nbl kernel(%s) received event: %s.\n",
+		  netdev_name(netdev), netdev_cmd_to_name(event));
+
+	mutex_lock(&nbl_lag_mutex);
+	/* record the bonded slave's lag_id */
+	if (lag_mem->bonded)
+		lag_id = lag_mem->lag_id;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		nbl_lag_changeupper_event(dev_mgt, ptr, &update_flag);
+		break;
+	case NETDEV_CHANGELOWERSTATE:
+		nbl_lag_changelower_event(dev_mgt, ptr, &update_flag);
+		break;
+	case NETDEV_BONDING_INFO:
+		nbl_lag_info_event(dev_mgt, ptr, &update_flag);
+		break;
+	case NETDEV_DOWN:
+		nbl_lag_updown_event(dev_mgt, ptr, false, &update_flag);
+		break;
+	case NETDEV_UP:
+		nbl_lag_updown_event(dev_mgt, ptr, true, &update_flag);
+		break;
+	case NETDEV_CHANGE:
+	case NETDEV_FEAT_CHANGE:
+		nbl_lag_change_event(dev_mgt, ptr, &update_flag);
+		break;
+	default:
+		goto unlock;
+	}
+	/* update the new slave's lag_id */
+	if (!nbl_lag_id_valid(lag_id))
+		lag_id = lag_mem->lag_id;
+
+	if (update_flag) {
+		nbl_update_lag_cfg(lag_mem, lag_id, update_flag);
+		nbl_display_lag_info(dev_mgt, lag_id);
+	}
+
+unlock:
+	mutex_unlock(&nbl_lag_mutex);
+
+	return NOTIFY_DONE;
+}
+
+u32 nbl_lag_get_other_active_members(struct nbl_dev_mgt *dev_mgt,
+				     u16 eth_list[], u32 array_size)
+{
+	u32 active_count = 0;
+
+	return active_count;
+}
+
+static void nbl_unregister_lag_handler(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_lag_member *lag_mem;
+	struct notifier_block *notif_blk;
+	struct netdev_net_notifier *netdevice_nn;
+
+	lag_mem = net_dev->lag_mem;
+	notif_blk = &lag_mem->notify_block;
+	if (notif_blk->notifier_call) {
+		netdevice_nn = &lag_mem->netdevice_nn;
+		unregister_netdevice_notifier_dev_net(net_dev->netdev, notif_blk, netdevice_nn);
+
+		nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			 "nbl lag event handler unregistered.\n");
+	}
+}
+
+static int nbl_register_lag_handler(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct notifier_block *notify_blk;
+	struct nbl_lag_member *lag_mem;
+	struct netdev_net_notifier *netdevice_nn;
+
+	lag_mem = net_dev->lag_mem;
+	notify_blk = &lag_mem->notify_block;
+
+	/* register the lag related event handler function for each device */
+	if (!notify_blk->notifier_call) {
+		notify_blk->notifier_call = nbl_lag_event_handler;
+		netdevice_nn = &lag_mem->netdevice_nn;
+		if (register_netdevice_notifier_dev_net(net_dev->netdev,
+							notify_blk, netdevice_nn)) {
+			notify_blk->notifier_call = NULL;
+			nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+				"Failed to register nbl lag event handler!\n");
+			return -EINVAL;
+		}
+		nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			 "nbl lag event handler registered.\n");
+	}
+	return 0;
+}
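+
+/* one lag resource is shared by every function on the same board; its
+ * lifetime is tracked with a kref so the last device to go away frees it.
+ */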
+static int nbl_lag_alloc_resource(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_lag_resource *lag_resource_tmp;
+	u32 lag_resource_num = 0;
+	u32 board_key;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	/* look up the lag resource by board_key first, taking a reference if found */
+	list_for_each_entry(lag_resource_tmp, &lag_resource_head, resource_node) {
+		lag_resource_num++;
+		if (lag_resource_tmp->board_key == board_key) {
+			kref_get(&lag_resource_tmp->kref);
+			goto ret_ok;
+		}
+	}
+
+	/* check against the max number of cards supported */
+	if (lag_resource_num >= NBL_LAG_MAX_RESOURCE_NUM) {
+		nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			"Lag resource num %u exceeds the max num %u.\n",
+			lag_resource_num, NBL_LAG_MAX_RESOURCE_NUM);
+		goto ret_fail;
+	}
+
+	/* allocate the lag resource when the card's first device registers */
+	lag_resource_tmp = kzalloc(sizeof(*lag_resource_tmp), GFP_KERNEL);
+	if (!lag_resource_tmp)
+		goto ret_fail;
+
+	kref_init(&lag_resource_tmp->kref);
+	lag_resource_tmp->board_key = board_key;
+	INIT_LIST_HEAD(&lag_resource_tmp->lag_instance_head);
+	nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		 "Alloc lag resource for board_key 0x%x, refcount %u.\n",
+		 board_key, kref_read(&lag_resource_tmp->kref));
+	/* add the new lag resource into the resource list */
+	list_add_tail(&lag_resource_tmp->resource_node, &lag_resource_head);
+
+ret_ok:
+	nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+		 "Return lag resource for board_key 0x%x, refcount %u.\n",
+		 board_key, kref_read(&lag_resource_tmp->kref));
+	return 0;
+ret_fail:
+	return -ENOMEM;
+}
+
+static void delete_and_free_lag_resource(struct kref *kref)
+{
+	struct nbl_lag_resource *lag_resource_tmp;
+	struct nbl_lag_instance *lag_info, *lag_tmp;
+
+	lag_resource_tmp = container_of(kref, struct nbl_lag_resource, kref);
+
+	/* release all lag instances first */
+	list_for_each_entry_safe(lag_info, lag_tmp,
+				 &lag_resource_tmp->lag_instance_head, instance_node) {
+		list_del(&lag_info->instance_node);
+		kfree(lag_info);
+	}
+
+	list_del(&lag_resource_tmp->resource_node);
+	kfree(lag_resource_tmp);
+}
+
+static void nbl_lag_free_resource(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_lag_resource *lag_resource, *lag_resource_tmp;
+	int ret;
+	u32 board_key;
+
+	board_key = pci_domain_nr(dev_mgt->common->pdev->bus) << 16 |
+		    dev_mgt->common->pdev->bus->number;
+	list_for_each_entry_safe(lag_resource, lag_resource_tmp,
+				 &lag_resource_head, resource_node) {
+		if (lag_resource->board_key == board_key) {
+			/* release the lag resource */
+			ret = kref_put(&lag_resource->kref, delete_and_free_lag_resource);
+			nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+				 "Release the lag resource for board_key 0x%x, refcount %d.\n",
+				 board_key, ret ? -1 : kref_read(&lag_resource->kref));
+		}
+	}
+}
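+
+/* joins (or creates) the board-level lag resource and registers the netdev
+ * notifier for this device when the lag capability is present.
+ */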
+int nbl_init_lag(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param)
+{
+	int ret = 0;
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_lag_member *lag_mem;
+	u8 lag_id;
+
+	if (!param->caps.support_lag)
+		return 0;
+
+	lag_mem = net_dev->lag_mem;
+	if (!lag_mem) {
+		lag_mem = devm_kzalloc(NBL_DEV_MGT_TO_DEV(dev_mgt),
+				       sizeof(*net_dev->lag_mem), GFP_KERNEL);
+		if (!lag_mem)
+			return -ENOMEM;
+	}
+
+	lag_mem->bonded = 0;
+	lag_mem->lower_state.link_up = 0;
+	lag_mem->lower_state.tx_enabled = 0;
+	memset(&lag_mem->notify_block, 0, sizeof(lag_mem->notify_block));
+	lag_mem->vsi_id = NBL_COMMON_TO_VSI_ID(NBL_DEV_MGT_TO_COMMON(dev_mgt));
+	lag_mem->lag_id = NBL_INVALID_LAG_ID;
+	lag_mem->eth_id = NBL_DEV_MGT_TO_COMMON(dev_mgt)->eth_id;
+	lag_mem->logic_eth_id = NBL_DEV_MGT_TO_COMMON(dev_mgt)->logic_eth_id;
+	lag_mem->netdev = net_dev->netdev;
+	net_dev->lag_mem = lag_mem;
+
+	mutex_lock(&nbl_lag_mutex);
+	ret = nbl_lag_alloc_resource(dev_mgt);
+	if (ret) {
+		nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			"Failed to alloc lag resource.\n");
+		goto err_alloc;
+	}
+
+	for (lag_id = 0; nbl_lag_id_valid(lag_id); lag_id++)
+		nbl_display_lag_info(dev_mgt, lag_id);
+
+	mutex_unlock(&nbl_lag_mutex);
+
+	ret = nbl_register_lag_handler(dev_mgt);
+	if (ret) {
+		nbl_err(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN,
+			"Failed to register nbl lag event handler\n");
+		goto err_register;
+	}
+
+	ret = serv_ops->register_indr_dev_tc_offload(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						     net_dev->netdev);
+	if (ret)
+		goto err_reg_lag_tc_offload;
+
+	net_dev->lag_inited = 1;
+
+	nbl_info(NBL_DEV_MGT_TO_COMMON(dev_mgt), NBL_DEBUG_MAIN, "Init the nbl lag successfully.\n");
+	return 0;
+
+err_reg_lag_tc_offload:
+	nbl_unregister_lag_handler(dev_mgt);
+err_register:
+	devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->lag_mem);
+	net_dev->lag_mem = NULL;
+	mutex_lock(&nbl_lag_mutex);
+	nbl_lag_free_resource(dev_mgt);
+	mutex_unlock(&nbl_lag_mutex);
+	return ret;
+err_alloc:
+	devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->lag_mem);
+	net_dev->lag_mem = NULL;
+	mutex_unlock(&nbl_lag_mutex);
+	return ret;
+}
+
+int nbl_deinit_lag(struct nbl_dev_mgt *dev_mgt)
+{
+	struct nbl_dev_net *net_dev = dev_mgt->net_dev;
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+
+	if (!net_dev->lag_inited)
+		return 0;
+
+	serv_ops->unregister_indr_dev_tc_offload(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+						 net_dev->netdev);
+	nbl_unregister_lag_handler(dev_mgt);
+
+	mutex_lock(&nbl_lag_mutex);
+	nbl_lag_free_resource(dev_mgt);
+	mutex_unlock(&nbl_lag_mutex);
+
+	if (net_dev->lag_mem)
+		devm_kfree(NBL_DEV_MGT_TO_DEV(dev_mgt), net_dev->lag_mem);
+	net_dev->lag_mem = NULL;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d5b9426e1ea957453bbf2b8bafb8354752f4fa3
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_lag.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_LAG_H
+#define _NBL_LAG_H
+#include
+#include "nbl_dev.h"
+
+#define NBL_INVALID_LAG_ID 0xf
+#define nbl_lag_id_valid(lag_id) ((lag_id) < NBL_LAG_MAX_NUM)
+
+#define NBL_LAG_ENABLE BIT(0)
+#define NBL_LAG_DISABLE BIT(1)
+#define NBL_LAG_UPDATE_HASH BIT(2)
+#define NBL_LAG_UPDATE_MEMBER BIT(3)
+#define NBL_LAG_UPDATE_LINK BIT(4)
+#define NBL_LAG_UPDATE_SFP_TX BIT(5)
+#define NBL_LAG_UPDATE_LACP_PKT BIT(6)
+
+enum nbl_lag_mem_fwd {
+	NBL_LAG_MEM_FWD_DROP = 0,
+	NBL_LAG_MEM_FWD_NORMAL = 1,
+};
+
+struct nbl_lag_instance {
+	struct net_device *bond_netdev;
+	struct netdev_lag_upper_info lag_upper_info;
+	struct list_head mem_list_head;
+	struct list_head instance_node;
+	u8 linkup;
+	u8 lag_enable;
+	u8 lag_id;
+};
+
+struct nbl_lag_resource {
+	struct kref kref;
+	struct list_head resource_node;
+	u32 board_key; /* domain << 16 | bus_id */
+	DECLARE_BITMAP(lag_id_bitmap, NBL_LAG_MAX_NUM);
+	struct list_head lag_instance_head;
+};
+
+static inline bool nbl_lag_mem_is_active(const struct nbl_lag_member *lag_mem)
+{
+	return lag_mem->bonded && lag_mem->lower_state.link_up && lag_mem->lower_state.tx_enabled;
+}
+
+int nbl_init_lag(struct nbl_dev_mgt *dev_mgt, struct nbl_init_param *param);
+int nbl_deinit_lag(struct nbl_dev_mgt *dev_mgt);
+u32 nbl_lag_get_other_active_members(struct nbl_dev_mgt *dev_mgt,
+				     u16 eth_list[], u32 array_size);
+#endif /* _NBL_LAG_H */
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c
new file mode 100644
index 0000000000000000000000000000000000000000..5aec71bcda853a658633dafc2122856f629db351
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.c
@@ -0,0 +1,5669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+#include "nbl_ethtool.h"
+#include "nbl_ktls.h"
+#include "nbl_ipsec.h"
+#include "nbl_tc.h"
+
+static void nbl_serv_set_link_state(struct nbl_service_mgt *serv_mgt, struct net_device *netdev);
+static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vid);
+
+static void nbl_serv_set_queue_param(struct nbl_serv_ring *ring, u16 desc_num,
+				     struct nbl_txrx_queue_param *param, u16 vsi_id,
+				     u16 global_vector_id)
+{
+	param->vsi_id = vsi_id;
+	param->dma = ring->dma;
+	param->desc_num = desc_num;
+	param->local_queue_id = ring->local_queue_id / 2;
+	param->global_vector_id = global_vector_id;
+	param->intr_en = 1;
+	param->intr_mask = 1;
+	param->extend_header = 1;
+	param->rxcsum = 1;
+	param->split = 0;
+}
+
+/*
+ * In virtio mode, the emulator triggers the configuration of
+ * txrx_registers only based on tx_ring, so the rx_info needs
+ * to be delivered first before the tx_info can be delivered.
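+ * Hence the setup helper below always pushes every rx queue's parameters
+ * before any tx queue's.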
+ */
+static int
+nbl_serv_setup_queues(struct nbl_service_mgt *serv_mgt, struct nbl_serv_ring_vsi_info *vsi_info)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_txrx_queue_param param = {0};
+	struct nbl_serv_ring *ring;
+	struct nbl_serv_vector *vector;
+	u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num;
+	int vector_offset = 0;
+	int i, ret = 0;
+
+	if (vsi_info->vsi_index == NBL_VSI_XDP)
+		vector_offset = ring_mgt->xdp_ring_offset;
+
+	for (i = start; i < end; i++) {
+		vector = &ring_mgt->vectors[i - vector_offset];
+		ring = &ring_mgt->rx_rings[i];
+		nbl_serv_set_queue_param(ring, ring_mgt->rx_desc_num, &param,
+					 vsi_info->vsi_id, vector->global_vector_id);
+
+		ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param, false);
+		if (ret)
+			return ret;
+	}
+
+	for (i = start; i < end; i++) {
+		vector = &ring_mgt->vectors[i - vector_offset];
+		ring = &ring_mgt->tx_rings[i];
+
+		nbl_serv_set_queue_param(ring, ring_mgt->tx_desc_num, &param,
+					 vsi_info->vsi_id, vector->global_vector_id);
+
+		ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param, true);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void
+nbl_serv_flush_rx_queues(struct nbl_service_mgt *serv_mgt, u16 ring_offset, u16 ring_num)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	int i;
+
+	for (i = ring_offset; i < ring_offset + ring_num; i++)
+		disp_ops->kick_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+}
+
+static int nbl_serv_setup_rings(struct nbl_service_mgt *serv_mgt, struct net_device *netdev,
+				struct nbl_serv_ring_vsi_info *vsi_info, bool use_napi)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num;
+	int i, ret = 0;
+
+	for (i = start; i < end; i++) {
+		ring_mgt->tx_rings[i].dma =
+			disp_ops->start_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+		if (!ring_mgt->tx_rings[i].dma) {
+			netdev_err(netdev, "Fail to start tx ring %d", i);
+			ret = -EFAULT;
+			break;
+		}
+	}
+	if (i != end) {
+		while (--i + 1 > start)
+			disp_ops->stop_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+		goto tx_err;
+	}
+
+	for (i = start; i < end; i++) {
+		ring_mgt->rx_rings[i].dma =
+			disp_ops->start_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i, use_napi);
+		if (!ring_mgt->rx_rings[i].dma) {
+			netdev_err(netdev, "Fail to start rx ring %d", i);
+			ret = -EFAULT;
+			break;
+		}
+	}
+	if (i != end) {
+		while (--i + 1 > start)
+			disp_ops->stop_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+		goto rx_err;
+	}
+
+	return 0;
+
+rx_err:
+	for (i = start; i < end; i++)
+		disp_ops->stop_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+tx_err:
+	return ret;
+}
+
+static void nbl_serv_stop_rings(struct nbl_service_mgt *serv_mgt,
+				struct nbl_serv_ring_vsi_info *vsi_info)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num;
+	int i;
+
+	for (i = start; i < end; i++)
+		disp_ops->stop_tx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+
+	for (i = start; i < end; i++)
+		disp_ops->stop_rx_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+}
+
+static int nbl_serv_set_tx_rings(struct nbl_serv_ring_mgt *ring_mgt,
+				 struct net_device *netdev, struct device *dev)
+{
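+	/* one entry per hardware tx queue; the index mirrors the local queue id */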
+	int i;
+	u16 ring_num = ring_mgt->tx_ring_num;
+
+	ring_mgt->tx_rings = devm_kcalloc(dev, ring_num, sizeof(*ring_mgt->tx_rings), GFP_KERNEL);
+	if (!ring_mgt->tx_rings)
+		return -ENOMEM;
+
+	for (i = 0; i < ring_num; i++)
+		ring_mgt->tx_rings[i].index = i;
+
+	return 0;
+}
+
+static void nbl_serv_remove_tx_ring(struct nbl_serv_ring_mgt *ring_mgt, struct device *dev)
+{
+	devm_kfree(dev, ring_mgt->tx_rings);
+	ring_mgt->tx_rings = NULL;
+}
+
+static int nbl_serv_set_rx_rings(struct nbl_serv_ring_mgt *ring_mgt,
+				 struct net_device *netdev, struct device *dev)
+{
+	int i;
+	u16 ring_num = ring_mgt->rx_ring_num;
+
+	ring_mgt->rx_rings = devm_kcalloc(dev, ring_num, sizeof(*ring_mgt->rx_rings), GFP_KERNEL);
+	if (!ring_mgt->rx_rings)
+		return -ENOMEM;
+
+	for (i = 0; i < ring_num; i++)
+		ring_mgt->rx_rings[i].index = i;
+
+	return 0;
+}
+
+static void nbl_serv_remove_rx_ring(struct nbl_serv_ring_mgt *ring_mgt, struct device *dev)
+{
+	devm_kfree(dev, ring_mgt->rx_rings);
+	ring_mgt->rx_rings = NULL;
+}
+
+static int nbl_serv_register_xdp_rxq(struct nbl_service_mgt *serv_mgt,
+				     struct nbl_serv_ring_mgt *ring_mgt)
+{
+	u16 ring_num;
+	int i, j;
+	int ret;
+	struct nbl_dispatch_ops *disp_ops;
+	struct nbl_serv_ring_vsi_info *vsi_info;
+
+	if (ring_mgt->xdp_ring_offset == ring_mgt->tx_ring_num)
+		return 0;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	ring_num = vsi_info->ring_num;
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	for (i = 0; i < ring_num; i++) {
+		ret = disp_ops->register_xdp_rxq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+		if (ret)
+			goto register_xdp_err;
+	}
+
+	return 0;
+register_xdp_err:
+	for (j = 0; j < i; j++)
+		disp_ops->unregister_xdp_rxq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), j);
+
+	return ret;
+}
+
+static void nbl_serv_unregister_xdp_rxq(struct nbl_service_mgt *serv_mgt,
+					struct nbl_serv_ring_mgt *ring_mgt)
+{
+	u16 ring_num;
+	int i;
+	struct nbl_dispatch_ops *disp_ops;
+	struct nbl_serv_ring_vsi_info *vsi_info;
+
+	if (ring_mgt->xdp_ring_offset == ring_mgt->tx_ring_num)
+		return;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	ring_num = vsi_info->ring_num;
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	for (i = 0; i < ring_num; i++)
+		disp_ops->unregister_xdp_rxq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+}
+
+static int nbl_serv_set_vectors(struct nbl_service_mgt *serv_mgt,
+				struct net_device *netdev, struct device *dev)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_resource_pt_ops *pt_ops = NBL_ADAPTER_TO_RES_PT_OPS(adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	int i;
+	u16 ring_num = ring_mgt->xdp_ring_offset;
+
+	ring_mgt->vectors = devm_kcalloc(dev, ring_num, sizeof(*ring_mgt->vectors), GFP_KERNEL);
+	if (!ring_mgt->vectors)
+		return -ENOMEM;
+
+	for (i = 0; i < ring_num; i++) {
+		ring_mgt->vectors[i].napi =
+			disp_ops->get_vector_napi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), i);
+		netif_napi_add(netdev, ring_mgt->vectors[i].napi, pt_ops->napi_poll);
+
+		ring_mgt->vectors[i].netdev = netdev;
+		cpumask_clear(&ring_mgt->vectors[i].cpumask);
+	}
+
+	return 0;
+}
+
+static void nbl_serv_remove_vectors(struct nbl_serv_ring_mgt *ring_mgt, struct device *dev)
+{
+	int i;
+	u16 ring_num = ring_mgt->xdp_ring_offset;
+
+	for (i = 0; i < ring_num; i++)
+		netif_napi_del(ring_mgt->vectors[i].napi);
+
+	devm_kfree(dev, ring_mgt->vectors);
+	ring_mgt->vectors = NULL;
+}
+
+static struct nbl_serv_vlan_node *nbl_serv_alloc_vlan_node(void)
+{
+	struct nbl_serv_vlan_node *vlan_node = NULL;
+
+	vlan_node = kzalloc(sizeof(*vlan_node), GFP_ATOMIC);
+	if (!vlan_node)
+		return NULL;
+
+	INIT_LIST_HEAD(&vlan_node->node);
+	vlan_node->ref_cnt = 1;
+
+	return vlan_node;
+}
+
+static void nbl_serv_free_vlan_node(struct nbl_serv_vlan_node *vlan_node)
+{
+	kfree(vlan_node);
+}
+
+static struct nbl_serv_submac_node *nbl_serv_alloc_submac_node(void)
+{
+	struct nbl_serv_submac_node *submac_node = NULL;
+
+	submac_node = kzalloc(sizeof(*submac_node), GFP_ATOMIC);
+	if (!submac_node)
+		return NULL;
+
+	INIT_LIST_HEAD(&submac_node->node);
+	return submac_node;
+}
+
+static void nbl_serv_free_submac_node(struct nbl_serv_submac_node *submac_node)
+{
+	kfree(submac_node);
+}
+
+static void nbl_serv_del_all_vlans(struct nbl_service_mgt *serv_mgt)
+{
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_serv_vlan_node *vlan_node, *vlan_node_safe;
+
+	list_for_each_entry_safe(vlan_node, vlan_node_safe, &flow_mgt->vlan_list, node) {
+		disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+				      vlan_node->vid, NBL_COMMON_TO_VSI_ID(common));
+
+		list_del(&vlan_node->node);
+		nbl_serv_free_vlan_node(vlan_node);
+	}
+}
+
+static void nbl_serv_del_all_submacs(struct nbl_service_mgt *serv_mgt, u16 vsi)
+{
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_submac_node *submac_node, *submac_node_safe;
+
+	list_for_each_entry_safe(submac_node, submac_node_safe, &flow_mgt->submac_list, node) {
+		disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac,
+				      NBL_DEFAULT_VLAN_ID, vsi);
+
+		list_del(&submac_node->node);
+		nbl_serv_free_submac_node(submac_node);
+	}
+}
+
+static int nbl_serv_validate_tc_config(struct tc_mqprio_qopt_offload *mqprio_qopt,
+				       struct nbl_common_info *common, u16 num_active_queues)
+{
+	u64 tx_rate = 0;
+	int i, num_qps = 0;
+
+	if (mqprio_qopt->qopt.num_tc > NBL_MAX_QUEUE_TC_NUM || mqprio_qopt->qopt.num_tc < 1) {
+		nbl_err(common, NBL_DEBUG_QUEUE, "Invalid num_tc %u\n", mqprio_qopt->qopt.num_tc);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
+		if (!mqprio_qopt->qopt.count[i] || mqprio_qopt->qopt.offset[i] != num_qps) {
+			nbl_err(common, NBL_DEBUG_QUEUE, "Invalid offset %u, num_qps: %u for tc %d\n",
+				mqprio_qopt->qopt.offset[i], num_qps, i);
+			return -EINVAL;
+		}
+
+		if (mqprio_qopt->min_rate[i]) {
+			nbl_err(common, NBL_DEBUG_QUEUE,
+				"Invalid min tx rate for TC %d, only 0 is supported\n", i);
+			return -EINVAL;
+		}
+
+		tx_rate = div_u64(mqprio_qopt->max_rate[i], NBL_TC_MBPS_DIVSIOR);
+
+		if (mqprio_qopt->max_rate[i] && tx_rate < NBL_TC_WEIGHT_GRAVITY) {
+			nbl_err(common, NBL_DEBUG_QUEUE,
+				"Invalid max tx rate for TC %d, minimum %d Mbps\n",
+				i, NBL_TC_WEIGHT_GRAVITY);
+			return -EINVAL;
+		}
+
+		if (tx_rate % NBL_TC_WEIGHT_GRAVITY != 0) {
+			nbl_err(common, NBL_DEBUG_QUEUE,
+				"Invalid max tx rate for TC %d, not divisible by %d\n",
+				i, NBL_TC_WEIGHT_GRAVITY);
+			return -EINVAL;
+		}
+
+		num_qps += mqprio_qopt->qopt.count[i];
+	}
+
+	if (num_qps > num_active_queues) {
+		nbl_err(common, NBL_DEBUG_QUEUE, "Cannot support requested number of queues\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
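+
+/* spread the queue vectors across cpus local to the device's numa node and
+ * program xps so that tx queue selection follows the same mapping.
+ */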
+void nbl_serv_cpu_affinity_init(void *priv, u16 rings_num)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	int i;
+
+	for (i = 0; i < rings_num; i++) {
+		cpumask_set_cpu(cpumask_local_spread(i, NBL_COMMON_TO_DEV(common)->numa_node),
+				&ring_mgt->vectors[i].cpumask);
+		netif_set_xps_queue(ring_mgt->vectors[i].netdev, &ring_mgt->vectors[i].cpumask, i);
+	}
+}
+
+static int nbl_serv_setup_tc_mqprio(struct net_device *netdev, void *type_data)
+{
+	struct nbl_netdev_priv *priv = netdev_priv(netdev);
+	struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv);
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt);
+	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+	struct nbl_tc_qidsc_param param;
+	u8 num_tc = mqprio_qopt->qopt.num_tc, total_qps = 0;
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	int i, ret = 0;
+
+	memset(&param, 0, sizeof(param));
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+
+	param.vsi_id = vsi_info->vsi_id;
+
+	if (!mqprio_qopt->qopt.hw) {
+		/* hw 1 to hw 0 */
+		if (tc_mgt->state == NBL_TC_RUNNING) {
+			/* reset the tc configuration */
+			netdev_reset_tc(netdev);
+			netif_tx_stop_all_queues(netdev);
+			netif_carrier_off(netdev);
+			netif_tx_disable(netdev);
+
+			nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA);
+
+			param.origin_qps = tc_mgt->total_qps;
+			disp_ops->cfg_qdisc_mqprio(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);
+
+			total_qps = tc_mgt->orig_num_active_queues;
+			tc_mgt->num_tc = 0;
+			tc_mgt->state = NBL_TC_INVALID;
+
+			goto exit;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	if (mqprio_qopt->mode != TC_MQPRIO_MODE_CHANNEL)
+		return -EOPNOTSUPP;
+
+	if (tc_mgt->state != NBL_TC_INVALID) {
+		netdev_err(netdev, "TC configuration already exists\n");
+		return -EINVAL;
+	}
+
+	ret = nbl_serv_validate_tc_config(mqprio_qopt, common, vsi_info->ring_num);
+	if (ret) {
+		netdev_err(netdev, "TC config invalid\n");
+		return ret;
+	}
+
+	if (tc_mgt->num_tc == num_tc)
+		return 0;
+
+	if (num_tc > NBL_MAX_TC_NUM) {
+		netdev_err(netdev, "num_tc supports at most %d, but %d was requested\n",
+			   NBL_MAX_TC_NUM, num_tc);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_tc; i++) {
+		total_qps += mqprio_qopt->qopt.count[i];
+		param.info[i].count = mqprio_qopt->qopt.count[i];
+		param.info[i].offset = mqprio_qopt->qopt.offset[i];
+		param.info[i].max_tx_rate = div_u64(mqprio_qopt->max_rate[i], NBL_TC_MBPS_DIVSIOR);
+	}
+
+	tc_mgt->num_tc = num_tc;
+	tc_mgt->orig_num_active_queues = vsi_info->active_ring_num;
+
+	netif_tx_stop_all_queues(netdev);
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA);
+
+	param.num_tc = num_tc;
+	param.enable = true;
+	param.origin_qps = tc_mgt->orig_num_active_queues;
+	param.gravity = NBL_TC_WEIGHT_GRAVITY;
+	ret = disp_ops->cfg_qdisc_mqprio(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);
+	if (ret) {
+		netdev_err(netdev, "Fail to cfg qdisc mqprio\n");
+		tc_mgt->num_tc = 0;
+		return ret;
+	}
+
+	netdev_reset_tc(netdev);
+	/* Report the tc mapping up the stack */
+	netdev_set_num_tc(netdev, num_tc);
+	for (i = 0; i < num_tc; i++)
+		netdev_set_tc_queue(netdev, i, mqprio_qopt->qopt.count[i],
+				    mqprio_qopt->qopt.offset[i]);
+
+	tc_mgt->state = NBL_TC_RUNNING;
+exit:
+	/* If the device is being unregistered, we cannot set queue nums or start
+	 * the queues, otherwise we would hold the refcnt forever and block the
+	 * unregister process.
+	 *
+	 * Note: ndo_stop will not help, because ndo_stop (in dev_close_many)
+	 * executes before ndo_setup_tc (in dev_shutdown) when unregistering.
+	 */
+	if (total_qps && NETREG_REGISTERED == netdev->reg_state &&
+	    !test_bit(NBL_DOWN, adapter->state)) {
+		nbl_serv_cpu_affinity_init(serv_mgt, total_qps);
+		netif_set_real_num_rx_queues(netdev, total_qps);
+		netif_set_real_num_tx_queues(netdev, total_qps);
+
+		nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, total_qps, 1);
+
+		netif_tx_start_all_queues(netdev);
+		nbl_serv_set_link_state(serv_mgt, netdev);
+	}
+
+	tc_mgt->total_qps = total_qps;
+	return ret;
+}
+
+static int nbl_serv_ipv6_exthdr_num(struct sk_buff *skb, int start, u8 nexthdr)
+{
+	int exthdr_num = 0;
+	struct ipv6_opt_hdr _hdr, *hp;
+	unsigned int hdrlen;
+
+	while (ipv6_ext_hdr(nexthdr)) {
+		if (nexthdr == NEXTHDR_NONE)
+			return -1;
+
+		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
+		if (!hp)
+			return -1;
+
+		exthdr_num++;
+
+		if (nexthdr == NEXTHDR_FRAGMENT)
+			hdrlen = 8;
+		else if (nexthdr == NEXTHDR_AUTH)
+			hdrlen = ipv6_authlen(hp);
+		else
+			hdrlen = ipv6_optlen(hp);
+
+		nexthdr = hp->nexthdr;
+		start += hdrlen;
+	}
+
+	return exthdr_num;
+}
+
+static void nbl_serv_set_sfp_state(void *priv, struct net_device *netdev, u8 eth_id,
+				   bool open, bool is_force)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	int ret = 0;
+
+	if (test_bit(NBL_FLAG_LINK_DOWN_ON_CLOSE, serv_mgt->flags) || is_force) {
+		if (open) {
+			ret = disp_ops->set_sfp_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						      eth_id, NBL_SFP_MODULE_ON);
+			if (ret)
+				netdev_info(netdev, "Fail to open sfp\n");
+			else
+				netdev_info(netdev, "open sfp\n");
+		} else {
+			ret = disp_ops->set_sfp_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						      eth_id, NBL_SFP_MODULE_OFF);
+			if (ret)
+				netdev_info(netdev, "Fail to close sfp\n");
+			else
+				netdev_info(netdev, "close sfp\n");
+		}
+	}
+}
+
+static void nbl_serv_set_netdev_carrier_state(void *priv, struct net_device *netdev, u8 link_state)
+{
+	struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev);
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt;
+
+	if (test_bit(NBL_DOWN, adapter->state))
+		return;
+
+	switch (net_resource_mgt->link_forced) {
+	case IFLA_VF_LINK_STATE_AUTO:
+		if (link_state) {
+			if (!netif_carrier_ok(netdev)) {
+				netif_carrier_on(netdev);
+				netdev_info(netdev, "Set nic link up\n");
+			}
+		} else {
+			if (netif_carrier_ok(netdev)) {
+				netif_carrier_off(netdev);
+				netdev_info(netdev, "Set nic link down\n");
+			}
+		}
+		return;
+	case IFLA_VF_LINK_STATE_ENABLE:
+		netif_carrier_on(netdev);
+		return;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		netif_carrier_off(netdev);
+		return;
+	default:
+		netif_carrier_on(netdev);
+		return;
+	}
+}
+
+static void nbl_serv_set_link_state(struct nbl_service_mgt *serv_mgt, struct net_device *netdev)
+{
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	u16 vsi_id = NBL_COMMON_TO_VSI_ID(common);
+	u8 eth_id = NBL_COMMON_TO_ETH_ID(common);
+	struct nbl_eth_link_info eth_link_info = {0};
+	int ret = 0;
+
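+	/* a forced link state (anything but AUTO) overrides the queried physical link */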
+	net_resource_mgt->link_forced =
+		disp_ops->get_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+
+	if (net_resource_mgt->link_forced == IFLA_VF_LINK_STATE_AUTO) {
+		ret = disp_ops->get_link_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					       eth_id, &eth_link_info);
+		if (ret) {
+			netdev_err(netdev, "Fail to get_link_state err %d\n", ret);
+			eth_link_info.link_status = 1;
+		}
+	}
+
+	nbl_serv_set_netdev_carrier_state(serv_mgt, netdev, eth_link_info.link_status);
+}
+
+static int nbl_serv_rep_netdev_open(struct net_device *netdev)
+{
+	int ret = 0;
+
+	netdev_info(netdev, "Nbl rep open\n");
+	ret = netif_set_real_num_tx_queues(netdev, 1);
+	if (ret)
+		goto fail;
+	ret = netif_set_real_num_rx_queues(netdev, 1);
+	if (ret)
+		goto fail;
+
+	netif_tx_start_all_queues(netdev);
+	netif_carrier_on(netdev);
+	netdev_info(netdev, "Nbl rep open ok!\n");
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int nbl_serv_rep_netdev_stop(struct net_device *netdev)
+{
+	netdev_info(netdev, "Nbl rep stop\n");
+	netif_tx_stop_all_queues(netdev);
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+	netdev_info(netdev, "Nbl rep stop ok!\n");
+
+	return 0;
+}
+
+int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index,
+		      u16 real_qps, bool use_napi)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index];
+	int ret = 0;
+
+	if (vsi_info->started)
+		return 0;
+
+	ret = nbl_serv_setup_rings(serv_mgt, netdev, vsi_info, use_napi);
+	if (ret) {
+		netdev_err(netdev, "Fail to setup rings\n");
+		goto setup_rings_fail;
+	}
+
+	ret = nbl_serv_setup_queues(serv_mgt, vsi_info);
+	if (ret) {
+		netdev_err(netdev, "Fail to setup queues\n");
+		goto setup_queue_fail;
+	}
+	nbl_serv_flush_rx_queues(serv_mgt, vsi_info->ring_offset, vsi_info->ring_num);
+
+	if (vsi_index == NBL_VSI_DATA)
+		disp_ops->cfg_txrx_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					net_resource_mgt->vlan_tci, net_resource_mgt->vlan_proto,
+					vsi_index);
+
+	ret = disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				 vsi_info->vsi_id, true);
+	if (ret) {
+		netdev_err(netdev, "Fail to setup dsch\n");
+		goto setup_dsch_fail;
+	}
+
+	vsi_info->active_ring_num = real_qps;
+	ret = disp_ops->setup_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, real_qps);
+	if (ret)
+		goto setup_cqs_fail;
+
+	vsi_info->started = true;
+	return 0;
+
+setup_cqs_fail:
+	disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+			   vsi_info->vsi_id, false);
+setup_dsch_fail:
+	disp_ops->remove_all_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				    vsi_info->vsi_id);
+setup_queue_fail:
+	nbl_serv_stop_rings(serv_mgt, vsi_info);
+setup_rings_fail:
+	return ret;
+}
+
+int nbl_serv_vsi_stop(void *priv, u16 vsi_index)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index];
+
+	if (!vsi_info->started)
+		return 0;
+
+	vsi_info->started = false;
+	/* modify default action and rss configuration */
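+	/* teardown mirrors the setup order in nbl_serv_vsi_open, in reverse */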
+	disp_ops->remove_cqs(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id);
+
+	/* disable and reset tx/rx logic queue */
+	disp_ops->remove_all_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id);
+
+	/* clear dsch config */
+	disp_ops->cfg_dsch(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_info->vsi_id, false);
+	/* free tx and rx bufs */
+	nbl_serv_stop_rings(serv_mgt, vsi_info);
+
+	return 0;
+}
+
+static int nbl_serv_switch_traffic_default_dest(void *priv, struct nbl_service_traffic_switch *info)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct net_device *dev = net_resource_mgt->netdev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(dev);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt);
+	struct nbl_serv_vlan_node *vlan_node;
+	int ret;
+	u16 from_vsi, to_vsi;
+
+	list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) {
+		if (!vlan_node->vid) {
+			from_vsi = net_priv->normal_vsi;
+			to_vsi = info->normal_vsi;
+		} else {
+			from_vsi = net_priv->other_vsi;
+			to_vsi = info->sync_other_vsi;
+		}
+		disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+				      vlan_node->vid, from_vsi);
+		ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+					    vlan_node->vid, to_vsi);
+		if (ret) {
+			netdev_err(dev, "Fail to cfg macvlan on vid %u in vsi switch",
+				   vlan_node->vid);
+			goto fail;
+		}
+	}
+	/* arp/nd traffic */
+	disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->normal_vsi);
+	ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info->normal_vsi);
+	if (ret)
+		goto add_multi_fail;
+
+	/* lldp/lacp switch */
+	if (info->has_lldp) {
+		disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi);
+		ret = disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					      info->sync_other_vsi);
+		if (ret)
+			goto add_lldp_fail;
+	}
+
+	if (info->has_lacp) {
+		disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi);
+		ret = disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					     info->sync_other_vsi);
+		if (ret)
+			goto add_lacp_fail;
+	}
+
+	net_priv->normal_vsi = info->normal_vsi;
+	net_priv->other_vsi = info->sync_other_vsi;
+	net_priv->async_pending_vsi = info->async_other_vsi;
+
+	/* trigger submac update */
+	net_resource_mgt->user_promisc_mode = info->promisc;
+	net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER;
+	net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE;
+	nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false);
+
+	return 0;
+
+add_lacp_fail:
+	disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi);
+	if (info->has_lldp)
+		disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info->sync_other_vsi);
+add_lldp_fail:
+	if (info->has_lldp)
+		disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi);
+	disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), info->normal_vsi);
+add_multi_fail:
+	disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->normal_vsi);
+fail:
+	list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) {
+		if (!vlan_node->vid) {
+			from_vsi = net_priv->normal_vsi;
+			to_vsi = info->normal_vsi;
+		} else {
+			from_vsi = net_priv->other_vsi;
+			to_vsi = info->sync_other_vsi;
+		}
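+		/* undo: drop the entry from the new vsi and restore it on the old one */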
+		disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+				      vlan_node->vid, to_vsi);
+		disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac,
+				      vlan_node->vid, from_vsi);
+	}
+
+	return -EINVAL;
+}
+
+static int nbl_serv_abnormal_event_to_queue(int event_type)
+{
+	switch (event_type) {
+	case NBL_ABNORMAL_EVENT_DVN:
+		return NBL_TX;
+	case NBL_ABNORMAL_EVENT_UVN:
+		return NBL_RX;
+	default:
+		return event_type;
+	}
+}
+
+static int nbl_serv_stop_abnormal_sw_queue(struct nbl_service_mgt *serv_mgt,
+					   u16 local_queue_id, int type)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->stop_abnormal_sw_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						local_queue_id, type);
+}
+
+static int nbl_serv_chan_stop_abnormal_sw_queue_req(struct nbl_service_mgt *serv_mgt,
+						    u16 local_queue_id, u16 func_id, int type)
+{
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+	struct nbl_chan_param_stop_abnormal_sw_queue param = {0};
+	struct nbl_chan_send_info chan_send = {0};
+	int ret = 0;
+
+	param.local_queue_id = local_queue_id;
+	param.type = type;
+
+	NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE,
+		      &param, sizeof(param), NULL, 0, 1);
+	ret = chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send);
+
+	return ret;
+}
+
+static void nbl_serv_chan_stop_abnormal_sw_queue_resp(void *priv, u16 src_id, u16 msg_id,
+						      void *data, u32 data_len)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+	struct nbl_chan_param_stop_abnormal_sw_queue *param =
+		(struct nbl_chan_param_stop_abnormal_sw_queue *)data;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	struct nbl_chan_ack_info chan_ack;
+	int ret = 0;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	if (param->local_queue_id < vsi_info->ring_offset ||
+	    param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num ||
+	    !vsi_info->ring_num) {
+		ret = -EINVAL;
+		goto send_ack;
+	}
+
+	ret = nbl_serv_stop_abnormal_sw_queue(serv_mgt, param->local_queue_id, param->type);
+
+send_ack:
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE, msg_id,
+		     ret, NULL, 0);
+	chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack);
+}
+
+static dma_addr_t nbl_serv_netdev_queue_restore(struct nbl_service_mgt *serv_mgt,
+						u16 local_queue_id, int type)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->restore_abnormal_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					       local_queue_id, type);
+}
+
+static int nbl_serv_netdev_queue_restart(struct nbl_service_mgt *serv_mgt,
+					 u16 local_queue_id, int type)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->restart_abnormal_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					       local_queue_id, type);
+}
+
+static dma_addr_t nbl_serv_chan_restore_netdev_queue_req(struct nbl_service_mgt *serv_mgt,
+							 u16 local_queue_id, u16 func_id, int type)
+{
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+	struct nbl_chan_param_restore_queue param = {0};
+	struct nbl_chan_send_info chan_send = {0};
+	dma_addr_t dma = 0;
+	int ret = 0;
+
+	param.local_queue_id = local_queue_id;
+	param.type = type;
+
+	NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE,
+		      &param, sizeof(param), &dma, sizeof(dma), 1);
+	ret = chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send);
+	if (ret)
+		return 0;
+
+	return dma;
+}
+
+static void nbl_serv_chan_restore_netdev_queue_resp(void *priv, u16 src_id, u16 msg_id,
+						    void *data, u32 data_len)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+	struct nbl_chan_param_restore_queue *param = (struct nbl_chan_param_restore_queue *)data;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	struct nbl_chan_ack_info chan_ack;
+	dma_addr_t dma = 0;
+	int ret = NBL_CHAN_RESP_OK;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	if (param->local_queue_id < vsi_info->ring_offset ||
+	    param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num ||
+	    !vsi_info->ring_num) {
+		ret = -EINVAL;
+		goto send_ack;
+	}
+
+	dma = nbl_serv_netdev_queue_restore(serv_mgt, param->local_queue_id, param->type);
+
+send_ack:
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, msg_id,
+		     ret, &dma, sizeof(dma));
+	chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack);
+}
+
+static int nbl_serv_chan_restart_netdev_queue_req(struct nbl_service_mgt *serv_mgt,
+						  u16 local_queue_id, u16 func_id, int type)
+{
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+	struct nbl_chan_param_restart_queue param = {0};
+	struct nbl_chan_send_info chan_send = {0};
+
+	param.local_queue_id = local_queue_id;
+	param.type = type;
+
+	NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_RESTART_NETDEV_QUEUE,
+		      &param, sizeof(param), NULL, 0, 1);
+	return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send);
+}
+
+static void nbl_serv_chan_restart_netdev_queue_resp(void *priv, u16 src_id, u16 msg_id,
+						    void *data, u32 data_len)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+	struct nbl_chan_param_restart_queue *param = (struct nbl_chan_param_restart_queue *)data;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	struct nbl_chan_ack_info chan_ack;
+	int ret = 0;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+	if (param->local_queue_id < vsi_info->ring_offset ||
+	    param->local_queue_id >= vsi_info->ring_offset + vsi_info->ring_num ||
+	    !vsi_info->ring_num) {
+		ret = -EINVAL;
+		goto send_ack;
+	}
+
+	ret = nbl_serv_netdev_queue_restart(serv_mgt, param->local_queue_id, param->type);
+
+send_ack:
+	NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, msg_id,
+		     ret, NULL, 0);
+	chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack);
+}
+
+static int
+nbl_serv_start_abnormal_hw_queue(struct nbl_service_mgt *serv_mgt, u16 vsi_id, u16 local_queue_id,
+				 dma_addr_t dma, int type)
+{
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_txrx_queue_param param = {0};
+	struct nbl_serv_vector *vector;
+	struct nbl_serv_ring *ring;
+	int ret = 0;
+
+	switch (type) {
+	case NBL_TX:
+		vector = &ring_mgt->vectors[local_queue_id];
+		ring = &ring_mgt->tx_rings[local_queue_id];
+		ring->dma = dma;
+		nbl_serv_set_queue_param(ring, ring_mgt->tx_desc_num, &param,
+					 vsi_id, vector->global_vector_id);
+		ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param, true);
+		return ret;
+	case NBL_RX:
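+		/* same flow as the tx case, but re-program the rx ring with the restored dma base */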
+		vector = &ring_mgt->vectors[local_queue_id];
+		ring = &ring_mgt->rx_rings[local_queue_id];
+		ring->dma = dma;
+
+		nbl_serv_set_queue_param(ring, ring_mgt->rx_desc_num, &param,
+					 vsi_id, vector->global_vector_id);
+		ret = disp_ops->setup_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param, false);
+		return ret;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static void nbl_serv_restore_queue(struct nbl_service_mgt *serv_mgt, u16 vsi_id,
+				   u16 local_queue_id, u16 type, bool dif_err)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	u16 func_id = disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	u16 global_queue_id;
+	dma_addr_t dma = 0;
+	int ret = 0;
+
+	while (!rtnl_trylock())
+		msleep(20);
+
+	ret = nbl_serv_chan_stop_abnormal_sw_queue_req(serv_mgt, local_queue_id, func_id, type);
+	if (ret)
+		goto unlock;
+
+	ret = disp_ops->stop_abnormal_hw_queue(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id,
+					       local_queue_id, type);
+	if (ret)
+		goto unlock;
+
+	dma = nbl_serv_chan_restore_netdev_queue_req(serv_mgt, local_queue_id, func_id, type);
+	if (!dma)
+		goto unlock;
+
+	ret = nbl_serv_start_abnormal_hw_queue(serv_mgt, vsi_id, local_queue_id, dma, type);
+	if (ret)
+		goto unlock;
+
+	ret = nbl_serv_chan_restart_netdev_queue_req(serv_mgt, local_queue_id, func_id, type);
+	if (ret)
+		goto unlock;
+
+	if (dif_err && type == NBL_TX) {
+		global_queue_id =
+			disp_ops->get_vsi_global_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+							  vsi_id, local_queue_id);
+		nbl_info(common, NBL_DEBUG_COMMON,
+			 "dvn int_status:0, queue_id:%d\n", global_queue_id);
+	}
+
+unlock:
+	rtnl_unlock();
+}
+
+static void nbl_serv_handle_tx_timeout(struct work_struct *work)
+{
+	struct nbl_serv_net_resource_mgt *serv_net_resource_mgt =
+		container_of(work, struct nbl_serv_net_resource_mgt, tx_timeout);
+	struct nbl_service_mgt *serv_mgt = serv_net_resource_mgt->serv_mgt;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_serv_ring_vsi_info *vsi_info;
+	int i = 0;
+
+	vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA];
+
+	for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) {
+		if (ring_mgt->tx_rings[i].need_recovery) {
+			nbl_serv_restore_queue(serv_mgt, vsi_info->vsi_id, i, NBL_TX, false);
+			ring_mgt->tx_rings[i].need_recovery = false;
+		}
+	}
+}
+
+static void nbl_serv_update_link_state(struct work_struct *work)
+{
+	struct nbl_serv_net_resource_mgt *serv_net_resource_mgt =
+		container_of(work, struct nbl_serv_net_resource_mgt, update_link_state);
+	struct nbl_service_mgt *serv_mgt = serv_net_resource_mgt->serv_mgt;
+
+	nbl_serv_set_link_state(serv_mgt, serv_net_resource_mgt->netdev);
+}
+
+static int nbl_serv_chan_notify_link_forced_req(struct nbl_service_mgt *serv_mgt, u16 func_id)
+{
+	struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt);
+	struct nbl_chan_send_info chan_send = {0};
+
+	NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_NOTIFY_LINK_FORCED, NULL, 0, NULL, 0, 1);
+	return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send);
+}
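+
+/* mailbox handler: ack immediately, then let the work queue refresh the carrier state */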
NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_ack_info chan_ack; + + if (!net_resource_mgt) + return; + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NOTIFY_LINK_FORCED, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); + + nbl_common_queue_work(&net_resource_mgt->update_link_state, false, false); +} + +static void nbl_serv_register_link_forced_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_NOTIFY_LINK_FORCED, + nbl_serv_chan_notify_link_forced_resp, serv_mgt); +} + +static void nbl_serv_update_vlan(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, update_vlan); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct net_device *netdev = net_resource_mgt->netdev; + int was_running, err; + u16 vid; + + vid = net_resource_mgt->vlan_tci & VLAN_VID_MASK; + nbl_serv_update_default_vlan(serv_mgt, vid); + + rtnl_lock(); + was_running = netif_running(netdev); + + if (was_running) { + err = nbl_serv_netdev_stop(netdev); + if (err) { + netdev_err(netdev, "Netdev stop failed while update_vlan\n"); + goto netdev_stop_fail; + } + + err = nbl_serv_netdev_open(netdev); + if (err) { + netdev_err(netdev, "Netdev open failed while update_vlan\n"); + goto netdev_open_fail; + } + } + +netdev_stop_fail: +netdev_open_fail: + rtnl_unlock(); +} + +static int nbl_serv_chan_notify_vlan_req(struct nbl_service_mgt *serv_mgt, u16 func_id, + struct nbl_serv_notify_vlan_param *param) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_chan_send_info chan_send = {0}; + + NBL_CHAN_SEND(chan_send, func_id, NBL_CHAN_MSG_NOTIFY_VLAN, + param, sizeof(*param), NULL, 0, 1); + return chan_ops->send_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_send); +} + +static void nbl_serv_chan_notify_vlan_resp(void *priv, u16 src_id, u16 msg_id, + void *data, u32 data_len) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + struct nbl_serv_notify_vlan_param *param = (struct nbl_serv_notify_vlan_param *)data; + struct nbl_chan_ack_info chan_ack; + + net_resource_mgt->vlan_tci = param->vlan_tci; + net_resource_mgt->vlan_proto = param->vlan_proto; + + nbl_common_queue_work(&net_resource_mgt->update_vlan, false, false); + + NBL_CHAN_ACK(chan_ack, src_id, NBL_CHAN_MSG_NOTIFY_VLAN, msg_id, + NBL_CHAN_RESP_OK, NULL, 0); + chan_ops->send_ack(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), &chan_ack); +} + +static void nbl_serv_register_vlan_notify(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), NBL_CHAN_MSG_NOTIFY_VLAN, + nbl_serv_chan_notify_vlan_resp, serv_mgt); +} + +int nbl_serv_netdev_open(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct
nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_tc_mgt *tc_mgt = NBL_SERV_MGT_TO_TC_MGT(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_ring_vsi_info *vsi_info; + int num_cpus, real_qps, ret = 0; + bool netdev_open = true; + + if (!test_bit(NBL_DOWN, adapter->state)) + return -EBUSY; + + netdev_info(netdev, "Nbl open\n"); + + if (ring_mgt->xdp_prog) + nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + netif_carrier_off(netdev); + nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), true, false); + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + if (tc_mgt->num_tc) { + real_qps = tc_mgt->total_qps; + } else if (vsi_info->active_ring_num) { + real_qps = vsi_info->active_ring_num; + } else { + num_cpus = num_online_cpus(); + real_qps = num_cpus > vsi_info->ring_num ? vsi_info->ring_num : num_cpus; + } + + ret = nbl_serv_vsi_open(serv_mgt, netdev, NBL_VSI_DATA, real_qps, 1); + if (ret) + goto vsi_open_fail; + + ret = netif_set_real_num_tx_queues(netdev, real_qps); + if (ret) + goto setup_real_qps_fail; + ret = netif_set_real_num_rx_queues(netdev, real_qps); + if (ret) + goto setup_real_qps_fail; + + netif_tx_start_all_queues(netdev); + clear_bit(NBL_DOWN, adapter->state); + set_bit(NBL_RUNNING, adapter->state); + nbl_serv_set_link_state(serv_mgt, netdev); + + netdev_info(netdev, "Nbl open ok!\n"); + + return 0; + +setup_real_qps_fail: + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); +vsi_open_fail: + netdev_open = false; + if (ring_mgt->xdp_prog) + nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + return ret; +} + +int nbl_serv_netdev_stop(struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + bool netdev_open = false; + + if (!test_bit(NBL_RUNNING, adapter->state)) + return -EBUSY; + + netdev_info(netdev, "Nbl stop\n"); + set_bit(NBL_DOWN, adapter->state); + clear_bit(NBL_RUNNING, adapter->state); + + nbl_serv_set_sfp_state(serv_mgt, netdev, NBL_COMMON_TO_ETH_ID(common), false, false); + + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + nbl_serv_vsi_stop(serv_mgt, NBL_VSI_DATA); + + if (ring_mgt->xdp_prog) + nbl_event_notify(NBL_EVENT_NETDEV_STATE_CHANGE, &netdev_open, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + + netdev_info(netdev, "Nbl stop ok!\n"); + + return 0; +} + +static int nbl_serv_change_mtu(struct net_device *netdev, int new_mtu) +{ + netdev->mtu = new_mtu; + return 0; +} + +static int nbl_serv_set_mac(struct net_device *dev, void *p) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + struct sockaddr *addr = p; + struct nbl_netdev_priv *priv = netdev_priv(dev); + u16 vsi_id; + int ret = 0; + + if (!is_valid_ether_addr(addr->sa_data)) { + netdev_err(dev, "Attempt to change an
invalid mac address %pM\n", addr->sa_data); + return -EADDRNOTAVAIL; + } + + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) + return 0; + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + if (vlan_node->vid == 0) + vsi_id = priv->normal_vsi; + else + vsi_id = priv->other_vsi; + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, vsi_id); + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, + vlan_node->vid, vsi_id); + if (ret) { + netdev_err(dev, "Fail to cfg macvlan on vid %u", vlan_node->vid); + goto fail; + } + } + + disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->normal_vsi, addr->sa_data); + + ether_addr_copy(flow_mgt->mac, addr->sa_data); + eth_hw_addr_set(dev, addr->sa_data); + + if (!NBL_COMMON_TO_VF_CAP(common)) + disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + addr->sa_data, NBL_COMMON_TO_ETH_ID(common)); + + return 0; +fail: + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + if (vlan_node->vid == 0) + vsi_id = priv->normal_vsi; + else + vsi_id = priv->other_vsi; + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), addr->sa_data, + vlan_node->vid, vsi_id); + disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vlan_node->vid, vsi_id); + } + return -EAGAIN; +} + +static int nbl_serv_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_serv_vlan_node *vlan_node; + u16 vsi_id = priv->other_vsi; + int ret = 0; + + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev for proto 0x%04x, vid %u.", + be16_to_cpu(proto), vid); + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + nbl_debug(common, NBL_DEBUG_COMMON, "add mac-vlan dev vid %u.", vlan_node->vid); + if (vlan_node->vid == vid) { + vlan_node->ref_cnt++; + return 0; + } + } + + vlan_node = nbl_serv_alloc_vlan_node(); + if (!vlan_node) + return -EAGAIN; + + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, vid, vsi_id); + if (ret) { + nbl_serv_free_vlan_node(vlan_node); + return -EAGAIN; + } + + vlan_node->vid = vid; + list_add(&vlan_node->node, &flow_mgt->vlan_list); + + return 0; +} + +static int nbl_serv_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_serv_vlan_node *vlan_node; + u16 vsi_id = priv->other_vsi; + + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev for proto 0x%04x, vid %u.", + be16_to_cpu(proto), vid); + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + nbl_debug(common, NBL_DEBUG_COMMON, "del mac-vlan dev vid %u.", vlan_node->vid); + if 
(vlan_node->vid == vid) { + vlan_node->ref_cnt--; + if (!vlan_node->ref_cnt) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, vid, vsi_id); + list_del(&vlan_node->node); + nbl_serv_free_vlan_node(vlan_node); + } + break; + } + } + + return 0; +} + +static int nbl_serv_update_default_vlan(struct nbl_service_mgt *serv_mgt, u16 vid) +{ + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + struct nbl_serv_vlan_node *node, *tmp; + struct nbl_serv_submac_node *submac_node; + struct nbl_common_info *common; + u16 vsi; + int ret; + + if (flow_mgt->vid == vid) + return 0; + + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + vsi = NBL_COMMON_TO_VSI_ID(common); + rtnl_lock(); + + /* update mac sub-interface */ + list_for_each_entry(submac_node, &flow_mgt->submac_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, + flow_mgt->vid, vsi); + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, + vid, vsi); + if (ret) { + nbl_err(common, NBL_DEBUG_COMMON, "update vlan %u, submac %pM failed\n", + vid, submac_node->mac); + goto update_submac_if_failed; + } + } + + list_for_each_entry(vlan_node, &flow_mgt->vlan_list, node) { + if (vlan_node->vid == vid) { + vlan_node->ref_cnt++; + goto free_old_vlan; + } + } + + /* new vlan node */ + vlan_node = nbl_serv_alloc_vlan_node(); + if (!vlan_node) { + ret = -ENOMEM; + goto alloc_node_failed; + } + + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, vid, vsi); + if (ret) + goto add_macvlan_failed; + vlan_node->vid = vid; + list_add(&vlan_node->node, &flow_mgt->vlan_list); + +free_old_vlan: + list_for_each_entry_safe(node, tmp, &flow_mgt->vlan_list, node) { + if (node->vid == flow_mgt->vid) { + node->ref_cnt--; + if (!node->ref_cnt) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + flow_mgt->mac, node->vid, vsi); + list_del(&node->node); + nbl_serv_free_vlan_node(node); + } + break; + } + } + + flow_mgt->vid = vid; + rtnl_unlock(); + + return 0; + +add_macvlan_failed: + nbl_serv_free_vlan_node(vlan_node); +alloc_node_failed: +update_submac_if_failed: + list_for_each_entry(submac_node, &flow_mgt->submac_list, node) { + disp_ops->del_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, + vid, vsi); + disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), submac_node->mac, + flow_mgt->vid, vsi); + } + rtnl_unlock(); + + return ret; +} + +static void nbl_serv_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct nbl_queue_stats queue_stats = { 0 }; + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + u16 start, end; + int i; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + start = vsi_info->ring_offset; + end = vsi_info->ring_offset + vsi_info->ring_num; + + if (!stats) { + pr_err("get_stats64 is null\n"); + return; + } + + for (i = start; i < end; i++) { + disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + i, &queue_stats, true); + stats->tx_packets += queue_stats.packets; + stats->tx_bytes += queue_stats.bytes; + } + + for (i = start; i < end; i++) { + 
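/* Second pass over the same queue range: accumulate the RX packet and byte counters. */ +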
disp_ops->get_queue_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + i, &queue_stats, false); + stats->rx_packets += queue_stats.packets; + stats->rx_bytes += queue_stats.bytes; + } + + stats->multicast = 0; + stats->rx_errors = 0; + stats->tx_errors = 0; + stats->rx_length_errors = 0; + stats->rx_crc_errors = 0; + stats->rx_frame_errors = 0; + stats->rx_dropped = 0; + stats->tx_dropped = 0; +} + +static void nbl_modify_submacs(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct netdev_hw_addr *ha; + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); + struct nbl_serv_submac_node *submac_node; + int uc_count, i, ret = 0; + u8 *buf = NULL; + u16 len; + + spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); + uc_count = netdev_uc_count(net_resource_mgt->netdev); + + if (uc_count) { + len = uc_count * ETH_ALEN; + buf = kzalloc(len, GFP_ATOMIC); + + if (!buf) { + spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + return; + } + + i = 0; + netdev_hw_addr_list_for_each(ha, &net_resource_mgt->netdev->uc) { + if (i >= len) + break; + memcpy(&buf[i], ha->addr, ETH_ALEN); + i += ETH_ALEN; + } + + net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_MODIFY_MAC_FILTER; + } + spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + + nbl_serv_del_all_submacs(serv_mgt, priv->async_other_vsi); + + for (i = 0; i < uc_count; i++) { + submac_node = nbl_serv_alloc_submac_node(); + if (!submac_node) + break; + + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &buf[i * ETH_ALEN], + flow_mgt->vid, priv->async_pending_vsi); + if (ret) { + nbl_serv_free_submac_node(submac_node); + break; + } + + ether_addr_copy(submac_node->mac, &buf[i * ETH_ALEN]); + list_add(&submac_node->node, &flow_mgt->submac_list); + } + + kfree(buf); + priv->async_other_vsi = priv->async_pending_vsi; +} + +static void nbl_modify_promisc_mode(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_netdev_priv *priv = netdev_priv(net_resource_mgt->netdev); + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 mode = 0; + + spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (net_resource_mgt->curr_promiscuout_mode & (IFF_PROMISC | IFF_ALLMULTI)) + mode = 1; + + if (net_resource_mgt->user_promisc_mode) + mode = 1; + + net_resource_mgt->rxmode_set_required &= ~NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + + disp_ops->set_promisc_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + priv->async_other_vsi, mode); +} + +static struct nbl_mac_filter *nbl_find_filter(struct nbl_adapter *adapter, const u8 *macaddr) +{ + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_mac_filter *f; + + if (!macaddr) + return NULL; + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + list_for_each_entry(f, &net_resource_mgt->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +static void nbl_free_filter(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_mac_filter *f; + struct list_head *pos, *n; + + list_for_each_safe(pos, n, 
&net_resource_mgt->mac_filter_list) { + f = list_entry(pos, struct nbl_mac_filter, list); + list_del(&f->list); + kfree(f); + } +} + +static struct nbl_mac_filter *nbl_add_filter(struct nbl_adapter *adapter, const u8 *macaddr) +{ + struct nbl_mac_filter *f; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + if (!macaddr) + return NULL; + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + f = nbl_find_filter(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + list_add_tail(&f->list, &net_resource_mgt->mac_filter_list); + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; + } + + return f; +} + +static int nbl_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct nbl_adapter *adapter; + struct nbl_mac_filter *f; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + f = nbl_find_filter(adapter, addr); + if (f) { + list_del(&f->list); + kfree(f); + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_MODIFY_MAC_FILTER; + } + + return 0; +} + +static int nbl_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct nbl_adapter *adapter; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + if (nbl_add_filter(adapter, addr)) + return 0; + else + return -ENOMEM; +} + +static bool nbl_serv_promisc_mode_changed(struct net_device *dev) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + return (net_resource_mgt->curr_promiscuout_mode ^ dev->flags) + & (IFF_PROMISC | IFF_ALLMULTI); +} + +static void nbl_serv_set_rx_mode(struct net_device *dev) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + spin_lock_bh(&net_resource_mgt->mac_vlan_list_lock); + __dev_uc_sync(dev, nbl_addr_sync, nbl_addr_unsync); + spin_unlock_bh(&net_resource_mgt->mac_vlan_list_lock); + + if (!NBL_COMMON_TO_VF_CAP(NBL_SERV_MGT_TO_COMMON(serv_mgt))) { /* only pf support */ + spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (nbl_serv_promisc_mode_changed(dev)) { + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + net_resource_mgt->curr_promiscuout_mode = dev->flags; + } + spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + } + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} + +static void nbl_serv_change_rx_flags(struct net_device *dev, int flag) +{ + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + adapter = NBL_NETDEV_TO_ADAPTER(dev); + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + 
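/* Same promiscuous handling as nbl_serv_set_rx_mode(): record the flag change under the lock, then let the rx_mode_async work item push it to the device. */ +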
spin_lock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + if (nbl_serv_promisc_mode_changed(dev)) { + net_resource_mgt->rxmode_set_required |= NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE; + net_resource_mgt->curr_promiscuout_mode = dev->flags; + } + spin_unlock_bh(&net_resource_mgt->current_netdev_promisc_flags_lock); + + nbl_common_queue_work(&net_resource_mgt->rx_mode_async, false, false); +} + +static netdev_features_t +nbl_serv_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) +{ + u32 l2_l3_hrd_len = 0, l4_hrd_len = 0, total_hrd_len = 0; + u8 l4_proto = 0; + __be16 protocol, frag_off; + int ret; + unsigned char *exthdr; + unsigned int offset = 0; + int nexthdr = 0; + int exthdr_num = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL. + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 256 bytes or bigger than 16383 bytes. If it is, then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < NBL_TX_TSO_MSS_MIN || + skb_shinfo(skb)->gso_size > NBL_TX_TSO_MSS_MAX)) + features &= ~NETIF_F_GSO_MASK; + + l2_l3_hrd_len = (u32)(skb_transport_header(skb) - skb->data); + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (protocol == htons(ETH_P_IP)) { + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) { + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); + if (ret < 0) + goto out_rm_features; + } + + /* IPV6 extension headers + * (1) do not support routing and destination extension headers + * (2) support at most 2 extension headers + */ + nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_ROUTING, NULL, NULL); + if (nexthdr == NEXTHDR_ROUTING) { + netdev_info(dev, "skb contains ipv6 routing ext header\n"); + goto out_rm_features; + } + + nexthdr = ipv6_find_hdr(skb, &offset, NEXTHDR_DEST, NULL, NULL); + if (nexthdr == NEXTHDR_DEST) { + netdev_info(dev, "skb contains ipv6 destination ext header\n"); + goto out_rm_features; + } + + exthdr_num = nbl_serv_ipv6_exthdr_num(skb, exthdr - skb->data, ip.v6->nexthdr); + if (exthdr_num < 0 || exthdr_num > 2) { + netdev_info(dev, "skb ipv6 exthdr_num:%d\n", exthdr_num); + goto out_rm_features; + } + } else { + goto out_rm_features; + } + + switch (l4_proto) { + case IPPROTO_TCP: + l4_hrd_len = (l4.tcp->doff) * 4; + break; + case IPPROTO_UDP: + l4_hrd_len = sizeof(struct udphdr); + break; + case IPPROTO_SCTP: + l4_hrd_len = sizeof(struct sctphdr); + break; + default: + goto out_rm_features; + } + + total_hrd_len = l2_l3_hrd_len + l4_hrd_len; + + // TX checksum offload supports a total header len in [0, 255] + if (total_hrd_len > NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX) + goto out_rm_features; + + // TSO supports a total header len in [42, 128] + if (total_hrd_len < NBL_TX_TSO_L2L3L4_HDR_LEN_MIN || + total_hrd_len > NBL_TX_TSO_L2L3L4_HDR_LEN_MAX) + features &= ~NETIF_F_GSO_MASK; + + if (skb->encapsulation) + goto out_rm_features; + + return features; + +out_rm_features: + return features & ~(NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | +
NETIF_F_SCTP_CRC | + NETIF_F_GSO_MASK); +} + +static int nbl_serv_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + netdev_features_t changed = netdev->features ^ features; + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + + if (changed & NETIF_F_NTUPLE) { + bool ena = !!(features & NETIF_F_NTUPLE); + + disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, ena); + } + + return 0; +} + +static int nbl_serv_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type type, u32 state) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u16 vsi_id = NBL_COMMON_TO_VSI_ID(common); + + return disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + type, vsi_id, state); +} + +static LIST_HEAD(nbl_serv_block_cb_list); + +static int nbl_serv_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: { + return flow_block_cb_setup_simple((struct flow_block_offload *)type_data, + &nbl_serv_block_cb_list, + nbl_serv_setup_tc_block_cb, + priv, priv, true); + } + case TC_SETUP_QDISC_MQPRIO: + return nbl_serv_setup_tc_mqprio(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int nbl_serv_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return -EINVAL; + + function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id); + + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ether_addr_copy(net_resource_mgt->vf_info[vf_id].mac, mac); + + disp_ops->register_func_mac(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac, function_id); + + return 0; +} + +static int nbl_serv_set_vf_rate(struct net_device *dev, int vf_id, int min_rate, int max_rate) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + int ret = 0; + + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info || min_rate > 0) + return -EINVAL; + + if (vf_id < net_resource_mgt->num_vfs) { + function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id); + + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ret = 
disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, max_rate); + } + + if (!ret) + net_resource_mgt->vf_info[vf_id].max_tx_rate = max_rate; + + ret = disp_ops->register_func_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, max_rate); + + return ret; +} + +static int nbl_serv_set_vf_spoofchk(struct net_device *dev, int vf_id, bool ena) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret = 0; + + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return -EINVAL; + + if (vf_id < net_resource_mgt->num_vfs) + ret = disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id, ena); + + if (!ret) + net_resource_mgt->vf_info[vf_id].spoof_check = ena; + + return ret; +} + +static int nbl_serv_set_vf_link_state(struct net_device *dev, int vf_id, int link_state) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 function_id = U16_MAX; + bool should_notify = false; + int ret = 0; + + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return -EINVAL; + + function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id); + + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + ret = disp_ops->register_func_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, link_state, &should_notify); + if (!ret && should_notify) + nbl_serv_chan_notify_link_forced_req(serv_mgt, function_id); + + if (!ret) + net_resource_mgt->vf_info[vf_id].state = link_state; + + return ret; +} + +static int nbl_serv_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan, u8 qos, __be16 proto) +{ + struct nbl_service_mgt *serv_mgt = NBL_NETDEV_TO_SERV_MGT(dev); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_notify_vlan_param param = {0}; + int ret = 0; + u16 function_id = U16_MAX; + bool should_notify = false; + + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return -EINVAL; + + if (vlan > 4095 || qos > 7) + return -EINVAL; + + function_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), vf_id); + + if (function_id == U16_MAX) { + netdev_info(dev, "vf id %d invalid\n", vf_id); + return -EINVAL; + } + + if (vlan) { + param.vlan_tci = (vlan & VLAN_VID_MASK) | (qos << VLAN_PRIO_SHIFT); + param.vlan_proto = ntohs(proto); + } else { + proto = 0; + qos = 0; + } + + ret = disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), function_id, + param.vlan_tci, param.vlan_proto, + &should_notify); + if (should_notify && !ret) { + ret = nbl_serv_chan_notify_vlan_req(serv_mgt, function_id, &param); + if (ret) + disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + function_id, 0, 0,
&should_notify); + } + if (!ret) { + net_resource_mgt->vf_info[vf_id].vlan = vlan; + net_resource_mgt->vf_info[vf_id].vlan_proto = ntohs(proto); + net_resource_mgt->vf_info[vf_id].vlan_qos = qos; + } + + return ret; +} + +static int nbl_serv_get_vf_config(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + + if (vf_id >= net_resource_mgt->total_vfs || !net_resource_mgt->vf_info) + return -EINVAL; + + ivi->vf = vf_id; + ivi->spoofchk = vf_info[vf_id].spoof_check; + ivi->linkstate = vf_info[vf_id].state; + ivi->max_tx_rate = vf_info[vf_id].max_tx_rate; + ivi->vlan = vf_info[vf_id].vlan; + ivi->vlan_proto = htons(vf_info[vf_id].vlan_proto); + ivi->qos = vf_info[vf_id].vlan_qos; + ether_addr_copy(ivi->mac, vf_info[vf_id].mac); + + return 0; +} + +static u8 nbl_get_dscp_up(struct nbl_serv_net_resource_mgt *net_resource_mgt, struct sk_buff *skb) +{ + u8 dscp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return net_resource_mgt->dscp2prio_map[dscp]; +} + +static u16 +nbl_serv_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + + if (net_resource_mgt->pfc_mode == NBL_TRUST_MODE_DSCP) + skb->priority = nbl_get_dscp_up(net_resource_mgt, skb); + + return netdev_pick_tx(netdev, skb, sb_dev); +} + +static void nbl_serv_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info; + + vsi_info = &ring_mgt->vsi_info[NBL_VSI_DATA]; + + ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].need_recovery = true; + ring_mgt->tx_rings[vsi_info->ring_offset + txqueue].tx_timeout_count++; + + nbl_warn(common, NBL_DEBUG_QUEUE, "TX timeout on queue %d", txqueue); + + nbl_common_queue_work(&NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->tx_timeout, false, false); +} + +static int nbl_serv_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *netdev, u32 filter_mask, int nlflags) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + u16 bmode; + + bmode = net_resource_mgt->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, netdev, bmode, 0, 0, nlflags, + filter_mask, NULL); +} + +static int nbl_serv_bridge_setlink(struct net_device *netdev, struct nlmsghdr *nlh, + u16 flags, 
struct netlink_ext_ack *extack) +{ + struct nbl_netdev_priv *priv = netdev_priv(netdev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nlattr *attr, *br_spec; + u16 mode; + int ret, rem; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + + if (mode == net_resource_mgt->bridge_mode) + continue; + + ret = disp_ops->set_bridge_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mode); + if (ret) { + netdev_info(netdev, "bridge_setlink failed 0x%x", ret); + return ret; + } + + net_resource_mgt->bridge_mode = mode; + } + + return 0; +} + +static int nbl_serv_get_phys_port_name(struct net_device *dev, char *name, size_t len) +{ + struct nbl_common_info *common = NBL_NETDEV_TO_COMMON(dev); + u8 pf_id; + + pf_id = common->eth_id; + if ((NBL_COMMON_TO_ETH_MODE(common) == NBL_TWO_ETHERNET_PORT) && common->eth_id == 2) + pf_id = 1; + + if (snprintf(name, len, "p%u", pf_id) >= len) + return -EINVAL; + return 0; +} + +static int nbl_serv_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u8 mac[ETH_ALEN]; + + /* return success to avoid linkwatch_do_dev reporting a warning */ + if (test_bit(NBL_FATAL_ERR, adapter->state)) + return 0; + + disp_ops->get_base_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac); + + ppid->id_len = ETH_ALEN; + memcpy(&ppid->id, mac, ppid->id_len); + + return 0; +} + +static int nbl_serv_register_net(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int p4_type, ret = 0; + + ret = disp_ops->register_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + register_param, register_result); + if (ret) + return ret; + + p4_type = disp_ops->get_p4_used(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + switch (p4_type) { + case NBL_P4_DEFAULT: + set_bit(NBL_FLAG_P4_DEFAULT, serv_mgt->flags); + break; + default: + nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_CUSTOMIZED_P4, + "Unknown P4 type %d", p4_type); + } + + return 0; +} + +static int nbl_serv_unregister_net(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->unregister_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_setup_txrx_queues(void *priv, u16 vsi_id, u16 queue_num, u16 net_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_vector *vector; + int i, ret = 0; + + /* Clear cfgs, in case this function exited abnormally last time
*/ + disp_ops->clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + + /* queue_num includes both user and kernel queues */ + ret = disp_ops->alloc_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, queue_num); + if (ret) + return -EFAULT; + + /* ring_mgt->tx_ring_num is only for kernel use */ + for (i = 0; i < ring_mgt->tx_ring_num; i++) { + ring_mgt->tx_rings[i].local_queue_id = NBL_PAIR_ID_GET_TX(i); + ring_mgt->rx_rings[i].local_queue_id = NBL_PAIR_ID_GET_RX(i); + } + + for (i = 0; i < ring_mgt->xdp_ring_offset; i++) { + vector = &ring_mgt->vectors[i]; + vector->local_vector_id = i + net_vector_id; + vector->global_vector_id = + disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, vector->local_vector_id); + vector->irq_enable_base = + disp_ops->get_msix_irq_enable_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector->global_vector_id, + &vector->irq_data); + + disp_ops->set_vector_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector->irq_enable_base, + vector->irq_data, i, + ring_mgt->net_msix_mask_en); + } + + return 0; +} + +static void nbl_serv_remove_txrx_queues(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->free_txrx_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_init_tx_rate(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 func_id; + int ret = 0; + + if (net_resource_mgt->max_tx_rate) { + func_id = disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, net_resource_mgt->max_tx_rate); + } + + return ret; +} + +static int nbl_serv_setup_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->setup_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_remove_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->remove_q2vsi(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_setup_rss(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->setup_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_remove_rss(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->remove_rss(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_alloc_rings(void *priv, struct net_device *netdev, struct nbl_ring_param *param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct device *dev; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + ring_mgt =
NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ring_mgt->tx_ring_num = param->tx_ring_num; + ring_mgt->rx_ring_num = param->rx_ring_num; + ring_mgt->tx_desc_num = param->queue_size; + ring_mgt->rx_desc_num = param->queue_size; + ring_mgt->xdp_ring_offset = param->xdp_ring_offset; + + ret = disp_ops->alloc_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), netdev, param); + if (ret) + goto alloc_rings_fail; + + ret = nbl_serv_set_tx_rings(ring_mgt, netdev, dev); + if (ret) + goto set_tx_fail; + + ret = nbl_serv_set_rx_rings(ring_mgt, netdev, dev); + if (ret) + goto set_rx_fail; + + ret = nbl_serv_set_vectors(serv_mgt, netdev, dev); + if (ret) + goto set_vectors_fail; + + ret = nbl_serv_register_xdp_rxq(serv_mgt, ring_mgt); + if (ret) + goto register_xdp_err; + + return 0; + +register_xdp_err: + nbl_serv_remove_vectors(ring_mgt, dev); +set_vectors_fail: + nbl_serv_remove_rx_ring(ring_mgt, dev); +set_rx_fail: + nbl_serv_remove_tx_ring(ring_mgt, dev); +set_tx_fail: + disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +alloc_rings_fail: + return ret; +} + +static void nbl_serv_free_rings(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct device *dev; + struct nbl_serv_ring_mgt *ring_mgt; + struct nbl_dispatch_ops *disp_ops; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + nbl_serv_unregister_xdp_rxq(serv_mgt, ring_mgt); + nbl_serv_remove_vectors(ring_mgt, dev); + nbl_serv_remove_rx_ring(ring_mgt, dev); + nbl_serv_remove_tx_ring(ring_mgt, dev); + + disp_ops->remove_rings(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_enable_napis(void *priv, u16 vsi_index) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index]; + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i; + + for (i = start; i < end; i++) + napi_enable(ring_mgt->vectors[i].napi); + + return 0; +} + +static void nbl_serv_disable_napis(void *priv, u16 vsi_index) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_ring_vsi_info *vsi_info = &ring_mgt->vsi_info[vsi_index]; + u16 start = vsi_info->ring_offset, end = vsi_info->ring_offset + vsi_info->ring_num; + int i; + + for (i = start; i < end; i++) + napi_disable(ring_mgt->vectors[i].napi); +} + +static void nbl_serv_set_mask_en(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_ring_mgt *ring_mgt; + + ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + + ring_mgt->net_msix_mask_en = enable; +} + +static int nbl_serv_start_net_flow(void *priv, struct net_device *netdev, u16 vsi_id, u16 vid) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + struct nbl_serv_vlan_node *vlan_node; + int ret = 0; + + /* Clear cfgs, in case this function exited abnormally last time */ + disp_ops->clear_accel_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +
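/* Stale flow configuration is cleared in two steps: accelerated flows first, then the ordinary flow entries of this vsi. */ +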
disp_ops->clear_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + if (!common->is_vf) + disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, 1); + + if (!list_empty(&flow_mgt->vlan_list)) + return -ECONNRESET; + + ret = disp_ops->add_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + if (ret) + goto add_multi_fail; + + vlan_node = nbl_serv_alloc_vlan_node(); + if (!vlan_node) + goto alloc_fail; + + flow_mgt->vid = vid; + ether_addr_copy(flow_mgt->mac, netdev->dev_addr); + ret = disp_ops->add_macvlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), flow_mgt->mac, + vid, vsi_id); + if (ret) + goto add_macvlan_fail; + + vlan_node->vid = vid; + list_add(&vlan_node->node, &flow_mgt->vlan_list); + + return 0; + +add_macvlan_fail: + nbl_serv_free_vlan_node(vlan_node); +alloc_fail: + disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +add_multi_fail: + return ret; +} + +static void nbl_serv_stop_net_flow(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_netdev_priv *net_priv = netdev_priv(dev); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_flow_mgt *flow_mgt = NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt); + + nbl_serv_del_all_vlans(serv_mgt); + nbl_serv_del_all_submacs(serv_mgt, net_priv->async_other_vsi); + if (!common->is_vf) + disp_ops->config_fd_flow_state(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_CHAN_FDIR_RULE_NORMAL, vsi_id, 0); + + disp_ops->del_multi_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); + + disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vsi_id, -1, false); + memset(flow_mgt->mac, 0, sizeof(flow_mgt->mac)); +} + +static int nbl_serv_set_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->add_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_remove_lldp_flow(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->del_lldp_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static int nbl_serv_start_mgt_flow(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->setup_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_stop_mgt_flow(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->remove_multi_group(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static u32 nbl_serv_get_tx_headroom(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_tx_headroom(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +/** + * This op gets the flexible product capability from the ctrl device; if the device has no + * manager capability, it needs to query the ctrl device through the channel. + */ +static bool
nbl_serv_get_product_flex_cap(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_product_flex_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + cap_type); +} + +/** + * This op gets the fixed product capability from the resource layer; this capability is fixed + * by product_type and does not need to be queried from the ctrl device. + */ +static bool nbl_serv_get_product_fix_cap(void *priv, enum nbl_fix_cap_type cap_type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + cap_type); +} + +static int nbl_serv_init_chip_factory(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + int ret = 0; + + ret = disp_ops->init_chip_module(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "init_chip_module failed\n"); + goto module_init_fail; + } + + return 0; + +module_init_fail: + return ret; +} + +static int nbl_serv_destroy_chip_factory(void *p) +{ + return 0; +} + +static int nbl_serv_init_chip(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + struct nbl_common_info *common; + struct device *dev; + int ret = 0; + + common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + dev = NBL_COMMON_TO_DEV(common); + + ret = disp_ops->init_chip_module(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "init_chip_module failed\n"); + goto module_init_fail; + } + + ret = disp_ops->queue_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "queue_init failed\n"); + goto queue_init_fail; + } + + ret = disp_ops->vsi_init(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) { + dev_err(dev, "vsi_init failed\n"); + goto vsi_init_fail; + } + + return 0; + +vsi_init_fail: +queue_init_fail: +module_init_fail: + return ret; +} + +static int nbl_serv_destroy_chip(void *p) +{ + return 0; +} + +static int nbl_serv_configure_msix_map(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->configure_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), num_net_msix, + num_others_msix, net_msix_mask_en); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_destroy_msix_map(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->destroy_msix_map(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (ret) + return -EIO; + + return 0; +} + +static int nbl_serv_enable_mailbox_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_mailbox_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) +
return -EIO; + + return 0; +} + +static int nbl_serv_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_abnormal_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static irqreturn_t nbl_serv_clean_rings(int __always_unused irq, void *data) +{ + struct nbl_serv_vector *vector = (struct nbl_serv_vector *)data; + + napi_schedule_irqoff(vector->napi); + + return IRQ_HANDLED; +} + +static int nbl_serv_request_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_ring *tx_ring, *rx_ring; + struct nbl_serv_vector *vector; + u32 irq_num; + int i, ret = 0; + + for (i = 0; i < ring_mgt->xdp_ring_offset; i++) { + tx_ring = &ring_mgt->tx_rings[i]; + rx_ring = &ring_mgt->rx_rings[i]; + vector = &ring_mgt->vectors[i]; + vector->tx_ring = tx_ring; + vector->rx_ring = rx_ring; + + irq_num = msix_info->msix_entries[i].vector; + snprintf(vector->name, sizeof(vector->name) - 1, "%s%03d-%s-%02u", "NBL", + NBL_COMMON_TO_VSI_ID(common), "TxRx", i); + ret = devm_request_irq(dev, irq_num, nbl_serv_clean_rings, 0, + vector->name, vector); + if (ret) { + nbl_err(common, NBL_DEBUG_INTR, "TxRx Queue %u requests MSIX irq failed " + "with error %d", i, ret); + goto request_irq_err; + } + if (!cpumask_empty(&vector->cpumask)) + irq_set_affinity_hint(irq_num, &vector->cpumask); + } + + net_resource_mgt->num_net_msix = msix_info->msix_num; + + return 0; + +request_irq_err: + while (--i + 1) { + vector = &ring_mgt->vectors[i]; + + irq_num = msix_info->msix_entries[i].vector; + irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, vector); + } + return ret; +} + +static void nbl_serv_free_net_irq(void *priv, struct nbl_msix_info_param *msix_info) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_vector *vector; + u32 irq_num; + int i; + + for (i = 0; i < ring_mgt->xdp_ring_offset; i++) { + vector = &ring_mgt->vectors[i]; + + irq_num = msix_info->msix_entries[i].vector; + irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, vector); + } +} + +static u16 nbl_serv_get_global_vector(void *priv, u16 local_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_global_vector(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), local_vector_id); +} + +static u16 nbl_serv_get_msix_entry_id(void *priv, u16 local_vector_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_dispatch_ops *disp_ops = 
NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_msix_entry_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_VSI_ID(common), local_vector_id); +} + +static u16 nbl_serv_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id, type); +} + +static void nbl_serv_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_eth_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, + eth_mode, eth_id, logic_eth_id); +} + +void nbl_serv_get_rep_drop_stats(struct nbl_service_mgt *serv_mgt, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats) +{ + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_serv_rep_drop *rep_drop; + u16 rep_data_index; + unsigned int start; + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + rep_data_index = disp_ops->get_rep_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), rep_vsi_id); + if (rep_data_index >= net_resource_mgt->num_vfs) + return; + + rep_drop = &net_resource_mgt->rep_drop[rep_data_index]; + do { + start = u64_stats_fetch_begin(&rep_drop->rep_drop_syncp); + rep_stats->dropped = rep_drop->tx_dropped; + } while (u64_stats_fetch_retry(&rep_drop->rep_drop_syncp, start)); +} + +static void nbl_serv_rep_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct nbl_netdev_priv *rep_priv = netdev_priv(netdev); + struct nbl_rep_stats rep_stats = { 0 }; + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + if (!adapter) { + netdev_err(netdev, "rep get stats, adapter is null\n"); + return; + } + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + if (!stats) { + netdev_err(netdev, "rep get stats, stats is null\n"); + return; + } + + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + rep_priv->rep->rep_vsi_id, &rep_stats, true); + stats->tx_packets += rep_stats.packets; + stats->tx_bytes += rep_stats.bytes; + + disp_ops->get_rep_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + rep_priv->rep->rep_vsi_id, &rep_stats, false); + stats->rx_packets += rep_stats.packets; + stats->rx_bytes += rep_stats.bytes; + + nbl_serv_get_rep_drop_stats(serv_mgt, rep_priv->rep->rep_vsi_id, &rep_stats); + stats->tx_dropped += rep_stats.dropped; + stats->rx_dropped = 0; + stats->multicast = 0; + stats->rx_errors = 0; + stats->tx_errors = 0; + stats->rx_length_errors = 0; + stats->rx_crc_errors = 0; + stats->rx_frame_errors = 0; +} + +static void nbl_serv_rep_set_rx_mode(struct net_device *dev) +{ +} + +static int nbl_serv_rep_set_mac(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) { + netdev_err(dev, "Attempt to change an invalid mac address %pM\n", addr->sa_data); + return -EADDRNOTAVAIL; + } + + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) + return 0; + + return -EOPNOTSUPP; +} + +static int nbl_serv_rep_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + return -EAGAIN; +} + +static int
nbl_serv_rep_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + return -EAGAIN; +} + +static LIST_HEAD(nbl_serv_rep_block_cb_list); + +static int nbl_serv_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: { + return flow_block_cb_setup_simple((struct flow_block_offload *)type_data, + &nbl_serv_rep_block_cb_list, + nbl_serv_setup_tc_block_cb, + priv, priv, true); + } + default: + return -EOPNOTSUPP; + } +} + +static int nbl_serv_rep_get_phys_port_name(struct net_device *dev, char *name, size_t len) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(dev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u16 vf_base_vsi_id; + u16 vf_id; + u8 pf_id; + + pf_id = common->eth_id; + if ((NBL_COMMON_TO_ETH_MODE(common) == NBL_TWO_ETHERNET_PORT) && common->eth_id == 2) + pf_id = 1; + + vf_base_vsi_id = disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + NBL_COMMON_TO_MGT_PF(common)); + vf_id = priv->rep->rep_vsi_id - vf_base_vsi_id; + if (snprintf(name, len, "pf%uvf%u", pf_id, vf_id) >= len) + return -EINVAL; + return 0; +} + +static int nbl_serv_rep_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) +{ + struct nbl_netdev_priv *priv = netdev_priv(dev); + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + u8 mac[ETH_ALEN]; + + disp_ops->get_base_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), mac); + + ppid->id_len = ETH_ALEN; + memcpy(&ppid->id, mac, ppid->id_len); + + return 0; +} + +static struct nbl_indr_dev_priv *nbl_find_indr_dev_priv(void *priv, struct net_device *netdev, + int binder_type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_indr_dev_priv *indr_priv; + + if (!netdev) + return NULL; + + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + list_for_each_entry(indr_priv, &net_resource_mgt->indr_dev_priv_list, list) + if (indr_priv->indr_dev == netdev && indr_priv->binder_type == binder_type) + return indr_priv; + + return NULL; +} + +static void nbl_serv_indr_dev_block_unbind(void *priv) +{ + struct nbl_indr_dev_priv *indr_priv = priv; + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(indr_priv->dev_priv); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + + list_del(&indr_priv->list); + devm_kfree(dev, indr_priv); +} + +static LIST_HEAD(nbl_serv_indr_block_cb_list); + +static int nbl_serv_indr_dev_setup_block(struct net_device *netdev, struct Qdisc *sch, + struct nbl_netdev_priv *dev_priv, + struct flow_block_offload *flow_bo, + flow_setup_cb_t *setup_cb, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +{ + struct nbl_adapter *adapter = NBL_NETDEV_PRIV_TO_ADAPTER(dev_priv); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct device *dev = NBL_ADAPTER_TO_DEV(adapter); + struct nbl_indr_dev_priv *indr_priv = NULL; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NULL; + struct flow_block_cb *block_cb = NULL; + + if 
(flow_bo->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+	    (flow_bo->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS ||
+	     !netif_is_ovs_master(netdev)))
+		return -EOPNOTSUPP;
+
+	flow_bo->unlocked_driver_cb = true;
+	flow_bo->driver_block_list = &nbl_serv_indr_block_cb_list;
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+
+	switch (flow_bo->command) {
+	case FLOW_BLOCK_BIND:
+		indr_priv = nbl_find_indr_dev_priv(serv_mgt, netdev, flow_bo->binder_type);
+		if (indr_priv)
+			return -EEXIST;
+
+		indr_priv = devm_kzalloc(dev, sizeof(struct nbl_indr_dev_priv), GFP_KERNEL);
+		if (!indr_priv)
+			return -ENOMEM;
+
+		indr_priv->indr_dev = netdev;
+		indr_priv->dev_priv = dev_priv;
+		indr_priv->binder_type = flow_bo->binder_type;
+		list_add_tail(&indr_priv->list, &net_resource_mgt->indr_dev_priv_list);
+
+		block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+						    nbl_serv_indr_dev_block_unbind, flow_bo,
+						    netdev, sch, data, dev_priv, cleanup);
+		if (IS_ERR(block_cb)) {
+			netdev_err(netdev, "indr block cb alloc failed\n");
+			list_del(&indr_priv->list);
+			devm_kfree(dev, indr_priv);
+			return PTR_ERR(block_cb);
+		}
+		flow_block_cb_add(block_cb, flow_bo);
+		list_add_tail(&block_cb->driver_list, &nbl_serv_indr_block_cb_list);
+		break;
+	case FLOW_BLOCK_UNBIND:
+		indr_priv = nbl_find_indr_dev_priv(serv_mgt, netdev, flow_bo->binder_type);
+		if (!indr_priv)
+			return -ENOENT;
+
+		block_cb = flow_block_cb_lookup(flow_bo->block, setup_cb, indr_priv);
+		if (!block_cb)
+			return -ENOENT;
+		flow_indr_block_cb_remove(block_cb, flow_bo);
+		list_del(&block_cb->driver_list);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int nbl_serv_indr_dev_setup_tc(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
+				      enum tc_setup_type type, void *type_data, void *data,
+				      void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	struct nbl_netdev_priv *priv = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return nbl_serv_indr_dev_setup_block(dev, sch, priv, type_data,
+						     nbl_serv_indr_setup_tc_block_cb,
+						     data, cleanup);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void nbl_serv_get_rep_feature(void *priv, struct nbl_register_net_result *register_result)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_rep_feature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), register_result);
+}
+
+static void nbl_serv_get_rep_queue_num(void *priv, u8 *base_queue_id, u8 *rep_queue_num)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+
+	*base_queue_id = (u8)ring_mgt->vsi_info[NBL_VSI_CTRL].ring_offset;
+	*rep_queue_num = (u8)ring_mgt->vsi_info[NBL_VSI_CTRL].ring_num;
+}
+
+static void nbl_serv_get_rep_queue_info(void *priv, u16 *queue_num, u16 *queue_size)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_rep_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				     queue_num, queue_size);
+}
+
+static void nbl_serv_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->get_user_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				      queue_num, queue_size, vsi_id);
+}
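+
+/* Representor TX path: each rep queue has a small ptr_ring; senders enqueue
+ * their skbs, and whichever CPU wins the trylock drains the ring through the
+ * real transmit queue.
+ */
+static int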
nbl_serv_rep_enqueue(struct sk_buff *skb, + struct nbl_serv_rep_queue_mgt *rep_queue_mgt) +{ + if (rep_queue_mgt->size == 0) + return -EINVAL; + + return ptr_ring_produce(&rep_queue_mgt->ring, skb); +} + +static struct sk_buff *nbl_serv_rep_dequeue(struct nbl_serv_rep_queue_mgt *rep_queue_mgt) +{ + struct sk_buff *skb; + + if (rep_queue_mgt->size == 0) + return NULL; + + if (__ptr_ring_empty(&rep_queue_mgt->ring)) + skb = NULL; + else + skb = __ptr_ring_consume(&rep_queue_mgt->ring); + + if (unlikely(!skb)) { + /* smp_mb for dequeue */ + smp_mb__after_atomic(); + if (!__ptr_ring_empty(&rep_queue_mgt->ring)) + skb = __ptr_ring_consume(&rep_queue_mgt->ring); + } + + return skb; +} + +static inline bool nbl_serv_rep_queue_mgt_start(struct nbl_serv_rep_queue_mgt *rep_queue_mgt) +{ + return spin_trylock(&rep_queue_mgt->seq_lock); +} + +static inline void nbl_serv_rep_queue_mgt_end(struct nbl_serv_rep_queue_mgt *rep_queue_mgt) +{ + spin_unlock(&rep_queue_mgt->seq_lock); +} + +static void nbl_serv_rep_update_drop_stats(void *priv, struct sk_buff *skb) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_serv_net_resource_mgt *net_resource_mgt; + u16 rep_vsi_id; + u16 rep_data_index; + + rep_vsi_id = *(u16 *)&skb->cb[NBL_SKB_FILL_VSI_ID_OFF]; + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + rep_data_index = disp_ops->get_rep_index(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), rep_vsi_id); + dev_kfree_skb_any(skb); + if (rep_data_index >= net_resource_mgt->num_vfs) + return; + + u64_stats_update_begin(&net_resource_mgt->rep_drop[rep_data_index].rep_drop_syncp); + net_resource_mgt->rep_drop[rep_data_index].tx_dropped++; + u64_stats_update_end(&net_resource_mgt->rep_drop[rep_data_index].rep_drop_syncp); +} + +static void nbl_serv_rep_queue_mgt_run(struct nbl_serv_rep_queue_mgt *rep_queue_mgt, + struct net_device *netdev) +{ + struct nbl_adapter *adapter = NBL_NETDEV_TO_ADAPTER(netdev); + struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(adapter); + struct nbl_resource_pt_ops *pt_ops = NBL_ADAPTER_TO_RES_PT_OPS(adapter); + struct sk_buff *skb; + netdev_tx_t ret = NETDEV_TX_OK; + int i = 0; + + skb = nbl_serv_rep_dequeue(rep_queue_mgt); + if (!skb) + return; + for (; skb; skb = nbl_serv_rep_dequeue(rep_queue_mgt)) { + ret = pt_ops->rep_xmit(skb, rep_queue_mgt->netdev); + if (ret == NETDEV_TX_BUSY) { + if (net_ratelimit()) + netdev_dbg(netdev, "dequeue skb tx busy!\n"); + /* never hang in sirq too long, so if a tx_busy is returned, drop it */ + nbl_serv_rep_update_drop_stats(serv_mgt, skb); + } + if (i++ >= NBL_DEFAULT_REP_TX_MAX_NUM) + return; + } +} + +static netdev_tx_t nbl_serv_rep_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct nbl_netdev_priv *rep_priv = netdev_priv(netdev); + struct nbl_adapter *adapter; + struct nbl_service_mgt *serv_mgt; + struct nbl_serv_rep_queue_mgt *rep_queue_mgt; + int ret; + u8 rep_queue_idx; + u8 i; + bool has_locked_flag = false; + + adapter = NBL_NETDEV_TO_ADAPTER(netdev); + serv_mgt = NBL_NETDEV_TO_SERV_MGT(netdev); + + rep_queue_idx = (rep_priv->rep->rep_vsi_id - 1) % rep_priv->rep->rep_queue_num; + rep_queue_mgt = &serv_mgt->rep_queue_mgt[rep_queue_idx]; + skb->queue_mapping = rep_queue_idx + rep_priv->rep->base_queue_id; + *(u16 *)(&skb->cb[NBL_SKB_FILL_VSI_ID_OFF]) = rep_priv->rep->rep_vsi_id; + skb->cb[NBL_SKB_FILL_EXT_HDR_OFF] = NBL_REP_FILL_EXT_HDR; + ret = nbl_serv_rep_enqueue(skb, rep_queue_mgt); + + if (unlikely(ret)) 
{
+		if (net_ratelimit())
+			netdev_info(netdev, "rep enqueue failed, size:%d, rep_vsi_id:%d\n",
+				    rep_queue_mgt->size, rep_priv->rep->rep_vsi_id);
+	}
+	for (i = 0; i < NBL_DEFAULT_REP_TX_RETRY_NUM; i++) {
+		if (nbl_serv_rep_queue_mgt_start(rep_queue_mgt)) {
+			has_locked_flag = true;
+			nbl_serv_rep_queue_mgt_run(rep_queue_mgt, netdev);
+			nbl_serv_rep_queue_mgt_end(rep_queue_mgt);
+		}
+	}
+
+	if (has_locked_flag) {
+		if (ret)
+			ret = NET_XMIT_CN;
+		else
+			ret = NET_XMIT_SUCCESS;
+	}
+
+	if (likely(ret)) {
+		/* The enqueue failed; if we also got the lock, the ring is
+		 * really full, so let the stack retry. Otherwise drop the skb
+		 * rather than risk hanging in softirq context.
+		 */
+		if (ret == NET_XMIT_CN)
+			return NETDEV_TX_BUSY;
+		nbl_serv_rep_update_drop_stats(serv_mgt, skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* The skb was enqueued; it is either already flushed or will be
+	 * flushed by whichever CPU currently holds the lock.
+	 */
+	return NETDEV_TX_OK;
+}
+
+static int nbl_serv_alloc_rep_queue_mgt(void *priv, struct net_device *netdev)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct device *dev;
+	int i, ret;
+	u8 base_queue_id;
+	u8 rep_queue_num;
+
+	if (!serv_mgt)
+		return -EINVAL;
+
+	dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+
+	dev_info(dev, "nbl serv alloc rep queue mgt start\n");
+	nbl_serv_get_rep_queue_num(serv_mgt, &base_queue_id, &rep_queue_num);
+	serv_mgt->rep_queue_mgt = devm_kcalloc(dev, rep_queue_num,
+					       sizeof(struct nbl_serv_rep_queue_mgt), GFP_KERNEL);
+	if (!serv_mgt->rep_queue_mgt)
+		return -ENOMEM;
+	for (i = 0; i < rep_queue_num; i++) {
+		ret = ptr_ring_init(&serv_mgt->rep_queue_mgt[i].ring,
+				    NBL_REP_QUEUE_MGT_DESC_NUM, GFP_KERNEL);
+		if (ret) {
+			dev_err(dev, "ptr ring init failed\n");
+			goto free_ptr_ring;
+		}
+
+		spin_lock_init(&serv_mgt->rep_queue_mgt[i].seq_lock);
+		serv_mgt->rep_queue_mgt[i].size = NBL_REP_QUEUE_MGT_DESC_NUM;
+		serv_mgt->rep_queue_mgt[i].netdev = netdev;
+		dev_info(dev, "rep_queue_mgt init success\n");
+	}
+	dev_info(dev, "nbl serv alloc rep queue mgt end\n");
+
+	return 0;
+
+free_ptr_ring:
+	while (--i >= 0)
+		ptr_ring_cleanup(&serv_mgt->rep_queue_mgt[i].ring, NULL);
+
+	devm_kfree(dev, serv_mgt->rep_queue_mgt);
+	serv_mgt->rep_queue_mgt = NULL;
+	return ret;
+}
+
+static int nbl_serv_free_rep_queue_mgt(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct device *dev;
+	int i;
+	u8 base_queue_id;
+	u8 rep_queue_num;
+
+	if (!serv_mgt)
+		return -EINVAL;
+
+	dev = NBL_SERV_MGT_TO_DEV(serv_mgt);
+	if (!serv_mgt->rep_queue_mgt)
+		return -EINVAL;
+
+	nbl_serv_get_rep_queue_num(serv_mgt, &base_queue_id, &rep_queue_num);
+	for (i = 0; i < rep_queue_num; i++)
+		ptr_ring_cleanup(&serv_mgt->rep_queue_mgt[i].ring, NULL);
+
+	dev_info(dev, "ptr ring cleanup\n");
+	devm_kfree(dev, serv_mgt->rep_queue_mgt);
+	serv_mgt->rep_queue_mgt = NULL;
+
+	return 0;
+}
+
+static void nbl_serv_set_eswitch_mode(void *priv, u16 eswitch_mode)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_serv_net_resource_mgt *net_resource_mgt;
+
+	net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+
+	if (eswitch_mode == NBL_ESWITCH_OFFLOADS) {
+		disp_ops->set_dport_fc_th_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					      common->eth_id, false);
+		if (net_resource_mgt->lag_info && net_resource_mgt->lag_info->lag_num > 1)
+
disp_ops->set_shaping_dport_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, false); + } else { + disp_ops->set_dport_fc_th_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, true); + if (net_resource_mgt->lag_info && net_resource_mgt->lag_info->lag_num > 1) + disp_ops->set_shaping_dport_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, true); + } + disp_ops->set_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eswitch_mode); +} + +static u16 nbl_serv_get_eswitch_mode(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_alloc_rep_data(void *priv, int num_vfs, u16 vf_base_vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_dispatch_ops *disp_ops; + struct device *dev; + + if (!serv_mgt) + return -EINVAL; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + net_resource_mgt->rep_drop = devm_kcalloc(dev, num_vfs, + sizeof(struct nbl_serv_rep_drop), + GFP_KERNEL); + if (!net_resource_mgt->rep_drop) + return -ENOMEM; + + net_resource_mgt->num_vfs = num_vfs; + return disp_ops->alloc_rep_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), num_vfs, + vf_base_vsi_id); +} + +static void nbl_serv_free_rep_data(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_dispatch_ops *disp_ops; + struct device *dev; + + if (!serv_mgt) + return; + + dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + if (net_resource_mgt->rep_drop) + devm_kfree(dev, net_resource_mgt->rep_drop); + disp_ops->free_rep_data(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_set_rep_netdev_info(void *priv, void *rep_data) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->set_rep_netdev_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), rep_data); +} + +static void nbl_serv_unset_rep_netdev_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->unset_rep_netdev_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_disable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->disable_phy_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +static int nbl_serv_enable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->enable_phy_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +static void nbl_serv_init_acl(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + 
disp_ops->init_acl(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_uninit_acl(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->uninit_acl(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_set_upcall_rule(void *priv, u8 eth_id, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->add_nd_upcall_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, 0); + + return disp_ops->set_upcall_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, vsi_id); +} + +static int nbl_serv_unset_upcall_rule(void *priv, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->del_nd_upcall_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + return disp_ops->unset_upcall_rule(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id); +} + +static int nbl_serv_switchdev_init_cmdq(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + return disp_ops->switchdev_init_cmdq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_switchdev_deinit_cmdq(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + return disp_ops->switchdev_deinit_cmdq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_set_tc_flow_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->set_tc_flow_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_unset_tc_flow_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + return disp_ops->unset_tc_flow_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_get_tc_flow_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_tc_flow_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_register_indr_dev_tc_offload(void *priv, struct net_device *netdev) +{ + struct nbl_netdev_priv *dev_priv = netdev_priv(netdev); + + return flow_indr_dev_register(nbl_serv_indr_dev_setup_tc, dev_priv); +} + +static void nbl_serv_unregister_indr_dev_tc_offload(void *priv, struct net_device *netdev) +{ + struct nbl_netdev_priv *dev_priv = netdev_priv(netdev); + + flow_indr_dev_unregister(nbl_serv_indr_dev_setup_tc, dev_priv, + nbl_serv_indr_dev_block_unbind); +} + +static void nbl_serv_set_lag_info(void *priv, struct net_device *bond_netdev, u8 lag_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + + net_resource_mgt->lag_info = devm_kzalloc(dev, sizeof(struct nbl_serv_lag_info), + 
GFP_KERNEL); + if (!net_resource_mgt->lag_info) + return; + net_resource_mgt->lag_info->bond_netdev = bond_netdev; + net_resource_mgt->lag_info->lag_id = lag_id; + + dev_info(dev, "set lag info, bond_netdev:%p, lag_id:%d\n", bond_netdev, lag_id); +} + +static void nbl_serv_unset_lag_info(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + + if (net_resource_mgt->lag_info) { + devm_kfree(dev, net_resource_mgt->lag_info); + net_resource_mgt->lag_info = NULL; + } +} + +static void +nbl_serv_set_netdev_ops(void *priv, const struct net_device_ops *net_device_ops, bool is_pf) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + + dev_info(dev, "set netdev ops:%p is_pf:%d\n", net_device_ops, is_pf); + if (is_pf) + net_resource_mgt->netdev_ops.pf_netdev_ops = (void *)net_device_ops; + else + net_resource_mgt->netdev_ops.rep_netdev_ops = (void *)net_device_ops; +} + +static int nbl_serv_enable_lag_protocol(void *priv, u16 eth_id, bool lag_en) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct net_device *dev = net_resource_mgt->netdev; + struct nbl_netdev_priv *net_priv = netdev_priv(dev); + int ret = 0; + + ret = disp_ops->enable_lag_protocol(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, lag_en); + if (lag_en) + ret = disp_ops->add_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + net_priv->other_vsi); + else + disp_ops->del_lag_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), net_priv->other_vsi); + + return ret; +} + +static int nbl_serv_cfg_lag_hash_algorithm(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->cfg_lag_hash_algorithm(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, lag_id, hash_type); +} + +static int nbl_serv_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + if (net_resource_mgt->lag_info) + net_resource_mgt->lag_info->lag_id = lag_id; + + disp_ops->cfg_lag_mcc(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, lag_id, fwd); + + return disp_ops->cfg_lag_member_fwd(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, lag_id, fwd); +} + +static int nbl_serv_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret = 0; + u16 cur_eswitch_mode = NBL_ESWITCH_NONE; + bool shaping_vld = true; + + ret = 
disp_ops->cfg_lag_member_list(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); + if (ret) + return ret; + + ret = disp_ops->cfg_duppkt_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); + if (ret) + return ret; + + if (net_resource_mgt->lag_info) + net_resource_mgt->lag_info->lag_num = param->lag_num; + + cur_eswitch_mode = disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (cur_eswitch_mode == NBL_ESWITCH_OFFLOADS) { + shaping_vld = param->lag_num > 1 ? false : true; + disp_ops->set_shaping_dport_vld(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + common->eth_id, shaping_vld); + } + + ret = disp_ops->cfg_eth_bond_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); + if (ret) + return ret; + + ret = disp_ops->cfg_duppkt_mcc(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param); + + return ret; +} + +static int nbl_serv_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->cfg_lag_member_up_attr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, lag_id, enable); +} + +static void nbl_serv_net_stats_update_task(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, net_stats_update); + struct nbl_service_mgt *serv_mgt; + + serv_mgt = serv_net_resource_mgt->serv_mgt; + + nbl_serv_update_stats(serv_mgt, false); +} + +static void nbl_serv_rx_mode_async_task(struct work_struct *work) +{ + struct nbl_serv_net_resource_mgt *serv_net_resource_mgt = + container_of(work, struct nbl_serv_net_resource_mgt, rx_mode_async); + + if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_MODIFY_MAC_FILTER) + nbl_modify_submacs(serv_net_resource_mgt); + + if (serv_net_resource_mgt->rxmode_set_required & NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE) + nbl_modify_promisc_mode(serv_net_resource_mgt); +} + +static void nbl_serv_net_task_service_timer(struct timer_list *t) +{ + struct nbl_serv_net_resource_mgt *net_resource_mgt = + from_timer(net_resource_mgt, t, serv_timer); + + mod_timer(&net_resource_mgt->serv_timer, + round_jiffies(net_resource_mgt->serv_timer_period + jiffies)); + nbl_common_queue_work(&net_resource_mgt->net_stats_update, false, false); +} + +static void nbl_serv_setup_flow_mgt(struct nbl_serv_flow_mgt *flow_mgt) +{ + INIT_LIST_HEAD(&flow_mgt->vlan_list); + INIT_LIST_HEAD(&flow_mgt->submac_list); +} + +static void nbl_serv_register_restore_netdev_queue(struct nbl_service_mgt *serv_mgt) +{ + struct nbl_channel_ops *chan_ops = NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt); + + if (!chan_ops->check_queue_exist(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_TYPE_MAILBOX)) + return; + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE, + nbl_serv_chan_stop_abnormal_sw_queue_resp, serv_mgt); + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, + nbl_serv_chan_restore_netdev_queue_resp, serv_mgt); + + chan_ops->register_msg(NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt), + NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, + nbl_serv_chan_restart_netdev_queue_resp, serv_mgt); +} + +static void nbl_serv_remove_net_resource_mgt(void *priv) +{ + struct device *dev; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + net_resource_mgt = 
NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + dev = NBL_COMMON_TO_DEV(common); + + if (net_resource_mgt) { + del_timer_sync(&net_resource_mgt->serv_timer); + nbl_common_release_task(&net_resource_mgt->rx_mode_async); + nbl_common_release_task(&net_resource_mgt->net_stats_update); + nbl_common_release_task(&net_resource_mgt->tx_timeout); + if (common->is_vf) { + nbl_common_release_task(&net_resource_mgt->update_link_state); + nbl_common_release_task(&net_resource_mgt->update_vlan); + } + nbl_free_filter(net_resource_mgt); + devm_kfree(dev, net_resource_mgt); + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = NULL; + } +} + +static int nbl_serv_phy_init(struct nbl_serv_net_resource_mgt *net_resource_mgt) +{ + struct nbl_service_mgt *serv_mgt = net_resource_mgt->serv_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u8 eth_id = NBL_COMMON_TO_ETH_ID(common); + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_phy_caps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, &net_resource_mgt->phy_caps); + + return ret; +} + +static int nbl_serv_setup_net_resource_mgt(void *priv, struct net_device *netdev, + u16 vlan_proto, u16 vlan_tci, u32 rate) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_serv_net_resource_mgt *net_resource_mgt; + + net_resource_mgt = devm_kzalloc(dev, sizeof(struct nbl_serv_net_resource_mgt), GFP_KERNEL); + if (!net_resource_mgt) + return -ENOMEM; + + net_resource_mgt->netdev = netdev; + net_resource_mgt->serv_mgt = serv_mgt; + net_resource_mgt->vlan_proto = vlan_proto; + net_resource_mgt->vlan_tci = vlan_tci; + net_resource_mgt->max_tx_rate = rate; + NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) = net_resource_mgt; + + nbl_serv_phy_init(net_resource_mgt); + nbl_serv_register_restore_netdev_queue(serv_mgt); + if (common->is_vf) { + nbl_serv_register_link_forced_notify(serv_mgt); + nbl_serv_register_vlan_notify(serv_mgt); + } + timer_setup(&net_resource_mgt->serv_timer, nbl_serv_net_task_service_timer, 0); + + net_resource_mgt->serv_timer_period = HZ; + nbl_common_alloc_task(&net_resource_mgt->rx_mode_async, nbl_serv_rx_mode_async_task); + nbl_common_alloc_task(&net_resource_mgt->net_stats_update, nbl_serv_net_stats_update_task); + nbl_common_alloc_task(&net_resource_mgt->tx_timeout, nbl_serv_handle_tx_timeout); + if (common->is_vf) { + nbl_common_alloc_task(&net_resource_mgt->update_link_state, + nbl_serv_update_link_state); + nbl_common_alloc_task(&net_resource_mgt->update_vlan, + nbl_serv_update_vlan); + } + + INIT_LIST_HEAD(&net_resource_mgt->mac_filter_list); + INIT_LIST_HEAD(&net_resource_mgt->indr_dev_priv_list); + spin_lock_init(&net_resource_mgt->mac_vlan_list_lock); + spin_lock_init(&net_resource_mgt->current_netdev_promisc_flags_lock); + net_resource_mgt->get_stats_jiffies = jiffies; + + mod_timer(&net_resource_mgt->serv_timer, + round_jiffies(jiffies + net_resource_mgt->serv_timer_period)); + + return 0; +} + +static int nbl_serv_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_adminq_irq(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + vector_id, enable_msix); + if (ret) + return -EIO; + + return 0; +} + +static u16 
nbl_serv_get_rdma_cap_num(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_rdma_cap_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_setup_rdma_id(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->setup_rdma_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_remove_rdma_id(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->remove_rdma_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_register_rdma(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->register_rdma(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, param); +} + +static void nbl_serv_unregister_rdma(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->unregister_rdma(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_register_rdma_bond(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->register_rdma_bond(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + list_param, register_param); +} + +static void nbl_serv_unregister_rdma_bond(void *priv, u16 lag_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->unregister_rdma_bond(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), lag_id); +} + +static u8 __iomem *nbl_serv_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), size); +} + +static u64 nbl_serv_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_real_hw_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static u16 nbl_serv_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id); +} + +static void nbl_serv_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_real_bdf(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_id, + bus, dev, function); +} + +static int nbl_serv_get_devlink_info(struct devlink *devlink, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct nbl_devlink_priv *priv = devlink_priv(devlink); + struct 
nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	char firmware_version[NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN] = {0};
+
+	disp_ops->get_firmware_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+				       firmware_version, sizeof(firmware_version));
+
+	return devlink_info_version_fixed_put(req, "FW Version:", firmware_version);
+}
+
+/* The generic match_record op in the pldmfw library cannot handle records
+ * whose subvendor/subdevice are set to 0xFFFF (PCI_ANY_ID), so provide a
+ * variant that treats those values as wildcards.
+ */
+static bool
+nbl_serv_pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+	struct pci_dev *pdev = to_pci_dev(context->dev);
+	struct nbl_serv_pldm_pci_record_id id = {
+		.vendor = PCI_ANY_ID,
+		.device = PCI_ANY_ID,
+		.subsystem_vendor = PCI_ANY_ID,
+		.subsystem_device = PCI_ANY_ID,
+	};
+	struct pldmfw_desc_tlv *desc;
+	bool ret;
+
+	list_for_each_entry(desc, &record->descs, entry) {
+		u16 value;
+		u16 *ptr;
+
+		switch (desc->type) {
+		case PLDM_DESC_ID_PCI_VENDOR_ID:
+			ptr = &id.vendor;
+			break;
+		case PLDM_DESC_ID_PCI_DEVICE_ID:
+			ptr = &id.device;
+			break;
+		case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+			ptr = &id.subsystem_vendor;
+			break;
+		case PLDM_DESC_ID_PCI_SUBDEV_ID:
+			ptr = &id.subsystem_device;
+			break;
+		default:
+			/* Skip unrelated TLVs */
+			continue;
+		}
+
+		value = get_unaligned_le16(desc->data);
+		/* A value of zero for one of the descriptors is sometimes
+		 * used when the record should ignore this field when matching
+		 * device. For example if the record applies to any subsystem
+		 * device or vendor.
+		 */
+		if (value)
+			*ptr = value;
+		else
+			*ptr = PCI_ANY_ID;
+	}
+
+	if ((id.vendor == (u16)PCI_ANY_ID || id.vendor == pdev->vendor) &&
+	    (id.device == (u16)PCI_ANY_ID || id.device == pdev->device) &&
+	    (id.subsystem_vendor == (u16)PCI_ANY_ID ||
+	     id.subsystem_vendor == pdev->subsystem_vendor) &&
+	    (id.subsystem_device == (u16)PCI_ANY_ID ||
+	     id.subsystem_device == pdev->subsystem_device))
+		ret = true;
+	else
+		ret = false;
+
+	return ret;
+}
+
+static int nbl_serv_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
+{
+	struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv,
+							    context);
+	struct nbl_service_mgt *serv_mgt = priv->serv_mgt;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	int ret = 0;
+
+	nbl_info(common, NBL_DEBUG_DEVLINK, "Send package data");
+
+	ret = disp_ops->flash_lock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	ret = disp_ops->flash_prepare(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+
+	return ret;
+}
+
+static int nbl_serv_send_component_table(struct pldmfw *context, struct pldmfw_component *component,
+					 u8 transfer_flags)
+{
+	struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv,
+							    context);
+	struct nbl_service_mgt *serv_mgt = priv->serv_mgt;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+
+	nbl_info(common, NBL_DEBUG_DEVLINK, "Send component table, id %d", component->identifier);
+
+	return 0;
+}
+
+static int nbl_serv_flash_component(struct pldmfw *context, struct pldmfw_component *component)
+{
+	struct nbl_serv_update_fw_priv *priv = container_of(context, struct
nbl_serv_update_fw_priv, + context); + struct nbl_service_mgt *serv_mgt = priv->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u32 component_crc, calculated_crc; + size_t data_len = component->component_size - NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE; + int ret = 0; + + nbl_info(common, NBL_DEBUG_DEVLINK, "Flash component table, id %d", component->identifier); + + component_crc = *(u32 *)((u8 *)component->component_data + data_len); + calculated_crc = crc32_le(~0, component->component_data, data_len) ^ ~0; + if (component_crc != calculated_crc) { + nbl_err(common, NBL_DEBUG_DEVLINK, "Flash component crc error"); + disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + return -EFAULT; + } + + ret = disp_ops->flash_image(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), component->identifier, + component->component_data, data_len); + if (ret) + disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + return ret; +} + +static int nbl_serv_finalize_update(struct pldmfw *context) +{ + struct nbl_serv_update_fw_priv *priv = container_of(context, struct nbl_serv_update_fw_priv, + context); + struct nbl_service_mgt *serv_mgt = priv->serv_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret = 0; + + nbl_info(common, NBL_DEBUG_DEVLINK, "Flash activate"); + + ret = disp_ops->flash_activate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + + disp_ops->flash_unlock(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + return ret; +} + +static const struct pldmfw_ops nbl_update_fw_ops = { + .match_record = nbl_serv_pldmfw_op_pci_match_record, + .send_package_data = nbl_serv_send_package_data, + .send_component_table = nbl_serv_send_component_table, + .flash_component = nbl_serv_flash_component, + .finalize_update = nbl_serv_finalize_update, +}; + +static int nbl_serv_update_firmware(struct nbl_service_mgt *serv_mgt, const struct firmware *fw, + struct netlink_ext_ack *extack) +{ + struct nbl_serv_update_fw_priv priv = {0}; + int ret = 0; + + priv.context.ops = &nbl_update_fw_ops; + priv.context.dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + priv.extack = extack; + priv.serv_mgt = serv_mgt; + + ret = pldmfw_flash_image(&priv.context, fw); + + return ret; +} + +static int nbl_serv_update_devlink_flash(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct nbl_devlink_priv *priv = devlink_priv(devlink); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv->priv; + int ret = 0; + + devlink_flash_update_status_notify(devlink, "Flash start", NULL, 0, 0); + + ret = nbl_serv_update_firmware(serv_mgt, params->fw, extack); + if (ret) + devlink_flash_update_status_notify(devlink, "Flash failed", NULL, 0, 0); + else + devlink_flash_update_status_notify(devlink, + "Flash finished, please reboot to take effect", + NULL, 0, 0); + return ret; +} + +static u32 nbl_serv_get_adminq_tx_buf_size(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->get_adminq_tx_buf_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_emp_console_write(void *priv, char *buf, size_t count) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return 
disp_ops->emp_console_write(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), buf, count);
+}
+
+static bool nbl_serv_check_fw_heartbeat(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->check_fw_heartbeat(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static bool nbl_serv_check_fw_reset(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->check_fw_reset(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static void nbl_serv_get_common_irq_num(void *priv, struct nbl_common_irq_num *irq_num)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	irq_num->mbx_irq_num = disp_ops->get_mbx_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static void nbl_serv_get_ctrl_irq_num(void *priv, struct nbl_ctrl_irq_num *irq_num)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	irq_num->adminq_irq_num = disp_ops->get_adminq_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	irq_num->abnormal_irq_num =
+		disp_ops->get_abnormal_irq_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static int nbl_serv_check_offload_status(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+	bool is_down = false;
+	int ret;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	ret = disp_ops->check_offload_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &is_down);
+
+	/* OVS is down; delete the related PMD flow rules. */
+	if (is_down)
+		disp_ops->del_nd_upcall_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+
+	return ret;
+}
+
+static u32 nbl_serv_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 sensor_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_chip_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), type, sensor_id);
+}
+
+static int nbl_serv_get_module_temperature(void *priv, u8 eth_id, enum nbl_hwmon_type type)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_module_temperature(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, type);
+}
+
+static int nbl_serv_get_port_attributes(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops;
+	int ret = 0;
+
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	ret = disp_ops->get_port_attributes(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return -EIO;
+
+	return 0;
+}
+
+static int nbl_serv_update_template_config(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	int ret = 0;
+
+	ret = disp_ops->update_ring_num(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	ret = disp_ops->update_rdma_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	ret = disp_ops->update_rdma_mem_type(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (ret)
+		return ret;
+
+	return 0;
+}
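+
+/* Enable or disable the physical port through the dispatch layer; any
+ * dispatch failure is reported to the caller as -EIO.
+ */
+static int nbl_serv_enable_port(void *priv, bool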
enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + int ret = 0; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + ret = disp_ops->enable_port(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); + if (ret) + return -EIO; + + return 0; +} + +static void nbl_serv_init_port(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops; + + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->init_port(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_configure_virtio_dev_msix(void *priv, u16 vector) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->configure_virtio_dev_msix(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vector); +} + +static void nbl_serv_configure_rdma_msix_off(void *priv, u16 vector) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->configure_rdma_msix_off(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vector); +} + +static void nbl_serv_configure_virtio_dev_ready(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->configure_virtio_dev_ready(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static int nbl_serv_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + + if (NBL_COMMON_TO_VF_CAP(common)) + return 0; + else + return disp_ops->set_eth_mac_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + mac, eth_id); +} + +static void nbl_serv_adapt_desc_gother(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->adapt_desc_gother(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_process_flr(void *priv, u16 vfid) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->flr_clear_queues(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_accel_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_flows(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_accel(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_interrupt(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_rdma(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); + disp_ops->flr_clear_net(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); +} + +static u16 nbl_serv_covert_vfid_to_vsi_id(void *priv, u16 vfid) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + return disp_ops->covert_vfid_to_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vfid); +} + +static void nbl_serv_recovery_abnormal(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->unmask_all_interrupts(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); +} + +static void nbl_serv_keep_alive(void *priv) +{ + struct 
nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	disp_ops->keep_alive(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static int nbl_serv_register_vsi_info(void *priv, struct nbl_vsi_param *vsi_param)
+{
+	u16 vsi_index = vsi_param->index;
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_ring_mgt *ring_mgt = NBL_SERV_MGT_TO_RING_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	u32 num_cpus;
+
+	ring_mgt->vsi_info[vsi_index].vsi_index = vsi_index;
+	ring_mgt->vsi_info[vsi_index].vsi_id = vsi_param->vsi_id;
+	ring_mgt->vsi_info[vsi_index].ring_offset = vsi_param->queue_offset;
+	ring_mgt->vsi_info[vsi_index].ring_num = vsi_param->queue_num;
+
+	/* Init the active ring number before the first open, so that the fd
+	 * direct config check is guaranteed to succeed.
+	 */
+	num_cpus = num_online_cpus();
+	ring_mgt->vsi_info[vsi_index].active_ring_num = (u16)num_cpus > vsi_param->queue_num ?
+							vsi_param->queue_num : (u16)num_cpus;
+
+	if (disp_ops->get_product_fix_cap(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					  NBL_ITR_DYNAMIC))
+		ring_mgt->vsi_info[vsi_index].itr_dynamic = true;
+
+	disp_ops->register_vsi_ring(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_index,
+				    vsi_param->queue_offset, vsi_param->queue_num);
+
+	return disp_ops->register_vsi2q(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), vsi_index,
+					vsi_param->vsi_id, vsi_param->queue_offset,
+					vsi_param->queue_num);
+}
+
+static int nbl_serv_st_open(struct inode *inode, struct file *filep)
+{
+	struct nbl_serv_st_mgt *p = container_of(inode->i_cdev, struct nbl_serv_st_mgt, cdev);
+
+	filep->private_data = p;
+
+	return 0;
+}
+
+static ssize_t nbl_serv_st_write(struct file *file, const char __user *ubuf,
+				 size_t size, loff_t *ppos)
+{
+	return 0;
+}
+
+static ssize_t nbl_serv_st_read(struct file *file, char __user *ubuf, size_t size, loff_t *ppos)
+{
+	return 0;
+}
+
+static int nbl_serv_st_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static int nbl_serv_process_passthrough(struct nbl_service_mgt *serv_mgt,
+					unsigned int cmd, unsigned long arg)
+{
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_passthrough_fw_cmd_param *param = NULL, *result = NULL;
+	int ret = 0;
+
+	param = kzalloc(sizeof(*param), GFP_KERNEL);
+	if (!param) {
+		ret = -ENOMEM;
+		goto alloc_param_fail;
+	}
+
+	result = kzalloc(sizeof(*result), GFP_KERNEL);
+	if (!result) {
+		ret = -ENOMEM;
+		goto alloc_result_fail;
+	}
+
+	if (copy_from_user(param, (void __user *)arg, _IOC_SIZE(cmd))) {
+		nbl_err(common, NBL_DEBUG_ST, "Bad access to the user buffer.\n");
+		ret = -EFAULT;
+		goto passthrough_fail;
+	}
+
+	nbl_debug(common, NBL_DEBUG_ST, "Passthrough opcode: %d\n", param->opcode);
+
+	ret = disp_ops->passthrough_fw_cmd(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param, result);
+	if (ret)
+		goto passthrough_fail;
+
+	if (copy_to_user((void __user *)arg, result, _IOC_SIZE(cmd)))
+		ret = -EFAULT;
+
+passthrough_fail:
+	kfree(result);
+alloc_result_fail:
+	kfree(param);
+alloc_param_fail:
+	return ret;
+}
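+
+/* IOCTL_ST_INFO: report the driver name/version, the primary netdev name and
+ * the PCI location of this function to the nblst software tool.
+ */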
st info\n"); + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + return -ENOMEM; + + strscpy(param->driver_name, NBL_DRIVER_NAME, sizeof(param->driver_name)); + if (net_resource_mgt->netdev) + strscpy(param->netdev_name[0], net_resource_mgt->netdev->name, + sizeof(param->netdev_name[0])); + + disp_ops->get_driver_version(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param->driver_ver, + sizeof(param->driver_ver)); + + param->bus = common->bus; + param->devid = common->devid; + param->function = common->function; + param->domain = pci_domain_nr(NBL_COMMON_TO_PDEV(common)->bus); + + param->version = IOCTL_ST_INFO_VERSION; + + ret = copy_to_user((void *)arg, param, _IOC_SIZE(cmd)); + + kfree(param); + return ret; +} + +static long nbl_serv_st_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct nbl_serv_st_mgt *st_mgt = file->private_data; + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)st_mgt->serv_mgt; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + int ret = 0; + + if (_IOC_TYPE(cmd) != IOCTL_TYPE) { + nbl_err(common, NBL_DEBUG_ST, "cmd %u, bad magic 0x%x/0x%x.\n", + cmd, _IOC_TYPE(cmd), IOCTL_TYPE); + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) + ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) & _IOC_WRITE) + ret = !access_ok((void __user *)arg, _IOC_SIZE(cmd)); + + if (ret) { + nbl_err(common, NBL_DEBUG_ST, "Bad access.\n"); + return ret; + } + + switch (cmd) { + case IOCTL_PASSTHROUGH: + ret = nbl_serv_process_passthrough(serv_mgt, cmd, arg); + break; + case IOCTL_ST_INFO: + ret = nbl_serv_process_st_info(serv_mgt, cmd, arg); + break; + default: + nbl_err(common, NBL_DEBUG_ST, "Unknown cmd %d.\n", cmd); + return -EFAULT; + } + + return ret; +} + +static const struct file_operations st_ops = { + .owner = THIS_MODULE, + .open = nbl_serv_st_open, + .write = nbl_serv_st_write, + .read = nbl_serv_st_read, + .unlocked_ioctl = nbl_serv_st_unlock_ioctl, + .release = nbl_serv_st_release, +}; + +static int nbl_serv_alloc_subdev_id(struct nbl_software_tool_table *st_table) +{ + int subdev_id; + + subdev_id = find_first_zero_bit(st_table->devid, NBL_ST_MAX_DEVICE_NUM); + if (subdev_id == NBL_ST_MAX_DEVICE_NUM) + return -ENOSPC; + set_bit(subdev_id, st_table->devid); + + return subdev_id; +} + +static void nbl_serv_free_subdev_id(struct nbl_software_tool_table *st_table, int id) +{ + clear_bit(id, st_table->devid); +} + +static int nbl_serv_setup_st(void *priv, void *st_table_param) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt); + struct device *test_device; + char name[NBL_RESTOOL_NAME_LEN] = {0}; + dev_t devid; + int id, subdev_id, ret = 0; + + id = NBL_COMMON_TO_BOARD_ID(common); + + subdev_id = nbl_serv_alloc_subdev_id(st_table); + if (subdev_id < 0) + goto alloc_subdev_id_fail; + + devid = MKDEV(st_table->major, subdev_id); + + if (!NBL_COMMON_TO_PCI_FUNC_ID(common)) + snprintf(name, sizeof(name), "nblst%04x_conf%d", + NBL_COMMON_TO_PDEV(common)->device, id); + else + snprintf(name, sizeof(name), "nblst%04x_conf%d.%d", + NBL_COMMON_TO_PDEV(common)->device, id, NBL_COMMON_TO_PCI_FUNC_ID(common)); + + st_mgt = devm_kzalloc(NBL_COMMON_TO_DEV(common), sizeof(*st_mgt), GFP_KERNEL); + if (!st_mgt) + goto malloc_fail; + + st_mgt->serv_mgt 
= serv_mgt;
+
+	st_mgt->major = MAJOR(devid);
+	st_mgt->minor = MINOR(devid);
+	st_mgt->devno = devid;
+	st_mgt->subdev_id = subdev_id;
+
+	cdev_init(&st_mgt->cdev, &st_ops);
+	ret = cdev_add(&st_mgt->cdev, devid, 1);
+	if (ret)
+		goto cdev_add_fail;
+
+	test_device = device_create(st_table->cls, NULL, st_mgt->devno, NULL, name);
+	if (IS_ERR(test_device)) {
+		ret = PTR_ERR(test_device);
+		goto device_create_fail;
+	}
+
+	NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = st_mgt;
+	return 0;
+
+device_create_fail:
+	cdev_del(&st_mgt->cdev);
+cdev_add_fail:
+	devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt);
+malloc_fail:
+	nbl_serv_free_subdev_id(st_table, subdev_id);
+alloc_subdev_id_fail:
+	return ret;
+}
+
+static void nbl_serv_remove_st(void *priv, void *st_table_param)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_software_tool_table *st_table = (struct nbl_software_tool_table *)st_table_param;
+	struct nbl_serv_st_mgt *st_mgt = NBL_SERV_MGT_TO_ST_MGT(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+
+	if (!st_mgt)
+		return;
+
+	device_destroy(st_table->cls, st_mgt->devno);
+	cdev_del(&st_mgt->cdev);
+
+	nbl_serv_free_subdev_id(st_table, st_mgt->subdev_id);
+
+	NBL_SERV_MGT_TO_ST_MGT(serv_mgt) = NULL;
+	devm_kfree(NBL_COMMON_TO_DEV(common), st_mgt);
+}
+
+static int nbl_serv_set_spoof_check_addr(void *priv, u8 *mac)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+
+	return disp_ops->set_spoof_check_addr(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					      NBL_COMMON_TO_VSI_ID(common), mac);
+}
+
+static u16 nbl_serv_get_vf_base_vsi_id(void *priv, u16 func_id)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_vf_base_vsi_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id);
+}
+
+static int nbl_serv_get_board_id(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->get_board_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+}
+
+static int nbl_serv_process_abnormal_event(void *priv)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_abnormal_event_info abnormal_info;
+	struct nbl_abnormal_details *detail;
+	u16 local_queue_id;
+	int type, i, ret = 0;
+
+	memset(&abnormal_info, 0, sizeof(abnormal_info));
+
+	ret = disp_ops->process_abnormal_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &abnormal_info);
+	if (!ret)
+		return ret;
+
+	for (i = 0; i < NBL_ABNORMAL_EVENT_MAX; i++) {
+		detail = &abnormal_info.details[i];
+
+		if (!detail->abnormal)
+			continue;
+
+		type = nbl_serv_abnormal_event_to_queue(i);
+		local_queue_id = disp_ops->get_local_queue_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+							      detail->vsi_id, detail->qid);
+		if (local_queue_id == U16_MAX)
+			return 0;
+
+		nbl_serv_restore_queue(serv_mgt, detail->vsi_id, local_queue_id, type, true);
+	}
+
+	return 0;
+}
+
+static int nbl_serv_cfg_bond_shaping(void *priv, u8 eth_id, bool enable)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+
+	return disp_ops->cfg_bond_shaping(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), eth_id, enable);
+}
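+
+/* Presumably "bgid" here is the buffer-group id shared by a bonded port
+ * pair: toggle back-pressure between the main port and the other port.
+ */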
+ +static void nbl_serv_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, + u8 other_eth_id, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_bgid_back_pressure(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), main_eth_id, + other_eth_id, enable); +} + +static void nbl_serv_cfg_eth_bond_event(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_eth_bond_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); +} + +static ssize_t nbl_serv_vf_mac_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "usage: write MAC ADDR to set mac address\n"); +} + +static ssize_t nbl_serv_vf_mac_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + u8 mac[ETH_ALEN]; + int ret = 0; + + ret = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + if (ret != ETH_ALEN) + return -EINVAL; + + ret = nbl_serv_set_vf_mac(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, mac); + return ret ? ret : count; +} + +static ssize_t nbl_serv_vf_vlan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "usage: write to set VF Vlan," + " Qos, and optionally Vlan Protocol (default 802.1Q)\n"); +} + +static ssize_t nbl_serv_vf_vlan_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + char vproto_ext[5] = {'\0'}; + __be16 vlan_proto; + u16 vlan_id; + u8 qos; + int ret = 0; + + ret = sscanf(buf, "%hu:%hhu:802.%4s", &vlan_id, &qos, vproto_ext); + if (ret == 3) { + if ((strcmp(vproto_ext, "1AD") == 0) || + (strcmp(vproto_ext, "1ad") == 0)) + vlan_proto = htons(ETH_P_8021AD); + else if ((strcmp(vproto_ext, "1Q") == 0) || + (strcmp(vproto_ext, "1q") == 0)) + vlan_proto = htons(ETH_P_8021Q); + else + return -EINVAL; + } else { + ret = sscanf(buf, "%hu:%hhu", &vlan_id, &qos); + if (ret != 2) + return -EINVAL; + vlan_proto = htons(ETH_P_8021Q); + } + + ret = nbl_serv_set_vf_vlan(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, vlan_id, qos, vlan_proto); + return ret ? ret : count; +} + +static ssize_t nbl_serv_vf_max_tx_rate_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "usage: write RATE to set max_tx_rate(Mbps)\n"); +} + +static ssize_t nbl_serv_vf_max_tx_rate_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj); + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv; + int max_tx_rate = 0, ret = 0; + + ret = kstrtos32(buf, 0, &max_tx_rate); + if (ret) + return -EINVAL; + + ret = nbl_serv_set_vf_rate(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev, + vf_info->vf_id, 0, max_tx_rate); + return ret ? 
ret : count;
+}
+
+static ssize_t nbl_serv_vf_spoofchk_show(struct kobject *kobj, struct kobj_attribute *attr,
+					 char *buf)
+{
+	return sprintf(buf, "usage: write ON/OFF to set vf spoof check\n");
+}
+
+static ssize_t nbl_serv_vf_spoofchk_store(struct kobject *kobj, struct kobj_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj);
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv;
+	bool enable = false;
+	int ret = 0;
+
+	if (sysfs_streq(buf, "ON"))
+		enable = true;
+	else if (sysfs_streq(buf, "OFF"))
+		enable = false;
+	else
+		return -EINVAL;
+
+	ret = nbl_serv_set_vf_spoofchk(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev,
+				       vf_info->vf_id, enable);
+	return ret ? ret : count;
+}
+
+static ssize_t nbl_serv_vf_link_state_show(struct kobject *kobj, struct kobj_attribute *attr,
+					   char *buf)
+{
+	return sprintf(buf, "usage: write AUTO/ENABLE/DISABLE to set vf link state\n");
+}
+
+static ssize_t nbl_serv_vf_link_state_store(struct kobject *kobj, struct kobj_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct nbl_serv_vf_info *vf_info = container_of(kobj, struct nbl_serv_vf_info, kobj);
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)vf_info->priv;
+	int state = 0, ret = 0;
+
+	if (sysfs_streq(buf, "AUTO"))
+		state = IFLA_VF_LINK_STATE_AUTO;
+	else if (sysfs_streq(buf, "ENABLE"))
+		state = IFLA_VF_LINK_STATE_ENABLE;
+	else if (sysfs_streq(buf, "DISABLE"))
+		state = IFLA_VF_LINK_STATE_DISABLE;
+	else
+		return -EINVAL;
+
+	ret = nbl_serv_set_vf_link_state(NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt)->netdev,
+					 vf_info->vf_id, state);
+	return ret ? ret : count;
+}
+
+static ssize_t nbl_serv_vf_config_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct kobj_attribute *kattr = container_of(attr, struct kobj_attribute, attr);
+
+	if (kattr->show)
+		return kattr->show(kobj, kattr, buf);
+
+	return -EIO;
+}
+
+static ssize_t nbl_serv_vf_config_store(struct kobject *kobj, struct attribute *attr,
+					const char *buf, size_t count)
+{
+	struct kobj_attribute *kattr = container_of(attr, struct kobj_attribute, attr);
+
+	if (kattr->store)
+		return kattr->store(kobj, kattr, buf, count);
+
+	return -EIO;
+}
+
+static struct kobj_attribute nbl_attr_vf_mac = {
+	.attr = {.name = "mac",
+		 .mode = 0644},
+	.show = nbl_serv_vf_mac_show,
+	.store = nbl_serv_vf_mac_store,
+};
+
+static struct kobj_attribute nbl_attr_vf_vlan = {
+	.attr = {.name = "vlan",
+		 .mode = 0644},
+	.show = nbl_serv_vf_vlan_show,
+	.store = nbl_serv_vf_vlan_store,
+};
+
+static struct kobj_attribute nbl_attr_vf_max_tx_rate = {
+	.attr = {.name = "max_tx_rate",
+		 .mode = 0644},
+	.show = nbl_serv_vf_max_tx_rate_show,
+	.store = nbl_serv_vf_max_tx_rate_store,
+};
+
+static struct kobj_attribute nbl_attr_vf_spoofchk = {
+	.attr = {.name = "spoofchk",
+		 .mode = 0644},
+	.show = nbl_serv_vf_spoofchk_show,
+	.store = nbl_serv_vf_spoofchk_store,
+};
+
+static struct kobj_attribute nbl_attr_vf_link_state = {
+	.attr = {.name = "link_state",
+		 .mode = 0644},
+	.show = nbl_serv_vf_link_state_show,
+	.store = nbl_serv_vf_link_state_store,
+};
+
+static struct attribute *nbl_vf_config_attrs[] = {
+	&nbl_attr_vf_mac.attr,
+	&nbl_attr_vf_vlan.attr,
+	&nbl_attr_vf_max_tx_rate.attr,
+	&nbl_attr_vf_spoofchk.attr,
+	&nbl_attr_vf_link_state.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(nbl_vf_config);
+
+static const struct sysfs_ops nbl_sysfs_ops_vf = {
+	.show = nbl_serv_vf_config_show,
+	.store = nbl_serv_vf_config_store,
+};
+
+static const
struct kobj_type nbl_kobj_vf_type = {
+	.sysfs_ops = &nbl_sysfs_ops_vf,
+	.default_groups = nbl_vf_config_groups,
+};
+
+static int nbl_serv_setup_vf_sysfs(struct nbl_service_mgt *serv_mgt)
+{
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info;
+	int i = 0, ret = 0;
+
+	for (i = 0; i < net_resource_mgt->num_vfs; i++) {
+		vf_info[i].priv = serv_mgt;
+		vf_info[i].vf_id = (u16)i;
+
+		ret = kobject_init_and_add(&vf_info[i].kobj, &nbl_kobj_vf_type,
+					   net_resource_mgt->sriov_kobj, "vf%d", i);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	/* kobject_init_and_add() requires a put on the kobject that failed, too */
+	for (; i >= 0; i--)
+		kobject_put(&vf_info[i].kobj);
+
+	return ret;
+}
+
+static void nbl_serv_remove_vf_sysfs(struct nbl_service_mgt *serv_mgt)
+{
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info;
+	int i = 0;
+
+	for (i = 0; i < net_resource_mgt->num_vfs; i++)
+		kobject_put(&vf_info[i].kobj);
+}
+
+static int nbl_serv_setup_vf_config(void *priv, int num_vfs, bool is_flush)
+{
+	struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv;
+	struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	u16 func_id = U16_MAX;
+	u16 vlan_tci;
+	bool should_notify;
+	int i, ret = 0;
+
+	net_resource_mgt->num_vfs = num_vfs;
+
+	for (i = 0; i < net_resource_mgt->num_vfs; i++) {
+		func_id = disp_ops->get_vf_function_id(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						       NBL_COMMON_TO_VSI_ID(common), i);
+
+		if (func_id == U16_MAX) {
+			nbl_err(common, NBL_DEBUG_MAIN, "vf id %d invalid\n", i);
+			return -EINVAL;
+		}
+
+		disp_ops->register_func_mac(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					    vf_info[i].mac, func_id);
+
+		vlan_tci = vf_info[i].vlan | (u16)(vf_info[i].vlan_qos << VLAN_PRIO_SHIFT);
+		ret = disp_ops->register_func_vlan(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id,
+						   vlan_tci, vf_info[i].vlan_proto,
+						   &should_notify);
+		if (ret)
+			break;
+
+		ret = disp_ops->register_func_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), func_id,
+						   vf_info[i].max_tx_rate);
+		if (ret)
+			break;
+
+		ret = disp_ops->set_tx_rate(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+					    func_id, vf_info[i].max_tx_rate);
+		if (ret)
+			break;
+
+		ret = disp_ops->set_vf_spoof_check(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						   NBL_COMMON_TO_VSI_ID(common), i,
+						   vf_info[i].spoof_check);
+		if (ret)
+			break;
+
+		/* No need to notify the vf: it will get the forced link state when it
+		 * probes. Here we only flush the config.
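+		 * The should_notify flag returned below is deliberately left unused.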
+ */ + ret = disp_ops->register_func_link_forced(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + func_id, vf_info[i].state, + &should_notify); + if (ret) + break; + } + + if (!ret && net_resource_mgt->sriov_kobj && !is_flush) + ret = nbl_serv_setup_vf_sysfs(serv_mgt); + + if (ret) + net_resource_mgt->num_vfs = 0; + + return ret; +} + +static void nbl_serv_remove_vf_config(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info = net_resource_mgt->vf_info; + int i; + + nbl_serv_remove_vf_sysfs(serv_mgt); + + for (i = 0; i < net_resource_mgt->num_vfs; i++) + memset(&vf_info[i], 0, sizeof(vf_info[i])); + + nbl_serv_setup_vf_config(priv, net_resource_mgt->num_vfs, true); + + net_resource_mgt->num_vfs = 0; +} + +static int nbl_serv_setup_vf_resource(void *priv, int num_vfs) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct nbl_serv_vf_info *vf_info; + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + int i; + + net_resource_mgt->total_vfs = num_vfs; + + net_resource_mgt->vf_info = devm_kcalloc(dev, net_resource_mgt->total_vfs, + sizeof(struct nbl_serv_vf_info), GFP_KERNEL); + if (!net_resource_mgt->vf_info) + return -ENOMEM; + + vf_info = net_resource_mgt->vf_info; + for (i = 0; i < net_resource_mgt->total_vfs; i++) { + vf_info[i].state = IFLA_VF_LINK_STATE_AUTO; + vf_info[i].spoof_check = false; + } + + net_resource_mgt->sriov_kobj = kobject_create_and_add("SRIOV", &dev->kobj); + if (!net_resource_mgt->sriov_kobj) + nbl_warn(NBL_SERV_MGT_TO_COMMON(serv_mgt), NBL_DEBUG_MAIN, + "Fail to create sriov sysfs"); + + return 0; +} + +static void nbl_serv_remove_vf_resource(void *priv) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt); + struct device *dev = NBL_SERV_MGT_TO_DEV(serv_mgt); + + nbl_serv_remove_vf_config(priv); + + kobject_put(net_resource_mgt->sriov_kobj); + + if (net_resource_mgt->vf_info) { + devm_kfree(dev, net_resource_mgt->vf_info); + net_resource_mgt->vf_info = NULL; + } +} + +static void nbl_serv_cfg_fd_update_event(void *priv, bool enable) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->cfg_fd_update_event(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), enable); +} + +static void nbl_serv_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->get_xdp_queue_info(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), queue_num, queue_size, + vsi_id); +} + +static void nbl_serv_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + disp_ops->set_hw_status(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), hw_status); +} + +static void nbl_serv_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + 
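+	/* query the dispatch layer for the bitmap of currently active
+	 * functions; at most max_func bits are filled in
+	 */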
disp_ops->get_active_func_bitmaps(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), bitmap, max_func); +} + +static int nbl_serv_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_serv_net_resource_mgt *net_resource_mgt = serv_mgt->net_resource_mgt; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; + + ret = disp_ops->configure_qos(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, pfc, trust, dscp2prio_map); + + net_resource_mgt->pfc_mode = trust; + memcpy(net_resource_mgt->dscp2prio_map, dscp2prio_map, NBL_DSCP_MAX); + + return ret; +} + +static int nbl_serv_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; + + ret = disp_ops->set_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, prio, xoff, xon); + + return ret; +} + +static int nbl_serv_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +{ + struct nbl_service_mgt *serv_mgt = (struct nbl_service_mgt *)priv; + struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + int ret; + + ret = disp_ops->get_pfc_buffer_size(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), + eth_id, prio, xoff, xon); + + return ret; +} + +static struct nbl_service_ops serv_ops = { + .init_chip_factory = nbl_serv_init_chip_factory, + .destroy_chip_factory = nbl_serv_destroy_chip_factory, + .init_chip = nbl_serv_init_chip, + .destroy_chip = nbl_serv_destroy_chip, + + .configure_msix_map = nbl_serv_configure_msix_map, + .destroy_msix_map = nbl_serv_destroy_msix_map, + .enable_mailbox_irq = nbl_serv_enable_mailbox_irq, + .enable_abnormal_irq = nbl_serv_enable_abnormal_irq, + .enable_adminq_irq = nbl_serv_enable_adminq_irq, + .request_net_irq = nbl_serv_request_net_irq, + .free_net_irq = nbl_serv_free_net_irq, + .get_global_vector = nbl_serv_get_global_vector, + .get_msix_entry_id = nbl_serv_get_msix_entry_id, + .get_common_irq_num = nbl_serv_get_common_irq_num, + .get_ctrl_irq_num = nbl_serv_get_ctrl_irq_num, + .get_chip_temperature = nbl_serv_get_chip_temperature, + .get_module_temperature = nbl_serv_get_module_temperature, + .get_port_attributes = nbl_serv_get_port_attributes, + .update_template_config = nbl_serv_update_template_config, + .enable_port = nbl_serv_enable_port, + .init_port = nbl_serv_init_port, + .set_sfp_state = nbl_serv_set_sfp_state, + + .register_net = nbl_serv_register_net, + .unregister_net = nbl_serv_unregister_net, + .setup_txrx_queues = nbl_serv_setup_txrx_queues, + .remove_txrx_queues = nbl_serv_remove_txrx_queues, + .check_offload_status = nbl_serv_check_offload_status, + .init_tx_rate = nbl_serv_init_tx_rate, + .setup_q2vsi = nbl_serv_setup_q2vsi, + .remove_q2vsi = nbl_serv_remove_q2vsi, + .setup_rss = nbl_serv_setup_rss, + .remove_rss = nbl_serv_remove_rss, + .register_vsi_info = nbl_serv_register_vsi_info, + + .alloc_rings = nbl_serv_alloc_rings, + .cpu_affinity_init = nbl_serv_cpu_affinity_init, + .free_rings = nbl_serv_free_rings, + .enable_napis = nbl_serv_enable_napis, + .disable_napis = nbl_serv_disable_napis, + .set_mask_en = nbl_serv_set_mask_en, + .start_net_flow = nbl_serv_start_net_flow, + .stop_net_flow = nbl_serv_stop_net_flow, + .set_lldp_flow = nbl_serv_set_lldp_flow, + .remove_lldp_flow = nbl_serv_remove_lldp_flow, + .start_mgt_flow = nbl_serv_start_mgt_flow, + .stop_mgt_flow = 
nbl_serv_stop_mgt_flow, + .get_tx_headroom = nbl_serv_get_tx_headroom, + .get_product_flex_cap = nbl_serv_get_product_flex_cap, + .get_product_fix_cap = nbl_serv_get_product_fix_cap, + .set_spoof_check_addr = nbl_serv_set_spoof_check_addr, + + .vsi_open = nbl_serv_vsi_open, + .vsi_stop = nbl_serv_vsi_stop, + .switch_traffic_default_dest = nbl_serv_switch_traffic_default_dest, + .config_fd_flow_state = nbl_serv_config_fd_flow_state, + + /* For netdev ops */ + .netdev_open = nbl_serv_netdev_open, + .netdev_stop = nbl_serv_netdev_stop, + .change_mtu = nbl_serv_change_mtu, + .set_mac = nbl_serv_set_mac, + .rx_add_vid = nbl_serv_rx_add_vid, + .rx_kill_vid = nbl_serv_rx_kill_vid, + .get_stats64 = nbl_serv_get_stats64, + .set_rx_mode = nbl_serv_set_rx_mode, + .change_rx_flags = nbl_serv_change_rx_flags, + .set_features = nbl_serv_set_features, + .features_check = nbl_serv_features_check, + .setup_tc = nbl_serv_setup_tc, + .set_vf_spoofchk = nbl_serv_set_vf_spoofchk, + .get_phys_port_name = nbl_serv_get_phys_port_name, + .get_port_parent_id = nbl_serv_get_port_parent_id, + .tx_timeout = nbl_serv_tx_timeout, + .bridge_setlink = nbl_serv_bridge_setlink, + .bridge_getlink = nbl_serv_bridge_getlink, + .set_vf_link_state = nbl_serv_set_vf_link_state, + .set_vf_mac = nbl_serv_set_vf_mac, + .set_vf_rate = nbl_serv_set_vf_rate, + .set_vf_vlan = nbl_serv_set_vf_vlan, + .get_vf_config = nbl_serv_get_vf_config, + .select_queue = nbl_serv_select_queue, + + /* For rep associated */ + .rep_netdev_open = nbl_serv_rep_netdev_open, + .rep_netdev_stop = nbl_serv_rep_netdev_stop, + .rep_start_xmit = nbl_serv_rep_start_xmit, + .rep_get_stats64 = nbl_serv_rep_get_stats64, + .rep_set_rx_mode = nbl_serv_rep_set_rx_mode, + .rep_set_mac = nbl_serv_rep_set_mac, + .rep_rx_add_vid = nbl_serv_rep_rx_add_vid, + .rep_rx_kill_vid = nbl_serv_rep_rx_kill_vid, + .rep_setup_tc = nbl_serv_rep_setup_tc, + .rep_get_phys_port_name = nbl_serv_rep_get_phys_port_name, + .rep_get_port_parent_id = nbl_serv_rep_get_port_parent_id, + .get_rep_feature = nbl_serv_get_rep_feature, + .get_rep_queue_num = nbl_serv_get_rep_queue_num, + .get_rep_queue_info = nbl_serv_get_rep_queue_info, + .get_user_queue_info = nbl_serv_get_user_queue_info, + .alloc_rep_queue_mgt = nbl_serv_alloc_rep_queue_mgt, + .free_rep_queue_mgt = nbl_serv_free_rep_queue_mgt, + .set_eswitch_mode = nbl_serv_set_eswitch_mode, + .get_eswitch_mode = nbl_serv_get_eswitch_mode, + .alloc_rep_data = nbl_serv_alloc_rep_data, + .free_rep_data = nbl_serv_free_rep_data, + .set_rep_netdev_info = nbl_serv_set_rep_netdev_info, + .unset_rep_netdev_info = nbl_serv_unset_rep_netdev_info, + .disable_phy_flow = nbl_serv_disable_phy_flow, + .enable_phy_flow = nbl_serv_enable_phy_flow, + .init_acl = nbl_serv_init_acl, + .uninit_acl = nbl_serv_uninit_acl, + .set_upcall_rule = nbl_serv_set_upcall_rule, + .unset_upcall_rule = nbl_serv_unset_upcall_rule, + .switchdev_init_cmdq = nbl_serv_switchdev_init_cmdq, + .switchdev_deinit_cmdq = nbl_serv_switchdev_deinit_cmdq, + .set_tc_flow_info = nbl_serv_set_tc_flow_info, + .unset_tc_flow_info = nbl_serv_unset_tc_flow_info, + .get_tc_flow_info = nbl_serv_get_tc_flow_info, + .register_indr_dev_tc_offload = nbl_serv_register_indr_dev_tc_offload, + .unregister_indr_dev_tc_offload = nbl_serv_unregister_indr_dev_tc_offload, + .set_lag_info = nbl_serv_set_lag_info, + .unset_lag_info = nbl_serv_unset_lag_info, + .set_netdev_ops = nbl_serv_set_netdev_ops, + + .get_vsi_id = nbl_serv_get_vsi_id, + .get_eth_id = nbl_serv_get_eth_id, + .setup_net_resource_mgt = 
nbl_serv_setup_net_resource_mgt, + .remove_net_resource_mgt = nbl_serv_remove_net_resource_mgt, + .enable_lag_protocol = nbl_serv_enable_lag_protocol, + .cfg_lag_hash_algorithm = nbl_serv_cfg_lag_hash_algorithm, + .cfg_lag_member_fwd = nbl_serv_cfg_lag_member_fwd, + .cfg_lag_member_list = nbl_serv_cfg_lag_member_list, + .cfg_lag_member_up_attr = nbl_serv_cfg_lag_member_up_attr, + .cfg_bond_shaping = nbl_serv_cfg_bond_shaping, + .cfg_bgid_back_pressure = nbl_serv_cfg_bgid_back_pressure, + + .get_rdma_cap_num = nbl_serv_get_rdma_cap_num, + .setup_rdma_id = nbl_serv_setup_rdma_id, + .remove_rdma_id = nbl_serv_remove_rdma_id, + .register_rdma = nbl_serv_register_rdma, + .unregister_rdma = nbl_serv_unregister_rdma, + .register_rdma_bond = nbl_serv_register_rdma_bond, + .unregister_rdma_bond = nbl_serv_unregister_rdma_bond, + .get_hw_addr = nbl_serv_get_hw_addr, + .get_real_hw_addr = nbl_serv_get_real_hw_addr, + .get_function_id = nbl_serv_get_function_id, + .get_real_bdf = nbl_serv_get_real_bdf, + .set_eth_mac_addr = nbl_serv_set_eth_mac_addr, + .process_abnormal_event = nbl_serv_process_abnormal_event, + .adapt_desc_gother = nbl_serv_adapt_desc_gother, + .process_flr = nbl_serv_process_flr, + .get_board_id = nbl_serv_get_board_id, + .covert_vfid_to_vsi_id = nbl_serv_covert_vfid_to_vsi_id, + .recovery_abnormal = nbl_serv_recovery_abnormal, + .keep_alive = nbl_serv_keep_alive, + + .get_devlink_info = nbl_serv_get_devlink_info, + .update_devlink_flash = nbl_serv_update_devlink_flash, + .get_adminq_tx_buf_size = nbl_serv_get_adminq_tx_buf_size, + .emp_console_write = nbl_serv_emp_console_write, + + .check_fw_heartbeat = nbl_serv_check_fw_heartbeat, + .check_fw_reset = nbl_serv_check_fw_reset, + .set_netdev_carrier_state = nbl_serv_set_netdev_carrier_state, + .cfg_eth_bond_event = nbl_serv_cfg_eth_bond_event, + .cfg_fd_update_event = nbl_serv_cfg_fd_update_event, + + /* For virtio */ + .configure_virtio_dev_msix = nbl_serv_configure_virtio_dev_msix, + .configure_rdma_msix_off = nbl_serv_configure_rdma_msix_off, + .configure_virtio_dev_ready = nbl_serv_configure_virtio_dev_ready, + + .setup_st = nbl_serv_setup_st, + .remove_st = nbl_serv_remove_st, + .get_vf_base_vsi_id = nbl_serv_get_vf_base_vsi_id, + + .setup_vf_config = nbl_serv_setup_vf_config, + .remove_vf_config = nbl_serv_remove_vf_config, + .setup_vf_resource = nbl_serv_setup_vf_resource, + .remove_vf_resource = nbl_serv_remove_vf_resource, + + .get_xdp_queue_info = nbl_serv_get_xdp_queue_info, + .set_hw_status = nbl_serv_set_hw_status, + .get_active_func_bitmaps = nbl_serv_get_active_func_bitmaps, + .configure_qos = nbl_serv_configure_qos, + .set_pfc_buffer_size = nbl_serv_set_pfc_buffer_size, + .get_pfc_buffer_size = nbl_serv_get_pfc_buffer_size, +}; + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_serv_setup_serv_mgt(struct nbl_common_info *common, + struct nbl_service_mgt **serv_mgt) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + *serv_mgt = devm_kzalloc(dev, sizeof(struct nbl_service_mgt), GFP_KERNEL); + if (!*serv_mgt) + return -ENOMEM; + + NBL_SERV_MGT_TO_COMMON(*serv_mgt) = common; + nbl_serv_setup_flow_mgt(NBL_SERV_MGT_TO_FLOW_MGT(*serv_mgt)); + + return 0; +} + +static void nbl_serv_remove_serv_mgt(struct nbl_common_info *common, + struct nbl_service_mgt **serv_mgt) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, *serv_mgt); + *serv_mgt = NULL; +} + +static void nbl_serv_remove_ops(struct device *dev, struct nbl_service_ops_tbl 
**serv_ops_tbl) +{ + devm_kfree(dev, *serv_ops_tbl); + *serv_ops_tbl = NULL; +} + +static int nbl_serv_setup_ops(struct device *dev, struct nbl_service_ops_tbl **serv_ops_tbl, + struct nbl_service_mgt *serv_mgt) +{ + *serv_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_service_ops_tbl), GFP_KERNEL); + if (!*serv_ops_tbl) + return -ENOMEM; + + NBL_SERV_OPS_TBL_TO_OPS(*serv_ops_tbl) = &serv_ops; + nbl_serv_setup_ethtool_ops(&serv_ops); + nbl_serv_setup_ktls_ops(&serv_ops); + nbl_serv_setup_xfrm_ops(&serv_ops); + NBL_SERV_OPS_TBL_TO_PRIV(*serv_ops_tbl) = serv_mgt; + + return 0; +} + +int nbl_serv_init(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_service_mgt **serv_mgt; + struct nbl_service_ops_tbl **serv_ops_tbl; + struct nbl_dispatch_ops_tbl *disp_ops_tbl; + struct nbl_dispatch_ops *disp_ops; + struct nbl_channel_ops_tbl *chan_ops_tbl; + int ret = 0; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + serv_mgt = (struct nbl_service_mgt **)&NBL_ADAPTER_TO_SERV_MGT(adapter); + serv_ops_tbl = &NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + disp_ops_tbl = NBL_ADAPTER_TO_DISP_OPS_TBL(adapter); + chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + disp_ops = disp_ops_tbl->ops; + + ret = nbl_serv_setup_serv_mgt(common, serv_mgt); + if (ret) + goto setup_mgt_fail; + + ret = nbl_serv_setup_ops(dev, serv_ops_tbl, *serv_mgt); + if (ret) + goto setup_ops_fail; + + NBL_SERV_MGT_TO_DISP_OPS_TBL(*serv_mgt) = disp_ops_tbl; + NBL_SERV_MGT_TO_CHAN_OPS_TBL(*serv_mgt) = chan_ops_tbl; + disp_ops->get_resource_pt_ops(disp_ops_tbl->priv, &(*serv_ops_tbl)->pt_ops); + + return 0; + +setup_ops_fail: + nbl_serv_remove_serv_mgt(common, serv_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_serv_remove(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_service_mgt **serv_mgt; + struct nbl_service_ops_tbl **serv_ops_tbl; + + if (!adapter) + return; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + serv_mgt = (struct nbl_service_mgt **)&NBL_ADAPTER_TO_SERV_MGT(adapter); + serv_ops_tbl = &NBL_ADAPTER_TO_SERV_OPS_TBL(adapter); + + nbl_serv_remove_ops(dev, serv_ops_tbl); + nbl_serv_remove_serv_mgt(common, serv_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h new file mode 100644 index 0000000000000000000000000000000000000000..a7e4265d09549b052a080e2f0edf09da21ee3657 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_service.h @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_SERVICE_H_
+#define _NBL_SERVICE_H_
+
+#include
+#include
+#include "nbl_core.h"
+
+#define NBL_SERV_MGT_TO_COMMON(serv_mgt) ((serv_mgt)->common)
+#define NBL_SERV_MGT_TO_DEV(serv_mgt) NBL_COMMON_TO_DEV(NBL_SERV_MGT_TO_COMMON(serv_mgt))
+#define NBL_SERV_MGT_TO_RING_MGT(serv_mgt) (&(serv_mgt)->ring_mgt)
+#define NBL_SERV_MGT_TO_REP_QUEUE_MGT(serv_mgt) ((serv_mgt)->rep_queue_mgt)
+#define NBL_SERV_MGT_TO_FLOW_MGT(serv_mgt) (&(serv_mgt)->flow_mgt)
+#define NBL_SERV_MGT_TO_NET_RES_MGT(serv_mgt) ((serv_mgt)->net_resource_mgt)
+#define NBL_SERV_MGT_TO_TC_MGT(serv_mgt) (&(serv_mgt)->tc_mgt)
+#define NBL_SERV_MGT_TO_ST_MGT(serv_mgt) ((serv_mgt)->st_mgt)
+
+#define NBL_SERV_MGT_TO_DISP_OPS_TBL(serv_mgt) ((serv_mgt)->disp_ops_tbl)
+#define NBL_SERV_MGT_TO_DISP_OPS(serv_mgt) (NBL_SERV_MGT_TO_DISP_OPS_TBL(serv_mgt)->ops)
+#define NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt) (NBL_SERV_MGT_TO_DISP_OPS_TBL(serv_mgt)->priv)
+
+#define NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt) ((serv_mgt)->chan_ops_tbl)
+#define NBL_SERV_MGT_TO_CHAN_OPS(serv_mgt) (NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt)->ops)
+#define NBL_SERV_MGT_TO_CHAN_PRIV(serv_mgt) (NBL_SERV_MGT_TO_CHAN_OPS_TBL(serv_mgt)->priv)
+
+#define NBL_DEFAULT_VLAN_ID 0
+
+#define NBL_REP_QUEUE_MGT_DESC_MAX (32768)
+#define NBL_REP_QUEUE_MGT_DESC_NUM (2048)
+#define NBL_REP_PER_VSI_QUEUE_NUM (1)
+#define NBL_DEFAULT_REP_TX_RETRY_NUM 2
+#define NBL_DEFAULT_REP_TX_MAX_NUM 8192
+
+#define NBL_MAX_QUEUE_TC_NUM (8)
+#define NBL_TC_WEIGHT_GRAVITY (10)
+#define NBL_TC_MBPS_DIVSIOR (125000)
+
+#define NBL_TX_TSO_MSS_MIN (256)
+#define NBL_TX_TSO_MSS_MAX (16383)
+#define NBL_TX_TSO_L2L3L4_HDR_LEN_MIN (42)
+#define NBL_TX_TSO_L2L3L4_HDR_LEN_MAX (128)
+#define NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX (255)
+
+#define NBL_FLAG_AQ_MODIFY_MAC_FILTER BIT(0)
+#define NBL_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT(1)
+
+#define NBL_EEPROM_LENGTH (0)
+
+/* input set */
+#define NBL_MAC_ADDR_LEN_U8 6
+
+#define NBL_FLOW_IN_PORT_TYPE_ETH 0x0
+#define NBL_FLOW_IN_PORT_TYPE_LAG 0x400
+#define NBL_FLOW_IN_PORT_TYPE_VSI 0x800
+
+#define NBL_FLOW_OUT_PORT_TYPE_VSI 0x0
+#define NBL_FLOW_OUT_PORT_TYPE_ETH 0x10
+#define NBL_FLOW_OUT_PORT_TYPE_LAG 0x20
+
+#define SET_DPORT_TYPE_VSI_HOST (0)
+#define SET_DPORT_TYPE_VSI_ECPU (1)
+#define SET_DPORT_TYPE_ETH_LAG (2)
+#define SET_DPORT_TYPE_SP_PORT (3)
+
+#define NBL_VLAN_PCP_SHIFT 13
+
+#define NBL_DEVLINK_INFO_FRIMWARE_VERSION_LEN 32
+#define NBL_DEVLINK_FLASH_COMPONENT_CRC_SIZE 4
+
+/* For customized P4 */
+#define NBL_P4_ELF_IDENT "\x7F\x45\x4C\x46\x01\x01\x01\x00"
+#define NBL_P4_ELF_IDENT_LEN 8
+#define NBL_P4_VERIFY_CODE_LEN 9
+#define NBL_P4_PRODUCT_INFO_SECTION_NAME "product_info"
+#define NBL_MD5SUM_LEN 16
+
+enum {
+	NBL_MGT_SERV_MGT,
+	NBL_MGT_SERV_RDMA,
+};
+
+enum {
+	NBL_NET_SERV_NET,
+	NBL_NET_SERV_RDMA,
+};
+
+enum {
+	NBL_TC_INVALID,
+	NBL_TC_RUNNING,
+};
+
+struct nbl_serv_ring {
+	dma_addr_t dma;
+	u16 index;
+	u16 local_queue_id;
+	u16 global_queue_id;
+	bool need_recovery;
+	u32 tx_timeout_count;
+};
+
+struct nbl_serv_vector {
+	char name[32];
+	cpumask_t cpumask;
+	struct net_device *netdev;
+	struct napi_struct *napi;
+	struct nbl_serv_ring *tx_ring;
+	struct nbl_serv_ring *rx_ring;
+	u8 __iomem *irq_enable_base;
+	u32 irq_data;
+	u16 local_vector_id;
+	u16 global_vector_id;
+	u16 intr_rate_usecs;
+	u16 intr_suppress_level;
+};
+
+struct nbl_serv_ring_vsi_info {
+	u16 vsi_index;
+	u16 vsi_id;
+	u16 ring_offset;
+	u16 ring_num;
+	u16
active_ring_num; + bool itr_dynamic; + bool started; +}; + +struct nbl_serv_ring_mgt { + struct nbl_serv_ring *tx_rings; + struct nbl_serv_ring *rx_rings; + struct nbl_serv_vector *vectors; + void *xdp_prog; + struct nbl_serv_ring_vsi_info vsi_info[NBL_VSI_MAX]; + u16 tx_desc_num; + u16 rx_desc_num; + u16 tx_ring_num; + u16 rx_ring_num; + u16 xdp_ring_offset; + u16 active_ring_num; + bool net_msix_mask_en; +}; + +struct nbl_serv_vlan_node { + struct list_head node; + u16 vid; + u16 ref_cnt; +}; + +struct nbl_serv_submac_node { + struct list_head node; + u8 mac[ETH_ALEN]; +}; + +struct nbl_serv_flow_mgt { + struct list_head vlan_list; + struct list_head submac_list; + u16 vid; + u8 mac[ETH_ALEN]; + u8 eth; +}; + +struct nbl_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; +}; + +struct nbl_serv_tc_mgt { + int state; + u16 orig_num_active_queues; + u16 num_tc; + u16 total_qps; +}; + +enum nbl_adapter_flags { + /* p4 flags must be at the start */ + NBL_FLAG_P4_DEFAULT, + NBL_FLAG_LINK_DOWN_ON_CLOSE, + NBL_FLAG_NRZ_RS_FEC_544_SUPPORT, + NBL_ADAPTER_FLAGS_MAX +}; + +struct nbl_serv_lag_info { + struct net_device *bond_netdev; + u16 lag_num; + u8 lag_id; +}; + +struct nbl_serv_netdev_ops { + void *pf_netdev_ops; + void *rep_netdev_ops; +}; + +struct nbl_serv_rep_drop { + struct u64_stats_sync rep_drop_syncp; + u64 tx_dropped; +}; + +struct nbl_sysfs_vf_config_attr { + struct kobj_attribute mac_attr; + struct kobj_attribute rate_attr; + struct kobj_attribute spoofchk_attr; + struct kobj_attribute state_attr; + void *priv; + int vf_id; +}; + +struct nbl_serv_vf_info { + struct kobject kobj; + void *priv; + u16 vf_id; + + int state; + int spoof_check; + int max_tx_rate; + u8 mac[ETH_ALEN]; + u16 vlan; + u16 vlan_proto; + u8 vlan_qos; +}; + +struct nbl_serv_net_resource_mgt { + struct nbl_service_mgt *serv_mgt; + struct net_device *netdev; + struct work_struct net_stats_update; + struct work_struct rx_mode_async; + struct work_struct tx_timeout; + struct work_struct update_link_state; + struct work_struct update_vlan; + struct delayed_work watchdog_task; + struct timer_list serv_timer; + unsigned long serv_timer_period; + + /* spinlock_t for rx mode submac */ + spinlock_t mac_vlan_list_lock; + /* spinlock_t for rx mode promisc */ + spinlock_t current_netdev_promisc_flags_lock; + struct list_head mac_filter_list; + struct list_head indr_dev_priv_list; + struct nbl_serv_lag_info *lag_info; + struct nbl_serv_netdev_ops netdev_ops; + u32 rxmode_set_required; + u16 curr_promiscuout_mode; + u16 user_promisc_mode; + u16 num_net_msix; + int num_vfs; + int total_vfs; + + /* stats for netdev */ + u64 get_stats_jiffies; + struct nbl_stats stats; + struct nbl_priv_stats priv_stats; + struct nbl_phy_caps phy_caps; + struct nbl_serv_rep_drop *rep_drop; + struct nbl_serv_vf_info *vf_info; + struct kobject *sriov_kobj; + u32 configured_speed; + u32 configured_fec; + u16 bridge_mode; + int link_forced; + + u16 vlan_tci; + u16 vlan_proto; + int max_tx_rate; + u8 pfc_mode; + u8 dscp2prio_map[NBL_DSCP_MAX]; /* DSCP -> Priority map */ +}; + +struct nbl_serv_rep_queue_mgt { + struct ptr_ring ring; + struct net_device *netdev; + + /* spinlock_t for queue mgt */ + spinlock_t seq_lock; + int size; +}; + +#define IOCTL_TYPE 'n' +#define IOCTL_PASSTHROUGH _IOWR(IOCTL_TYPE, 0x01, struct nbl_passthrough_fw_cmd_param) +#define IOCTL_ST_INFO _IOR(IOCTL_TYPE, 0x02, struct nbl_st_info_param) + +#define IOCTL_ST_INFO_VERSION 0x10 /* 1.0 */ + +#define NBL_RESTOOL_NAME_LEN 32 +struct nbl_serv_st_mgt { + void *serv_mgt; + 
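+	/* char device state backing the software-tool ioctl interface */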
struct cdev cdev; + int major; + int minor; + dev_t devno; + int subdev_id; +}; + +struct nbl_service_mgt { + struct nbl_common_info *common; + struct nbl_dispatch_ops_tbl *disp_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_serv_ring_mgt ring_mgt; + struct nbl_serv_rep_queue_mgt *rep_queue_mgt; + struct nbl_serv_flow_mgt flow_mgt; + struct nbl_serv_net_resource_mgt *net_resource_mgt; + struct nbl_serv_tc_mgt tc_mgt; + struct nbl_serv_st_mgt *st_mgt; + DECLARE_BITMAP(flags, NBL_ADAPTER_FLAGS_MAX); +}; + +struct nbl_serv_update_fw_priv { + struct pldmfw context; + struct netlink_ext_ack *extack; + struct nbl_service_mgt *serv_mgt; +}; + +struct nbl_serv_pldm_pci_record_id { + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem_device; +}; + +struct nbl_tc_flow_parse_pattern { + u32 pattern_type; + int (*parse_func)(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common); +}; + +struct nbl_tc_flow_action_driver_ops { + int (*act_update)(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filterr, + struct nbl_tc_flow_param *param); +}; + +struct nbl_serv_notify_vlan_param { + u16 vlan_tci; + u16 vlan_proto; +}; + +int nbl_serv_netdev_open(struct net_device *netdev); +int nbl_serv_netdev_stop(struct net_device *netdev); +int nbl_serv_vsi_open(void *priv, struct net_device *netdev, u16 vsi_index, + u16 real_qps, bool use_napi); +int nbl_serv_vsi_stop(void *priv, u16 vsi_index); +void nbl_serv_get_rep_drop_stats(struct nbl_service_mgt *serv_mgt, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats); +void nbl_serv_cpu_affinity_init(void *priv, u16 rings_num); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..34bbe735bd7404f48ae8360b3da38be94cdf944e --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan
+ */
+
+#include "nbl_dev.h"
+
+const char *const nbl_sysfs_qos_name[] = {
+	/* rdma */
+	"save",
+	"tc2pri",
+	"sq_pri_map",
+	"raq_pri_map",
+	"pri_imap",
+	"pfc_imap",
+	"db_to_csch_en",
+	"sw_db_csch_th",
+	"csch_qlen_th",
+	"poll_wgt",
+	"sp_wrr",
+	"tc_wgt",
+
+	"pfc",
+	"pfc_buffer",
+	"trust",
+	"dscp2prio",
+};
+
+static ssize_t dscp2prio_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_net_qos *qos_config = &net_dev->qos_config;
+	int len = 0;
+	int i;
+
+	len += snprintf(buf + len, PAGE_SIZE - len, "dscp2prio mapping:\n");
+	for (i = 0; i < NBL_DSCP_MAX; i++)
+		len += snprintf(buf + len, PAGE_SIZE - len, "\tprio:%d dscp:%d,\n",
+				qos_config->dscp2prio_map[i], i);
+
+	return len;
+}
+
+static ssize_t dscp2prio_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_net_qos *qos_config = &net_dev->qos_config;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	char cmd[8] = {'\0'};
+	int dscp, prio, ret;
+	int i;
+
+	ret = sscanf(buf, "%7[^,], %d , %d", cmd, &dscp, &prio);
+
+	if (strncmp(cmd, "set", 3) == 0) {
+		if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX || prio < 0 || prio > 7)
+			return -EINVAL;
+		qos_config->dscp2prio_map[dscp] = prio;
+	} else if (strncmp(cmd, "del", 3) == 0) {
+		if (ret != 3 || dscp < 0 || dscp >= NBL_DSCP_MAX)
+			return -EINVAL;
+		if (qos_config->dscp2prio_map[dscp] == 0)
+			return -EINVAL;
+		qos_config->dscp2prio_map[dscp] = 0;
+	} else if (strncmp(cmd, "flush", 5) == 0) {
+		for (i = 0; i < NBL_DSCP_MAX; i++)
+			qos_config->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES;
+	} else {
+		return -EINVAL;
+	}
+
+	serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_ETH_ID(common),
+				qos_config->pfc, qos_config->trust_mode, qos_config->dscp2prio_map);
+
+	return count;
+}
+
+static ssize_t trust_mode_show(struct nbl_sysfs_qos_info *qos_info, char *buf)
+{
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_net_qos *qos_config = &net_dev->qos_config;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 qos_config->trust_mode == NBL_TRUST_MODE_DSCP ?
"dscp" : "802.1p"); +} + +static ssize_t trust_mode_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_net_qos *qos_config = &net_dev->qos_config; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + u8 trust_mode; + int ret; + + if (strncmp(buf, "dscp", 4) == 0) { + trust_mode = NBL_TRUST_MODE_DSCP; + } else if (strncmp(buf, "802.1p", 6) == 0) { + trust_mode = NBL_TRUST_MODE_8021P; + } else { + netdev_err(net_dev->netdev, "Invalid trust mode: %s\n", buf); + return -EINVAL; + } + + if (qos_config->trust_mode == trust_mode) + return count; + + ret = serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), + qos_config->pfc, trust_mode, qos_config->dscp2prio_map); + if (ret) { + netdev_err(net_dev->netdev, "configure_qos trust mode: %s failed\n", buf); + return -EIO; + } + + qos_config->trust_mode = trust_mode; + + netdev_info(net_dev->netdev, "Trust mode set to %s\n", buf); + return count; +} + +static ssize_t pfc_buffer_size_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_net_qos *qos_config = &net_dev->qos_config; + int prio; + ssize_t count = 0; + + for (prio = 0; prio < NBL_MAX_PFC_PRIORITIES; prio++) + count += snprintf(buf + count, PAGE_SIZE - count, "prio %d, xoff %d, xon %d\n", + prio, qos_config->buffer_sizes[prio][0], + qos_config->buffer_sizes[prio][1]); + + return count; +} + +static ssize_t pfc_buffer_size_store(struct nbl_sysfs_qos_info *qos_info, + const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_net_qos *qos_config = &net_dev->qos_config; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + int prio, xoff, xon; + int ret; + + if (sscanf(buf, "%d,%d,%d", &prio, &xoff, &xon) != 3) + return -EINVAL; + + if (prio < 0 || prio >= NBL_MAX_PFC_PRIORITIES) + return -EINVAL; + + ret = serv_ops->set_pfc_buffer_size(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), prio, xoff, xon); + if (ret) { + netdev_err(net_dev->netdev, "set_pfc_buffer_size failed\n"); + return ret; + } + qos_config->buffer_sizes[prio][0] = xoff; + qos_config->buffer_sizes[prio][1] = xon; + + return count; +} + +static ssize_t pfc_show(struct nbl_sysfs_qos_info *qos_info, char *buf) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_net_qos *qos_config = &net_dev->qos_config; + + return scnprintf(buf, PAGE_SIZE, "%d,%d,%d,%d,%d,%d,%d,%d\n", + qos_config->pfc[0], qos_config->pfc[1], + qos_config->pfc[2], qos_config->pfc[3], + qos_config->pfc[4], qos_config->pfc[5], + qos_config->pfc[6], qos_config->pfc[7]); +} + +static ssize_t pfc_store(struct nbl_sysfs_qos_info *qos_info, const char *buf, size_t count) +{ + struct nbl_dev_net *net_dev = qos_info->net_dev; + struct nbl_net_qos *qos_config = &net_dev->qos_config; + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = 
net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+	struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt);
+	struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+	u8 pfc_config[NBL_MAX_PFC_PRIORITIES];
+	int ret, i;
+	ssize_t len = count;
+	bool changed = false;
+
+	while (len > 0 && (buf[len - 1] == '\n' || buf[len - 1] == ' '))
+		len--;
+
+	if (len == 0) {
+		netdev_err(net_dev->netdev, "Invalid input: no data to parse.\n");
+		return -EINVAL;
+	}
+
+	if (len != 15) {
+		netdev_err(net_dev->netdev, "Invalid input length %zd.\n", len);
+		return -EINVAL;
+	}
+
+	ret = sscanf(buf, "%hhd,%hhd,%hhd,%hhd,%hhd,%hhd,%hhd,%hhd",
+		     &pfc_config[0], &pfc_config[1], &pfc_config[2], &pfc_config[3],
+		     &pfc_config[4], &pfc_config[5], &pfc_config[6], &pfc_config[7]);
+
+	if (ret != NBL_MAX_PFC_PRIORITIES) {
+		netdev_err(net_dev->netdev, "Failed to parse PFC. Expected 8 got %d\n", ret);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++)
+		if (pfc_config[i] > 1)
+			return -EINVAL;
+
+	for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) {
+		if (pfc_config[i] != qos_config->pfc[i]) {
+			changed = true;
+			break;
+		}
+	}
+
+	if (!changed)
+		return count;
+
+	netdev_info(net_dev->netdev, "Parsed PFC configuration: %u %u %u %u %u %u %u %u\n",
+		    pfc_config[0], pfc_config[1], pfc_config[2], pfc_config[3],
+		    pfc_config[4], pfc_config[5], pfc_config[6], pfc_config[7]);
+
+	ret = serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt),
+				      NBL_COMMON_TO_ETH_ID(common), pfc_config,
+				      qos_config->trust_mode, qos_config->dscp2prio_map);
+	if (ret) {
+		netdev_err(net_dev->netdev, "configure_qos pfc: %s failed\n", buf);
+		return -EIO;
+	}
+
+	for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++)
+		qos_config->pfc[i] = pfc_config[i];
+
+	return count;
+}
+
+static ssize_t nbl_qos_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct nbl_sysfs_qos_info *qos_info =
+		container_of(attr, struct nbl_sysfs_qos_info, kobj_attr);
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+
+	switch (qos_info->offset) {
+	case NBL_QOS_PFC:
+		return pfc_show(qos_info, buf);
+	case NBL_QOS_TRUST:
+		return trust_mode_show(qos_info, buf);
+	case NBL_QOS_DSCP2PRIO:
+		return dscp2prio_show(qos_info, buf);
+	case NBL_QOS_PFC_BUFFER:
+		return pfc_buffer_size_show(qos_info, buf);
+	case NBL_QOS_RDMA_SAVE:
+	case NBL_QOS_RDMA_TC2PRI:
+	case NBL_QOS_RDMA_SQ_PRI_MAP:
+	case NBL_QOS_RDMA_RAQ_PRI_MAP:
+	case NBL_QOS_RDMA_PRI_IMAP:
+	case NBL_QOS_RDMA_PFC_IMAP:
+	case NBL_QOS_RDMA_DB_TO_CSCH_EN:
+	case NBL_QOS_RDMA_SW_DB_CSCH_TH:
+	case NBL_QOS_RDMA_CSCH_QLEN_TH:
+	case NBL_QOS_RDMA_POLL_WGT:
+	case NBL_QOS_RDMA_SP_WRR:
+	case NBL_QOS_RDMA_TC_WGT:
+		return nbl_dev_rdma_qos_cfg_show(dev_mgt, qos_info->offset, buf);
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t nbl_qos_store(struct kobject *kobj, struct kobj_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct nbl_sysfs_qos_info *qos_info =
+		container_of(attr, struct nbl_sysfs_qos_info, kobj_attr);
+	struct nbl_dev_net *net_dev = qos_info->net_dev;
+	struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev);
+	struct nbl_adapter *adapter = net_priv->adapter;
+	struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+
+	switch (qos_info->offset) {
+	case NBL_QOS_PFC:
+		return pfc_store(qos_info, buf, count);
+ case NBL_QOS_TRUST: + return trust_mode_store(qos_info, buf, count); + case NBL_QOS_DSCP2PRIO: + return dscp2prio_store(qos_info, buf, count); + case NBL_QOS_PFC_BUFFER: + return pfc_buffer_size_store(qos_info, buf, count); + case NBL_QOS_RDMA_SAVE: + case NBL_QOS_RDMA_TC2PRI: + case NBL_QOS_RDMA_SQ_PRI_MAP: + case NBL_QOS_RDMA_RAQ_PRI_MAP: + case NBL_QOS_RDMA_PRI_IMAP: + case NBL_QOS_RDMA_PFC_IMAP: + case NBL_QOS_RDMA_DB_TO_CSCH_EN: + case NBL_QOS_RDMA_SW_DB_CSCH_TH: + case NBL_QOS_RDMA_CSCH_QLEN_TH: + case NBL_QOS_RDMA_POLL_WGT: + case NBL_QOS_RDMA_SP_WRR: + case NBL_QOS_RDMA_TC_WGT: + return nbl_dev_rdma_qos_cfg_store(dev_mgt, qos_info->offset, buf, count); + default: + return -EINVAL; + } +} + +static void nbl_init_qos_config(struct nbl_dev_net *net_dev) +{ + struct nbl_netdev_priv *net_priv = netdev_priv(net_dev->netdev); + struct nbl_adapter *adapter = net_priv->adapter; + struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter); + struct nbl_service_ops *serv_ops = NBL_DEV_MGT_TO_SERV_OPS(dev_mgt); + struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt); + struct nbl_net_qos *qos_config = &net_dev->qos_config; + int i; + + for (i = 0; i < NBL_DSCP_MAX; i++) + qos_config->dscp2prio_map[i] = i / NBL_MAX_PFC_PRIORITIES; + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) + serv_ops->get_pfc_buffer_size(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), + NBL_COMMON_TO_ETH_ID(common), i, + &qos_config->buffer_sizes[i][0], + &qos_config->buffer_sizes[i][1]); + + serv_ops->configure_qos(NBL_DEV_MGT_TO_SERV_PRIV(dev_mgt), NBL_COMMON_TO_ETH_ID(common), + qos_config->pfc, qos_config->trust_mode, qos_config->dscp2prio_map); +} + +int nbl_netdev_add_sysfs(struct net_device *netdev, struct nbl_dev_net *net_dev) +{ + int ret; + int i; + + nbl_init_qos_config(net_dev); + net_dev->qos_config.qos_kobj = kobject_create_and_add("qos", &netdev->dev.kobj); + if (!net_dev->qos_config.qos_kobj) + return -ENOMEM; + + for (i = 0; i < NBL_QOS_TYPE_MAX; i++) { + net_dev->qos_config.qos_info[i].net_dev = net_dev; + net_dev->qos_config.qos_info[i].offset = i; + /* create qos sysfs */ + sysfs_attr_init(&net_dev->qos_config.qos_info[i].kobj_attr.attr); + net_dev->qos_config.qos_info[i].kobj_attr.attr.name = nbl_sysfs_qos_name[i]; + net_dev->qos_config.qos_info[i].kobj_attr.attr.mode = 0644; + net_dev->qos_config.qos_info[i].kobj_attr.show = nbl_qos_show; + net_dev->qos_config.qos_info[i].kobj_attr.store = nbl_qos_store; + ret = sysfs_create_file(net_dev->qos_config.qos_kobj, + &net_dev->qos_config.qos_info[i].kobj_attr.attr); + if (ret) + netdev_err(netdev, "Failed to create %s sysfs file\n", + nbl_sysfs_qos_name[i]); + } + + return 0; +} + +void nbl_netdev_remove_sysfs(struct nbl_dev_net *net_dev) +{ + int i; + + if (!net_dev->qos_config.qos_kobj) + return; + + for (i = 0; i < NBL_QOS_TYPE_MAX; i++) + sysfs_remove_file(net_dev->qos_config.qos_kobj, + &net_dev->qos_config.qos_info[i].kobj_attr.attr); + + kobject_put(net_dev->qos_config.qos_kobj); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h new file mode 100644 index 0000000000000000000000000000000000000000..3d049928f7267ec8afe40ddb45b88dd7045f5bae --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_sysfs.h @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_SYSFS_H_ +#define _NBL_SYSFS_H_ + +enum nbl_qos_param_types { + NBL_QOS_RDMA_SAVE, + NBL_QOS_RDMA_TC2PRI, + NBL_QOS_RDMA_SQ_PRI_MAP, + NBL_QOS_RDMA_RAQ_PRI_MAP, + NBL_QOS_RDMA_PRI_IMAP, + NBL_QOS_RDMA_PFC_IMAP, + NBL_QOS_RDMA_DB_TO_CSCH_EN, + NBL_QOS_RDMA_SW_DB_CSCH_TH, + NBL_QOS_RDMA_CSCH_QLEN_TH, + NBL_QOS_RDMA_POLL_WGT, + NBL_QOS_RDMA_SP_WRR, + + /* function base */ + NBL_QOS_RDMA_TC_WGT, + NBL_QOS_PFC, + NBL_QOS_PFC_BUFFER, + NBL_QOS_TRUST, + NBL_QOS_DSCP2PRIO, + NBL_QOS_TYPE_MAX +}; + +struct nbl_sysfs_qos_info { + int offset; + struct nbl_dev_net *net_dev; + struct kobj_attribute kobj_attr; +}; + +struct nbl_net_qos { + struct kobject *qos_kobj; + struct nbl_sysfs_qos_info qos_info[NBL_QOS_TYPE_MAX]; + u8 pfc[NBL_MAX_PFC_PRIORITIES]; + u8 trust_mode; /* Trust Mode value 0:802.1p 1: dscp */ + u8 dscp2prio_map[NBL_DSCP_MAX]; /* DSCP -> Priority map */ + int buffer_sizes[NBL_MAX_PFC_PRIORITIES][2]; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c new file mode 100644 index 0000000000000000000000000000000000000000..72b40636916196f58a4e190e3e15409d368bb449 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.c @@ -0,0 +1,1105 @@ +#include +#include +#include "nbl_tc.h" +#include "nbl_tc_tun.h" + +static int nbl_tc_parse_proto(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_basic match; + u16 type = 0; + + flow_rule_match_basic(rule, &match); + + if (match.key->n_proto & match.mask->n_proto) { + type = ntohs(match.key->n_proto); + if (type != ETH_P_IP && type != ETH_P_IPV6 && + type != ETH_P_8021Q && type != ETH_P_8021AD) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow with ethtype 0x%04x is not supported\n", + type); + return -EOPNOTSUPP; + } + + filter->input.l2_data.ether_type = ntohs(match.key->n_proto); + filter->input.l2_mask.ether_type = ntohs(match.mask->n_proto); + filter->key_flag |= NBL_FLOW_KEY_ETHERTYPE_FLAG; + } + if (match.key->ip_proto & match.mask->ip_proto) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow with ip proto match is not supported\n"); + return -EOPNOTSUPP; + } + + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow parse proto l2_data.ether_type=0x%04x, l2_mask.ether_type=0x%04x", + filter->input.l2_data.ether_type, filter->input.l2_mask.ether_type); + return 0; +} + +static int nbl_tc_parse_eth(const struct flow_rule *rule, + struct nbl_flow_pattern_conf *filter, + const struct nbl_common_info *common) +{ + struct flow_match_eth_addrs match; + int idx = 0; + + flow_rule_match_eth_addrs(rule, &match); + + if (match.key && match.mask) { + if (is_broadcast_ether_addr(match.key->dst) || + is_multicast_ether_addr(match.key->dst) || + is_zero_ether_addr(match.key->dst) || + !is_broadcast_ether_addr(match.mask->dst)) { + /* ignore src mac check for normal flow offload */ + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow dmac broadcast, multicast or fuzzy match is not supported\n"); + return -EOPNOTSUPP; + } + + ether_addr_copy(filter->input.l2_mask.dst_mac, match.mask->dst); + for (idx = 0; idx < ETH_ALEN; idx++) + filter->input.l2_data.dst_mac[idx] = match.key->dst[ETH_ALEN - 1 - idx]; + + filter->key_flag |= NBL_FLOW_KEY_DSTMAC_FLAG; + /* set vlan flag to match table profile graph even there is no vlan match */ + filter->key_flag |= NBL_FLOW_KEY_SVLAN_FLAG; + filter->key_flag |= NBL_FLOW_KEY_CVLAN_FLAG; + } + + nbl_debug(common, 
NBL_DEBUG_FLOW,
+		  "tc flow l2_data.dst_mac=0x%02x:%02x:%02x:%02x:%02x:%02x",
+		  filter->input.l2_data.dst_mac[5], filter->input.l2_data.dst_mac[4],
+		  filter->input.l2_data.dst_mac[3], filter->input.l2_data.dst_mac[2],
+		  filter->input.l2_data.dst_mac[1], filter->input.l2_data.dst_mac[0]);
+
+	return 0;
+}
+
+static int nbl_tc_parse_control(const struct flow_rule *rule,
+				struct nbl_flow_pattern_conf *filter,
+				const struct nbl_common_info *common)
+{
+	struct flow_match_control match;
+
+	flow_rule_match_control(rule, &match);
+
+	if (match.key->addr_type & match.mask->addr_type) {
+		filter->input.l2_data.ether_type = ntohs(match.key->addr_type);
+		filter->input.l2_mask.ether_type = ntohs(match.mask->addr_type);
+	}
+
+	nbl_debug(common, NBL_DEBUG_FLOW,
+		  "tc flow parse control.ether_type=0x%04x",
+		  filter->input.l2_data.ether_type);
+	return 0;
+}
+
+static int nbl_tc_parse_vlan(const struct flow_rule *rule,
+			     struct nbl_flow_pattern_conf *filter,
+			     const struct nbl_common_info *common)
+{
+	struct flow_match_vlan match;
+
+	flow_rule_match_vlan(rule, &match);
+	if (match.key && match.mask) {
+		if (match.mask->vlan_id == VLAN_VID_MASK) {
+			filter->input.svlan_tag = match.key->vlan_id & 0xFFF;
+			filter->input.svlan_mask = match.mask->vlan_id;
+			filter->input.svlan_type = filter->input.l2_data.ether_type;
+			filter->input.vlan_cnt++;
+			nbl_debug(common, NBL_DEBUG_FLOW, "tc flow l2data.vlan_id=%d,vlan_type=0x%04x",
+				  filter->input.svlan_tag, filter->input.svlan_type);
+		} else {
+			nbl_info(common, NBL_DEBUG_FLOW, "tc flow fuzzy vlan mask 0x%04x is not supported\n",
+				 match.mask->vlan_id);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int nbl_tc_parse_cvlan(const struct flow_rule *rule,
+			      struct nbl_flow_pattern_conf *filter,
+			      const struct nbl_common_info *common)
+{
+	filter->input.is_cvlan = true;
+
+	return 0;
+}
+
+static int nbl_tc_parse_tunnel_ip(const struct flow_rule *rule,
+				  struct nbl_flow_pattern_conf *filter,
+				  const struct nbl_common_info *common)
+{
+	return 0;
+}
+
+static int nbl_tc_parse_tunnel_ports(const struct flow_rule *rule,
+				     struct nbl_flow_pattern_conf *filter,
+				     const struct nbl_common_info *common)
+{
+	struct flow_match_ports enc_ports;
+
+	flow_rule_match_enc_ports(rule, &enc_ports);
+
+	if (memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
+		nbl_err(common, NBL_DEBUG_FLOW, "nbl tc parse tunnel err: "
+			"udp tunnel decap filter must match dst_port fully.\n");
+		return -EOPNOTSUPP;
+	}
+
+	filter->input.l4_outer.dst_port = be16_to_cpu(enc_ports.key->dst);
+	filter->input.l4_mask_outer.dst_port = enc_ports.mask->dst;
+
+	filter->key_flag |= NBL_FLOW_KEY_T_DSTPORT_FLAG;
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl udp:dport:0x%x.\n",
+		  filter->input.l4_outer.dst_port);
+
+	return 0;
+}
+
+static int nbl_tc_parse_tunnel_keyid(const struct flow_rule *rule,
+				     struct nbl_flow_pattern_conf *filter,
+				     const struct nbl_common_info *common)
+{
+	struct flow_match_enc_keyid enc_keyid;
+#define NBL_TC_VNI_FLAG_BIT 8
+
+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+		return 0;
+
+	flow_rule_match_enc_keyid(rule, &enc_keyid);
+	if (!enc_keyid.mask->keyid)
+		return 0;
+
+	filter->input.tnl.vni = be32_to_cpu(enc_keyid.key->keyid) << NBL_TC_VNI_FLAG_BIT;
+	filter->input.tnl_mask.vni = enc_keyid.mask->keyid;
+
+	filter->key_flag |= NBL_FLOW_KEY_T_VNI_FLAG;
+	nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl keyid:0x%x/0x%x.\n",
+		  filter->input.tnl.vni, filter->input.tnl_mask.vni);
+
+	return 0;
+}
+
+static bool
+nbl_tc_find_ipv4_address(const struct net_device *dev, __be32 ipv4_addr)
+{
+	bool ip_find = false;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev = in_dev_get(dev);
+
+	/* check whether the dev has the ip addr */
+	if (!in_dev)
+		goto end;
+
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		if (ifa->ifa_address == ipv4_addr) {
+			ip_find = true;
+			break;
+		}
+	}
+
+	in_dev_put(in_dev);
+
+end:
+	return ip_find;
+}
+
+static bool
+nbl_tc_find_vlan_dev_ipv4_address(const struct net_device *dev, __be32 ipv4_addr)
+{
+	struct net_device *child;
+	const struct net_device *real_dev;
+	bool ip_find = false;
+
+	for_each_netdev(dev_net(dev), child) {
+		if (is_vlan_dev(child)) {
+			real_dev = vlan_dev_real_dev(child);
+			if (real_dev != dev)
+				continue;
+			ip_find = nbl_tc_find_ipv4_address(child, ipv4_addr);
+			if (ip_find)
+				break;
+		}
+	}
+
+	return ip_find;
+}
+
+static bool
+nbl_tc_find_ipv6_address(const struct net_device *dev, struct in6_addr ipv6_addr)
+{
+	bool ip_find = false;
+	struct inet6_ifaddr *ifa6;
+	struct inet6_dev *in6_dev = in6_dev_get(dev);
+
+	/* check whether the dev has the ip addr */
+	if (!in6_dev)
+		goto end;
+
+	read_lock_bh(&in6_dev->lock);
+	list_for_each_entry(ifa6, &in6_dev->addr_list, if_list) {
+		if (!memcmp(&ifa6->addr, &ipv6_addr, sizeof(ifa6->addr))) {
+			ip_find = true;
+			break;
+		}
+	}
+	read_unlock_bh(&in6_dev->lock);
+
+	in6_dev_put(in6_dev);
+
+end:
+	return ip_find;
+}
+
+static bool
+nbl_tc_find_vlan_dev_ipv6_address(const struct net_device *dev, struct in6_addr ipv6_addr)
+{
+	struct net_device *child;
+	const struct net_device *real_dev;
+	bool ip_find = false;
+
+	for_each_netdev(dev_net(dev), child) {
+		if (is_vlan_dev(child)) {
+			real_dev = vlan_dev_real_dev(child);
+			if (real_dev != dev)
+				continue;
+			ip_find = nbl_tc_find_ipv6_address(child, ipv6_addr);
+			if (ip_find)
+				break;
+		}
+	}
+
+	return ip_find;
+}
+
+static int nbl_tc_parse_tunnel_control(const struct flow_rule *rule,
+				       struct nbl_flow_pattern_conf *filter,
+				       const struct nbl_common_info *common)
+{
+	struct flow_match_control match;
+	u16 addr_type;
+	int max_idx = NBL_IPV6_ADDR_LEN_AS_U8 - 1;
+	int idx = 0;
+	bool dev_ok = false;
+
+	flow_rule_match_enc_control(rule, &match);
+	addr_type = match.key->addr_type;
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs ip_addrs;
+
+		flow_rule_match_enc_ipv4_addrs(rule, &ip_addrs);
+		filter->input.ip_outer.src_ip.addr = be32_to_cpu(ip_addrs.key->src);
+		filter->input.ip_mask_outer.src_ip.addr = ip_addrs.mask->src;
+		filter->input.ip_outer.dst_ip.addr = be32_to_cpu(ip_addrs.key->dst);
+		filter->input.ip_mask_outer.dst_ip.addr = ip_addrs.mask->dst;
+
+		filter->input.ip_outer.ip_ver = NBL_IP_VERSION_V4;
+		filter->key_flag |= NBL_FLOW_KEY_T_DIPV4_FLAG;
+		filter->key_flag |= NBL_FLOW_KEY_T_OPT_DATA_FLAG;
+		filter->key_flag |= NBL_FLOW_KEY_T_OPT_CLASS_FLAG;
+
+		nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl ctl ip: "
+			  "sip:0x%x/0x%x, dip:0x%x/0x%x.\n",
+			  filter->input.ip_outer.src_ip.addr,
+			  filter->input.ip_mask_outer.src_ip.addr,
+			  filter->input.ip_outer.dst_ip.addr,
+			  filter->input.ip_mask_outer.dst_ip.addr);
+		if (filter->input.port & NBL_FLOW_IN_PORT_TYPE_LAG) {
+			dev_ok = true;
+		} else {
+			dev_ok = nbl_tc_find_ipv4_address(filter->input_dev, ip_addrs.key->dst);
+			if (!dev_ok)
+				dev_ok = nbl_tc_find_vlan_dev_ipv4_address(filter->input_dev,
+									   ip_addrs.key->dst);
+		}
+	} else if (addr_type ==
FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs ip6_addrs; + char sipv6[INET6_ADDRSTRLEN]; + char dipv6[INET6_ADDRSTRLEN]; + char sipv6_msk[INET6_ADDRSTRLEN]; + char dipv6_msk[INET6_ADDRSTRLEN]; + + flow_rule_match_enc_ipv6_addrs(rule, &ip6_addrs); + + for (idx = 0; idx < NBL_IPV6_ADDR_LEN_AS_U8; idx++) { + filter->input.ip_outer.src_ip.v6_addr[idx] = + ip6_addrs.key->src.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_mask_outer.src_ip.v6_addr[idx] = + ip6_addrs.mask->src.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_outer.dst_ip.v6_addr[idx] = + ip6_addrs.key->dst.in6_u.u6_addr8[max_idx - idx]; + filter->input.ip_mask_outer.dst_ip.v6_addr[idx] = + ip6_addrs.mask->dst.in6_u.u6_addr8[max_idx - idx]; + } + filter->input.ip_outer.ip_ver = NBL_IP_VERSION_V6; + filter->key_flag |= NBL_FLOW_KEY_T_DIPV6_FLAG; + filter->key_flag |= NBL_FLOW_KEY_T_OPT_DATA_FLAG; + filter->key_flag |= NBL_FLOW_KEY_T_OPT_CLASS_FLAG; + + snprintf(sipv6, sizeof(sipv6), "%pI6", &ip6_addrs.key->src); + snprintf(dipv6, sizeof(dipv6), "%pI6", &ip6_addrs.key->dst); + snprintf(sipv6_msk, sizeof(sipv6_msk), "%pI6", &ip6_addrs.mask->src); + snprintf(dipv6_msk, sizeof(dipv6_msk), "%pI6", &ip6_addrs.mask->src); + + nbl_debug(common, NBL_DEBUG_FLOW, "parse outer tnl ctl ipv6, sip:%s/%s, dip:%s/%s\n", + sipv6, sipv6_msk, dipv6, dipv6_msk); + + if (filter->input.port & NBL_FLOW_IN_PORT_TYPE_LAG) { + dev_ok = true; + } else { + dev_ok = nbl_tc_find_ipv6_address(filter->input_dev, ip6_addrs.key->dst); + if (!dev_ok) + dev_ok = nbl_tc_find_vlan_dev_ipv6_address(filter->input_dev, + ip6_addrs.key->dst); + } + } + + if (dev_ok) + return 0; + else + return -EOPNOTSUPP; +} + +static struct nbl_tc_flow_parse_pattern parse_pattern_list[] = { + { FLOW_DISSECTOR_KEY_BASIC, nbl_tc_parse_proto }, + { FLOW_DISSECTOR_KEY_ETH_ADDRS, nbl_tc_parse_eth }, + { FLOW_DISSECTOR_KEY_CONTROL, nbl_tc_parse_control }, + { FLOW_DISSECTOR_KEY_VLAN, nbl_tc_parse_vlan }, + { FLOW_DISSECTOR_KEY_CVLAN, nbl_tc_parse_cvlan }, + { FLOW_DISSECTOR_KEY_ENC_IP, nbl_tc_parse_tunnel_ip}, + { FLOW_DISSECTOR_KEY_ENC_PORTS, nbl_tc_parse_tunnel_ports }, + { FLOW_DISSECTOR_KEY_ENC_KEYID, nbl_tc_parse_tunnel_keyid }, + { FLOW_DISSECTOR_KEY_ENC_CONTROL, nbl_tc_parse_tunnel_control }, +}; + +static int nbl_tc_flow_set_out_param(struct net_device *out_dev, + struct nbl_serv_lag_info *lag_info, + struct nbl_tc_port *out, + struct nbl_common_info *common) +{ + struct nbl_netdev_priv *dev_priv = NULL; + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + u16 eswitch_mode = NBL_ESWITCH_NONE; + + if (netif_is_lag_master(out_dev)) { + if (lag_info && lag_info->bond_netdev && lag_info->bond_netdev == out_dev) { + out->type = NBL_TC_PORT_TYPE_BOND; + out->id = lag_info->lag_id; + goto set_param_end; + } else { + return -EINVAL; + } + } + + dev_priv = netdev_priv(out_dev); + if (!dev_priv->adapter) + return -EINVAL; + + if (common->tc_inst_id != dev_priv->adapter->common.tc_inst_id) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow rule in different nic is not supported\n"); + return -EINVAL; + } + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(dev_priv->adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + eswitch_mode = + disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (eswitch_mode != NBL_ESWITCH_OFFLOADS) + return -EINVAL; + + if (dev_priv->rep) { + out->type = NBL_TC_PORT_TYPE_VSI; + out->id = dev_priv->rep->rep_vsi_id; + } else { + out->type = NBL_TC_PORT_TYPE_ETH; + out->id = dev_priv->adapter->common.eth_id; + } + 
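+	/* non-LAG devices fall through; the LAG branch jumps here via set_param_end */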
+set_param_end:
+	nbl_debug(common, NBL_DEBUG_FLOW, "tc flow set out.type=%s, out.id=%d\n",
+		  out->type == NBL_TC_PORT_TYPE_VSI ? "vsi" : "uplink", out->id);
+
+	return 0;
+}
+
+static bool
+nbl_tc_is_valid_netdev(struct net_device *netdev, struct nbl_serv_netdev_ops *netdev_ops)
+{
+	if (netif_is_lag_master(netdev))
+		return true;
+
+	if (netdev->netdev_ops == netdev_ops->pf_netdev_ops ||
+	    netdev->netdev_ops == netdev_ops->rep_netdev_ops)
+		return true;
+
+	return false;
+}
+
+static int nbl_tc_flow_init_param(struct nbl_netdev_priv *priv, struct flow_cls_offload *f,
+				  struct nbl_common_info *common, struct nbl_tc_flow_param *param)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	const struct flow_action_entry *act_entry;
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter);
+	struct nbl_serv_netdev_ops *netdev_ops = &serv_mgt->net_resource_mgt->netdev_ops;
+	struct nbl_serv_lag_info *lag_info = NULL;
+	int i = 0;
+	int ret = 0;
+	int redirect_cnt = 0;
+	int mirred_cnt = 0;
+	const struct rtnl_link_ops *tnl_ops;
+
+	if (priv->rep) {
+		param->in.type = NBL_TC_PORT_TYPE_VSI;
+		param->in.id = priv->rep->rep_vsi_id;
+	} else if (serv_mgt->net_resource_mgt->lag_info) {
+		if (serv_mgt->net_resource_mgt->lag_info->lag_id >= NBL_LAG_MAX_NUM)
+			return -EINVAL;
+		param->in.type = NBL_TC_PORT_TYPE_BOND;
+		param->in.id = serv_mgt->net_resource_mgt->lag_info->lag_id;
+	} else {
+		param->in.type = NBL_TC_PORT_TYPE_ETH;
+		param->in.id = common->eth_id;
+	}
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init param in.type=%s, type=%d, in.id=%d, dev:%s",
+		  param->in.type == NBL_TC_PORT_TYPE_VSI ? "vsi" : "uplink",
+		  param->in.type, param->in.id, priv->netdev ? priv->netdev->name : "NULL");
+
+	flow_action_for_each(i, act_entry, &rule->action) {
+		if (act_entry->id == FLOW_ACTION_REDIRECT) {
+			if (!act_entry->dev)
+				return -EINVAL;
+			if (redirect_cnt) {
+				nbl_debug(common, NBL_DEBUG_FLOW,
+					  "tc flow with more than one redirect outport is not supported");
+				return -EINVAL;
+			}
+			tnl_ops = act_entry->dev->rtnl_link_ops;
+
+			if (!tnl_ops || memcmp(tnl_ops->kind, "vxlan", sizeof("vxlan"))) {
+				if (!nbl_tc_is_valid_netdev(act_entry->dev,
+							    netdev_ops))
+					return -ENODEV;
+
+				if (netif_is_lag_master(act_entry->dev))
+					lag_info = serv_mgt->net_resource_mgt->lag_info;
+
+				ret = nbl_tc_flow_set_out_param(act_entry->dev, lag_info,
+								&param->out, common);
+				if (ret)
+					return ret;
+			}
+			nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init redirect outport");
+
+			redirect_cnt++;
+		} else if (act_entry->id == FLOW_ACTION_MIRRED) {
+			if (!act_entry->dev)
+				return -EINVAL;
+			if (mirred_cnt) {
+				nbl_debug(common, NBL_DEBUG_FLOW,
+					  "tc flow with more than one mirror outport is not supported");
+				return -EINVAL;
+			}
+			if (!nbl_tc_is_valid_netdev(act_entry->dev,
+						    &serv_mgt->net_resource_mgt->netdev_ops))
+				return -ENODEV;
+			nbl_debug(common, NBL_DEBUG_FLOW, "tc flow init mirror outport");
+
+			lag_info = NULL;
+			if (netif_is_lag_master(act_entry->dev))
+				lag_info = serv_mgt->net_resource_mgt->lag_info;
+
+			ret = nbl_tc_flow_set_out_param(act_entry->dev, lag_info,
+							&param->mirror_out, common);
+			if (ret)
+				return ret;
+			mirred_cnt++;
+		} else if (redirect_cnt > 0 || mirred_cnt > 0) {
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "tc flow different edit action with multiple outport is not supported");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return ret;
+}
+
+static int nbl_tc_parse_pattern(struct nbl_service_mgt *serv_mgt,
+				struct flow_cls_offload *f,
+				struct nbl_flow_pattern_conf *filter,
+				struct
nbl_tc_flow_param *param) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt); + u32 i = 0; + int ret = 0; + + switch (param->in.type) { + case NBL_TC_PORT_TYPE_VSI: + filter->input.port = param->in.id | NBL_FLOW_IN_PORT_TYPE_VSI; + break; + case NBL_TC_PORT_TYPE_ETH: + filter->input.port = param->in.id; + break; + case NBL_TC_PORT_TYPE_BOND: + filter->input.port = param->in.id | NBL_FLOW_IN_PORT_TYPE_LAG; + break; + default: + nbl_err(common, NBL_DEBUG_FLOW, "tc flow invalid in_port type:%d\n", + param->in.type); + return -EINVAL; + } + filter->key_flag |= NBL_FLOW_KEY_INPORT8_FLAG; + filter->key_flag |= NBL_FLOW_KEY_INPORT4_FLAG; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow dissector->used_keys=%llx\n", + dissector->used_keys); + if (dissector->used_keys & + ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow key used: 0x%llx is not supported\n", + dissector->used_keys); + return -EOPNOTSUPP; + } + + for (i = 0; i < ARRAY_SIZE(parse_pattern_list); i++) { + if (flow_rule_match_key(rule, parse_pattern_list[i].pattern_type)) { + ret = parse_pattern_list[i].parse_func(rule, filter, common); + + if (ret != 0) + return ret; + } + } + + return 0; +} + +static int nbl_tc_fill_encap_out_info(struct nbl_tc_flow_param *param, + struct nbl_rule_action *rule_act) +{ + const struct nbl_serv_lag_info *lag_info = + param->serv_mgt->net_resource_mgt->lag_info; + struct nbl_netdev_priv *dev_priv = NULL; + struct nbl_service_mgt *serv_mgt; + struct nbl_dispatch_ops *disp_ops; + u16 eswitch_mode = NBL_ESWITCH_NONE; + + if (netif_is_lag_master(rule_act->tc_tun_encap_out_dev)) { + if (lag_info && lag_info->bond_netdev && + lag_info->bond_netdev == rule_act->tc_tun_encap_out_dev) { + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = (lag_info->lag_id << 2) | NBL_FLOW_OUT_PORT_TYPE_LAG; + rule_act->vlan.port_id = lag_info->lag_id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_BOND; + goto end; + } else { + nbl_err(param->common, NBL_DEBUG_FLOW, "fill encap out info err.\n"); + return -EINVAL; + } + } + + dev_priv = netdev_priv(rule_act->tc_tun_encap_out_dev); + if (!dev_priv->adapter) { + nbl_err(param->common, NBL_DEBUG_FLOW, "encap out dev priv adapter is NULL, out_dev:%s.\n", + rule_act->tc_tun_encap_out_dev->name); + return -EINVAL; + } + + if (param->common->tc_inst_id != dev_priv->adapter->common.tc_inst_id) { + nbl_err(param->common, NBL_DEBUG_FLOW, "tc flow rule in different nic is not supported\n"); + return -EINVAL; + } + + serv_mgt = NBL_ADAPTER_TO_SERV_MGT(dev_priv->adapter); + disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt); + + eswitch_mode = disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt)); + if (eswitch_mode != NBL_ESWITCH_OFFLOADS) { + nbl_err(param->common, NBL_DEBUG_FLOW, "eswitch mode is not in offload.\n"); + return -EINVAL; + } + + if (dev_priv->rep) { + rule_act->port_type = SET_DPORT_TYPE_VSI_HOST; + 
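+		/* a representor egresses through the host VSI it represents */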
rule_act->port_id = dev_priv->rep->rep_vsi_id; + rule_act->vlan.port_id = dev_priv->rep->rep_vsi_id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_VSI; + } else { + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = dev_priv->adapter->common.eth_id | NBL_FLOW_OUT_PORT_TYPE_ETH; + rule_act->vlan.port_id = dev_priv->adapter->common.eth_id + NBL_VLAN_TYPE_ETH_BASE; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_ETH; + } + +end: + return 0; +} + +static int +nbl_tc_handle_action_port_id(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + int ret = 0; + struct net_device *encap_dev = act_entry->dev; + + if (param->mirror_out.type) + return 0; + + if (param->encap) { + param->encap = false; + /* encap info */ + ret = nbl_tc_tun_parse_encap_info(rule_act, param, encap_dev); + + if (ret) { + nbl_info(param->common, NBL_DEBUG_FLOW, "parse tc encap info failed.\n"); + return ret; + } + + /* fill encap out port info */ + ret = nbl_tc_fill_encap_out_info(param, rule_act); + if (ret) + return ret; + } else { + switch (param->out.type) { + case NBL_TC_PORT_TYPE_VSI: + rule_act->port_type = SET_DPORT_TYPE_VSI_HOST; + rule_act->port_id = param->out.id; + rule_act->vlan.port_id = param->out.id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_VSI; + break; + case NBL_TC_PORT_TYPE_ETH: + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = param->out.id | NBL_FLOW_OUT_PORT_TYPE_ETH; + rule_act->vlan.port_id = param->out.id + NBL_VLAN_TYPE_ETH_BASE; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_ETH; + break; + case NBL_TC_PORT_TYPE_BOND: + rule_act->port_type = SET_DPORT_TYPE_ETH_LAG; + rule_act->port_id = (param->out.id << 2) | NBL_FLOW_OUT_PORT_TYPE_LAG; + rule_act->vlan.port_id = param->out.id; + rule_act->vlan.port_type = NBL_TC_PORT_TYPE_BOND; + break; + default: + return -EINVAL; + } + } + rule_act->flag |= NBL_FLOW_ACTION_PORT_ID; + + return 0; +} + +static int +nbl_tc_handle_action_drop(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + rule_act->flag |= NBL_FLOW_ACTION_DROP; + rule_act->drop_flag = 1; + return 0; +} + +static int +nbl_tc_handle_action_mirror(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + if (!(param->out.type && param->mirror_out.type)) + return -EINVAL; + + if (rule_act->mcc_cnt >= NBL_TC_MCC_MEMBER_MAX) + return -EINVAL; + rule_act->port_mcc[rule_act->mcc_cnt].dport_id = param->out.id; + rule_act->port_mcc[rule_act->mcc_cnt].port_type = param->out.type; + rule_act->mcc_cnt++; + + if (rule_act->mcc_cnt >= NBL_TC_MCC_MEMBER_MAX) + return -EINVAL; + rule_act->port_mcc[rule_act->mcc_cnt].dport_id = param->mirror_out.id; + rule_act->port_mcc[rule_act->mcc_cnt].port_type = param->mirror_out.type; + rule_act->mcc_cnt++; + + rule_act->flag |= NBL_FLOW_ACTION_MCC; + + return 0; +} + +static int +nbl_tc_handle_action_push_vlan(struct nbl_rule_action *rule_act, + const struct flow_action_entry *act_entry, + enum flow_action_id type, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow_param *param) +{ + rule_act->vlan.eth_proto = htons(act_entry->vlan.proto); + if (rule_act->vlan.eth_proto != NBL_VLAN_TPID_VALUE && + rule_act->vlan.eth_proto != 
NBL_QINQ_TPID_VALUE)
+		return -EINVAL;
+
+	if (filter->input.svlan_tag)
+		rule_act->flag |= NBL_FLOW_ACTION_PUSH_OUTER_VLAN;
+	else
+		rule_act->flag |= NBL_FLOW_ACTION_PUSH_INNER_VLAN;
+	rule_act->vlan.vlan_tag = act_entry->vlan.vid;
+	rule_act->vlan.vlan_tag |= act_entry->vlan.prio << NBL_VLAN_PCP_SHIFT;
+
+	return 0;
+}
+
+static int
+nbl_tc_handle_action_pop_vlan(struct nbl_rule_action *rule_act,
+			      const struct flow_action_entry *act_entry,
+			      enum flow_action_id type,
+			      struct nbl_flow_pattern_conf *filter,
+			      struct nbl_tc_flow_param *param)
+{
+	if (filter->input.is_cvlan)
+		rule_act->flag |= NBL_FLOW_ACTION_POP_OUTER_VLAN;
+	else
+		rule_act->flag |= NBL_FLOW_ACTION_POP_INNER_VLAN;
+
+	return 0;
+}
+
+static int
+nbl_tc_handle_action_tun_encap(struct nbl_rule_action *rule_act,
+			       const struct flow_action_entry *act_entry,
+			       enum flow_action_id type,
+			       struct nbl_flow_pattern_conf *filter,
+			       struct nbl_tc_flow_param *param)
+{
+	param->tunnel = (struct ip_tunnel_info *)act_entry->tunnel;
+	if (param->tunnel) {
+		rule_act->flag |= NBL_FLOW_ACTION_TUNNEL_ENCAP;
+		param->encap = true;
+		return 0;
+	} else {
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+nbl_tc_handle_action_tun_decap(struct nbl_rule_action *rule_act,
+			       const struct flow_action_entry *act_entry,
+			       enum flow_action_id type,
+			       struct nbl_flow_pattern_conf *filter,
+			       struct nbl_tc_flow_param *param)
+{
+	rule_act->flag |= NBL_FLOW_ACTION_TUNNEL_DECAP;
+
+	return 0;
+}
+
+const struct nbl_tc_flow_action_driver_ops nbl_port_id_act = {
+	.act_update = nbl_tc_handle_action_port_id,
+};
+
+const struct nbl_tc_flow_action_driver_ops nbl_drop = {
+	.act_update = nbl_tc_handle_action_drop,
+};
+
+const struct nbl_tc_flow_action_driver_ops nbl_mirror_act = {
+	.act_update = nbl_tc_handle_action_mirror,
+};
+
+const struct nbl_tc_flow_action_driver_ops nbl_push_vlan = {
+	.act_update = nbl_tc_handle_action_push_vlan,
+};
+
+const struct nbl_tc_flow_action_driver_ops nbl_pop_vlan = {
+	.act_update = nbl_tc_handle_action_pop_vlan,
+};
+
+const struct nbl_tc_flow_action_driver_ops nbl_tunnel_encap_act = {
+	.act_update = nbl_tc_handle_action_tun_encap,
+};
+
+const struct nbl_tc_flow_action_driver_ops nbl_tunnel_decap_act = {
+	.act_update = nbl_tc_handle_action_tun_decap,
+};
+
+const struct nbl_tc_flow_action_driver_ops *nbl_act_ops[] = {
+	[FLOW_ACTION_REDIRECT] = &nbl_port_id_act,
+	[FLOW_ACTION_DROP] = &nbl_drop,
+	[FLOW_ACTION_MIRRED] = &nbl_mirror_act,
+	[FLOW_ACTION_VLAN_PUSH] = &nbl_push_vlan,
+	[FLOW_ACTION_VLAN_POP] = &nbl_pop_vlan,
+	[FLOW_ACTION_TUNNEL_ENCAP] = &nbl_tunnel_encap_act,
+	[FLOW_ACTION_TUNNEL_DECAP] = &nbl_tunnel_decap_act,
+};
+
+/**
+ * @brief: handle action parse by type
+ *
+ * @param[in] rule_act: nbl_rule_action info storage
+ * @param[in] act_entry: flow action entry to parse
+ * @param[in] type: action type
+ * @param[in] filter: nbl_flow_pattern_conf info
+ * @param[in] param: tc flow parameters
+ * @return int: 0-success, other-failed
+ */
+static int nbl_tc_parse_action_by_type(struct nbl_rule_action *rule_act,
+				       const struct flow_action_entry *act_entry,
+				       enum flow_action_id type,
+				       struct nbl_flow_pattern_conf *filter,
+				       struct nbl_tc_flow_param *param)
+{
+	const struct nbl_tc_flow_action_driver_ops *fops;
+
+	fops = nbl_act_ops[type];
+
+	if (!fops)
+		return 0;
+
+	return fops->act_update(rule_act, act_entry, type, filter, param);
+}
+
+/**
+ * @brief: handle action parse
+ *
+ * @param[in] serv_mgt: service management handle
+ * @param[in] f: flow classifier offload request
+ * @param[in] filter: nbl_flow_pattern_conf info
+ * @param[in] rule_act: nbl_rule_action info storage
+ * @param[in] param: tc flow parameters
+ * @return int: 0-success, other-failed
+ */
+static int nbl_tc_parse_action(struct nbl_service_mgt *serv_mgt,
+			       struct flow_cls_offload *f,
+			       struct nbl_flow_pattern_conf *filter,
+			       struct nbl_rule_action *rule_act,
+			       struct nbl_tc_flow_param *param)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	const struct flow_action_entry *act_entry;
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	int i;
+	int ret = 0;
+
+	flow_action_for_each(i, act_entry, &rule->action) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "tc flow parse action id %d\n", act_entry->id);
+		switch (act_entry->id) {
+		case FLOW_ACTION_REDIRECT:
+		case FLOW_ACTION_DROP:
+		case FLOW_ACTION_MIRRED:
+		case FLOW_ACTION_VLAN_PUSH:
+		case FLOW_ACTION_VLAN_POP:
+		case FLOW_ACTION_TUNNEL_ENCAP:
+		case FLOW_ACTION_TUNNEL_DECAP:
+			ret = nbl_tc_parse_action_by_type(rule_act, act_entry,
+							  act_entry->id, filter, param);
+			if (ret)
+				return ret;
+			break;
+		default:
+			nbl_debug(common, NBL_DEBUG_FLOW, "tc flow action %d is not supported",
+				  act_entry->id);
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return ret;
+}
+
+static int nbl_serv_add_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls_offload *f)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_flow_pattern_conf *filter = NULL;
+	struct nbl_rule_action *act = NULL;
+	struct nbl_tc_flow_param param = {0};
+	int ret = 0;
+	int ret_act = 0;
+
+	if (!tc_can_offload(priv->netdev))
+		return -EOPNOTSUPP;
+
+	if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops))
+		return -EOPNOTSUPP;
+
+	param.key.cookie = f->cookie;
+	ret = disp_ops->flow_index_lookup(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), param.key);
+	if (!ret) {
+		nbl_debug(common, NBL_DEBUG_FLOW,
+			  "tc flow cookie %llx has already been added, do not add it again!\n",
+			  param.key.cookie);
+		return -EEXIST;
+	}
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "tc flow add cls, cookie=%lx\n", f->cookie);
+
+	if (nbl_tc_flow_init_param(priv, f, common, &param))
+		return -EINVAL;
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return -ENOMEM;
+
+	param.common = common;
+	param.serv_mgt = serv_mgt;
+
+	filter->input_dev = priv->netdev;
+	ret = nbl_tc_parse_pattern(serv_mgt, f, filter, &param);
+	if (ret) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed to parse pattern, ret %d.\n", ret);
+		ret = -EINVAL;
+		goto ret_filter_fail;
+	}
+
+	act = kzalloc(sizeof(*act), GFP_KERNEL);
+	if (!act) {
+		ret = -ENOMEM;
+		goto ret_filter_fail;
+	}
+
+	act->in_port = priv->netdev;
+	ret = nbl_tc_parse_action(serv_mgt, f, filter, act, &param);
+	if (ret) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "tc flow failed to parse action.\n");
+		ret = -EINVAL;
+		goto ret_act_fail;
+	}
+
+	memcpy(&param.filter, filter, sizeof(param.filter));
+	memcpy(&param.act, act, sizeof(param.act));
+
+	ret = disp_ops->add_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);
+
+ret_act_fail:
+	/* free edit act */
+	if (ret && act->flag & NBL_FLOW_ACTION_TUNNEL_ENCAP &&
+	    act->encap_parse_ok) {
+		ret_act = disp_ops->tc_tun_encap_del(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt),
+						     &act->encap_key);
+		if (ret_act)
+			nbl_debug(common, NBL_DEBUG_FLOW, "encap del err, encap_idx:%d, ret:%d",
+				  act->encap_idx, ret_act);
+	}
+
+	kfree(act);
+ret_filter_fail:
+	kfree(filter);
+
+	return ret;
+}
+
+static int nbl_serv_del_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls_offload *f)
+{
+	struct
nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_common_info *common = NBL_SERV_MGT_TO_COMMON(serv_mgt);
+	struct nbl_tc_flow_param param = {0};
+	int ret = 0;
+
+	if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops))
+		return -EOPNOTSUPP;
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "tc flow del cls, cookie=%lx\n", f->cookie);
+	param.key.cookie = f->cookie;
+
+	ret = disp_ops->del_tc_flow(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);
+
+	return ret;
+}
+
+static int nbl_serv_stats_cls_flower(struct nbl_netdev_priv *priv, struct flow_cls_offload *f)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	struct nbl_stats_param param = {0};
+	int ret = 0;
+
+	if (!tc_can_offload(priv->netdev))
+		return -EOPNOTSUPP;
+
+	if (!nbl_tc_is_valid_netdev(priv->netdev, &serv_mgt->net_resource_mgt->netdev_ops))
+		return -EOPNOTSUPP;
+
+	param.f = f;
+
+	ret = disp_ops->query_tc_stats(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt), &param);
+
+	return ret;
+}
+
+static int
+nbl_serv_setup_tc_cls_flower(struct nbl_netdev_priv *priv,
+			     struct flow_cls_offload *cls_flower)
+{
+	struct nbl_service_mgt *serv_mgt = NBL_ADAPTER_TO_SERV_MGT(priv->adapter);
+	struct nbl_dispatch_ops *disp_ops = NBL_SERV_MGT_TO_DISP_OPS(serv_mgt);
+	u16 eswitch_mode = NBL_ESWITCH_NONE;
+
+	eswitch_mode = disp_ops->get_eswitch_mode(NBL_SERV_MGT_TO_DISP_PRIV(serv_mgt));
+	if (eswitch_mode != NBL_ESWITCH_OFFLOADS)
+		return -EINVAL;
+
+	switch (cls_flower->command) {
+	case FLOW_CLS_REPLACE:
+		return nbl_serv_add_cls_flower(priv, cls_flower);
+	case FLOW_CLS_DESTROY:
+		return nbl_serv_del_cls_flower(priv, cls_flower);
+	case FLOW_CLS_STATS:
+		return nbl_serv_stats_cls_flower(priv, cls_flower);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int nbl_serv_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct nbl_netdev_priv *priv = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return nbl_serv_setup_tc_cls_flower(priv, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int nbl_serv_indr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct nbl_indr_dev_priv *indr_priv = cb_priv;
+	struct nbl_netdev_priv *priv = indr_priv->dev_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return nbl_serv_setup_tc_cls_flower(priv, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea8e29551ba42d97dcd74e492e81af7fbc29cc39
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 nebula-matrix Limited.
+ *
+ */
+
+#ifndef _NBL_TC_OFFLOAD_H
+#define _NBL_TC_OFFLOAD_H
+
+#include "nbl_service.h"
+
+int nbl_serv_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv);
+int nbl_serv_indr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv);
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c
new file mode 100644
index 0000000000000000000000000000000000000000..4265da86ef3015725cdcaa522f21495fea19fc48
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.c
@@ -0,0 +1,556 @@
+#include
+#include
+#include
+#include
+#include "nbl_resource.h"
+#include "nbl_service.h"
+#include "nbl_tc_tun.h"
+
+static int nbl_copy_tun_info(const struct ip_tunnel_info *tun_info,
+			     struct nbl_rule_action *rule_act)
+{
+	size_t tun_size;
+
+	if (tun_info->options_len)
+		tun_size = sizeof(*tun_info) + tun_info->options_len;
+	else
+		tun_size = sizeof(*tun_info);
+
+	rule_act->tunnel = kzalloc(tun_size, GFP_KERNEL);
+	if (!rule_act->tunnel)
+		return -ENOMEM;
+
+	memcpy(rule_act->tunnel, tun_info, tun_size);
+
+	return 0;
+}
+
+/* only support vxlan currently */
+static struct nbl_tc_tunnel *nbl_tc_get_tunnel(struct net_device *tunnel_dev)
+{
+	if (netif_is_vxlan(tunnel_dev))
+		return &vxlan_tunnel;
+	else
+		return NULL;
+}
+
+static int nbl_tc_tun_gen_tunnel_header_vxlan(char buf[], u8 *ip_proto,
+					      const struct ip_tunnel_key *tun_key)
+{
+	__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+	struct udphdr *udp = (struct udphdr *)(buf);
+	struct vxlanhdr *vxh;
+
+	vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+	*ip_proto = IPPROTO_UDP;
+
+	udp->dest = tun_key->tp_dst;
+	vxh->vx_flags = VXLAN_HF_VNI;
+	vxh->vx_vni = vxlan_vni_field(tun_id);
+
+	return 0;
+}
+
+static int nbl_tc_tun_get_vxlan_hdr_len(void)
+{
+	return sizeof(struct vxlanhdr);
+}
+
+static void nbl_tc_tun_route_cleanup(struct nbl_tc_tunnel_route_info *tun_route_info)
+{
+	if (tun_route_info->n)
+		neigh_release(tun_route_info->n);
+	if (tun_route_info->real_out_dev)
+		dev_put(tun_route_info->real_out_dev);
+}
+
+static int nbl_route_lookup_ipv4(const struct nbl_common_info *common,
+				 struct net_device *encap_mirred_dev,
+				 struct nbl_tc_tunnel_route_info *tun_route_info,
+				 struct nbl_serv_netdev_ops *netdev_ops)
+{
+	int ret = 0;
+	struct net_device *out_dev;
+	struct net_device *real_out_dev;
+	struct net_device *parent_dev;
+	struct neighbour *n;
+	struct rtable *rt;
+
+	rt = ip_route_output_key(dev_net(encap_mirred_dev), &tun_route_info->fl.fl4);
+	if (IS_ERR(rt))
+		return (int)PTR_ERR(rt);
+
+	if (rt->rt_type != RTN_UNICAST) {
+		ret = -ENETUNREACH;
+		nbl_err(common, NBL_DEBUG_FLOW, "get route table failed, the route type is not unicast.");
+		goto rt_err;
+	}
+
+	out_dev = rt->dst.dev;
+	if (is_vlan_dev(out_dev)) {
+		parent_dev = vlan_dev_priv(out_dev)->real_dev;
+		if (is_vlan_dev(parent_dev)) {
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "ipv4 encap out dev is %s, parent_dev:%s is vlan, stacked vlan is not supported\n",
+				  out_dev->name, parent_dev ? parent_dev->name : "NULL");
+			ret = -EOPNOTSUPP;
+			goto rt_err;
+		}
+
+		real_out_dev = vlan_dev_real_dev(out_dev);
+		nbl_debug(common, NBL_DEBUG_FLOW, "ipv4 encap out dev is %s, real_out_dev:%s\n",
+			  out_dev->name, real_out_dev ? real_out_dev->name : "NULL");
+	} else {
+		real_out_dev = out_dev;
+	}
+
+	if (!netif_is_lag_master(real_out_dev) &&
+	    real_out_dev->netdev_ops != netdev_ops->pf_netdev_ops &&
+	    real_out_dev->netdev_ops != netdev_ops->rep_netdev_ops) {
+		nbl_info(common, NBL_DEBUG_FLOW, "encap out dev is %s, not ours, not supported\n",
+			 real_out_dev->name);
+		ret = -EOPNOTSUPP;
+		goto rt_err;
+	}
+
+	dev_hold(real_out_dev);
+	if (!tun_route_info->ttl)
+		tun_route_info->ttl = (u8)ip4_dst_hoplimit(&rt->dst);
+
+	nbl_debug(common, NBL_DEBUG_FLOW,
+		  "route lookup: rt->rt_type:%u, rt->dst.dev:%s, rt->dst.ops:%p, real_dev:%s, ttl:%u",
+		  rt->rt_type, rt->dst.dev ? rt->dst.dev->name : "null",
+		  rt->dst.ops, real_out_dev ? real_out_dev->name : "NULL",
+		  tun_route_info->ttl);
+
+	n = dst_neigh_lookup(&rt->dst, &tun_route_info->fl.fl4.daddr);
+	if (!n) {
+		ret = -ENONET;
+		nbl_info(common, NBL_DEBUG_FLOW, "get neigh failed.");
+		goto dev_release;
+	}
+	ip_rt_put(rt);
+
+	tun_route_info->out_dev = out_dev;
+	tun_route_info->real_out_dev = real_out_dev;
+	tun_route_info->n = n;
+
+	return 0;
+
+dev_release:
+	dev_put(real_out_dev);
+rt_err:
+	ip_rt_put(rt);
+	return ret;
+}
+
+static char *nbl_tc_tun_gen_eth_hdr(char *buf, struct net_device *dev,
+				    const unsigned char *hw_dst, u16 proto,
+				    const struct nbl_common_info *common)
+{
+	struct ethhdr *eth = (struct ethhdr *)buf;
+	char *ip;
+
+	ether_addr_copy(eth->h_dest, hw_dst);
+	ether_addr_copy(eth->h_source, dev->dev_addr);
+	if (is_vlan_dev(dev)) {
+		struct vlan_hdr *vlan =
+			(struct vlan_hdr *)((char *)eth + sizeof(struct ethhdr));
+
+		ip = (char *)vlan + sizeof(struct vlan_hdr);
+		eth->h_proto = vlan_dev_vlan_proto(dev);
+		vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
+		vlan->h_vlan_encapsulated_proto = htons(proto);
+		nbl_debug(common, NBL_DEBUG_FLOW,
+			  "output is vlan dev: vlan_TCI:0x%x, vlan_proto:0x%x, eth_proto:0x%x",
+			  vlan->h_vlan_TCI, vlan->h_vlan_encapsulated_proto,
+			  eth->h_proto);
+	} else {
+		eth->h_proto = htons(proto);
+		ip = (char *)eth + sizeof(struct ethhdr);
+	}
+
+	return ip;
+}
+
+static int nbl_tc_tun_create_header_ipv4(struct nbl_rule_action *rule_act,
+					 struct nbl_tc_flow_param *param,
+					 struct net_device *encap_mirred_dev,
+					 struct nbl_encap_key *key)
+{
+	int ret = 0;
+	const struct nbl_common_info *common = param->common;
+	const struct ip_tunnel_key *tun_key = &key->ip_tun_key;
+	struct nbl_serv_netdev_ops *netdev_ops = &param->serv_mgt->net_resource_mgt->netdev_ops;
+	struct iphdr *ip;
+	struct nbl_tc_tunnel_route_info tun_route_info;
+	struct udphdr *udp;
+	struct vxlanhdr *vxh;
+	unsigned char hw_dst[ETH_ALEN];
+
+	u8 total_len = 0;
+	u8 eth_len = 0;
+	u8 l4_len = 0;
+	u8 nud_state;
+
+	memset(&tun_route_info, 0, sizeof(tun_route_info));
+	memset(hw_dst, 0, sizeof(hw_dst));
+	tun_route_info.fl.fl4.flowi4_tos = tun_key->tos;
+	tun_route_info.fl.fl4.flowi4_proto = IPPROTO_UDP;
+	tun_route_info.fl.fl4.fl4_dport = tun_key->tp_dst;
+	tun_route_info.fl.fl4.daddr = tun_key->u.ipv4.dst;
+	tun_route_info.fl.fl4.saddr = tun_key->u.ipv4.src;
+	tun_route_info.ttl = tun_key->ttl;
+
+	ret = nbl_route_lookup_ipv4(common, encap_mirred_dev, &tun_route_info, netdev_ops);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "get route failed in create encap head v4, encap_dev:%s, ret %d",
+			 encap_mirred_dev->name, ret);
+		return ret;
+	}
+
+	rule_act->tc_tun_encap_out_dev = tun_route_info.real_out_dev;
+
+	/* copy mac */
+	read_lock_bh(&tun_route_info.n->lock);
+	nud_state = tun_route_info.n->nud_state;
+	ether_addr_copy(hw_dst, tun_route_info.n->ha);
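+	/* hw_dst now holds the next-hop MAC used for the outer Ethernet header */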
read_unlock_bh(&tun_route_info.n->lock);
+
+	/* add ether header */
+	ip = (struct iphdr *)nbl_tc_tun_gen_eth_hdr(rule_act->encap_buf,
+			tun_route_info.out_dev, hw_dst, ETH_P_IP, common);
+
+	total_len += sizeof(struct ethhdr);
+	if (is_vlan_dev(tun_route_info.out_dev)) {
+		rule_act->encap_idx_info.info.vlan_offset = total_len - 2;
+		total_len += sizeof(struct vlan_hdr);
+	}
+
+	eth_len = total_len;
+	rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_NO_MODIFY;
+	rule_act->encap_idx_info.info.phid2_offset = total_len;
+
+	/* add ip header */
+	ip->tos = tun_key->tos;
+	ip->version = NBL_FLOW_IPV4;
+	ip->ihl = NBL_FLOW_IHL;
+	ip->frag_off = NBL_FLOW_DF;
+	ip->ttl = tun_route_info.ttl;
+	ip->saddr = tun_route_info.fl.fl4.saddr;
+	ip->daddr = tun_route_info.fl.fl4.daddr;
+
+	rule_act->encap_idx_info.info.len_en0 = 1;
+	rule_act->encap_idx_info.info.len_offset0 = total_len + NBL_FLOW_IPV4_LEN_OFFSET;
+	rule_act->encap_idx_info.info.l3_ck_en = 1;
+	rule_act->encap_idx_info.info.dscp_offset = (total_len + 1) * 8;
+	total_len += sizeof(struct iphdr);
+
+	/* add tunnel proto header */
+	ret = ((struct nbl_tc_tunnel *)key->tc_tunnel)->generate_tunnel_hdr((char *)ip +
+			sizeof(struct iphdr), &ip->protocol, &key->ip_tun_key);
+	if (ret) {
+		nbl_err(common, NBL_DEBUG_FLOW, "nbl tc flow gen tun hdr err, ret:%d", ret);
+		goto destroy_neigh;
+	}
+
+	rule_act->encap_idx_info.info.phid3_offset = total_len;
+	rule_act->encap_idx_info.info.sport_offset = total_len;
+	rule_act->encap_idx_info.info.len_en1 = 1;
+	rule_act->encap_idx_info.info.len_offset1 = total_len + NBL_FLOW_UDP_LEN_OFFSET;
+	rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_MODE_0;
+	total_len += sizeof(struct udphdr);
+
+	/* tnl info */
+	rule_act->encap_idx_info.info.vni_offset = total_len + NBL_FLOW_VNI_OFFSET;
+	total_len += ((struct nbl_tc_tunnel *)(key->tc_tunnel))->get_tun_hlen();
+
+	ip->tot_len = total_len - eth_len;
+	l4_len = (u8)(ip->tot_len - sizeof(struct iphdr));
+	ip->tot_len = cpu_to_be16(ip->tot_len);
+
+	udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
+	vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+
+	udp->len = cpu_to_be16(l4_len);
+
+	rule_act->encap_idx_info.info.tnl_len = total_len;
+	rule_act->encap_size = total_len;
+	rule_act->vni = be32_to_cpu(vxh->vx_vni);
+
+	if (!(nud_state & NUD_VALID)) {
+		neigh_event_send(tun_route_info.n, NULL);
+		goto destroy_neigh;
+	}
+
+	nbl_tc_tun_route_cleanup(&tun_route_info);
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "create ipv4 header ok: encap_len:%d", total_len);
+
+	return 0;
+
+destroy_neigh:
+	nbl_tc_tun_route_cleanup(&tun_route_info);
+
+	return ret;
+}
+
+static int nbl_route_lookup_ipv6(const struct nbl_common_info *common,
+				 struct net_device *encap_mirred_dev,
+				 struct nbl_tc_tunnel_route_info *tun_route_info,
+				 struct nbl_serv_netdev_ops *netdev_ops)
+{
+	int ret = 0;
+	struct net_device *out_dev;
+	struct net_device *real_out_dev;
+	struct net_device *parent_dev;
+	struct neighbour *n;
+	struct dst_entry *dst;
+
+	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(encap_mirred_dev), NULL,
+					      &tun_route_info->fl.fl6, NULL);
+	if (IS_ERR(dst))
+		return (int)PTR_ERR(dst);
+
+	out_dev = dst->dev;
+	if (is_vlan_dev(out_dev)) {
+		parent_dev = vlan_dev_priv(out_dev)->real_dev;
+		real_out_dev = vlan_dev_real_dev(out_dev);
+		if (is_vlan_dev(parent_dev)) {
+			nbl_debug(common, NBL_DEBUG_FLOW,
+				  "ipv6 encap out dev is %s, parent_dev:%s is vlan, stacked vlan is not supported\n",
+				  out_dev->name, parent_dev ?
parent_dev->name : "NULL");
+			ret = -EOPNOTSUPP;
+			goto err;
+		}
+		nbl_debug(common, NBL_DEBUG_FLOW, "ipv6 encap out dev is %s, real_out_dev:%s\n",
+			  out_dev->name, real_out_dev ? real_out_dev->name : "NULL");
+	} else {
+		real_out_dev = out_dev;
+	}
+
+	if (!netif_is_lag_master(real_out_dev) &&
+	    real_out_dev->netdev_ops != netdev_ops->pf_netdev_ops &&
+	    real_out_dev->netdev_ops != netdev_ops->rep_netdev_ops) {
+		nbl_err(common, NBL_DEBUG_FLOW, "encap out dev is %s, not ours, not supported\n",
+			real_out_dev->name);
+		ret = -EOPNOTSUPP;
+		goto err;
+	}
+
+	dev_hold(real_out_dev);
+
+	if (!tun_route_info->ttl)
+		tun_route_info->ttl = (u8)ip6_dst_hoplimit(dst);
+
+	n = dst_neigh_lookup(dst, &tun_route_info->fl.fl6.daddr);
+	if (!n) {
+		ret = -ENONET;
+		nbl_err(common, NBL_DEBUG_FLOW, "get neigh failed.");
+		goto dev_release;
+	}
+
+	dst_release(dst);
+	tun_route_info->out_dev = out_dev;
+	tun_route_info->real_out_dev = real_out_dev;
+	tun_route_info->n = n;
+
+	return 0;
+
+dev_release:
+	dev_put(real_out_dev);
+err:
+	dst_release(dst);
+	return ret;
+}
+
+static int nbl_tc_tun_create_header_ipv6(struct nbl_rule_action *rule_act,
+					 struct nbl_tc_flow_param *param,
+					 struct net_device *encap_mirred_dev,
+					 struct nbl_encap_key *key)
+{
+	int ret = 0;
+	const struct nbl_common_info *common = param->common;
+	const struct ip_tunnel_key *tun_key = &key->ip_tun_key;
+	struct nbl_serv_netdev_ops *netdev_ops = &param->serv_mgt->net_resource_mgt->netdev_ops;
+	struct ipv6hdr *ip;
+	struct nbl_tc_tunnel_route_info tun_route_info;
+	struct udphdr *udp;
+	struct vxlanhdr *vxh;
+	unsigned char hw_dst[ETH_ALEN];
+
+	u8 total_len = 0;
+	u8 eth_len = 0;
+	u8 l4_len = 0;
+	u8 nud_state;
+
+	memset(&tun_route_info, 0, sizeof(tun_route_info));
+	memset(hw_dst, 0, sizeof(hw_dst));
+	tun_route_info.fl.fl6.flowlabel =
+		ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+	tun_route_info.fl.fl6.fl6_dport = tun_key->tp_dst;
+	tun_route_info.fl.fl6.fl6_sport = tun_key->tp_src;
+	tun_route_info.fl.fl6.daddr = tun_key->u.ipv6.dst;
+	tun_route_info.fl.fl6.saddr = tun_key->u.ipv6.src;
+	tun_route_info.ttl = tun_key->ttl;
+
+	ret = nbl_route_lookup_ipv6(common, encap_mirred_dev, &tun_route_info, netdev_ops);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "get route failed in create encap head v6, encap_dev:%s, ret %d",
+			 encap_mirred_dev->name, ret);
+		return ret;
+	}
+
+	rule_act->tc_tun_encap_out_dev = tun_route_info.real_out_dev;
+
+	/* copy mac */
+	read_lock_bh(&tun_route_info.n->lock);
+	nud_state = tun_route_info.n->nud_state;
+	ether_addr_copy(hw_dst, tun_route_info.n->ha);
+	read_unlock_bh(&tun_route_info.n->lock);
+
+	/* add ether header */
+	ip = (struct ipv6hdr *)nbl_tc_tun_gen_eth_hdr(rule_act->encap_buf,
+			tun_route_info.out_dev, hw_dst, ETH_P_IPV6, common);
+
+	total_len += sizeof(struct ethhdr);
+	if (is_vlan_dev(tun_route_info.out_dev)) {
+		rule_act->encap_idx_info.info.vlan_offset = total_len - 2;
+		total_len += sizeof(struct vlan_hdr);
+	}
+
+	eth_len = total_len;
+	rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_NO_MODIFY;
+	rule_act->encap_idx_info.info.phid2_offset = total_len;
+
+	/* add ip header */
+	ip6_flow_hdr(ip, tun_key->tos, 0);
+	ip->hop_limit = tun_route_info.ttl;
+	ip->saddr = tun_route_info.fl.fl6.saddr;
+	ip->daddr = tun_route_info.fl.fl6.daddr;
+
+	rule_act->encap_idx_info.info.len_en0 = 1;
+	rule_act->encap_idx_info.info.len_offset0 = total_len + NBL_FLOW_IPV6_LEN_OFFSET;
+	rule_act->encap_idx_info.info.dscp_offset = (total_len * 8) + 4;
+	total_len += sizeof(struct ipv6hdr);
+
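+	/* payload_len is filled in below, once the full tunnel header length is known */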
+	/* add tunnel proto header */
+	ret = ((struct nbl_tc_tunnel *)key->tc_tunnel)->generate_tunnel_hdr((char *)ip +
+			sizeof(struct ipv6hdr), &ip->nexthdr, &key->ip_tun_key);
+	if (ret) {
+		nbl_err(common, NBL_DEBUG_FLOW, "nbl tc flow gen v6 tun hdr err, ret:%d", ret);
+		goto destroy_neigh;
+	}
+
+	rule_act->encap_idx_info.info.phid3_offset = total_len;
+	rule_act->encap_idx_info.info.sport_offset = total_len;
+	rule_act->encap_idx_info.info.len_en1 = 1;
+	rule_act->encap_idx_info.info.len_offset1 = total_len + NBL_FLOW_UDP_LEN_OFFSET;
+	rule_act->encap_idx_info.info.l4_ck_mod = NBL_FLOW_L4_CK_MODE_1;
+	total_len += sizeof(struct udphdr);
+
+	/* tnl info */
+	rule_act->encap_idx_info.info.vni_offset = total_len + NBL_FLOW_VNI_OFFSET;
+	total_len += ((struct nbl_tc_tunnel *)(key->tc_tunnel))->get_tun_hlen();
+
+	ip->payload_len = total_len - eth_len;
+	l4_len = (u8)(ip->payload_len - sizeof(struct ipv6hdr));
+	ip->payload_len = cpu_to_be16(l4_len);
+
+	udp = (struct udphdr *)((char *)ip + sizeof(struct ipv6hdr));
+	vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+
+	udp->len = cpu_to_be16(l4_len);
+
+	rule_act->encap_idx_info.info.tnl_len = total_len;
+	rule_act->encap_size = total_len;
+	rule_act->vni = be32_to_cpu(vxh->vx_vni);
+
+	if (!(nud_state & NUD_VALID)) {
+		neigh_event_send(tun_route_info.n, NULL);
+		goto destroy_neigh;
+	}
+
+	nbl_tc_tun_route_cleanup(&tun_route_info);
+
+	nbl_debug(common, NBL_DEBUG_FLOW, "create ipv6 header ok: encap_len:%d", total_len);
+
+	return 0;
+
+destroy_neigh:
+	nbl_tc_tun_route_cleanup(&tun_route_info);
+
+	return ret;
+}
+
+int nbl_tc_tun_parse_encap_info(struct nbl_rule_action *rule_act,
+				struct nbl_tc_flow_param *param,
+				struct net_device *encap_mirred_dev)
+{
+	int ret = 0;
+	const struct nbl_common_info *common = param->common;
+	struct nbl_dispatch_mgt *disp_mgt;
+	struct nbl_dispatch_ops *disp_ops;
+	unsigned short ip_family;
+	bool is_encap_find = false;
+
+	ret = nbl_copy_tun_info(param->tunnel, rule_act);
+	if (ret) {
+		nbl_err(common, NBL_DEBUG_FLOW, "alloc tunnel_info failed, ret %d\n", ret);
+		return ret;
+	}
+
+	ip_family = ip_tunnel_info_af(rule_act->tunnel);
+	memcpy(&rule_act->encap_key.ip_tun_key, &rule_act->tunnel->key,
+	       sizeof(rule_act->encap_key.ip_tun_key));
+	rule_act->encap_key.tc_tunnel = nbl_tc_get_tunnel(encap_mirred_dev);
+	if (!rule_act->encap_key.tc_tunnel) {
+		nbl_err(common, NBL_DEBUG_FLOW, "unsupported tunnel type: %s",
+			encap_mirred_dev->rtnl_link_ops->kind);
+		ret = -EOPNOTSUPP;
+		goto malloc_err;
+	}
+
+	disp_mgt = NBL_SERV_MGT_TO_DISP_PRIV(param->serv_mgt);
+	disp_ops = NBL_SERV_MGT_TO_DISP_OPS(param->serv_mgt);
+	is_encap_find = disp_ops->tc_tun_encap_lookup(disp_mgt, rule_act, param);
+	if (is_encap_find)
+		goto parse_encap_finish;
+
+	if (ip_family == AF_INET)
+		ret = nbl_tc_tun_create_header_ipv4(rule_act, param,
+						    encap_mirred_dev,
+						    &rule_act->encap_key);
+	else
+		ret = nbl_tc_tun_create_header_ipv6(rule_act, param,
+						    encap_mirred_dev,
+						    &rule_act->encap_key);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "create tnl header failed, ret %d!", ret);
+		goto malloc_err;
+	}
+
+	ret = disp_ops->tc_tun_encap_add(disp_mgt, rule_act);
+	if (ret) {
+		nbl_info(common, NBL_DEBUG_FLOW, "add tnl encap hash failed, ret %d!", ret);
+		goto malloc_err;
+	}
+
+parse_encap_finish:
+	kfree(rule_act->tunnel);
+	rule_act->encap_parse_ok = true;
+	return ret;
+
+malloc_err:
+	kfree(rule_act->tunnel);
+
+	return ret;
+}
+
+struct nbl_tc_tunnel vxlan_tunnel = {
+	.tunnel_type = NBL_TC_TUNNEL_TYPE_VXLAN,
+ .generate_tunnel_hdr = nbl_tc_tun_gen_tunnel_header_vxlan, + .get_tun_hlen = nbl_tc_tun_get_vxlan_hdr_len, +}; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h new file mode 100644 index 0000000000000000000000000000000000000000..81a13ead91c24e213dd193f0262f04d29a6ed1a7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_core/nbl_tc_tun.h @@ -0,0 +1,58 @@ +#ifndef __NBL_TC_TUN_H__ +#define __NBL_TC_TUN_H__ + +#include +#include "nbl_include.h" +#include "nbl_core.h" +#include "nbl_resource.h" + +#define NBL_FLOW_IPV4 4 +#define NBL_FLOW_IPV6 6 +#define NBL_FLOW_IHL 5 +#define NBL_FLOW_DF 0x40 + +#define NBL_FLOW_L4_CK_NO_MODIFY 7 +#define NBL_FLOW_IPV4_LEN_OFFSET 2 +#define NBL_FLOW_IPV6_LEN_OFFSET 4 +#define NBL_FLOW_UDP_LEN_OFFSET 4 +#define NBL_FLOW_VNI_OFFSET 4 + +#define NBL_FLOW_L4_CK_MODE_0 0 +#define NBL_FLOW_L4_CK_MODE_1 1 + +enum { + NBL_TC_TUNNEL_TYPE_UNKNOWN, + NBL_TC_TUNNEL_TYPE_VXLAN, + NBL_TC_TUNNEL_TYPE_GENEVE, + NBL_TC_TUNNEL_TYPE_GRE, +}; + +struct nbl_decap_key { + struct ethhdr key; +}; + +struct nbl_tc_tunnel_route_info { + struct net_device *out_dev; + struct net_device *real_out_dev; + union { + struct flowi4 fl4; + struct flowi6 fl6; + } fl; + struct neighbour *n; + u8 ttl; +}; + +struct nbl_tc_tunnel { + u8 tunnel_type; + int (*generate_tunnel_hdr)(char buf[], u8 *ip_proto, + const struct ip_tunnel_key *tun_key); + int (*get_tun_hlen)(void); +}; + +extern struct nbl_tc_tunnel vxlan_tunnel; + +int nbl_tc_tun_parse_encap_info(struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param, + struct net_device *encap_mirred_dev); + +#endif /* end of __NBL_TC_TUN_H__ */ diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h new file mode 100644 index 0000000000000000000000000000000000000000..2fd12ecfadcbf5eeb0f48f910f43aafa5ad365d9 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_export/nbl_export_rdma.h @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_EXPORT_RDMA_H_
+#define _NBL_EXPORT_RDMA_H_
+
+enum nbl_core_reset_event {
+	NBL_CORE_FATAL_ERR_EVENT, /* most HW modules (excluding pcie/emp) no longer work normally */
+	NBL_CORE_RESET_MAX_EVENT
+};
+
+#include
+
+#define RDMA_MSG_MAX_SIZE 256
+#define NBL_COREDEV_TO_DMA_DEV(core) ((core)->dma_dev)
+
+struct nbl_chan_rdma_resp {
+	u8 resp_data[RDMA_MSG_MAX_SIZE];
+	u16 data_len;
+};
+
+struct nbl_core_dev_lag_mem {
+	u16 vsi_id;
+	u8 eth_id;
+	bool active;
+};
+
+#define NBL_RDMA_LAG_MAX_PORTS 2
+struct nbl_core_dev_lag_info {
+	struct net_device *bond_netdev;
+	struct nbl_core_dev_lag_mem lag_mem[NBL_RDMA_LAG_MAX_PORTS];
+	u16 lag_id;
+	u8 lag_num;
+};
+
+struct nbl_core_dev_info {
+	/* Devices */
+	struct pci_dev *pdev;
+	struct net_device *netdev;
+	struct device *dma_dev;
+	/* Bar addr */
+	u8 __iomem *hw_addr;
+	u64 real_hw_addr;
+	/* Interrupts */
+	struct msix_entry *msix_entries;
+	u16 *global_vector_id;
+	u16 msix_count;
+	/* VSI */
+	u16 vsi_id;
+	u8 real_bus;
+	u8 real_dev;
+	u8 real_function;
+	/* Send function */
+	int (*send)(struct pci_dev *pdev, u8 *req_args, u8 req_len,
+		    void *resp, u16 resp_len);
+	u8 eth_mode;
+	u16 function_id;
+	u8 eth_id;
+	/* Lag info */
+	struct nbl_core_dev_lag_info lag_info;
+	int (*lag_mem_notify)(struct auxiliary_device *adev,
+			      struct nbl_core_dev_lag_info *lag_info);
+	int (*offload_status_notify)(struct auxiliary_device *adev, bool status);
+	int (*register_bond)(struct pci_dev *pdev, bool enable);
+	bool is_lag;
+	/* Info */
+	u32 mem_type;
+	u16 rdma_cap_num;
+};
+
+struct nbl_aux_dev {
+	struct auxiliary_device adev;
+	struct nbl_core_dev_info *cdev_info;
+	void (*recv)(struct auxiliary_device *device, void *req_args, u16 req_len,
+		     struct nbl_chan_rdma_resp *resp);
+	void (*abnormal_event_process)(struct auxiliary_device *grc_adev);
+	void (*process_flr_event)(struct auxiliary_device *grc_adev, u16 vsi_id);
+	int (*reset_event_notify)(struct auxiliary_device *adev, enum nbl_core_reset_event event);
+	ssize_t (*qos_cfg_store)(struct auxiliary_device *adev, int offset,
+				 const char *buf, size_t count);
+	ssize_t (*qos_cfg_show)(struct auxiliary_device *adev, int offset, char *buf);
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c
new file mode 100644
index 0000000000000000000000000000000000000000..65eec7f69c5b6471c4d5dbbc9089aeba38421cc9
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.c
@@ -0,0 +1,950 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ +#include "nbl_accel.h" + +static int nbl_res_alloc_ktls_tx_index(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->tx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + if (index >= NBL_MAX_KTLS_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->tx_ktls_bitmap); + accel_mgt->dtls_cfg_info[index].vld = true; + accel_mgt->dtls_cfg_info[index].vsi = vsi; + return index; +} + +static void nbl_res_free_ktls_tx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->tx_ktls_bitmap); + memset(&accel_mgt->dtls_cfg_info[index], 0, sizeof(struct nbl_tls_cfg_info)); +} + +static void nbl_res_cfg_ktls_tx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->cfg_ktls_tx_keymat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, mode, salt, key, key_len); +} + +static int nbl_res_alloc_ktls_rx_index(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->rx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + if (index >= NBL_MAX_KTLS_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->rx_ktls_bitmap); + accel_mgt->utls_cfg_info[index].vld = true; + accel_mgt->utls_cfg_info[index].vsi = vsi; + return index; +} + +static void nbl_res_free_ktls_rx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->rx_ktls_bitmap); + memset(&accel_mgt->utls_cfg_info[index], 0, sizeof(struct nbl_tls_cfg_info)); +} + +static void nbl_res_cfg_ktls_rx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + phy_ops->cfg_ktls_rx_keymat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, mode, salt, key, key_len); +} + +static void nbl_res_cfg_ktls_rx_record(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + phy_ops->cfg_ktls_rx_record(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, tcp_sn, rec_num, init); +} + +static int nbl_res_alloc_ipsec_tx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->tx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + if (index >= NBL_MAX_IPSEC_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->tx_ipsec_bitmap); + memcpy(&accel_mgt->tx_cfg_info[index], cfg_info, sizeof(struct nbl_ipsec_cfg_info)); + return index; +} + +static void nbl_res_free_ipsec_tx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = 
(struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->tx_ipsec_bitmap); + memset(&accel_mgt->tx_cfg_info[index], 0, sizeof(struct nbl_ipsec_cfg_info)); +} + +static int nbl_res_alloc_ipsec_rx_index(void *priv, struct nbl_ipsec_cfg_info *cfg_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + u32 index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + index = find_first_zero_bit(accel_mgt->rx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + if (index >= NBL_MAX_IPSEC_SESSION) + return -ENOSPC; + + set_bit(index, accel_mgt->rx_ipsec_bitmap); + memcpy(&accel_mgt->rx_cfg_info[index], cfg_info, sizeof(struct nbl_ipsec_cfg_info)); + return index; +} + +static void nbl_res_free_ipsec_rx_index(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + clear_bit(index, accel_mgt->rx_ipsec_bitmap); + memset(&accel_mgt->rx_cfg_info[index], 0, sizeof(struct nbl_ipsec_cfg_info)); +} + +static void nbl_res_cfg_ipsec_tx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_esn_state *esn_state = &sa_entry->esn_state; + struct nbl_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + struct nbl_ipsec_cfg_info *cfg_info = &sa_entry->cfg_info; + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; + u32 ip_data[NBL_DIPSEC_SAD_IP_TOTAL] = {0}; + int i; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + if (attrs->nat_flag) + phy_ops->cfg_dipsec_nat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), attrs->sport); + + phy_ops->cfg_dipsec_sad_iv(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, aes_gcm->seq_iv); + + phy_ops->cfg_dipsec_sad_esn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, esn_state->sn, esn_state->esn, + esn_state->wrap_en, esn_state->enable); + + phy_ops->cfg_dipsec_sad_lifetime(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, cfg_info->lft_cnt, cfg_info->lft_diff, + cfg_info->limit_enable, cfg_info->limit_type); + + phy_ops->cfg_dipsec_sad_crypto(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, aes_gcm->aes_key, aes_gcm->salt, + aes_gcm->crypto_type, attrs->tunnel_mode, aes_gcm->icv_len); + + if (attrs->is_ipv6) { + for (i = 0; i < NBL_DIPSEC_SAD_IP_LEN; i++) + ip_data[i] = ntohl(attrs->daddr.a6[NBL_DIPSEC_SAD_IP_LEN - i - 1]); + + for (i = 0; i < NBL_DIPSEC_SAD_IP_LEN; i++) + ip_data[i + NBL_DIPSEC_SAD_IP_LEN] = + ntohl(attrs->saddr.a6[NBL_DIPSEC_SAD_IP_LEN - i - 1]); + } else { + ip_data[0] = ntohl(attrs->daddr.a4); + ip_data[NBL_DIPSEC_SAD_IP_LEN] = ntohl(attrs->saddr.a4); + } + + phy_ops->cfg_dipsec_sad_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, attrs->nat_flag, attrs->dport, attrs->spi, ip_data); +} + +static void nbl_res_cfg_ipsec_rx_sad(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_esn_state *esn_state = &sa_entry->esn_state; + struct nbl_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + struct nbl_ipsec_cfg_info *cfg_info = &sa_entry->cfg_info; + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + if (attrs->nat_flag) + phy_ops->cfg_uipsec_nat(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + attrs->nat_flag, attrs->dport); + + 
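+	/* program ESN state, lifetime limits and crypto keys into the RX SAD entry */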
phy_ops->cfg_uipsec_sad_esn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, esn_state->sn, esn_state->esn, + esn_state->overlap, esn_state->enable); + + phy_ops->cfg_uipsec_sad_lifetime(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, cfg_info->lft_cnt, cfg_info->lft_diff, + cfg_info->limit_enable, cfg_info->limit_type); + + phy_ops->cfg_uipsec_sad_crypto(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, aes_gcm->aes_key, aes_gcm->salt, + aes_gcm->crypto_type, attrs->tunnel_mode, aes_gcm->icv_len); + + if (esn_state->window_en) + phy_ops->cfg_uipsec_sad_window(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + index, esn_state->window_en, esn_state->option); +} + +static void nbl_uipsec_get_em_hash(struct nbl_flow_fem_entry *flow, u8 *key_data) +{ + u16 ht0_hash = 0; + u16 ht1_hash = 0; + u8 key[NBL_UIPSEC_BYTE_LEN]; + int i; + + for (i = 0; i < NBL_UIPSEC_BYTE_LEN; i++) + key[NBL_UIPSEC_BYTE_LEN - 1 - i] = key_data[i]; + + ht0_hash = NBL_CRC16_CCITT(key, NBL_UIPSEC_BYTE_LEN); + ht1_hash = NBL_CRC16_IBM(key, NBL_UIPSEC_BYTE_LEN); + + flow->ht0_hash = nbl_hash_transfer(ht0_hash, NBL_UIPSEC_POWER, 0); + flow->ht1_hash = nbl_hash_transfer(ht1_hash, NBL_UIPSEC_POWER, 0); +} + +static bool nbl_uipsec_ht0_ht1_search(struct nbl_ipsec_ht_mng *ipsec_ht0_mng, uint16_t ht0_hash, + struct nbl_ipsec_ht_mng *ipsec_ht1_mng, uint16_t ht1_hash, + struct nbl_common_info *common) +{ + struct nbl_flow_ht_tbl *node0 = NULL; + struct nbl_flow_ht_tbl *node1 = NULL; + u16 i = 0; + + node0 = ipsec_ht0_mng->hash_map[ht0_hash]; + if (node0) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node0->key[i].vid == 1 && node0->key[i].ht_other_index == ht1_hash) { + nbl_info(common, NBL_DEBUG_ACCEL, + "Conflicted ht on vid %d and kt_index %u\n", + node0->key[i].vid, node0->key[i].kt_index); + return true; + } + + node1 = ipsec_ht1_mng->hash_map[ht1_hash]; + if (node1) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node1->key[i].vid == 1 && node1->key[i].ht_other_index == ht0_hash) { + nbl_info(common, NBL_DEBUG_ACCEL, + "Conflicted ht on vid %d and kt_index %u\n", + node1->key[i].vid, node1->key[i].kt_index); + return true; + } + + return false; +} + +static int nbl_uipsec_find_ht_avail_table(struct nbl_ipsec_ht_mng *ipsec_ht0_mng, + struct nbl_ipsec_ht_mng *ipsec_ht1_mng, + u16 ht0_hash, u16 ht1_hash) +{ + struct nbl_flow_ht_tbl *pp_ht0_node = NULL; + struct nbl_flow_ht_tbl *pp_ht1_node = NULL; + + pp_ht0_node = ipsec_ht0_mng->hash_map[ht0_hash]; + pp_ht1_node = ipsec_ht1_mng->hash_map[ht1_hash]; + + if (!pp_ht0_node && !pp_ht1_node) { + return 0; + } else if (pp_ht0_node && !pp_ht1_node) { + if (pp_ht0_node->ref_cnt >= NBL_HASH_CFT_AVL) + return 1; + else + return 0; + } else if (!pp_ht0_node && pp_ht1_node) { + if (pp_ht1_node->ref_cnt >= NBL_HASH_CFT_AVL) + return 0; + else + return 1; + } else { + if ((pp_ht0_node->ref_cnt <= NBL_HASH_CFT_AVL || + (pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht0_node->ref_cnt < NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL))) + return 0; + else if (((pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt <= NBL_HASH_CFT_AVL) || + (pp_ht0_node->ref_cnt == NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt < NBL_HASH_CFT_MAX))) + return 1; + else + return -1; + } +} + +static void nbl_uipsec_cfg_em_tcam(struct nbl_resource_mgt *res_mgt, u32 index, + u32 *data, struct nbl_flow_fem_entry *flow) +{ + struct nbl_accel_mgt *accel_mgt; + struct nbl_common_info *common; + struct nbl_phy_ops *phy_ops; + u16 tcam_index; + + accel_mgt = 
NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + tcam_index = find_first_zero_bit(accel_mgt->ipsec_tcam_id, NBL_MAX_IPSEC_TCAM); + if (tcam_index >= NBL_MAX_IPSEC_TCAM) { + nbl_err(common, NBL_DEBUG_ACCEL, + "There is no available ipsec tcam id left for sa index %u\n", index); + return; + } + + nbl_info(common, NBL_DEBUG_ACCEL, + "put sad index %u to ipsec tcam index %u.\n", index, tcam_index); + phy_ops->cfg_uipsec_em_tcam(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_index, data); + phy_ops->cfg_uipsec_em_ad(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_index, index); + + flow->tcam_index = tcam_index; + flow->tcam_flag = true; + set_bit(tcam_index, accel_mgt->ipsec_tcam_id); +} + +static int nbl_uipsec_insert_em_ht(struct nbl_ipsec_ht_mng *ipsec_ht_mng, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_flow_ht_tbl *node; + u16 ht_index; + u16 ht_other_index; + int i; + + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + ht_other_index = (flow->hash_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash); + + node = ipsec_ht_mng->hash_map[ht_index]; + if (!node) { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + ipsec_ht_mng->hash_map[ht_index] = node; + } + + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (node->key[i].vid == 0) { + node->key[i].vid = 1; + node->key[i].ht_other_index = ht_other_index; + node->key[i].kt_index = flow->flow_id; + node->ref_cnt++; + flow->hash_bucket = i; + break; + } + } + + return 0; +} + +static void nbl_uipsec_cfg_em_flow(struct nbl_resource_mgt *res_mgt, u32 index, + u32 *data, struct nbl_flow_fem_entry *flow) +{ + struct nbl_phy_ops *phy_ops; + u16 ht_table; + u16 ht_index; + u16 ht_other_index; + u16 ht_bucket; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + ht_table = flow->hash_table; + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + ht_other_index = (flow->hash_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash); + ht_bucket = flow->hash_bucket; + + phy_ops->cfg_uipsec_em_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, ht_table, + ht_index, ht_other_index, ht_bucket); + phy_ops->cfg_uipsec_em_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, data); +} + +static int nbl_accel_add_uipsec_rule(struct nbl_resource_mgt *res_mgt, u32 index, u32 *data, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_accel_mgt *accel_mgt; + struct nbl_common_info *common; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_ht_mng *ipsec_ht_mng = NULL; + u8 key_data[NBL_UIPSEC_BYTE_LEN]; + int ht_table; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + flow->flow_id = index; + memcpy(key_data, data, NBL_UIPSEC_BYTE_LEN); + nbl_uipsec_get_em_hash(flow, key_data); + + /* two flows have the same ht0&ht1, put the conflicted one to tcam */ + if (nbl_uipsec_ht0_ht1_search(&accel_mgt->ipsec_ht0_mng, flow->ht0_hash, + &accel_mgt->ipsec_ht1_mng, flow->ht1_hash, common)) + flow->tcam_flag = true; + + ht_table = nbl_uipsec_find_ht_avail_table(&accel_mgt->ipsec_ht0_mng, + &accel_mgt->ipsec_ht1_mng, + flow->ht0_hash, flow->ht1_hash); + if (ht_table < 0) + flow->tcam_flag = true; + + if (flow->tcam_flag) { + nbl_uipsec_cfg_em_tcam(res_mgt, index, data, flow); + return 0; + } + + ipsec_ht_mng = + (ht_table == NBL_HT0 ? 
&accel_mgt->ipsec_ht0_mng : &accel_mgt->ipsec_ht1_mng); + flow->hash_table = ht_table; + if (nbl_uipsec_insert_em_ht(ipsec_ht_mng, flow)) + return -ENOMEM; + + nbl_info(common, NBL_DEBUG_ACCEL, "cfg uipsec flow_item: %u, %u, %u, %u, %u\n", + flow->flow_id, flow->hash_table, flow->ht0_hash, + flow->ht1_hash, flow->hash_bucket); + nbl_uipsec_cfg_em_flow(res_mgt, index, data, flow); + + return 0; +} + +static int nbl_res_add_ipsec_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + struct nbl_accel_uipsec_rule *rule; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + + list_for_each_entry(rule, &accel_mgt->uprbac_head, node) + if (rule->index == index) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + if (nbl_accel_add_uipsec_rule(res_mgt, index, data, &rule->uipsec_entry)) { + kfree(rule); + return -EFAULT; + } + + rule->index = index; + rule->vsi = vsi; + list_add_tail(&rule->node, &accel_mgt->uprbac_head); + + return 0; +} + +static void nbl_uipsec_clear_em_tcam(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_accel_mgt *accel_mgt; + struct nbl_common_info *common; + struct nbl_phy_ops *phy_ops; + u16 tcam_index; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + tcam_index = flow->tcam_index; + + nbl_info(common, NBL_DEBUG_ACCEL, + "del sad index %u from ipsec tcam index %u.\n", flow->flow_id, tcam_index); + phy_ops->clear_uipsec_tcam_ad(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_index); + clear_bit(tcam_index, accel_mgt->ipsec_tcam_id); +} + +static void nbl_uipsec_remove_em_ht(struct nbl_ipsec_ht_mng *ipsec_ht_mng, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_flow_ht_tbl *node; + u16 ht_index; + + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + node = ipsec_ht_mng->hash_map[ht_index]; + if (!node) + return; + + memset(&node->key[flow->hash_bucket], 0, sizeof(node->key[flow->hash_bucket])); + node->ref_cnt--; + if (!node->ref_cnt) { + kfree(node); + ipsec_ht_mng->hash_map[ht_index] = NULL; + } +} + +static void nbl_uipsec_clear_em_flow(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_phy_ops *phy_ops; + u16 ht_table; + u16 ht_index; + u16 ht_bucket; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + ht_table = flow->hash_table; + ht_index = (flow->hash_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash); + ht_bucket = flow->hash_bucket; + + phy_ops->clear_uipsec_ht_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), flow->flow_id, + ht_table, ht_index, ht_bucket); +} + +static void nbl_accel_del_uipsec_rule(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_fem_entry *flow) +{ + struct nbl_accel_mgt *accel_mgt; + struct nbl_common_info *common; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_ht_mng *ipsec_ht_mng = NULL; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (flow->tcam_flag) { + nbl_uipsec_clear_em_tcam(res_mgt, flow); + return; + } + + ipsec_ht_mng = (flow->hash_table == NBL_HT0 ? 
+ &accel_mgt->ipsec_ht0_mng : &accel_mgt->ipsec_ht1_mng); + nbl_uipsec_remove_em_ht(ipsec_ht_mng, flow); + nbl_info(common, NBL_DEBUG_ACCEL, "del uipsec flow_item: %u, %u, %u, %u, %u\n", + flow->flow_id, flow->hash_table, flow->ht0_hash, + flow->ht1_hash, flow->hash_bucket); + + nbl_uipsec_clear_em_flow(res_mgt, flow); +} + +static void nbl_res_del_ipsec_rx_flow(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt; + struct nbl_accel_uipsec_rule *rule; + + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + + list_for_each_entry(rule, &accel_mgt->uprbac_head, node) + if (rule->index == index) + break; + + if (nbl_list_entry_is_head(rule, &accel_mgt->uprbac_head, node)) + return; + + nbl_accel_del_uipsec_rule(res_mgt, &rule->uipsec_entry); + list_del(&rule->node); + kfree(rule); +} + +static void nbl_res_flr_clear_accel(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_accel_mgt *accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + struct nbl_accel_uipsec_rule *uipsec_rule, *uipsec_rule_safe; + u16 func_id = vf_id + NBL_MAX_PF; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + int i; + + if (nbl_res_vf_is_active(priv, func_id)) { + for (i = 0; i < NBL_MAX_IPSEC_SESSION; i++) { + if (accel_mgt->tx_cfg_info[i].vld && + accel_mgt->tx_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->tx_ipsec_bitmap); + memset(&accel_mgt->tx_cfg_info[i], 0, + sizeof(struct nbl_ipsec_cfg_info)); + } + } + + list_for_each_entry_safe(uipsec_rule, uipsec_rule_safe, + &accel_mgt->uprbac_head, node) + if (uipsec_rule->vsi == vsi_id) { + nbl_accel_del_uipsec_rule(res_mgt, &uipsec_rule->uipsec_entry); + list_del(&uipsec_rule->node); + kfree(uipsec_rule); + } + + for (i = 0; i < NBL_MAX_IPSEC_SESSION; i++) { + if (accel_mgt->rx_cfg_info[i].vld && + accel_mgt->rx_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->rx_ipsec_bitmap); + memset(&accel_mgt->rx_cfg_info[i], 0, + sizeof(struct nbl_ipsec_cfg_info)); + } + } + + for (i = 0; i < NBL_MAX_KTLS_SESSION; i++) { + if (accel_mgt->dtls_cfg_info[i].vld && + accel_mgt->dtls_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->tx_ktls_bitmap); + memset(&accel_mgt->dtls_cfg_info[i], 0, + sizeof(struct nbl_tls_cfg_info)); + } + } + + for (i = 0; i < NBL_MAX_KTLS_SESSION; i++) { + if (accel_mgt->utls_cfg_info[i].vld && + accel_mgt->utls_cfg_info[i].vsi == vsi_id) { + clear_bit(i, accel_mgt->rx_ktls_bitmap); + memset(&accel_mgt->utls_cfg_info[i], 0, + sizeof(struct nbl_tls_cfg_info)); + } + } + } +} + +static bool nbl_res_check_ipsec_status(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + u32 dipsec_status; + u32 uipsec_status; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + dipsec_status = phy_ops->read_dipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + uipsec_status = phy_ops->read_uipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + if ((dipsec_status & NBL_IPSEC_SOFT_EXPIRE) || + (dipsec_status & NBL_IPSEC_HARD_EXPIRE) || + ((uipsec_status) & NBL_IPSEC_SOFT_EXPIRE) || + ((uipsec_status) & NBL_IPSEC_HARD_EXPIRE)) + return true; + + return false; +} + +static u32 nbl_res_get_dipsec_lft_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + union nbl_ipsec_lft_info lft_info; + u32 dipsec_status; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + lft_info.data = 
phy_ops->read_dipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + dipsec_status = phy_ops->reset_dipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + lft_info.soft_vld = !!(dipsec_status & NBL_IPSEC_SOFT_EXPIRE); + lft_info.hard_vld = !!(dipsec_status & NBL_IPSEC_HARD_EXPIRE); + + return lft_info.data; +} + +static void nbl_res_handle_dipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common; + struct nbl_accel_mgt *accel_mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_cfg_info *cfg_info; + u32 lifetime_diff; + u32 flag_wen; + u32 msb_wen; + bool need = false; + + common = NBL_RES_MGT_TO_COMMON(res_mgt); + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + cfg_info = &accel_mgt->tx_cfg_info[index]; + + if (!cfg_info->vld) + return; + + if (cfg_info->soft_round == 0) { + nbl_info(common, NBL_DEBUG_ACCEL, "dipsec sa %u soft expire.\n", index); + if (cfg_info->hard_round == 0) { + lifetime_diff = 0; + flag_wen = 1; + msb_wen = 0; + need = true; + } + } + + if (cfg_info->hard_round == 1) { + if (cfg_info->hard_remain > cfg_info->soft_remain) + lifetime_diff = cfg_info->hard_remain - + cfg_info->soft_remain; + else + lifetime_diff = (1 << NBL_IPSEC_LIFETIME_ROUND) + + cfg_info->hard_remain - + cfg_info->soft_remain; + flag_wen = 1; + msb_wen = 0; + need = true; + if (cfg_info->soft_round > 0) + nbl_info(common, NBL_DEBUG_ACCEL, + "dipsec sa %u soft expire in advance.\n", index); + } + + if (cfg_info->hard_round > 1) { + lifetime_diff = 0; + flag_wen = 0; + msb_wen = 1; + need = true; + if (cfg_info->soft_round) + cfg_info->soft_round--; + cfg_info->hard_round--; + } + + if (need) + phy_ops->cfg_dipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index, + lifetime_diff, flag_wen, msb_wen); +} + +static u32 nbl_res_get_uipsec_lft_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + union nbl_ipsec_lft_info lft_info; + u32 uipsec_status; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + lft_info.data = phy_ops->read_uipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + uipsec_status = phy_ops->reset_uipsec_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + lft_info.soft_vld = !!(uipsec_status & NBL_IPSEC_SOFT_EXPIRE); + lft_info.hard_vld = !!(uipsec_status & NBL_IPSEC_HARD_EXPIRE); + + return lft_info.data; +} + +static void nbl_res_handle_uipsec_soft_expire(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common; + struct nbl_accel_mgt *accel_mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_ipsec_cfg_info *cfg_info; + u32 lifetime_diff; + u32 flag_wen; + u32 msb_wen; + bool need = false; + + common = NBL_RES_MGT_TO_COMMON(res_mgt); + accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + cfg_info = &accel_mgt->rx_cfg_info[index]; + + if (!cfg_info->vld) + return; + + if (cfg_info->soft_round == 0) { + nbl_info(common, NBL_DEBUG_ACCEL, "uipsec sa %u soft expire.\n", index); + if (cfg_info->hard_round == 0) { + lifetime_diff = 0; + flag_wen = 1; + msb_wen = 0; + need = true; + } + } + + if (cfg_info->hard_round == 1) { + if (cfg_info->hard_remain > cfg_info->soft_remain) + lifetime_diff = cfg_info->hard_remain - + cfg_info->soft_remain; + else + lifetime_diff = (1 << NBL_IPSEC_LIFETIME_ROUND) + + cfg_info->hard_remain - + cfg_info->soft_remain; + flag_wen = 1; + msb_wen = 0; + need = true; + if 
(cfg_info->soft_round > 0)
+			nbl_info(common, NBL_DEBUG_ACCEL,
+				 "uipsec sa %u soft expire in advance.\n", index);
+	}
+
+	if (cfg_info->hard_round > 1) {
+		lifetime_diff = 0;
+		flag_wen = 0;
+		msb_wen = 1;
+		need = true;
+		if (cfg_info->soft_round)
+			cfg_info->soft_round--;
+		cfg_info->hard_round--;
+	}
+
+	if (need)
+		phy_ops->cfg_uipsec_lft_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), index,
+					     lifetime_diff, flag_wen, msb_wen);
+}
+
+static void nbl_res_handle_dipsec_hard_expire(void *priv, u32 index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_accel_mgt *accel_mgt;
+	struct nbl_sa_search_key param;
+	struct nbl_chan_send_info chan_send;
+	u16 vsid;
+	u16 dstid;
+
+	chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt);
+	common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	if (!accel_mgt->tx_cfg_info[index].vld)
+		return;
+
+	vsid = accel_mgt->tx_cfg_info[index].vsi;
+	dstid = nbl_res_vsi_id_to_func_id(res_mgt, vsid);
+	param.family = accel_mgt->tx_cfg_info[index].sa_key.family;
+	param.mark = accel_mgt->tx_cfg_info[index].sa_key.mark;
+	param.spi = accel_mgt->tx_cfg_info[index].sa_key.spi;
+	memcpy(&param.daddr, &accel_mgt->tx_cfg_info[index].sa_key.daddr, sizeof(param.daddr));
+
+	nbl_info(common, NBL_DEBUG_ACCEL, "dipsec sa %u hard expire.\n", index);
+	NBL_CHAN_SEND(chan_send, dstid, NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, &param,
+		      sizeof(param), NULL, 0, 0);
+	chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+}
+
+static void nbl_res_handle_uipsec_hard_expire(void *priv, u32 index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common;
+	struct nbl_channel_ops *chan_ops;
+	struct nbl_accel_mgt *accel_mgt;
+	struct nbl_sa_search_key param;
+	struct nbl_chan_send_info chan_send;
+	u16 vsid;
+	u16 dstid;
+
+	chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	accel_mgt = NBL_RES_MGT_TO_ACCEL_MGT(res_mgt);
+	common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	if (!accel_mgt->rx_cfg_info[index].vld)
+		return;
+
+	vsid = accel_mgt->rx_cfg_info[index].vsi;
+	dstid = nbl_res_vsi_id_to_func_id(res_mgt, vsid);
+	param.family = accel_mgt->rx_cfg_info[index].sa_key.family;
+	param.mark = accel_mgt->rx_cfg_info[index].sa_key.mark;
+	param.spi = accel_mgt->rx_cfg_info[index].sa_key.spi;
+	memcpy(&param.daddr, &accel_mgt->rx_cfg_info[index].sa_key.daddr, sizeof(param.daddr));
+
+	nbl_info(common, NBL_DEBUG_ACCEL, "uipsec sa %u hard expire.\n", index);
+	NBL_CHAN_SEND(chan_send, dstid, NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, &param,
+		      sizeof(param), NULL, 0, 0);
+	chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+}
+
+/* NBL_ACCEL_SET_OPS(ops_name, func)
+ *
+ * Use X macros to reduce the setup and removal code.
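+ *
+ * For example, nbl_accel_setup_ops() below defines
+ *   #define NBL_ACCEL_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0)
+ * and expands NBL_ACCEL_OPS_TBL once, so each table entry such as
+ *   NBL_ACCEL_SET_OPS(alloc_ipsec_tx_index, nbl_res_alloc_ipsec_tx_index);
+ * becomes a single ops-pointer assignment; adding an op only requires a
+ * new line in the table.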
+ */ +#define NBL_ACCEL_OPS_TBL \ +do { \ + NBL_ACCEL_SET_OPS(alloc_ktls_tx_index, nbl_res_alloc_ktls_tx_index); \ + NBL_ACCEL_SET_OPS(free_ktls_tx_index, nbl_res_free_ktls_tx_index); \ + NBL_ACCEL_SET_OPS(cfg_ktls_tx_keymat, nbl_res_cfg_ktls_tx_keymat); \ + NBL_ACCEL_SET_OPS(alloc_ktls_rx_index, nbl_res_alloc_ktls_rx_index); \ + NBL_ACCEL_SET_OPS(free_ktls_rx_index, nbl_res_free_ktls_rx_index); \ + NBL_ACCEL_SET_OPS(cfg_ktls_rx_keymat, nbl_res_cfg_ktls_rx_keymat); \ + NBL_ACCEL_SET_OPS(cfg_ktls_rx_record, nbl_res_cfg_ktls_rx_record); \ + NBL_ACCEL_SET_OPS(alloc_ipsec_tx_index, nbl_res_alloc_ipsec_tx_index); \ + NBL_ACCEL_SET_OPS(free_ipsec_tx_index, nbl_res_free_ipsec_tx_index); \ + NBL_ACCEL_SET_OPS(alloc_ipsec_rx_index, nbl_res_alloc_ipsec_rx_index); \ + NBL_ACCEL_SET_OPS(free_ipsec_rx_index, nbl_res_free_ipsec_rx_index); \ + NBL_ACCEL_SET_OPS(cfg_ipsec_tx_sad, nbl_res_cfg_ipsec_tx_sad); \ + NBL_ACCEL_SET_OPS(cfg_ipsec_rx_sad, nbl_res_cfg_ipsec_rx_sad); \ + NBL_ACCEL_SET_OPS(add_ipsec_rx_flow, nbl_res_add_ipsec_rx_flow); \ + NBL_ACCEL_SET_OPS(del_ipsec_rx_flow, nbl_res_del_ipsec_rx_flow); \ + NBL_ACCEL_SET_OPS(flr_clear_accel, nbl_res_flr_clear_accel); \ + NBL_ACCEL_SET_OPS(check_ipsec_status, nbl_res_check_ipsec_status); \ + NBL_ACCEL_SET_OPS(get_dipsec_lft_info, nbl_res_get_dipsec_lft_info); \ + NBL_ACCEL_SET_OPS(handle_dipsec_soft_expire, nbl_res_handle_dipsec_soft_expire);\ + NBL_ACCEL_SET_OPS(handle_dipsec_hard_expire, nbl_res_handle_dipsec_hard_expire);\ + NBL_ACCEL_SET_OPS(get_uipsec_lft_info, nbl_res_get_uipsec_lft_info); \ + NBL_ACCEL_SET_OPS(handle_uipsec_soft_expire, nbl_res_handle_uipsec_soft_expire);\ + NBL_ACCEL_SET_OPS(handle_uipsec_hard_expire, nbl_res_handle_uipsec_hard_expire);\ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_accel_setup_mgt(struct device *dev, struct nbl_accel_mgt **accel_mgt) +{ + *accel_mgt = devm_kzalloc(dev, sizeof(struct nbl_accel_mgt), GFP_KERNEL); + if (!*accel_mgt) + return -ENOMEM; + + INIT_LIST_HEAD(&(*accel_mgt)->uprbac_head); + return 0; +} + +static void nbl_accel_remove_mgt(struct device *dev, struct nbl_accel_mgt **accel_mgt) +{ + devm_kfree(dev, *accel_mgt); + *accel_mgt = NULL; +} + +int nbl_accel_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_accel_mgt **accel_mgt; + struct nbl_phy_ops *phy_ops; + struct device *dev; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + accel_mgt = &NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_dprbac(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + phy_ops->init_uprbac(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + return nbl_accel_setup_mgt(dev, accel_mgt); +} + +void nbl_accel_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_accel_mgt **accel_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + accel_mgt = &NBL_RES_MGT_TO_ACCEL_MGT(res_mgt); + + if (!(*accel_mgt)) + return; + + nbl_accel_remove_mgt(dev, accel_mgt); +} + +int nbl_accel_setup_ops(struct nbl_resource_ops *res_ops) +{ + if (!res_ops) + return -EINVAL; + +#define NBL_ACCEL_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_ACCEL_OPS_TBL; +#undef NBL_ACCEL_SET_OPS + + return 0; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h new file mode 100644 index 0000000000000000000000000000000000000000..d8fc9fcc95d979147ab54c83cc2f8f23ee886e34 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_accel.h @@ 
-0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_ACCEL_H_ +#define _NBL_ACCEL_H_ +#include "nbl_resource.h" + +#define NBL_IPSEC_SOFT_EXPIRE 0x80 +#define NBL_IPSEC_HARD_EXPIRE 0x100 + +#define NBL_DIPSEC_SAD_IP_TOTAL 8 +#define NBL_DIPSEC_SAD_IP_LEN 4 +#define NBL_UIPSEC_BYTE_LEN 20 +#define NBL_UIPSEC_POWER 9 +#define NBL_IPSEC_LIFETIME_ROUND 31 + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c new file mode 100644 index 0000000000000000000000000000000000000000..ef9e1e3b66a922ad70db879089e2e677f08dfc2a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.c @@ -0,0 +1,2679 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_adminq.h" + +static int nbl_res_adminq_update_ring_num(void *priv); + +/* **** FW CMD FILTERS START **** */ + +static int nbl_res_adminq_check_net_ring_num(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_net_ring_num_param *param) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u32 sum = 0, pf_real_num = 0, vf_real_num = 0; + int i; + + pf_real_num = NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + vf_real_num = NBL_VSI_VF_REAL_QUEUE_NUM(param->vf_def_max_net_qp_num); + + if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC || vf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + return -EINVAL; + + /* TODO: should we consider when pf_num is 8? */ + for (i = 0; i < NBL_COMMON_TO_ETH_MODE(common); i++) { + pf_real_num = param->net_max_qp_num[i] ? + NBL_VSI_PF_REAL_QUEUE_NUM(param->net_max_qp_num[i]) : + NBL_VSI_PF_REAL_QUEUE_NUM(param->pf_def_max_net_qp_num); + + if (pf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + return -EINVAL; + + sum += pf_real_num; + } + + for (i = 0; i < res_info->max_vf_num; i++) { + vf_real_num = param->net_max_qp_num[i + NBL_MAX_PF] ? + NBL_VSI_VF_REAL_QUEUE_NUM(param->net_max_qp_num[i + NBL_MAX_PF]) : + NBL_VSI_VF_REAL_QUEUE_NUM(param->vf_def_max_net_qp_num); + + if (vf_real_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) + return -EINVAL; + + sum += vf_real_num; + } + + if (sum > NBL_MAX_TXRX_QUEUE) + return -EINVAL; + + return 0; +} + +static int nbl_res_adminq_check_rdma_cap(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_rdma_cap_param *param) +{ + int count = 0, i, j; + + for (i = 0; i < NBL_RDMA_CAP_CMD_LEN; i++) + for (j = 0; j < BITS_PER_BYTE; j++) + if (param->rdma_func_bitmaps[i] & BIT(j)) + count++; + + if (count > NBL_RES_RDMA_MAX) + return -EINVAL; + + return 0; +} + +static int nbl_res_adminq_check_rdma_mem_type(struct nbl_resource_mgt *res_mgt, + struct nbl_fw_cmd_rdma_mem_type_param *param) +{ + return param->mem_type > NBL_RDMA_MEM_TYPE_MAX ? 
-EINVAL : 0;
+}
+
+static u32 nbl_res_adminq_sum_vf_num(struct nbl_fw_cmd_vf_num_param *param)
+{
+	u32 count = 0;
+	int i;
+
+	for (i = 0; i < NBL_VF_NUM_CMD_LEN; i++)
+		count += param->vf_max_num[i];
+
+	return count;
+}
+
+static int nbl_res_adminq_check_vf_num_type(struct nbl_resource_mgt *res_mgt,
+					    struct nbl_fw_cmd_vf_num_param *param)
+{
+	u32 count;
+
+	count = nbl_res_adminq_sum_vf_num(param);
+	if (count > NBL_MAX_VF)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nbl_res_fw_cmd_filter_rw_in(struct nbl_resource_mgt *res_mgt, void *data, u16 len)
+{
+	struct nbl_chan_resource_write_param *param = (struct nbl_chan_resource_write_param *)data;
+	struct nbl_fw_cmd_net_ring_num_param *net_ring_num_param;
+	struct nbl_fw_cmd_rdma_cap_param *rdma_cap_param;
+	struct nbl_fw_cmd_rdma_mem_type_param *rdma_mem_type_param;
+	struct nbl_fw_cmd_vf_num_param *vf_num_param;
+
+	switch (param->resid) {
+	case NBL_ADMINQ_PFA_TLV_NET_RING_NUM:
+		net_ring_num_param = (struct nbl_fw_cmd_net_ring_num_param *)param->data;
+		return nbl_res_adminq_check_net_ring_num(res_mgt, net_ring_num_param);
+	case NBL_ADMINQ_PFA_TLV_RDMA_CAP:
+		rdma_cap_param = (struct nbl_fw_cmd_rdma_cap_param *)param->data;
+		return nbl_res_adminq_check_rdma_cap(res_mgt, rdma_cap_param);
+	case NBL_ADMINQ_PFA_TLV_RDMA_MEM_TYPE:
+		rdma_mem_type_param = (struct nbl_fw_cmd_rdma_mem_type_param *)param->data;
+		return nbl_res_adminq_check_rdma_mem_type(res_mgt, rdma_mem_type_param);
+	case NBL_ADMINQ_PFA_TLV_VF_NUM:
+		vf_num_param = (struct nbl_fw_cmd_vf_num_param *)param->data;
+		return nbl_res_adminq_check_vf_num_type(res_mgt, vf_num_param);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int nbl_res_fw_cmd_filter_rw_out(struct nbl_resource_mgt *res_mgt, void *in, u16 in_len,
+					void *out, u16 out_len)
+{
+	struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info;
+	struct nbl_chan_resource_write_param *param = (struct nbl_chan_resource_write_param *)in;
+	struct nbl_fw_cmd_net_ring_num_param *net_ring_num_param;
+	struct nbl_fw_cmd_vf_num_param *vf_num_param;
+	size_t copy_len;
+	u32 count;
+
+	switch (param->resid) {
+	case NBL_ADMINQ_PFA_TLV_NET_RING_NUM:
+		net_ring_num_param = (struct nbl_fw_cmd_net_ring_num_param *)param->data;
+		copy_len = min_t(size_t, sizeof(*num_info), (size_t)in_len);
+		memcpy(num_info, net_ring_num_param, copy_len);
+		break;
+	case NBL_ADMINQ_PFA_TLV_VF_NUM:
+		vf_num_param = (struct nbl_fw_cmd_vf_num_param *)param->data;
+		count = nbl_res_adminq_sum_vf_num(vf_num_param);
+		res_info->max_vf_num = count;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void nbl_res_adminq_add_cmd_filter_res_write(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_res_fw_cmd_filter filter = {
+		.in = nbl_res_fw_cmd_filter_rw_in,
+		.out = nbl_res_fw_cmd_filter_rw_out,
+	};
+	u16 key = 0;
+
+	key = NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE;
+
+	if (nbl_common_alloc_hash_node(adminq_mgt->cmd_filter, &key, &filter, NULL))
+		nbl_warn(common, NBL_DEBUG_ADMINQ, "Failed to register res_write in filter");
+}
+
+/* **** FW CMD FILTERS END **** */
+
+static int nbl_res_adminq_set_module_eeprom_info(struct nbl_resource_mgt *res_mgt,
+						 u8 eth_id,
+						 u8 i2c_address,
+						 u8 page,
+						 u8 bank,
+						 u32 offset,
+						 u32 length,
+						 u8 *data)
+{
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_chan_param_module_eeprom_info param = {0};
+	u32 xfer_size = 0;
+	u32 byte_offset = 0;
+	int data_length = length;
+	int ret = 0;
+
+	do {
+		xfer_size = min_t(u32, data_length, NBL_MODULE_EEPRO_WRITE_MAX_LEN);
+		data_length -= xfer_size;
+
+		param.eth_id = eth_id;
+		param.i2c_address = i2c_address;
+		param.page = page;
+		param.bank = bank;
+		param.write = 1;
+		param.offset = offset + byte_offset;
+		param.length = xfer_size;
+		memcpy(param.data, data + byte_offset, xfer_size);
+
+		NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+			      NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM,
+			      &param, sizeof(param), NULL, 0, 1);
+		ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+		if (ret) {
+			dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x,"
+				" eth_id:%d, i2c_address:%d, page:%d, bank:%d,"
+				" offset:%d, length:%d\n",
+				ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM,
+				eth_info->logic_eth_id[eth_id],
+				i2c_address, page, bank, offset + byte_offset, xfer_size);
+		}
+		byte_offset += xfer_size;
+	} while (!ret && data_length > 0);
+
+	return ret;
+}
+
+static int nbl_res_adminq_turn_module_eeprom_page(struct nbl_resource_mgt *res_mgt,
+						  u8 eth_id, u8 page)
+{
+	int ret;
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+
+	ret = nbl_res_adminq_set_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0,
+						    SFF_8636_TURNPAGE_ADDR, 1, &page);
+	if (ret) {
+		dev_err(dev, "eth %d set_module_eeprom_info failed %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		return -EIO;
+	}
+
+	return ret;
+}
+
+static void nbl_res_get_module_eeprom_page(u32 addr, u8 *upper_page, u8 *offset)
+{
+	if (addr >= SFF_8638_PAGESIZE) {
+		*upper_page = (addr - SFF_8638_PAGESIZE) / SFF_8638_PAGESIZE;
+		*offset = (u8)(addr - (*upper_page * SFF_8638_PAGESIZE));
+	} else {
+		*upper_page = 0;
+		*offset = addr;
+	}
+}
+
+static int nbl_res_adminq_get_module_eeprom_info(struct nbl_resource_mgt *res_mgt,
+						 u8 eth_id,
+						 u8 i2c_address,
+						 u8 page,
+						 u8 bank,
+						 u32 offset,
+						 u32 length,
+						 u8 *data)
+{
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct nbl_chan_send_info chan_send;
+	struct nbl_chan_param_module_eeprom_info param = {0};
+	u32 xfer_size = 0;
+	u32 byte_offset = 0;
+	int data_length = length;
+	int ret = 0;
+
+	/* read a maximum of 128 bytes each time */
+	do {
+		xfer_size = min_t(u32, data_length, NBL_MAX_PHY_I2C_RESP_SIZE);
+		data_length -= xfer_size;
+
+		param.eth_id = eth_id;
+		param.i2c_address = i2c_address;
+		param.page = page;
+		param.bank = bank;
+		param.write = 0;
+		param.offset = offset + byte_offset;
+		param.length = xfer_size;
+
+		NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+			      NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM,
+			      &param, sizeof(param), data + byte_offset, xfer_size, 1);
+		ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+		if (ret) {
+			dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x,"
+				" eth_id:%d, i2c_address:%d, page:%d, bank:%d,"
+				" offset:%d, length:%d\n",
+				ret, NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM,
+				eth_info->logic_eth_id[eth_id],
+				i2c_address, page, bank, offset + byte_offset, xfer_size);
+		}
+		byte_offset += xfer_size;
+	} while (!ret &&
data_length > 0); + + return ret; +} + +static int nbl_res_adminq_flash_read(struct nbl_resource_mgt *res_mgt, u32 bank_id, + u32 offset, u32 len, u8 *data) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_flash_read read_param; + int remain = len, sec_offset = 0, ret = 0; + + while (remain > 0) { + read_param.bank_id = bank_id; + read_param.offset = offset + sec_offset; + read_param.len = remain > NBL_CHAN_FLASH_READ_LEN ? NBL_CHAN_FLASH_READ_LEN : + remain; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_READ, &read_param, sizeof(read_param), + data + sec_offset, read_param.len, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "adminq flash read fail on bank %d, offset %d", bank_id, offset); + return ret; + } + + remain -= read_param.len; + sec_offset += read_param.len; + } + + return ret; +} + +static int nbl_res_adminq_flash_erase(struct nbl_resource_mgt *res_mgt, u32 bank_id, + u32 offset, u32 len) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_flash_erase erase_param; + int remain = len, sec_offset = 0, ret = 0; + + while (remain > 0) { + erase_param.bank_id = bank_id; + erase_param.offset = offset + sec_offset; + /* When erase, it must be 4k-aligned, so we always erase 4k each time. */ + erase_param.len = NBL_CHAN_FLASH_ERASE_LEN; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_ERASE, + &erase_param, sizeof(erase_param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "adminq flash erase fail on bank %d, offset %d", + bank_id, erase_param.offset); + return ret; + } + + remain -= erase_param.len; + sec_offset += erase_param.len; + } + + return ret; +} + +static int nbl_res_adminq_flash_write(struct nbl_resource_mgt *res_mgt, u32 bank_id, + u32 offset, u32 len, const u8 *data) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_flash_write *write_param = NULL; + int remain = len, sec_offset = 0, ret = 0; + + write_param = kzalloc(sizeof(*write_param), GFP_KERNEL); + if (!write_param) + return -ENOMEM; + + while (remain > 0) { + write_param->bank_id = bank_id; + write_param->offset = offset + sec_offset; + write_param->len = remain > NBL_CHAN_FLASH_WRITE_LEN ? 
NBL_CHAN_FLASH_WRITE_LEN : + remain; + memcpy(write_param->data, data + sec_offset, write_param->len); + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_WRITE, + write_param, sizeof(*write_param), NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "adminq flash write fail on bank %d, offset %d", bank_id, offset); + kfree(write_param); + return ret; + } + + remain -= write_param->len; + sec_offset += write_param->len; + } + + kfree(write_param); + return ret; +} + +static int nbl_res_adminq_get_nvm_bank_index(struct nbl_resource_mgt *res_mgt, int *rbank) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_NVM_BANK_INDEX, NULL, 0, rbank, sizeof(*rbank), 1); + return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); +} + +static int nbl_res_adminq_flash_set_nvm_bank(struct nbl_resource_mgt *res_mgt, int rbank, + int bank_id, int op) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u16 nvmidx; + u8 *idxbuf = NULL; + int ret = 0; + + idxbuf = kzalloc(NBL_ADMINQ_IDX_LEN, GFP_KERNEL); + if (!idxbuf) + return -ENOMEM; + + memset(idxbuf, 0xFF, NBL_ADMINQ_IDX_LEN); + + if (op == NBL_ADMINQ_NVM_BANK_REPAIR) + idxbuf[0] = rbank ? 0xFF : 0x00; + else if (op == NBL_ADMINQ_NVM_BANK_SWITCH) + idxbuf[0] = rbank ? 0x00 : 0xFF; + + idxbuf[1] = 0x5A; + strscpy((char *)&idxbuf[4080], "M181XXSRIS", NBL_ADMINQ_IDX_LEN - 4080); + + ret |= nbl_res_adminq_flash_erase(res_mgt, bank_id, 0, NBL_ADMINQ_IDX_LEN); + ret |= nbl_res_adminq_flash_write(res_mgt, bank_id, 0, NBL_ADMINQ_IDX_LEN, idxbuf); + + ret |= nbl_res_adminq_flash_read(res_mgt, bank_id, 0, sizeof(nvmidx), (u8 *)&nvmidx); + if (ret) + goto out; + + if (op == NBL_ADMINQ_NVM_BANK_SWITCH) + rbank = !rbank; + + if (((nvmidx >> 2) & 1) != rbank) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "S0 update bank index is %d but read back index is %d", + rbank, (nvmidx >> 2) & 1); + ret = -EFAULT; + goto out; + } + +out: + kfree(idxbuf); + return ret; +} + +static int nbl_res_adminq_flash_verify(struct nbl_resource_mgt *res_mgt, int *rbank) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_chan_send_info chan_send; + int verify_bank, sign0, sign1, ret = 0; + + verify_bank = 0; + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK, &verify_bank, sizeof(verify_bank), + &sign0, sizeof(sign0), 1); + ret |= chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + + verify_bank = 1; + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK, &verify_bank, sizeof(verify_bank), + &sign1, sizeof(sign1), 1); + ret |= chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + + sign0 = !sign0; + sign1 = !sign1; + + if (ret || (sign0 != 0 && sign0 != 1) || (sign1 != 0 && sign1 != 1) || (!sign0 && !sign1)) { + nbl_err(common, NBL_DEBUG_ADMINQ, + "Verify signature both invalid, ret %d, sign0 %d, sign1 %d", + ret, sign0, sign1); + return -EFAULT; + } + + if (sign0 != sign1) { + nbl_warn(common, NBL_DEBUG_ADMINQ, "WARN: bank0 and bank1 signature: %s/%s", + sign0 ? "pass" : "fail", sign1 ? 
"pass" : "fail"); + + /* Set rbank to fail bank to because we will switch bank idx next */ + if (sign0) + *rbank = 1; + else if (sign1) + *rbank = 0; + else + return -EFAULT; + } + + return 0; +} + +static int nbl_res_adminq_flash_lock(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + u32 success = 0, ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_FLASH_LOCK, + NULL, 0, &success, sizeof(success), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + return ret; + + return !success; +} + +static int nbl_res_adminq_flash_unlock(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + u32 success = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_FLASH_UNLOCK, NULL, 0, &success, sizeof(success), 1); + return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); +} + +static int nbl_res_adminq_flash_prepare(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 nvmidx0, nvmidx1; + int rbank, ret = 0; + + ret = nbl_res_adminq_get_nvm_bank_index(res_mgt, &rbank); + if (ret || (rbank != 0 && rbank != 1)) + return -EFAULT; + + ret |= nbl_res_adminq_flash_read(res_mgt, BANKID_SR_BANK0, 0, + sizeof(nvmidx0), (u8 *)&nvmidx0); + ret |= nbl_res_adminq_flash_read(res_mgt, BANKID_SR_BANK1, 0, + sizeof(nvmidx1), (u8 *)&nvmidx1); + if (ret) + return ret; + + if ((((nvmidx0 >> 2) & 1) != rbank)) + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK0, + NBL_ADMINQ_NVM_BANK_REPAIR); + + if ((((nvmidx1 >> 2) & 1) != rbank)) + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK1, + NBL_ADMINQ_NVM_BANK_REPAIR); + + return ret; +} + +static int nbl_res_adminq_flash_image(void *priv, u32 module, const u8 *data, size_t len) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + int rbank, write_bank, ret = 0; + + switch (module) { + case NBL_ADMINQ_BANK_INDEX_SPI_BOOT: + ret |= nbl_res_adminq_flash_erase(res_mgt, BANKID_BOOT_BANK, 0, len); + ret |= nbl_res_adminq_flash_write(res_mgt, BANKID_BOOT_BANK, 0, len, data); + + break; + case NBL_ADMINQ_BANK_INDEX_NVM_BANK: + if (nbl_res_adminq_get_nvm_bank_index(res_mgt, &rbank)) + return -EFAULT; + + write_bank = rbank ? 
BANKID_NVM_BANK0 : BANKID_NVM_BANK1; + + ret |= nbl_res_adminq_flash_erase(res_mgt, write_bank, 0, len); + ret |= nbl_res_adminq_flash_write(res_mgt, write_bank, 0, len, data); + + break; + default: + return 0; + } + + return ret; +} + +static int nbl_res_adminq_flash_activate(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + int rbank, ret = 0; + + ret = nbl_res_adminq_get_nvm_bank_index(res_mgt, &rbank); + if (ret || (rbank != 0 && rbank != 1)) + return -EFAULT; + + ret = nbl_res_adminq_flash_verify(res_mgt, &rbank); + if (ret) + return ret; + + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK0, + NBL_ADMINQ_NVM_BANK_SWITCH); + if (ret) + return ret; + + ret = nbl_res_adminq_flash_set_nvm_bank(res_mgt, rbank, BANKID_SR_BANK1, + NBL_ADMINQ_NVM_BANK_SWITCH); + + return ret; +} + +/* get_emp_version is deprecated, repalced by get_firmware_version, 0x8102 */ +static int nbl_res_adminq_get_firmware_version(void *priv, char *firmware_verion) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_nvm_version_resp resp_param; + int ret = 0; + u32 version_type = NBL_FW_VERSION_RUNNING_BANK; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_GET_NVM_VERSION, + &version_type, sizeof(version_type), &resp_param, sizeof(resp_param), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_GET_NVM_VERSION); + return ret; + } + + if (!memcmp(resp_param.magic, FIRMWARE_MAGIC, sizeof(resp_param.magic))) { + snprintf(firmware_verion, ETHTOOL_FWVERS_LEN, + "%d.%d.%d build %04d%02d%02d %08x", + BCD2BYTE((resp_param.version >> 16) & 0xFF), + BCD2BYTE((resp_param.version >> 8) & 0xFF), + BCD2BYTE(resp_param.version & 0xFF), + BCD2SHORT((resp_param.build_date >> 16) & 0xFFFF), + BCD2BYTE((resp_param.build_date >> 8) & 0xFF), + BCD2BYTE(resp_param.build_date & 0xFF), + resp_param.build_hash); + } else { + dev_err(dev, "adminq msg firmware verion magic check failed\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_res_adminq_set_sfp_state(void *priv, u8 eth_id, u8 state) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + key = NBL_PORT_KEY_MODULE_SWITCH; + if (state) + data = NBL_PORT_SFP_ON + (key << NBL_PORT_KEY_KEY_SHIFT); + else + data = NBL_PORT_SFP_OFF + (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," + " eth_id:%d, sfp %s\n", + 
ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id], + state ? "on" : "off"); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +int nbl_res_open_sfp(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + return nbl_res_adminq_set_sfp_state(res_mgt, eth_id, NBL_SFP_MODULE_ON); +} + +static int nbl_res_adminq_setup_loopback(void *priv, u32 eth_id, u32 enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + key = NBL_PORT_KEY_LOOPBACK; + if (enable) + data = NBL_PORT_ENABLE_LOOPBACK + (key << NBL_PORT_KEY_KEY_SHIFT); + else + data = NBL_PORT_DISABLE_LOOPBCK + (key << NBL_PORT_KEY_KEY_SHIFT); + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," + " eth_id:%d, %s eth loopback\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id], + enable ? "enable" : "disable"); + + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static bool nbl_res_adminq_check_fw_heartbeat(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + unsigned long check_time; + u32 seq_acked; + + if (adminq_mgt->fw_resetting) { + adminq_mgt->fw_last_hb_seq++; + return false; + } + + check_time = jiffies; + if (time_before(check_time, adminq_mgt->fw_last_hb_time + 5 * HZ)) + return true; + + seq_acked = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (adminq_mgt->fw_last_hb_seq == seq_acked) { + adminq_mgt->fw_last_hb_seq++; + adminq_mgt->fw_last_hb_time = check_time; + phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), adminq_mgt->fw_last_hb_seq); + return true; + } + + return false; +} + +static bool nbl_res_adminq_check_fw_reset(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u32 seq_acked; + + seq_acked = phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (adminq_mgt->fw_last_hb_seq != seq_acked) { + phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), adminq_mgt->fw_last_hb_seq); + return false; + } + + adminq_mgt->fw_resetting = false; + wake_up(&adminq_mgt->wait_queue); + return true; +} + +static int nbl_res_adminq_get_port_attributes(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info 
chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 port_caps = 0; + u64 port_advertising = 0; + u64 key = 0; + int eth_id = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + key = NBL_PORT_KEY_CAPABILITIES; + port_caps = 0; + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_READ; + param->data[0] = key << NBL_PORT_KEY_KEY_SHIFT; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, (void *)&port_caps, sizeof(port_caps), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," + " eth_id:%d, get_port_caps\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + eth_info->port_caps[eth_id] = port_caps & NBL_PORT_KEY_DATA_MASK; + + dev_info(dev, "ctrl dev get eth %d port caps: %llx\n", + eth_info->logic_eth_id[eth_id], + eth_info->port_caps[eth_id]); + } + + for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + key = NBL_PORT_KEY_ADVERT; + port_advertising = 0; + + memset(param, 0, param_len); + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_READ; + param->data[0] = key << NBL_PORT_KEY_KEY_SHIFT; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, + (void *)&port_advertising, sizeof(port_advertising), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x," + " eth_id:%d, port_advertising\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + port_advertising = port_advertising & NBL_PORT_KEY_DATA_MASK; + /* set default FEC mode: auto */ + port_advertising = port_advertising & ~NBL_PORT_CAP_FEC_MASK; + port_advertising += BIT(NBL_PORT_CAP_FEC_RS); + port_advertising += BIT(NBL_PORT_CAP_FEC_BASER); + /* set default pause: tx on, rx on */ + port_advertising = port_advertising & ~NBL_PORT_CAP_PAUSE_MASK; + port_advertising += BIT(NBL_PORT_CAP_TX_PAUSE); + port_advertising += BIT(NBL_PORT_CAP_RX_PAUSE); + eth_info->port_advertising[eth_id] = port_advertising; + + dev_info(dev, "ctrl dev get eth %d port advertising: %llx\n", + eth_info->logic_eth_id[eth_id], + eth_info->port_advertising[eth_id]); + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_enable_port(void *priv, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int eth_id = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + + if (enable) { + key = NBL_PORT_KEY_ENABLE; + data = NBL_PORT_FLAG_ENABLE_NOTIFY + (key << NBL_PORT_KEY_KEY_SHIFT); + } else { + key = NBL_PORT_KEY_DISABLE; + data = key << NBL_PORT_KEY_KEY_SHIFT; + } + + for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + 
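/* Descriptive comment (added): switch the optical module on before
+		 * writing the enable/disable attribute for this eth port.
+		 */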
+		nbl_res_adminq_set_sfp_state(res_mgt, eth_id, NBL_SFP_MODULE_ON);
+
+		memset(param, 0, param_len);
+		param->id = eth_id;
+		param->subop = NBL_PORT_SUBOP_WRITE;
+		param->data[0] = data;
+
+		NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID,
+			      NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES,
+			      param, param_len, NULL, 0, 1);
+		ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send);
+		if (ret) {
+			dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x,"
+				" eth_id:%d, %s port\n",
+				ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES,
+				eth_info->logic_eth_id[eth_id], enable ? "enable" : "disable");
+			kfree(param);
+			return ret;
+		}
+
+		dev_info(dev, "ctrl dev %s eth %d\n", enable ? "enable" : "disable",
+			 eth_info->logic_eth_id[eth_id]);
+	}
+
+	kfree(param);
+	return 0;
+}
+
+static int nbl_res_adminq_get_special_port_type(struct nbl_resource_mgt *res_mgt, u8 eth_id)
+{
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	u8 port_type = NBL_PORT_TYPE_UNKNOWN;
+	u8 cable_tech = 0;
+	int ret;
+
+	ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, 0);
+	if (ret) {
+		dev_err(dev, "eth %d get_module_eeprom_info failed %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		port_type = NBL_PORT_TYPE_UNKNOWN;
+		return port_type;
+	}
+
+	ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0,
+						    0, 0, SFF8636_DEVICE_TECH_OFFSET,
+						    1, &cable_tech);
+	if (ret) {
+		dev_err(dev, "eth %d get_module_eeprom_info failed %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		port_type = NBL_PORT_TYPE_UNKNOWN;
+		return port_type;
+	}
+	cable_tech = (cable_tech >> 4) & 0x0f;
+	switch (cable_tech) {
+	case SFF8636_TRANSMIT_FIBER_850nm_VCSEL:
+	case SFF8636_TRANSMIT_FIBER_1310nm_VCSEL:
+	case SFF8636_TRANSMIT_FIBER_1550nm_VCSEL:
+	case SFF8636_TRANSMIT_FIBER_1310nm_FP:
+	case SFF8636_TRANSMIT_FIBER_1310nm_DFB:
+	case SFF8636_TRANSMIT_FIBER_1550nm_DFB:
+	case SFF8636_TRANSMIT_FIBER_1310nm_EML:
+	case SFF8636_TRANSMIT_FIBER_1550nm_EML:
+	case SFF8636_TRANSMIT_FIBER_1490nm_DFB:
+		port_type = NBL_PORT_TYPE_FIBRE;
+		break;
+	case SFF8636_TRANSMIT_COPPER_UNEQUA:
+	case SFF8636_TRANSMIT_COPPER_PASSIVE_EQUALIZED:
+	case SFF8636_TRANSMIT_COPPER_NEAR_FAR_END:
+	case SFF8636_TRANSMIT_COPPER_FAR_END:
+	case SFF8636_TRANSMIT_COPPER_NEAR_END:
+	case SFF8636_TRANSMIT_COPPER_LINEAR_ACTIVE:
+		port_type = NBL_PORT_TYPE_COPPER;
+		break;
+	default:
+		dev_err(dev, "eth %d unknown port_type\n", eth_info->logic_eth_id[eth_id]);
+		port_type = NBL_PORT_TYPE_UNKNOWN;
+		break;
+	}
+	return port_type;
+}
+
+static int nbl_res_adminq_get_common_port_type(struct nbl_resource_mgt *res_mgt, u8 eth_id)
+{
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	u8 data[SFF_8472_CABLE_SPEC_COMP + 1];
+	u8 cable_tech = 0;
+	u8 cable_comp = 0;
+	u8 port_type = NBL_PORT_TYPE_UNKNOWN;
+	int ret;
+
+	ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, 0,
+						    SFF_8472_CABLE_SPEC_COMP + 1, data);
+	if (ret) {
+		dev_err(dev, "eth %d get_module_eeprom_info failed %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		port_type = NBL_PORT_TYPE_UNKNOWN;
+		return port_type;
+	}
+
+	cable_tech = data[SFF_8472_CABLE_TECHNOLOGY];
+
+	if (cable_tech & SFF_PASSIVE_CABLE) {
+		cable_comp = data[SFF_8472_CABLE_SPEC_COMP];
+
+		/* determine if the port is a copper cable */
+		if (cable_comp == SFF_COPPER_UNSPECIFIED ||
+		    cable_comp == SFF_COPPER_8431_APPENDIX_E)
+			port_type = NBL_PORT_TYPE_COPPER;
+		else
+			port_type = NBL_PORT_TYPE_FIBRE;
+	} else if (cable_tech & SFF_ACTIVE_CABLE) {
+		cable_comp = data[SFF_8472_CABLE_SPEC_COMP];
+
+		/* determine if the port is a copper cable */
+		if (cable_comp == SFF_COPPER_UNSPECIFIED ||
+		    cable_comp == SFF_COPPER_8431_APPENDIX_E ||
+		    cable_comp == SFF_COPPER_8431_LIMITING)
+			port_type = NBL_PORT_TYPE_COPPER;
+		else
+			port_type = NBL_PORT_TYPE_FIBRE;
+	} else {
+		port_type = NBL_PORT_TYPE_FIBRE;
+	}
+
+	return port_type;
+}
+
+static int nbl_res_adminq_get_port_type(struct nbl_resource_mgt *res_mgt, u8 eth_id)
+{
+	if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G)
+		return nbl_res_adminq_get_special_port_type(res_mgt, eth_id);
+
+	return nbl_res_adminq_get_common_port_type(res_mgt, eth_id);
+}
+
+static s32 nbl_res_adminq_get_module_bitrate(struct nbl_resource_mgt *res_mgt, u8 eth_id)
+{
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	u8 data[SFF_8472_SIGNALING_RATE_MAX + 1];
+	u32 result;
+	u8 br_nom;
+	u8 br_max;
+	u8 identifier;
+	u8 encoding = 0;
+	int port_max_rate;
+	int ret;
+
+	if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) {
+		ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, 0);
+		if (ret) {
+			dev_err(dev, "eth %d get_module_eeprom_info failed %d\n",
+				eth_info->logic_eth_id[eth_id], ret);
+			return NBL_PORT_MAX_RATE_UNKNOWN;
+		}
+	}
+
+	ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, 0,
+						    SFF_8472_SIGNALING_RATE_MAX + 1, data);
+	if (ret) {
+		dev_err(dev, "eth %d get_module_eeprom_info failed %d\n",
+			eth_info->logic_eth_id[eth_id], ret);
+		return NBL_PORT_MAX_RATE_UNKNOWN;
+	}
+
+	if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) {
+		ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id,
+							    I2C_DEV_ADDR_A0, 0, 0,
+							    SFF_8636_VENDOR_ENCODING,
+							    1, &encoding);
+		if (ret) {
+			dev_err(dev, "eth %d get_module_eeprom_info failed %d\n",
+				eth_info->logic_eth_id[eth_id], ret);
+			return NBL_PORT_MAX_RATE_UNKNOWN;
+		}
+	}
+
+	br_nom = data[SFF_8472_SIGNALING_RATE];
+	br_max = data[SFF_8472_SIGNALING_RATE_MAX];
+	identifier = data[SFF_8472_IDENTIFIER];
+
+	/* sff-8472 section 5.6 */
+	if (br_nom == 255)
+		result = (u32)br_max * 250;
+	else if (br_nom == 0)
+		result = 0;
+	else
+		result = (u32)br_nom * 100;
+
+	switch (result / 1000) {
+	case 25:
+		port_max_rate = NBL_PORT_MAX_RATE_25G;
+		break;
+	case 10:
+		port_max_rate = NBL_PORT_MAX_RATE_10G;
+		break;
+	case 1:
+		port_max_rate = NBL_PORT_MAX_RATE_1G;
+		break;
+	default:
+		port_max_rate = NBL_PORT_MAX_RATE_UNKNOWN;
+		break;
+	}
+
+	if (identifier == SFF_IDENTIFIER_QSFP28)
+		port_max_rate = NBL_PORT_MAX_RATE_100G;
+
+	if (identifier == SFF_IDENTIFIER_PAM4 || encoding == SFF_8636_ENCODING_PAM4)
+		port_max_rate = NBL_PORT_MAX_RATE_100G_PAM4;
+
+	return port_max_rate;
+}
+
+static void nbl_res_eth_task_schedule(struct nbl_adminq_mgt *adminq_mgt)
+{
+	nbl_common_queue_work(&adminq_mgt->eth_task, true, false);
+}
+
+static int nbl_res_adminq_get_bond_link_state(struct nbl_resource_mgt *res_mgt, u8 eth_id)
+{
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt);
+	struct nbl_eth_bond_entry *entry = NULL;
+	int lag_id = nbl_res_eth_id_to_lag_id(res_mgt, eth_id);
+	int i, link_state = 0;
+
+	if (lag_id < 0 || lag_id >= NBL_LAG_MAX_NUM)
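+		/* not part of any bond: report this port's own link state */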
+		return eth_info->link_state[eth_id];
+
+	/* bond_link_state will be 1 if any eth port is up */
+	entry = &eth_bond_info->entry[lag_id];
+	for (i = 0; i < entry->lag_num && NBL_ETH_BOND_VALID_PORT(i); i++)
+		link_state |= !!(eth_info->link_state[entry->eth_id[i]]);
+
+	return link_state;
+}
+
+static int nbl_res_adminq_handle_link_state_update(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data;
+	struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt);
+	struct nbl_event_link_status_update_data *data =
+		(struct nbl_event_link_status_update_data *)event_data;
+	int i;
+
+	mutex_lock(&adminq_mgt->eth_lock);
+
+	for (i = 0; i < data->num; i++)
+		adminq_mgt->link_state_changed[data->eth_id[i]] = 1;
+
+	mutex_unlock(&adminq_mgt->eth_lock);
+
+	nbl_res_eth_task_schedule(adminq_mgt);
+
+	return 0;
+}
+
+static void nbl_res_adminq_recv_port_notify(void *priv, void *data)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common);
+	struct nbl_port_notify *notify;
+	u8 last_module_inplace = 0;
+	u8 last_link_state = 0;
+	int i, eth_id = 0, eth_tmp = 0, lag_id = -1;
+
+	notify = (struct nbl_port_notify *)data;
+	eth_id = notify->id;
+
+	dev_info(dev, "eth_id:%d link_state:%d, module_inplace:%d, speed:%d, flow_ctrl:%d, fec:%d, advertising:%llx, lp_advertising:%llx\n",
+		 eth_info->logic_eth_id[eth_id], notify->link_state, notify->module_inplace,
+		 notify->speed * 10, notify->flow_ctrl,
+		 notify->fec, notify->advertising, notify->lp_advertising);
+
+	mutex_lock(&adminq_mgt->eth_lock);
+
+	last_module_inplace = eth_info->module_inplace[eth_id];
+	last_link_state = eth_info->link_state[eth_id];
+
+	eth_info->link_state[eth_id] = notify->link_state;
+	eth_info->module_inplace[eth_id] = notify->module_inplace;
+	/* Do not update the speed while the eth link is down.
+	 * When autoneg is configured off, ethtool reads the speed and sets it
+	 * together with the disable-autoneg command; if the eth link is down,
+	 * the speed reported by the EMP is not credible, so we need to
+	 * preserve the last link-up speed.
+	 */
+	if (notify->link_state || !eth_info->link_speed[eth_id])
+		eth_info->link_speed[eth_id] = notify->speed * 10;
+	eth_info->active_fc[eth_id] = notify->flow_ctrl;
+	eth_info->active_fec[eth_id] = notify->fec;
+	eth_info->port_lp_advertising[eth_id] = notify->lp_advertising;
+	eth_info->port_advertising[eth_id] = notify->advertising;
+
+	if (!last_module_inplace && notify->module_inplace) {
+		adminq_mgt->module_inplace_changed[eth_id] = 1;
+		nbl_res_eth_task_schedule(adminq_mgt);
+	}
+
+	if (last_link_state != notify->link_state) {
+		/* If this eth belongs to a bond, any link_state update has to
+		 * notify all vfs for all eths in this bond group.
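+		 * Marking every member port as changed makes the eth task
+		 * recompute the bond link state (the OR of all members, see
+		 * nbl_res_adminq_get_bond_link_state()) and notify the VFs of
+		 * each member.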
 */ + lag_id = nbl_res_eth_id_to_lag_id(res_mgt, eth_id); + if (lag_id >= 0 && lag_id < NBL_LAG_MAX_NUM) + for (i = 0; i < eth_bond_info->entry[lag_id].lag_num && + NBL_ETH_BOND_VALID_PORT(i); i++) { + eth_tmp = eth_bond_info->entry[lag_id].eth_id[i]; + adminq_mgt->link_state_changed[eth_tmp] = 1; + } + + adminq_mgt->link_state_changed[eth_id] = 1; + nbl_res_eth_task_schedule(adminq_mgt); + } + + mutex_unlock(&adminq_mgt->eth_lock); +} + +static int nbl_res_adminq_set_port_advertising(void *priv, + struct nbl_port_advertising *advertising) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + int eth_id = 0; + u64 key = 0; + u64 data = 0; + u64 new_advert = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + if (!param) + return -ENOMEM; + + eth_id = advertising->eth_id; + new_advert = eth_info->port_advertising[eth_id]; + + /* set autoneg */ + if (advertising->autoneg != 0) { + new_advert = new_advert | NBL_PORT_CAP_AUTONEG_MASK | NBL_PORT_CAP_PAUSE_MASK; + new_advert |= BIT(NBL_PORT_CAP_AUTONEG); + } else { + new_advert = new_advert & ~NBL_PORT_CAP_AUTONEG_MASK; + } + + if (advertising->active_fc != 0) { + new_advert = new_advert & ~NBL_PORT_CAP_PAUSE_MASK; + if (advertising->active_fc & NBL_PORT_TX_PAUSE) + new_advert |= BIT(NBL_PORT_CAP_TX_PAUSE); + if (advertising->active_fc & NBL_PORT_RX_PAUSE) + new_advert |= BIT(NBL_PORT_CAP_RX_PAUSE); + } + + /* set FEC */ + if (advertising->active_fec != 0) { + new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK & ~BIT(NBL_PORT_CAP_FEC_AUTONEG); + + /* when ethtool sets FEC_AUTO, apply the default fec mode */ + if (advertising->active_fec == NBL_PORT_FEC_AUTO && + (!advertising->autoneg && + !(eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_AUTONEG)))) { + advertising->active_fec = NBL_PORT_FEC_OFF; + if (eth_info->link_speed[eth_id] == SPEED_1000) + advertising->active_fec = NBL_ETH_1G_DEFAULT_FEC_MODE; + if (eth_info->link_speed[eth_id] == SPEED_10000) + advertising->active_fec = NBL_ETH_10G_DEFAULT_FEC_MODE; + if (eth_info->link_speed[eth_id] == SPEED_25000) + advertising->active_fec = NBL_ETH_25G_DEFAULT_FEC_MODE; + } + + if (advertising->active_fec == NBL_PORT_FEC_OFF) + new_advert |= BIT(NBL_PORT_CAP_FEC_OFF); + if (advertising->active_fec == NBL_PORT_FEC_RS) + new_advert |= BIT(NBL_PORT_CAP_FEC_RS); + if (advertising->active_fec == NBL_PORT_FEC_BASER) + new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); + if (advertising->active_fec == NBL_PORT_FEC_AUTO) { + new_advert |= NBL_PORT_CAP_FEC_MASK; + new_advert &= ~BIT(NBL_PORT_CAP_FEC_OFF); + if (eth_info->port_caps[eth_id] & BIT(NBL_PORT_CAP_FEC_AUTONEG)) + new_advert |= BIT(NBL_PORT_CAP_FEC_AUTONEG); + } + } + + /* set speed */ + if (advertising->speed_advert != 0) { + new_advert = (new_advert & (NBL_PORT_CAP_AUTONEG_MASK | NBL_PORT_CAP_FEC_MASK | + NBL_PORT_CAP_PAUSE_MASK | BIT(NBL_PORT_CAP_FEC_AUTONEG))) | + advertising->speed_advert; + } + + if (new_advert & NBL_PORT_CAP_SPEED_100G_MASK) { /* 100G */ + if (new_advert & BIT(NBL_PORT_CAP_FEC_BASER)) { + dev_err(dev, "BaseR FEC cannot be set when speed is 100G\n"); + kfree(param); + return -EOPNOTSUPP; + } + } else if (!(new_advert & NBL_PORT_CAP_SPEED_50G_MASK) && + !(new_advert & NBL_PORT_CAP_SPEED_25G_MASK) && + new_advert & NBL_PORT_CAP_SPEED_10G_MASK) { /* 10G */ + if (new_advert & BIT(NBL_PORT_CAP_FEC_RS)) { + new_advert = new_advert & ~NBL_PORT_CAP_FEC_MASK; + new_advert |= BIT(NBL_PORT_CAP_FEC_BASER); + dev_notice(dev, "10G speed does not support RS FEC, falling back to BaseR\n"); + dev_notice(dev, "set new_advert:%llx\n", new_advert); + } + } + + if (eth_info->port_max_rate[eth_id] != NBL_PORT_MAX_RATE_100G_PAM4) + new_advert &= ~NBL_PORT_CAP_PAM4_MASK; + else + new_advert |= NBL_PORT_CAP_PAM4_MASK; + + dev_notice(dev, "set NBL_PORT_KEY_ADVERT eth id %d new_advert 0x%llx\n", + eth_info->logic_eth_id[eth_id], new_advert); + + key = NBL_PORT_KEY_ADVERT; + data = new_advert + (key << NBL_PORT_KEY_KEY_SHIFT); + + param->id = advertising->eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, set_port_advertising\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return -EIO; + } + + eth_info->port_advertising[eth_id] = new_advert; + + kfree(param); + return 0; +} + +static int nbl_res_adminq_get_port_state(void *priv, u8 eth_id, struct nbl_port_state *port_state) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + port_state->port_caps = eth_info->port_caps[eth_id]; + port_state->port_advertising = eth_info->port_advertising[eth_id]; + port_state->port_lp_advertising = eth_info->port_lp_advertising[eth_id]; + port_state->link_speed = eth_info->link_speed[eth_id]; + port_state->active_fc = eth_info->active_fc[eth_id]; + port_state->active_fec = eth_info->active_fec[eth_id]; + port_state->link_state = eth_info->link_state[eth_id]; + port_state->module_inplace = eth_info->module_inplace[eth_id]; + port_state->fw_port_max_speed = res_mgt->resource_info->board_info.eth_speed; + port_state->module_repluged = eth_info->module_repluged[eth_id]; + eth_info->module_repluged[eth_id] = 0; + if (port_state->module_inplace) { + port_state->port_type = eth_info->port_type[eth_id]; + port_state->port_max_rate = eth_info->port_max_rate[eth_id]; + } else { + port_state->port_caps = port_state->port_caps & ~NBL_PORT_CAP_FEC_MASK; + port_state->port_caps = port_state->port_caps & ~NBL_PORT_CAP_PAUSE_MASK; + port_state->port_caps = port_state->port_caps & ~NBL_PORT_CAP_AUTONEG_MASK; + port_state->port_advertising = + port_state->port_advertising & ~NBL_PORT_CAP_FEC_MASK; + port_state->port_advertising = + port_state->port_advertising & ~NBL_PORT_CAP_PAUSE_MASK; + port_state->port_advertising = + port_state->port_advertising & ~NBL_PORT_CAP_AUTONEG_MASK; + } + + return 0; +} + +static int nbl_res_adminq_get_module_info(void *priv, u8 eth_id, struct ethtool_modinfo *info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + u8 sff8472_rev; + u8 addr_mode; + bool page_swap = false; + u8 module_inplace = 0; /* 1 inplace, 0 not inplace */ + u8 data[SFF_8472_COMPLIANCE + 1]; + int ret; + + module_inplace = eth_info->module_inplace[eth_id]; + if (!module_inplace) { + dev_err(dev, "Optical module of ETH port %u is not inplace\n", + 
eth_info->logic_eth_id[eth_id]); + return -EIO; + } + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + info->type = ETH_MODULE_SFF_8636; + info->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + return 0; + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, 0, + SFF_8472_COMPLIANCE + 1, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + + sff8472_rev = data[SFF_8472_COMPLIANCE]; + addr_mode = data[SFF_8472_DIAGNOSTIC]; + + /* check whether page 0xA2 can be accessed directly, see sff-8472 */ + if (addr_mode & SFF_8472_ADDRESSING_MODE) { + dev_err(dev, "Address change required to access page 0xA2, which is not supported\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == SFF_8472_UNSUPPORTED || page_swap || + !(addr_mode & SFF_DDM_IMPLEMENTED)) { + /* We have an SFP, but it does not support SFF-8472 */ + info->type = ETH_MODULE_SFF_8079; + info->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + info->type = ETH_MODULE_SFF_8472; + info->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int nbl_res_adminq_get_module_eeprom(void *priv, u8 eth_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + u8 module_inplace = 0; /* 1 inplace, 0 not inplace */ + u32 start = eeprom->offset; + u32 length = eeprom->len; + u8 turn_page, offset; + int ret; + + if (eeprom->len == 0) + return -EINVAL; + + module_inplace = eth_info->module_inplace[eth_id]; + if (!module_inplace) { + dev_err(dev, "Optical module of ETH port %u is not inplace\n", + eth_info->logic_eth_id[eth_id]); + return -EIO; + } + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + while (start < ETH_MODULE_SFF_8636_MAX_LEN) { + length = SFF_8638_PAGESIZE; + if (start + length > ETH_MODULE_SFF_8636_MAX_LEN) + length = ETH_MODULE_SFF_8636_MAX_LEN - start; + + nbl_res_get_module_eeprom_page(start, &turn_page, &offset); + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, turn_page); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, + I2C_DEV_ADDR_A0, 0, 0, + offset, length, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + start += length; + data += length; + } + return 0; + } + + /* Read A0 portion of eth EEPROM */ + if (start < ETH_MODULE_SFF_8079_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8079_LEN) + length = ETH_MODULE_SFF_8079_LEN - start; + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, 0, 0, + start, length, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + start += length; + data += length; + length = eeprom->len - length; + } + + /* Read A2 portion of eth EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8079_LEN; + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A2, 0, 0, + start, length, data); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return -EIO; + } + } + + return 0; +} + +static int nbl_res_adminq_get_link_state(void *priv, u8 eth_id, + struct nbl_eth_link_info *eth_link_info) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + eth_link_info->link_status = eth_info->link_state[eth_id]; + eth_link_info->link_speed = eth_info->link_speed[eth_id]; + + return 0; +} + +static int nbl_res_adminq_get_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + u64 data = 0, key = 0, result = 0; + int param_len = 0, i, ret; + u8 reverse_mac[ETH_ALEN]; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + if (!param) + return -ENOMEM; + + key = NBL_PORT_KEY_MAC_ADDRESS; + + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_READ; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, &result, sizeof(result), 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + memcpy(reverse_mac, &result, ETH_ALEN); + + /* convert MAC address (the firmware stores the bytes in reverse order) */ + for (i = 0; i < ETH_ALEN; i++) + mac[i] = reverse_mac[ETH_ALEN - 1 - i]; + + kfree(param); + return 0; +} + +int nbl_res_get_eth_mac(struct nbl_resource_mgt *res_mgt, u8 *mac, u8 eth_id) +{ + return nbl_res_adminq_get_eth_mac_addr(res_mgt, mac, eth_id); +} + +static int nbl_res_adminq_set_eth_mac_addr(void *priv, u8 *mac, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + int i; + u8 reverse_mac[ETH_ALEN]; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + if (!param) + return -ENOMEM; + + key = NBL_PORT_KEY_MAC_ADDRESS; + + /* convert MAC address (the firmware expects the bytes in reverse order) */ + for (i = 0; i < ETH_ALEN; i++) + reverse_mac[i] = mac[ETH_ALEN - 1 - i]; + + memcpy(&data, reverse_mac, ETH_ALEN); + + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d, reverse_mac=0x%x:%x:%x:%x:%x:%x\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id], reverse_mac[0], + reverse_mac[1], reverse_mac[2], 
reverse_mac[3], + reverse_mac[4], reverse_mac[5]); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_ctrl_port_led(void *priv, u8 eth_id, + enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + if (!param) + return -ENOMEM; + + key = NBL_PORT_KRY_LED_BLINK; + + switch (led_ctrl) { + case NBL_LED_REG_ACTIVE: + data = 1; + break; + case NBL_LED_REG_INACTIVE: + data = 0; + break; + default: + kfree(param); + return 0; + } + + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "ctrl eth %d blink failed", eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_set_eth_pfc(void *priv, u8 eth_id, u8 *pfc) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + int i; + + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + if (!param) + return -ENOMEM; + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { + if (pfc[i]) + data |= 1 << i; + } + + key = NBL_PORT_KEY_SET_PFC_CFG; + data += (key << NBL_PORT_KEY_KEY_SHIFT); + + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x, eth_id:%d\n", + ret, NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + eth_info->logic_eth_id[eth_id]); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_pt_filter_in(struct nbl_resource_mgt *res_mgt, + struct nbl_passthrough_fw_cmd_param *param) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_res_fw_cmd_filter *filter; + + filter = nbl_common_get_hash_node(adminq_mgt->cmd_filter, &param->opcode); + if (filter && filter->in) + return filter->in(res_mgt, param->data, param->in_size); + + return 0; +} + +static int nbl_res_adminq_pt_filter_out(struct nbl_resource_mgt *res_mgt, + struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_res_fw_cmd_filter *filter; + int ret = 0; + + filter = nbl_common_get_hash_node(adminq_mgt->cmd_filter, &param->opcode); + if (filter && filter->out) + ret = filter->out(res_mgt, param->data, param->in_size, + result->data, result->out_size); + + return ret; +} + +static int nbl_res_adminq_passthrough(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + u8 *in_data = NULL, *out_data = NULL; + int ret = 0; + + ret = nbl_res_adminq_pt_filter_in(res_mgt, param); + if (ret) + return ret; + + if (param->in_size) { + in_data = kzalloc(param->in_size, GFP_KERNEL); + if (!in_data) { + ret = -ENOMEM; + goto in_data_fail; + } + memcpy(in_data, param->data, param->in_size); + } + if (param->out_size) { + out_data = kzalloc(param->out_size, GFP_KERNEL); + if (!out_data) { + ret = -ENOMEM; + goto out_data_fail; + } + } + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, param->opcode, + in_data, param->in_size, out_data, param->out_size, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, param->opcode); + goto send_fail; + } + + result->opcode = param->opcode; + result->errcode = ret; + result->out_size = param->out_size; + if (result->out_size) + memcpy(result->data, out_data, param->out_size); + + nbl_res_adminq_pt_filter_out(res_mgt, param, result); + +send_fail: + kfree(out_data); +out_data_fail: + kfree(in_data); +in_data_fail: + return ret; +} + +static int nbl_res_adminq_update_ring_num(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_read_param *param; + struct nbl_net_ring_num_info *info; + int ret = 0; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto alloc_param_fail; + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto alloc_info_fail; + } + + param->resid = NBL_ADMINQ_PFA_TLV_NET_RING_NUM; + param->offset = 0; + param->len = sizeof(*info); + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ, + param, sizeof(*param), info, sizeof(*info), 1); + + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ); + goto send_fail; + } + + if (info->pf_def_max_net_qp_num && info->vf_def_max_net_qp_num) + memcpy(&res_info->net_ring_num_info, info, sizeof(res_info->net_ring_num_info)); + +send_fail: + kfree(info); +alloc_info_fail: + kfree(param); +alloc_param_fail: + return ret; +} + +static int nbl_res_adminq_rdma_cap_default(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = 0, vsi_id = 0; + int pf_num = NBL_RES_MGT_TO_PF_NUM(res_mgt); + int per_pf_num = (NBL_RES_RDMA_MAX - pf_num + 1) / pf_num, i, j; + int per_pf_vf_num = res_info->max_vf_num / pf_num; + int rdma_reserve = NBL_RES_MGT_TO_COMMON(res_mgt)->product_type == NBL_LEONIS_TYPE; + + per_pf_num = 
min_t(int, per_pf_num, per_pf_vf_num); + for (i = 0; i < pf_num; i++) { + vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, i, -1, NBL_VSI_DATA); + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + if (func_id < NBL_MAX_FUNC) + set_bit(func_id, res_info->rdma_info.func_cap); + + /* If we have reserved rdma aux dev, remove these on the last pf */ + for (j = 0; j < per_pf_num - (!(pf_num - i - 1)) * rdma_reserve; j++) { + vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, i, j, NBL_VSI_DATA); + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + if (func_id < NBL_MAX_FUNC) + set_bit(func_id, res_info->rdma_info.func_cap); + } + } + + return 0; +} + +static int nbl_res_adminq_rdma_cap_tlv(struct nbl_resource_mgt *res_mgt, + struct nbl_rdma_cap_info *info) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + int i, j; + + for (i = 0; i < NBL_RDMA_CAP_CMD_LEN; i++) + for (j = 0; j < BITS_PER_BYTE; j++) + if (info->rdma_func_bitmaps[i] & BIT(j)) + set_bit(i * BITS_PER_BYTE + j, res_info->rdma_info.func_cap); + + return 0; +} + +static int nbl_res_adminq_update_rdma_cap(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_read_param *param; + struct nbl_rdma_cap_info *info; + int ret = 0; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto alloc_param_fail; + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto alloc_info_fail; + } + + param->resid = NBL_ADMINQ_PFA_TLV_RDMA_CAP; + param->offset = 0; + param->len = sizeof(*info); + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ, + param, sizeof(*param), info, sizeof(*info), 1); + + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ); + info->valid = 1; + } + + /* For some reason, valid == 0 means valid, and 1 means invalid */ + if (info->valid) + nbl_res_adminq_rdma_cap_default(res_mgt); + else + nbl_res_adminq_rdma_cap_tlv(res_mgt, info); + + kfree(info); +alloc_info_fail: + kfree(param); +alloc_param_fail: + return ret; +} + +static u16 nbl_res_adminq_get_rdma_cap_num(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 rdma_cap_num = 0; + int i; + + for (i = 0; i < NBL_MAX_FUNC; i++) + if (test_bit(i, res_info->rdma_info.func_cap)) + rdma_cap_num++; + + return rdma_cap_num; +} + +static int nbl_res_adminq_update_rdma_mem_type(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_read_param *param; + struct nbl_rdma_mem_type_info *info; + int ret = 0; + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto alloc_param_fail; + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto alloc_info_fail; + } + + param->resid = 
NBL_ADMINQ_PFA_TLV_RDMA_MEM_TYPE; + param->offset = 0; + param->len = sizeof(*info); + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ, + param, sizeof(*param), info, sizeof(*info), 1); + + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_RESOURCE_READ); + goto send_fail; + } + + if (info->mem_type <= NBL_RDMA_MEM_TYPE_MAX) + res_info->rdma_info.mem_type = info->mem_type; + else + res_info->rdma_info.mem_type = NBL_RDMA_MEM_TYPE_MAX; + +send_fail: + kfree(info); +alloc_info_fail: + kfree(param); +alloc_param_fail: + return ret; +} + +static int nbl_res_adminq_set_ring_num(void *priv, struct nbl_fw_cmd_net_ring_num_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)); + struct nbl_chan_send_info chan_send; + struct nbl_chan_resource_write_param *data; + int data_len = sizeof(struct nbl_fw_cmd_net_ring_num_param); + int ret = 0; + + data = kzalloc(sizeof(*data) + data_len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->resid = NBL_ADMINQ_PFA_TLV_NET_RING_NUM; + data->offset = 0; + data->len = data_len; + + memcpy(data + 1, param, data_len); + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE, + data, sizeof(*data) + data_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d\n", ret); + + kfree(data); + return ret; +} + +static int nbl_res_adminq_restore_default_cfg(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_port_key *param; + int param_len = 0; + u64 data = 0; + u64 key = 0; + int ret; + + key = NBL_PORT_KEY_RESTORE_DEFAULTE_CFG; + data = (key << NBL_PORT_KEY_KEY_SHIFT); + param_len = sizeof(struct nbl_port_key) + 1 * sizeof(u64); + param = kzalloc(param_len, GFP_KERNEL); + if (!param) + return -ENOMEM; + param->id = eth_id; + param->subop = NBL_PORT_SUBOP_WRITE; + param->data[0] = data; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES, + param, param_len, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) { + dev_err(dev, "ctrl eth %d restore default cfg failed ret %d\n", + eth_info->logic_eth_id[eth_id], ret); + kfree(param); + return ret; + } + + kfree(param); + return 0; +} + +static int nbl_res_adminq_nway_reset(void *priv, u8 eth_id) +{ + return nbl_res_adminq_restore_default_cfg(priv, eth_id); +} + +static int nbl_res_adminq_init_port(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u8 eth_id; + + for_each_set_bit(eth_id, eth_info->eth_bitmap, NBL_MAX_ETHERNET) + nbl_res_adminq_restore_default_cfg(priv, eth_id); + + return 0; +} + +#define ADD_ETH_STATISTICS(name) {#name} +static struct nbl_leonis_eth_stats_info _eth_statistics[] = { + ADD_ETH_STATISTICS(eth_frames_tx), + ADD_ETH_STATISTICS(eth_frames_tx_ok), + 
ADD_ETH_STATISTICS(eth_frames_tx_badfcs), + ADD_ETH_STATISTICS(eth_unicast_frames_tx_ok), + ADD_ETH_STATISTICS(eth_multicast_frames_tx_ok), + ADD_ETH_STATISTICS(eth_broadcast_frames_tx_ok), + ADD_ETH_STATISTICS(eth_macctrl_frames_tx_ok), + ADD_ETH_STATISTICS(eth_fragment_frames_tx), + ADD_ETH_STATISTICS(eth_fragment_frames_tx_ok), + ADD_ETH_STATISTICS(eth_pause_frames_tx), + ADD_ETH_STATISTICS(eth_pause_macctrl_frames_tx), + ADD_ETH_STATISTICS(eth_pfc_frames_tx), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio0), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio1), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio2), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio3), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio4), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio5), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio6), + ADD_ETH_STATISTICS(eth_pfc_frames_tx_prio7), + ADD_ETH_STATISTICS(eth_verify_frames_tx), + ADD_ETH_STATISTICS(eth_respond_frames_tx), + ADD_ETH_STATISTICS(eth_frames_tx_64B), + ADD_ETH_STATISTICS(eth_frames_tx_65_to_127B), + ADD_ETH_STATISTICS(eth_frames_tx_128_to_255B), + ADD_ETH_STATISTICS(eth_frames_tx_256_to_511B), + ADD_ETH_STATISTICS(eth_frames_tx_512_to_1023B), + ADD_ETH_STATISTICS(eth_frames_tx_1024_to_1535B), + ADD_ETH_STATISTICS(eth_frames_tx_1536_to_2047B), + ADD_ETH_STATISTICS(eth_frames_tx_2048_to_MAXB), + ADD_ETH_STATISTICS(eth_undersize_frames_tx_goodfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_tx_goodfcs), + ADD_ETH_STATISTICS(eth_undersize_frames_tx_badfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_tx_badfcs), + ADD_ETH_STATISTICS(eth_octets_tx), + ADD_ETH_STATISTICS(eth_octets_tx_ok), + ADD_ETH_STATISTICS(eth_octets_tx_badfcs), + ADD_ETH_STATISTICS(eth_frames_rx), + ADD_ETH_STATISTICS(eth_frames_rx_ok), + ADD_ETH_STATISTICS(eth_frames_rx_badfcs), + ADD_ETH_STATISTICS(eth_undersize_frames_rx_goodfcs), + ADD_ETH_STATISTICS(eth_undersize_frames_rx_badfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_rx_goodfcs), + ADD_ETH_STATISTICS(eth_oversize_frames_rx_badfcs), + ADD_ETH_STATISTICS(eth_frames_rx_misc_error), + ADD_ETH_STATISTICS(eth_frames_rx_misc_dropped), + ADD_ETH_STATISTICS(eth_unicast_frames_rx_ok), + ADD_ETH_STATISTICS(eth_multicast_frames_rx_ok), + ADD_ETH_STATISTICS(eth_broadcast_frames_rx_ok), + ADD_ETH_STATISTICS(eth_pause_frames_rx), + ADD_ETH_STATISTICS(eth_pfc_frames_rx), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio0), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio1), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio2), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio3), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio4), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio5), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio6), + ADD_ETH_STATISTICS(eth_pfc_frames_rx_prio7), + ADD_ETH_STATISTICS(eth_macctrl_frames_rx), + ADD_ETH_STATISTICS(eth_verify_frames_rx_ok), + ADD_ETH_STATISTICS(eth_respond_frames_rx_ok), + ADD_ETH_STATISTICS(eth_fragment_frames_rx_ok), + ADD_ETH_STATISTICS(eth_fragment_rx_smdc_nocontext), + ADD_ETH_STATISTICS(eth_fragment_rx_smds_seq_error), + ADD_ETH_STATISTICS(eth_fragment_rx_smdc_seq_error), + ADD_ETH_STATISTICS(eth_fragment_rx_frag_cnt_error), + ADD_ETH_STATISTICS(eth_frames_assembled_ok), + ADD_ETH_STATISTICS(eth_frames_assembled_error), + ADD_ETH_STATISTICS(eth_frames_rx_64B), + ADD_ETH_STATISTICS(eth_frames_rx_65_to_127B), + ADD_ETH_STATISTICS(eth_frames_rx_128_to_255B), + ADD_ETH_STATISTICS(eth_frames_rx_256_to_511B), + ADD_ETH_STATISTICS(eth_frames_rx_512_to_1023B), + ADD_ETH_STATISTICS(eth_frames_rx_1024_to_1535B), + ADD_ETH_STATISTICS(eth_frames_rx_1536_to_2047B), + 
ADD_ETH_STATISTICS(eth_frames_rx_2048_to_MAXB), + ADD_ETH_STATISTICS(eth_octets_rx), + ADD_ETH_STATISTICS(eth_octets_rx_ok), + ADD_ETH_STATISTICS(eth_octets_rx_badfcs), + ADD_ETH_STATISTICS(eth_octets_rx_dropped), +}; + +static void nbl_res_adminq_get_private_stat_len(void *priv, u32 *len) +{ + *len = ARRAY_SIZE(_eth_statistics); +} + +static void nbl_res_adminq_get_private_stat_data(void *priv, u32 eth_id, u64 *data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + int data_length = sizeof(struct nbl_leonis_eth_stats); + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS, + &eth_id, sizeof(eth_id), data, data_length, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq get eth %d stats failed ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); +} + +static void nbl_res_adminq_fill_private_stat_strings(void *priv, u8 *strings) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(_eth_statistics); i++) { + snprintf(strings, ETH_GSTRING_LEN, "%s", _eth_statistics[i].descp); + strings += ETH_GSTRING_LEN; + } +} + +static u32 nbl_convert_temp_type_eeprom_offset(enum nbl_hwmon_type type) +{ + switch (type) { + case NBL_HWMON_TEMP_INPUT: + return SFF_8636_TEMP; + case NBL_HWMON_TEMP_MAX: + return SFF_8636_TEMP_MAX; + case NBL_HWMON_TEMP_CRIT: + return SFF_8636_TEMP_CIRT; + default: + return SFF_8636_TEMP; + } +} + +static u32 nbl_convert_temp_type_qsfp28_eeprom_offset(enum nbl_hwmon_type type) +{ + switch (type) { + case NBL_HWMON_TEMP_INPUT: + return SFF_8636_QSFP28_TEMP; + case NBL_HWMON_TEMP_MAX: + return SFF_8636_QSFP28_TEMP_MAX; + case NBL_HWMON_TEMP_CRIT: + return SFF_8636_QSFP28_TEMP_CIRT; + default: + return SFF_8636_QSFP28_TEMP; + } +} + +/* the return value is converted to millidegree Celsius (1/1000 degree C) */ +static int nbl_res_adminq_get_module_temp_common(void *priv, u8 eth_id, + enum nbl_hwmon_type type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct ethtool_modinfo info = {0}; + u32 offset; + int temp = 0; + int ret = 0; + + ret = nbl_res_adminq_get_module_info(res_mgt, eth_id, &info); + if (ret) { + dev_err(dev, "get_module_info eth id %d ret: %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + if (info.eeprom_len <= ETH_MODULE_SFF_8079_LEN) + return 0; + + offset = nbl_convert_temp_type_eeprom_offset(type); + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A2, + 0, 0, offset, 1, (u8 *)&temp); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + return temp * 1000; +} + +/* the return value is converted to millidegree Celsius (1/1000 degree C) */ +static int nbl_res_adminq_get_module_temp_special(struct nbl_resource_mgt *res_mgt, u8 eth_id, + enum nbl_hwmon_type type) +{ + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u32 addr; + u8 offset, turn_page; + int temp = 0; + int ret = 0; + + addr = nbl_convert_temp_type_qsfp28_eeprom_offset(type); + + nbl_res_get_module_eeprom_page(addr, 
&turn_page, &offset); + + ret = nbl_res_adminq_turn_module_eeprom_page(res_mgt, eth_id, turn_page); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + ret = nbl_res_adminq_get_module_eeprom_info(res_mgt, eth_id, I2C_DEV_ADDR_A0, + 0, 0, offset, 1, (u8 *)&temp); + if (ret) { + dev_err(dev, "eth %d get_module_eeprom_info failed %d\n", + eth_info->logic_eth_id[eth_id], ret); + return 0; + } + + return temp * 1000; +} + +static int nbl_res_adminq_get_module_temperature(void *priv, u8 eth_id, + enum nbl_hwmon_type type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + if (!eth_info->module_inplace[eth_id]) + return 0; + + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) + return nbl_res_adminq_get_module_temp_special(res_mgt, eth_id, type); + else + return nbl_res_adminq_get_module_temp_common(res_mgt, eth_id, type); +} + +static __maybe_unused int nbl_res_adminq_load_p4(void *priv, struct nbl_load_p4_param *p4_param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + struct nbl_chan_param_load_p4 *param; + int ret = 0; + + param = kzalloc(sizeof(*param) + p4_param->size, GFP_KERNEL); + if (!param) + return -ENOMEM; + + param->addr = p4_param->addr; + param->size = p4_param->size; + param->section_index = p4_param->section_index; + param->section_offset = p4_param->section_offset; + param->load_start = p4_param->start; + param->load_end = p4_param->end; + strscpy(param->name, p4_param->name, sizeof(param->name)); + memcpy(param->data, p4_param->data, p4_param->size); + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_LOAD_P4, + param, sizeof(*param) + p4_param->size, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_LOAD_P4); + + kfree(param); + return ret; +} + +static __maybe_unused int nbl_res_adminq_load_p4_default(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(res_mgt->common); + struct nbl_chan_send_info chan_send; + int ret = 0; + + NBL_CHAN_SEND(chan_send, NBL_CHAN_ADMINQ_FUNCTION_ID, NBL_CHAN_MSG_ADMINQ_LOAD_P4_DEFAULT, + NULL, 0, NULL, 0, 1); + ret = chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); + if (ret) + dev_err(dev, "adminq send msg failed with ret: %d, msg_type: 0x%x\n", + ret, NBL_CHAN_MSG_ADMINQ_LOAD_P4_DEFAULT); + + return ret; +} + +static void nbl_res_adminq_cfg_eth_bond_event(void *priv, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_event_callback event_callback = {0}; + + event_callback.callback_data = res_mgt; + event_callback.callback = nbl_res_adminq_handle_link_state_update; + + if (enable) + nbl_event_register(NBL_EVENT_LINK_STATE_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + else + nbl_event_unregister(NBL_EVENT_LINK_STATE_UPDATE, &event_callback, + 
NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); +} + +/* NBL_ADMINQ_SET_OPS(ops_name, func) + * + * Use X macros to reduce setup and removal code. + */ +#define NBL_ADMINQ_OPS_TBL \ +do { \ + NBL_ADMINQ_SET_OPS(get_firmware_version, nbl_res_adminq_get_firmware_version); \ + NBL_ADMINQ_SET_OPS(flash_lock, nbl_res_adminq_flash_lock); \ + NBL_ADMINQ_SET_OPS(flash_unlock, nbl_res_adminq_flash_unlock); \ + NBL_ADMINQ_SET_OPS(flash_prepare, nbl_res_adminq_flash_prepare); \ + NBL_ADMINQ_SET_OPS(flash_image, nbl_res_adminq_flash_image); \ + NBL_ADMINQ_SET_OPS(flash_activate, nbl_res_adminq_flash_activate); \ + NBL_ADMINQ_SET_OPS(set_sfp_state, nbl_res_adminq_set_sfp_state); \ + NBL_ADMINQ_SET_OPS(setup_loopback, nbl_res_adminq_setup_loopback); \ + NBL_ADMINQ_SET_OPS(check_fw_heartbeat, nbl_res_adminq_check_fw_heartbeat); \ + NBL_ADMINQ_SET_OPS(check_fw_reset, nbl_res_adminq_check_fw_reset); \ + NBL_ADMINQ_SET_OPS(get_port_attributes, nbl_res_adminq_get_port_attributes); \ + NBL_ADMINQ_SET_OPS(update_ring_num, nbl_res_adminq_update_ring_num); \ + NBL_ADMINQ_SET_OPS(update_rdma_cap, nbl_res_adminq_update_rdma_cap); \ + NBL_ADMINQ_SET_OPS(update_rdma_mem_type, nbl_res_adminq_update_rdma_mem_type); \ + NBL_ADMINQ_SET_OPS(get_rdma_cap_num, nbl_res_adminq_get_rdma_cap_num); \ + NBL_ADMINQ_SET_OPS(set_ring_num, nbl_res_adminq_set_ring_num); \ + NBL_ADMINQ_SET_OPS(init_port, nbl_res_adminq_init_port); \ + NBL_ADMINQ_SET_OPS(enable_port, nbl_res_adminq_enable_port); \ + NBL_ADMINQ_SET_OPS(recv_port_notify, nbl_res_adminq_recv_port_notify); \ + NBL_ADMINQ_SET_OPS(set_port_advertising, nbl_res_adminq_set_port_advertising); \ + NBL_ADMINQ_SET_OPS(get_port_state, nbl_res_adminq_get_port_state); \ + NBL_ADMINQ_SET_OPS(get_module_info, nbl_res_adminq_get_module_info); \ + NBL_ADMINQ_SET_OPS(get_module_eeprom, nbl_res_adminq_get_module_eeprom); \ + NBL_ADMINQ_SET_OPS(get_link_state, nbl_res_adminq_get_link_state); \ + NBL_ADMINQ_SET_OPS(set_eth_mac_addr, nbl_res_adminq_set_eth_mac_addr); \ + NBL_ADMINQ_SET_OPS(ctrl_port_led, nbl_res_adminq_ctrl_port_led); \ + NBL_ADMINQ_SET_OPS(nway_reset, nbl_res_adminq_nway_reset); \ + NBL_ADMINQ_SET_OPS(set_eth_pfc, nbl_res_adminq_set_eth_pfc); \ + NBL_ADMINQ_SET_OPS(passthrough_fw_cmd, nbl_res_adminq_passthrough); \ + NBL_ADMINQ_SET_OPS(get_private_stat_len, nbl_res_adminq_get_private_stat_len); \ + NBL_ADMINQ_SET_OPS(get_private_stat_data, nbl_res_adminq_get_private_stat_data); \ + NBL_ADMINQ_SET_OPS(fill_private_stat_strings, nbl_res_adminq_fill_private_stat_strings);\ + NBL_ADMINQ_SET_OPS(get_module_temperature, nbl_res_adminq_get_module_temperature); \ + NBL_ADMINQ_SET_OPS(load_p4_default, nbl_res_adminq_load_p4_default); \ + NBL_ADMINQ_SET_OPS(cfg_eth_bond_event, nbl_res_adminq_cfg_eth_bond_event); \ +} while (0) + +/* Structure starts here; adding an op should not require modifying anything below */ +static int nbl_adminq_setup_mgt(struct device *dev, struct nbl_adminq_mgt **adminq_mgt) +{ + *adminq_mgt = devm_kzalloc(dev, sizeof(struct nbl_adminq_mgt), GFP_KERNEL); + if (!*adminq_mgt) + return -ENOMEM; + + init_waitqueue_head(&(*adminq_mgt)->wait_queue); + return 0; +} + +static void nbl_adminq_remove_mgt(struct device *dev, struct nbl_adminq_mgt **adminq_mgt) +{ + devm_kfree(dev, *adminq_mgt); + *adminq_mgt = NULL; +} + +static int nbl_res_adminq_chan_notify_link_state_req(struct nbl_resource_mgt *res_mgt, + u16 fid, u8 link_state, u32 link_speed) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_send_info chan_send; + 
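+ /* one-way notify: the trailing 0 in NBL_CHAN_SEND below appears to select a + * no-response send, unlike the adminq requests above, which pass 1 together + * with a response buffer. + */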
struct nbl_chan_param_notify_link_state link_info = {0}; + + link_info.link_state = link_state; + link_info.link_speed = link_speed; + NBL_CHAN_SEND(chan_send, fid, NBL_CHAN_MSG_NOTIFY_LINK_STATE, &link_info, + sizeof(link_info), NULL, 0, 0); + return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); +} + +static int nbl_res_adminq_notify_eth_rep_link_req(struct nbl_resource_mgt *res_mgt, + u16 fid, u8 eth_id, u8 link_state) +{ + struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt); + struct nbl_chan_param_eth_rep_notify_link_state param = {0}; + struct nbl_chan_send_info chan_send; + + param.eth_id = eth_id; + param.link_state = link_state; + NBL_CHAN_SEND(chan_send, fid, NBL_CHAN_MSG_NOTIFY_ETH_REP_LINK_STATE, &param, sizeof(param), + NULL, 0, 0); + return chan_ops->send_msg(NBL_RES_MGT_TO_CHAN_PRIV(res_mgt), &chan_send); +} + +static void nbl_res_adminq_notify_link_state(struct nbl_resource_mgt *res_mgt, u8 eth_id, + u8 link_state) +{ + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_resource_mgt_leonis *res_mgt_leonis = (struct nbl_resource_mgt_leonis *)res_mgt; + struct nbl_pmd_status *pmd_status = &res_mgt_leonis->pmd_status; + struct nbl_sriov_info *sriov_info; + struct nbl_queue_info *queue_info; + u16 pf_fid = 0, vf_fid = 0, bond_link_state = 0, link_speed = 0; + int i = 0, j = 0; + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + if (eth_info->pf_bitmap[eth_id] & BIT(i)) + pf_fid = nbl_res_pfvfid_to_func_id(res_mgt, i, -1); + else + continue; + + sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[pf_fid]; + queue_info = &queue_mgt->queue_info[pf_fid]; + + /* send eth's link state to pf */ + if (queue_info->num_txrx_queues) + nbl_res_adminq_chan_notify_link_state_req(res_mgt, + pf_fid, + link_state, + eth_info->link_speed[eth_id]); + + /* Use bond_link_state for vfs. + * If there is no bond, it equals link_state. + */ + bond_link_state = nbl_res_adminq_get_bond_link_state(res_mgt, eth_id); + + /* send eth's link state to pf's all vf */ + for (j = 0; j < sriov_info->num_vfs; j++) { + vf_fid = sriov_info->start_vf_func_id + j; + queue_info = &queue_mgt->queue_info[vf_fid]; + if (queue_info->num_txrx_queues) { + link_speed = eth_info->link_speed[eth_id]; + nbl_res_adminq_chan_notify_link_state_req(res_mgt, vf_fid, + bond_link_state, + link_speed); + } + } + } + + if (pmd_status->upcall_port_info.upcall_port_active) { + nbl_res_adminq_notify_eth_rep_link_req(res_mgt, + pmd_status->upcall_port_info.func_id, + eth_id, link_state); + } +} + +static void nbl_res_adminq_eth_task(struct work_struct *work) +{ + struct nbl_adminq_mgt *adminq_mgt = container_of(work, struct nbl_adminq_mgt, + eth_task); + struct nbl_resource_mgt *res_mgt = adminq_mgt->res_mgt; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u8 eth_id = 0; + u8 port_max_rate = 0; + + for (eth_id = 0; eth_id < NBL_MAX_ETHERNET; eth_id++) { + if (adminq_mgt->module_inplace_changed[eth_id]) { + /* module went from not-inplace to inplace status */ + /* read module registers */ + port_max_rate = nbl_res_adminq_get_module_bitrate(res_mgt, eth_id); + + eth_info->port_max_rate[eth_id] = port_max_rate; + eth_info->port_type[eth_id] = nbl_res_adminq_get_port_type(res_mgt, eth_id); + eth_info->module_repluged[eth_id] = 1; + /* copper supports auto-negotiation */ + if (eth_info->port_type[eth_id] == NBL_PORT_TYPE_COPPER) + eth_info->port_caps[eth_id] |= BIT(NBL_PORT_CAP_AUTONEG); + else + eth_info->port_caps[eth_id] &= ~BIT_MASK(NBL_PORT_CAP_AUTONEG); + + adminq_mgt->module_inplace_changed[eth_id] = 0; + } + + mutex_lock(&adminq_mgt->eth_lock); + if (adminq_mgt->link_state_changed[eth_id]) { + /* eth link state changed, notify pf and vf */ + nbl_res_adminq_notify_link_state(res_mgt, eth_id, + eth_info->link_state[eth_id]); + adminq_mgt->link_state_changed[eth_id] = 0; + } + mutex_unlock(&adminq_mgt->eth_lock); + } +} + +static int nbl_res_adminq_setup_cmd_filter(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_hash_tbl_key tbl_key = {0}; + + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(u16), + sizeof(struct nbl_res_fw_cmd_filter), + NBL_RES_FW_CMD_FILTER_MAX, false); + + adminq_mgt->cmd_filter = nbl_common_init_hash_table(&tbl_key); + if (!adminq_mgt->cmd_filter) + return -EFAULT; + + return 0; +} + +static void nbl_res_adminq_remove_cmd_filter(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_adminq_mgt *adminq_mgt = NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + + if (adminq_mgt->cmd_filter) + nbl_common_remove_hash_table(adminq_mgt->cmd_filter, NULL); + + adminq_mgt->cmd_filter = NULL; +} + +int nbl_adminq_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_adminq_mgt **adminq_mgt = &NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int ret; + + ret = nbl_adminq_setup_mgt(dev, adminq_mgt); + if (ret) + goto setup_mgt_fail; + + (*adminq_mgt)->res_mgt = res_mgt; + + (*adminq_mgt)->fw_last_hb_seq = (u32)phy_ops->get_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + INIT_WORK(&(*adminq_mgt)->eth_task, nbl_res_adminq_eth_task); + mutex_init(&(*adminq_mgt)->eth_lock); + + ret = nbl_res_adminq_setup_cmd_filter(res_mgt); + if (ret) + goto set_filter_fail; + + 
nbl_res_adminq_add_cmd_filter_res_write(res_mgt); + + return 0; + +set_filter_fail: + cancel_work_sync(&((*adminq_mgt)->eth_task)); + nbl_adminq_remove_mgt(dev, adminq_mgt); +setup_mgt_fail: + return ret; +} + +void nbl_adminq_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_adminq_mgt **adminq_mgt = &NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt); + + if (!(*adminq_mgt)) + return; + + nbl_res_adminq_remove_cmd_filter(res_mgt); + + cancel_work_sync(&((*adminq_mgt)->eth_task)); + nbl_adminq_remove_mgt(dev, adminq_mgt); +} + +int nbl_adminq_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_ADMINQ_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0) + NBL_ADMINQ_OPS_TBL; +#undef NBL_ADMINQ_SET_OPS + + return 0; +} + +void nbl_adminq_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_ADMINQ_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = NULL; } while (0) + NBL_ADMINQ_OPS_TBL; +#undef NBL_ADMINQ_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h new file mode 100644 index 0000000000000000000000000000000000000000..c8f1c03556f9e45e94ec37c1767166d4ca9b75a7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_adminq.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_ADMINQ_H_ +#define _NBL_ADMINQ_H_ + +#include "nbl_resource.h" + +/* SPI Bank Index */ +#define BANKID_DESC_BANK 0xA0 +#define BANKID_BOOT_BANK 0xA1 +#define BANKID_SR_BANK0 0xA2 +#define BANKID_SR_BANK1 0xA3 +#define BANKID_OSI_BANK0 0xA4 +#define BANKID_OSI_BANK1 0xA5 +#define BANKID_FSI_BANK0 0xA6 +#define BANKID_FSI_BANK1 0xA7 +#define BANKID_PHY_BANK 0xA8 +#define BANKID_NVM_BANK0 0xA9 +#define BANKID_NVM_BANK1 0xAA +#define BANKID_LOG_BANK 0xAB + +#define NBL_ADMINQ_IDX_LEN 4096 + +#define NBL_MAX_PHY_I2C_RESP_SIZE 128 + +#define I2C_DEV_ADDR_A0 0x50 +#define I2C_DEV_ADDR_A2 0x51 + +/* SFF module register addresses: 8 bit valid */ +#define SFF_8472_IDENTIFIER 0x0 +#define SFF_8472_10GB_CAPABILITY 0x3 /* check sff-8472 table 5-3 */ +#define SFF_8472_1GB_CAPABILITY 0x6 /* check sff-8472 table 5-3 */ +#define SFF_8472_CABLE_TECHNOLOGY 0x8 /* check sff-8472 table 5-3 */ +#define SFF_8472_EXTENDED_CAPA 0x24 /* check sff-8024 table 4-4 */ +#define SFF_8472_CABLE_SPEC_COMP 0x3C +#define SFF_8472_DIAGNOSTIC 0x5C /* digital diagnostic monitoring, relates to A2 */ +#define SFF_8472_COMPLIANCE 0x5E /* the specification revision version */ +#define SFF_8472_VENDOR_NAME 0x14 +#define SFF_8472_VENDOR_NAME_LEN 16 /* 16 bytes, from offset 0x14 to offset 0x23 */ +#define SFF_8472_VENDOR_PN 0x28 +#define SFF_8472_VENDOR_PN_LEN 16 +#define SFF_8472_VENDOR_OUI 0x25 /* name and oui cannot all be empty */ +#define SFF_8472_VENDOR_OUI_LEN 3 +#define SFF_8472_SIGNALING_RATE 0xC +#define SFF_8472_SIGNALING_RATE_MAX 0x42 +#define SFF_8472_SIGNALING_RATE_MIN 0x43 +/* optional status/control bits: soft rate select and tx disable */ +#define SFF_8472_OSCB 0x6E +/* extended status/control bits */ +#define SFF_8472_ESCB 0x76 +#define SFF8636_DEVICE_TECH_OFFSET 0x93 + +#define SFF_8636_VENDOR_ENCODING 0x8B +#define SFF_8636_ENCODING_PAM4 0x8 + +/* SFF status code */ +#define SFF_IDENTIFIER_SFP 0x3 +#define SFF_IDENTIFIER_QSFP28 0x11 +#define SFF_IDENTIFIER_PAM4 0x1E +#define SFF_PASSIVE_CABLE 0x4 +#define SFF_ACTIVE_CABLE 0x8 +#define SFF_8472_ADDRESSING_MODE 0x4 
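+/* Illustrative decode (see nbl_res_adminq_get_module_info()): a module whose + * compliance byte (0x5E) is non-zero, with SFF_DDM_IMPLEMENTED set and + * SFF_8472_ADDRESSING_MODE clear in the diagnostic byte (0x5C), is reported as + * ETH_MODULE_SFF_8472; anything else falls back to ETH_MODULE_SFF_8079. + */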
+#define SFF_8472_UNSUPPORTED 0x00 +#define SFF_8472_10G_SR_BIT 4 /* 850nm, short reach */ +#define SFF_8472_10G_LR_BIT 5 /* 1310nm, long reach */ +#define SFF_8472_10G_LRM_BIT 6 /* 1310nm, long reach multimode */ +#define SFF_8472_10G_ER_BIT 7 /* 1550nm, extended reach */ +#define SFF_8472_1G_SX_BIT 0 +#define SFF_8472_1G_LX_BIT 1 +#define SFF_8472_1G_CX_BIT 2 +#define SFF_8472_1G_T_BIT 3 +#define SFF_8472_SOFT_TX_DISABLE 6 +#define SFF_8472_SOFT_RATE_SELECT 4 +#define SFF_8472_EMPTY_ASCII 20 +#define SFF_DDM_IMPLEMENTED 0x40 +#define SFF_COPPER_UNSPECIFIED 0 +#define SFF_COPPER_8431_APPENDIX_E 1 +#define SFF_COPPER_8431_LIMITING 4 +#define SFF_8636_TURNPAGE_ADDR (127) +#define SFF_8638_PAGESIZE (128) + +#define SFF_8636_TEMP (0x60) +#define SFF_8636_TEMP_MAX (0x4) +#define SFF_8636_TEMP_CIRT (0x0) + +#define SFF_8636_QSFP28_TEMP (0x16) +#define SFF_8636_QSFP28_TEMP_MAX (0x204) +#define SFF_8636_QSFP28_TEMP_CIRT (0x200) + +/* Firmware version */ +#define FIRMWARE_MAGIC "M181FWV0" +#define BCD2BYTE(b) ({ typeof(b) _b = (b); \ + (((_b) & 0xF) + (((_b) >> 4) & 0xF) * 10); }) +#define BCD2SHORT(s) ({ typeof(s) _s = (s); \ + (((_s) & 0xF) + (((_s) >> 4) & 0xF) * 10 + \ + (((_s) >> 8) & 0xF) * 100 + (((_s) >> 12) & 0xF) * 1000); }) + +/* VSI fixed number of queues*/ +#define NBL_VSI_PF_REAL_QUEUE_NUM(num) (((num) * 2) + NBL_DEFAULT_REP_HW_QUEUE_NUM) +#define NBL_VSI_VF_REAL_QUEUE_NUM(num) (num) + +#define NBL_ADMINQ_PFA_TLV_VF_NUM (0x5804) +#define NBL_ADMINQ_PFA_TLV_NET_RING_NUM (0x5805) +#define NBL_ADMINQ_PFA_TLV_REP_RING_NUM (0x5806) +#define NBL_ADMINQ_PFA_TLV_ECPU_RING_NUM (0x5807) +#define NBL_ADMINQ_PFA_TLV_RDMA_CAP (0x5808) +#define NBL_ADMINQ_PFA_TLV_RDMA_MEM_TYPE (0x5809) + +enum { + NBL_FW_VERSION_BANK0 = 0, + NBL_FW_VERSION_BANK1 = 1, + NBL_FW_VERSION_RUNNING_BANK = 2, +}; + +enum { + NBL_ADMINQ_NVM_BANK_REPAIR = 0, + NBL_ADMINQ_NVM_BANK_SWITCH, +}; + +enum { + NBL_ADMINQ_BANK_INDEX_SPI_BOOT = 2, + NBL_ADMINQ_BANK_INDEX_NVM_BANK = 3, +}; + +struct nbl_leonis_eth_tx_stats { + u64 frames_txd; + u64 frames_txd_ok; + u64 frames_txd_badfcs; + u64 unicast_frames_txd_ok; + u64 multicast_frames_txd_ok; + u64 broadcast_frames_txd_ok; + u64 macctrl_frames_txd_ok; + u64 fragment_frames_txd; + u64 fragment_frames_txd_ok; + u64 pause_macctrl_frames_txd; + u64 pause_macctrl_toggle_frames_txd; + u64 pfc_macctrl_frames_txd; + u64 pfc_macctrl_toggle_frames_txd_0; + u64 pfc_macctrl_toggle_frames_txd_1; + u64 pfc_macctrl_toggle_frames_txd_2; + u64 pfc_macctrl_toggle_frames_txd_3; + u64 pfc_macctrl_toggle_frames_txd_4; + u64 pfc_macctrl_toggle_frames_txd_5; + u64 pfc_macctrl_toggle_frames_txd_6; + u64 pfc_macctrl_toggle_frames_txd_7; + u64 verify_frames_txd; + u64 respond_frames_txd; + u64 frames_txd_sizerange0; + u64 frames_txd_sizerange1; + u64 frames_txd_sizerange2; + u64 frames_txd_sizerange3; + u64 frames_txd_sizerange4; + u64 frames_txd_sizerange5; + u64 frames_txd_sizerange6; + u64 frames_txd_sizerange7; + u64 undersize_frames_txd_goodfcs; + u64 oversize_frames_txd_goodfcs; + u64 undersize_frames_txd_badfcs; + u64 oversize_frames_txd_badfcs; + u64 octets_txd; + u64 octets_txd_ok; + u64 octets_txd_badfcs; +}; + +struct nbl_leonis_eth_rx_stats { + u64 frames_rxd; + u64 frames_rxd_ok; + u64 frames_rxd_badfcs; + u64 undersize_frames_rxd_goodfcs; + u64 undersize_frames_rxd_badfcs; + u64 oversize_frames_rxd_goodfcs; + u64 oversize_frames_rxd_badfcs; + u64 frames_rxd_misc_error; + u64 frames_rxd_misc_dropped; + u64 unicast_frames_rxd_ok; + u64 multicast_frames_rxd_ok; + u64 broadcast_frames_rxd_ok; + 
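+	/* pause/PFC and remaining RX counters; the field order of the tx/rx stats + * structs must stay in sync with the _eth_statistics[] name table in + * nbl_adminq.c, which exposes this struct to ethtool -S as a flat u64 array. + */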
u64 pause_macctrl_frames_rxd; + u64 pfc_macctrl_frames_rxd; + u64 pfc_macctrl_frames_rxd_0; + u64 pfc_macctrl_frames_rxd_1; + u64 pfc_macctrl_frames_rxd_2; + u64 pfc_macctrl_frames_rxd_3; + u64 pfc_macctrl_frames_rxd_4; + u64 pfc_macctrl_frames_rxd_5; + u64 pfc_macctrl_frames_rxd_6; + u64 pfc_macctrl_frames_rxd_7; + u64 macctrl_frames_rxd; + u64 verify_frames_rxd_ok; + u64 respond_frames_rxd_ok; + u64 fragment_frames_rxd_ok; + u64 fragment_frames_rxd_smdc_nocontext; + u64 fragment_frames_rxd_smds_seq_error; + u64 fragment_frames_rxd_smdc_seq_error; + u64 fragment_frames_rxd_frag_cnt_error; + u64 frames_assembled_ok; + u64 frames_assembled_error; + u64 frames_rxd_sizerange0; + u64 frames_rxd_sizerange1; + u64 frames_rxd_sizerange2; + u64 frames_rxd_sizerange3; + u64 frames_rxd_sizerange4; + u64 frames_rxd_sizerange5; + u64 frames_rxd_sizerange6; + u64 frames_rxd_sizerange7; + u64 octets_rxd; + u64 octets_rxd_ok; + u64 octets_rxd_badfcs; + u64 octets_rxd_dropped; +}; + +struct nbl_leonis_eth_stats { + struct nbl_leonis_eth_tx_stats tx_stats; + struct nbl_leonis_eth_rx_stats rx_stats; +}; + +struct nbl_leonis_eth_stats_info { + const char *descp; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c new file mode 100644 index 0000000000000000000000000000000000000000..2277e9e56970a105a2f21cfe3fb3af8c3ae8c617 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_fc.h" + +static void nbl_fc_update_stats(struct flow_stats *flow_stats, u64 bytes, u64 pkts, + u64 drops, u64 lastused) +{ + flow_stats_update(flow_stats, bytes, pkts, drops, lastused, + FLOW_ACTION_HW_STATS_DELAYED); +} + +static int nbl_fc_get_stats(void *priv, struct nbl_stats_param *param) +{ + int idx; + int i; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_fc_mgt *mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u64 pkts = 0; + u64 bytes = 0; + struct nbl_flow_counter *counter = NULL; + unsigned long cookie = param->f->cookie; + struct nbl_index_key_extra extra_key; + + if (phy_ops->get_hw_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)) == NBL_HW_FATAL_ERR) + return -EIO; + + mgt = NBL_RES_MGT_TO_COUNTER_MGT(res_mgt); + if (!mgt) { + nbl_err(common, NBL_DEBUG_FLOW, "nbl flow fc has not been initialized."); + return -EPERM; + } + + spin_lock(&mgt->counter_lock); + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + for (i = 0; i < NBL_FC_TYPE_MAX; i++) { + idx = nbl_common_get_index_with_data(mgt->cls_cookie_tbl[i], &cookie, &extra_key, + NULL, 0, (void **)&counter); + if (idx != U32_MAX) + break; + } + + if (!counter || i >= NBL_FC_TYPE_MAX) { + spin_unlock(&mgt->counter_lock); + return -EINVAL; + } + + if (i == NBL_FC_SPEC_TYPE) + mgt->fc_ops.get_spec_stats(counter, &pkts, &bytes); + else + mgt->fc_ops.get_flow_stats(counter, &pkts, &bytes); + + counter->lastpackets = counter->cache.packets; + counter->lastbytes = counter->cache.bytes; + + nbl_fc_update_stats(&param->f->stats, bytes, pkts, 0, counter->lastuse); + + spin_unlock(&mgt->counter_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc %u-%lu get pkts:(%llu), bytes:(%llu)", + counter->counter_id, cookie, pkts, bytes); + return 0; +} + 
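+/* Counters are refreshed in the background by nbl_fc_stats_work() below, so + * nbl_fc_get_stats() above serves cached values; this is why stats are + * reported with FLOW_ACTION_HW_STATS_DELAYED rather than immediate semantics. + */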
+static void flow_counter_update(struct nbl_fc_mgt *mgt, enum nbl_pp_fc_type fc_type)
+{
+	u32 idx = 0;
+	u32 flow_num = 0;
+	u32 i = 0;
+	struct nbl_flow_counter *iter_counter = NULL;
+	struct nbl_flow_query_counter counter_array;
+	struct list_head *counter_list;
+
+	memset(&counter_array, 0, sizeof(counter_array));
+
+	if (fc_type == NBL_FC_COMMON_TYPE)
+		counter_list = &mgt->counter_hash_list;
+	else
+		counter_list = &mgt->counter_stat_hash_list;
+
+	spin_lock(&mgt->counter_lock);
+	list_for_each_entry(iter_counter, counter_list, entries) {
+		mgt->counter_update_list[idx].counter_id = iter_counter->counter_id;
+		mgt->counter_update_list[idx].cookie = iter_counter->cookie;
+		idx++;
+	}
+	spin_unlock(&mgt->counter_lock);
+	/* query through the command queue */
+	for (i = 0; i < idx; i++) {
+		counter_array.counter_id[flow_num] = mgt->counter_update_list[i].counter_id;
+		counter_array.cookie[flow_num] = mgt->counter_update_list[i].cookie;
+		++flow_num;
+
+		/* send a bulk cmdq query once the batch is full */
+		if (flow_num == NBL_FLOW_COUNT_NUM) {
+			mgt->fc_ops.update_stats(mgt, &counter_array, flow_num, 0, fc_type);
+			flow_num = 0;
+		}
+	}
+
+	if (flow_num) {
+		mgt->fc_ops.update_stats(mgt, &counter_array, flow_num, 0, fc_type);
+		flow_num = 0;
+	}
+
+	nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc start update counter type %d, all=%u",
+		  fc_type, idx);
+}
+
+static void nbl_fc_stats_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct nbl_fc_mgt *mgt = container_of(delayed_work,
+					      struct nbl_fc_mgt, counter_work);
+	unsigned long now = jiffies;
+
+	if (!list_empty(&mgt->counter_hash_list) || !list_empty(&mgt->counter_stat_hash_list))
+		queue_delayed_work(mgt->counter_wq, &mgt->counter_work, mgt->query_interval);
+
+	/* rate-limit counter updates to keep the overhead low */
+	if (time_before(now, mgt->next_query))
+		return;
+	flow_counter_update(mgt, NBL_FC_COMMON_TYPE);
+	flow_counter_update(mgt, NBL_FC_SPEC_TYPE);
+	mgt->next_query = now + mgt->query_interval;
+}
+
+static void nbl_fc_free_res(struct nbl_fc_mgt *mgt)
+{
+	int i;
+
+	kfree(mgt->counter_update_list);
+	mgt->counter_update_list = NULL;
+
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		nbl_common_remove_index_table(mgt->cls_cookie_tbl[i], NULL);
+		mgt->cls_cookie_tbl[i] = NULL;
+	}
+}
+
+static int nbl_fc_init_hash_map(struct nbl_fc_mgt *mgt)
+{
+	int i;
+	u32 idx_num[NBL_FC_TYPE_MAX] = {NBL_COUNTER_MAX_ID, NBL_COUNTER_MAX_STAT_ID};
+	struct nbl_index_tbl_key tbl_key;
+
+	mgt->counter_update_list = kcalloc(NBL_COUNTER_MAX_ID, sizeof(*mgt->counter_update_list),
+					   GFP_KERNEL);
+	if (!mgt->counter_update_list)
+		goto alloc_counter_list_failed;
+
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		NBL_INDEX_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(mgt->common), 0,
+				       idx_num[i], sizeof(unsigned long));
+		mgt->cls_cookie_tbl[i] = nbl_common_init_index_table(&tbl_key);
+		if (!mgt->cls_cookie_tbl[i])
+			goto alloc_index_tbl_failed;
+	}
+
+	return 0;
+
+alloc_index_tbl_failed:
+	/* unwind the tables that were already created */
+	while (--i >= 0) {
+		nbl_common_remove_index_table(mgt->cls_cookie_tbl[i], NULL);
+		mgt->cls_cookie_tbl[i] = NULL;
+	}
+	kfree(mgt->counter_update_list);
+	mgt->counter_update_list = NULL;
+alloc_counter_list_failed:
+	return -ENOMEM;
+}
+
+/* NBL_COUNTER_SET_OPS(ops_name, func)
+ *
+ * Use X macros to reduce setup and removal code.
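+ *
+ * For example, nbl_fc_setup_ops() below expands the table to:
+ *
+ *	res_ops->NBL_NAME(query_tc_stats) = nbl_fc_get_stats;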
+ */ +#define NBL_COUNTER_OPS_TBL \ +do { \ + NBL_COUNTER_SET_OPS(query_tc_stats, nbl_fc_get_stats); \ +} while (0) + +static void nbl_fc_remove_mgt(struct device *dev, struct nbl_fc_mgt **fc_mgt) +{ + devm_kfree(dev, *fc_mgt); + *fc_mgt = NULL; +} + +int nbl_fc_set_stats(struct nbl_fc_mgt *mgt, void *data, unsigned long cookie) +{ + int ret = 0; + int i; + int idx; + struct nbl_stats_data *data_info = (struct nbl_stats_data *)data; + struct nbl_flow_counter *counter_node = NULL; + struct nbl_index_key_extra extra_key; + + spin_lock(&mgt->counter_lock); + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + for (i = 0; i < NBL_FC_TYPE_MAX; i++) { + idx = nbl_common_get_index_with_data(mgt->cls_cookie_tbl[i], &cookie, &extra_key, + NULL, 0, (void **)&counter_node); + if (idx != U32_MAX) + break; + } + + if (!counter_node) { + nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc cookie %lu is not exist now", + cookie); + ret = -ENOKEY; + goto counter_rte_hash_lookup_err; + } + + if (data_info->packets != counter_node->cache.packets) { + counter_node->cache.packets = data_info->packets; + counter_node->cache.bytes = data_info->bytes; + counter_node->lastuse = jiffies; + } + + spin_unlock(&mgt->counter_lock); + return 0; + +counter_rte_hash_lookup_err: + spin_unlock(&mgt->counter_lock); + return ret; +} + +int nbl_fc_setup_mgt(struct device *dev, struct nbl_fc_mgt **fc_mgt) +{ + struct nbl_fc_mgt *mgt; + *fc_mgt = devm_kzalloc(dev, sizeof(struct nbl_fc_mgt), GFP_KERNEL); + + mgt = *fc_mgt; + if (!mgt) + return -ENOMEM; + + spin_lock_init(&mgt->counter_lock); + INIT_LIST_HEAD(&mgt->counter_hash_list); + INIT_LIST_HEAD(&mgt->counter_stat_hash_list); + mgt->query_interval = NBL_COUNTER_PERIOD_INTERVAL; + + return 0; +} + +int nbl_fc_add_stats(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_fc_mgt *fc_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_flow_counter *counter_node; + struct list_head *counter_list; + int ret = 0; + int idx = 0; + struct nbl_flow_counter counter_data; + struct nbl_index_key_extra extra_key; + + fc_mgt = NBL_RES_MGT_TO_COUNTER_MGT(res_mgt); + if (!fc_mgt) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc add failed: counter not init."); + return -EINVAL; + } + + spin_lock(&fc_mgt->counter_lock); + if (fc_type == NBL_FC_COMMON_TYPE) + counter_list = &fc_mgt->counter_hash_list; + else + counter_list = &fc_mgt->counter_stat_hash_list; + + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + idx = nbl_common_get_index_with_data(fc_mgt->cls_cookie_tbl[fc_type], &cookie, &extra_key, + NULL, 0, (void **)&counter_node); + if (idx != U32_MAX) { + nbl_err(common, NBL_DEBUG_FLOW, "nbl flow fc add failed: cookie exist(%lu-%d)\n", + cookie, fc_type); + ret = -EEXIST; + goto add_counter_failed; + } + + memset(&counter_data, 0, sizeof(counter_data)); + counter_data.cookie = cookie; + idx = nbl_common_alloc_index(fc_mgt->cls_cookie_tbl[fc_type], &cookie, NULL, &counter_data, + sizeof(counter_data), (void **)&counter_node); + if (idx == U32_MAX) + goto add_counter_failed; + + counter_node->counter_id = (u32)idx; + list_add(&counter_node->entries, counter_list); + + /* wake up update worker */ + mod_delayed_work(fc_mgt->counter_wq, &fc_mgt->counter_work, 0); + nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc add counter(%u-%lu-%d) success\n", + idx, cookie, fc_type); + ret = (int)idx; + +add_counter_failed: + spin_unlock(&fc_mgt->counter_lock); + return ret; 
+}
+
+int nbl_fc_del_stats(void *priv, unsigned long cookie)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_fc_mgt *fc_mgt;
+	struct nbl_flow_counter *counter_node = NULL;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct list_head *counter_list;
+	int ret = 0;
+	int idx;
+	int i;
+	struct nbl_flow_query_counter counter_array;
+	struct nbl_index_key_extra extra_key;
+
+	fc_mgt = NBL_RES_MGT_TO_COUNTER_MGT(res_mgt);
+	if (!fc_mgt) {
+		nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc del failed: counter not initialized.");
+		return -EINVAL;
+	}
+
+	memset(&counter_array, 0, sizeof(counter_array));
+
+	spin_lock(&fc_mgt->counter_lock);
+	NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true);
+	for (i = 0; i < NBL_FC_TYPE_MAX; i++) {
+		idx = nbl_common_get_index_with_data(fc_mgt->cls_cookie_tbl[i], &cookie,
+						     &extra_key, NULL, 0, (void **)&counter_node);
+		if (idx != U32_MAX)
+			break;
+	}
+
+	if (!counter_node || i >= NBL_FC_TYPE_MAX) {
+		nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc del key(%lu) does not exist", cookie);
+		ret = -ENOKEY;
+		goto del_counter_failed;
+	}
+
+	if (i == NBL_FC_COMMON_TYPE)
+		counter_list = &fc_mgt->counter_hash_list;
+	else
+		counter_list = &fc_mgt->counter_stat_hash_list;
+
+	counter_array.counter_id[0] = idx;
+	counter_array.cookie[0] = cookie;
+	fc_mgt->fc_ops.update_stats(fc_mgt, &counter_array, 1, 1, i);
+	list_del(&counter_node->entries);
+	nbl_common_free_index(fc_mgt->cls_cookie_tbl[i], &cookie);
+	nbl_debug(common, NBL_DEBUG_FLOW, "nbl flow fc del counter(%lu-%d) success\n", cookie, i);
+del_counter_failed:
+	spin_unlock(&fc_mgt->counter_lock);
+	return ret;
+}
+
+int nbl_fc_mgt_start(struct nbl_fc_mgt *mgt)
+{
+	int ret = -ENOMEM;
+
+	mgt->counter_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, "nbl_fc_wq");
+	if (!mgt->counter_wq)
+		goto init_counter_fail;
+
+	ret = nbl_fc_init_hash_map(mgt);
+	if (ret)
+		goto init_hash_map_fail;
+
+	INIT_DELAYED_WORK(&mgt->counter_work, nbl_fc_stats_work);
+	queue_delayed_work(mgt->counter_wq, &mgt->counter_work, mgt->query_interval);
+
+	nbl_info(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc init success in tc mode");
+	return 0;
+
+init_hash_map_fail:
+	destroy_workqueue(mgt->counter_wq);
+init_counter_fail:
+	nbl_err(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc init failed in tc mode");
+	return ret;
+}
+
+void nbl_fc_mgt_stop(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_fc_mgt **fc_mgt;
+	struct nbl_fc_mgt *mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	fc_mgt = &NBL_RES_MGT_TO_COUNTER_MGT(res_mgt);
+	mgt = (*fc_mgt);
+	if (!mgt)
+		return;
+
+	cancel_delayed_work_sync(&mgt->counter_work);
+	destroy_workqueue(mgt->counter_wq);
+	nbl_fc_free_res(mgt);
+	nbl_fc_remove_mgt(dev, fc_mgt);
+	nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc deinit success in tc mode");
+}
+
+int nbl_fc_setup_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_COUNTER_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0)
+	NBL_COUNTER_OPS_TBL;
+#undef NBL_COUNTER_SET_OPS
+
+	return 0;
+}
+
+void nbl_fc_remove_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_COUNTER_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = NULL; } while (0)
+	NBL_COUNTER_OPS_TBL;
+#undef NBL_COUNTER_SET_OPS
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f59714c480ed91478997250f47b73ba432c6761
--- /dev/null
+++ 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fc.h @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_FC_H_ +#define _NBL_FC_H_ + +#include "nbl_resource.h" + +#define NBL_COUNTER_PERIOD_INTERVAL msecs_to_jiffies(3000) + +#define NBL_FLOW_STAT_CLR_OFT (3) +#define NBL_FLOW_STAT_NUM_MASK (0x7) + +#define NBL_CMDQ_ACL_STAT_BASE_LEN 32 + +struct nbl_stats_data { + u32 flow_id; + u64 bytes; + u64 packets; +}; + +int nbl_fc_add_stats(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie); +int nbl_fc_del_stats(void *priv, unsigned long cookie); +int nbl_fc_setup_ops(struct nbl_resource_ops *res_ops); +void nbl_fc_remove_ops(struct nbl_resource_ops *res_ops); +int nbl_fc_mgt_start(struct nbl_fc_mgt *mgt); +void nbl_fc_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_fc_setup_mgt(struct device *dev, struct nbl_fc_mgt **fc_mgt); +int nbl_fc_set_stats(struct nbl_fc_mgt *mgt, void *data, unsigned long cookie); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c new file mode 100644 index 0000000000000000000000000000000000000000..72c79e857edc9c02e3298b2c5f14e29592bc263a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.c @@ -0,0 +1,1041 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_fd.h" +#include "nbl_p4_actions.h" + +struct nbl_fd_get_tlv_udf { + u64 val; + u64 mask; + bool valid; +}; + +struct nbl_fd_tcam_default_entry { + union nbl_fd_tcam_default_data_u data; + union nbl_fd_tcam_default_data_u mask; + struct nbl_flow_direct_entry *entry; + u32 action; +}; + +static int nbl_fd_get_profile_id(enum nbl_chan_fdir_flow_type type, u8 mode) +{ + switch (type) { + case NBL_CHAN_FDIR_FLOW_TCP_IPv4: + case NBL_CHAN_FDIR_FLOW_UDP_IPv4: + case NBL_CHAN_FDIR_FLOW_IPv4: + return mode == NBL_FD_MODE_DEFAULT ? NBL_FD_PROFILE_DEFAULT : NBL_FD_PROFILE_IPV4; + case NBL_CHAN_FDIR_FLOW_TCP_IPv6: + case NBL_CHAN_FDIR_FLOW_UDP_IPv6: + case NBL_CHAN_FDIR_FLOW_IPv6: + case NBL_CHAN_FDIR_FLOW_ETHER: + case NBL_CHAN_FDIR_FLOW_FULL: + return mode == NBL_FD_MODE_DEFAULT ? 
NBL_FD_PROFILE_DEFAULT :
+						  NBL_FD_PROFILE_L2_IPV6;
+	default:
+		break;
+	}
+
+	return -1;
+}
+
+static struct nbl_flow_direct_entry *nbl_fd_find_flow(struct nbl_flow_direct_info *info,
+						      enum nbl_chan_fdir_rule_type rule_type,
+						      u32 loc)
+{
+	struct nbl_flow_direct_entry *entry = NULL;
+
+	if (rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return NULL;
+
+	list_for_each_entry(entry, &info->list[rule_type], node)
+		if (entry->param.location == loc)
+			return entry;
+
+	return NULL;
+}
+
+static int nbl_fd_get_udf(u16 type, u16 length, u8 *val, void *data)
+{
+	struct nbl_fd_get_tlv_udf *udf = (struct nbl_fd_get_tlv_udf *)data;
+
+	if (type != NBL_CHAN_FDIR_KEY_UDF)
+		return 0;
+
+	udf->valid = 1;
+	udf->val = *(u64 *)val;
+	udf->mask = *(u64 *)(val + 8);
+
+	return 1;
+}
+
+static u16 nbl_fd_get_flow_layer(enum nbl_chan_fdir_flow_type type)
+{
+	switch (type) {
+	case NBL_CHAN_FDIR_FLOW_ETHER:
+		return 0;
+	case NBL_CHAN_FDIR_FLOW_IPv4:
+	case NBL_CHAN_FDIR_FLOW_IPv6:
+		return 1;
+	case NBL_CHAN_FDIR_FLOW_TCP_IPv4:
+	case NBL_CHAN_FDIR_FLOW_TCP_IPv6:
+	case NBL_CHAN_FDIR_FLOW_UDP_IPv4:
+	case NBL_CHAN_FDIR_FLOW_UDP_IPv6:
+	case NBL_CHAN_FDIR_FLOW_FULL:
+	default:
+		return 2;
+	}
+}
+
+static int nbl_fd_validate_rule(struct nbl_flow_direct_mgt *fd_mgt,
+				struct nbl_chan_param_fdir_replace *param,
+				struct nbl_flow_direct_entry *entry)
+{
+	struct nbl_fd_get_tlv_udf udf = {0};
+	int pid = -1;
+	u16 udf_offset;
+	u16 udf_layer;
+	bool rule_udf = false;
+	u8 *tlv;
+
+	if (param->rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	tlv = (u8 *)param + param->base_length;
+	nbl_flow_direct_parse_tlv_data(tlv, param->tlv_length, nbl_fd_get_udf, &udf);
+	if (udf.valid) {
+		udf_offset = (udf.val & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S;
+		udf_layer = nbl_fd_get_flow_layer(param->flow_type);
+
+		if (entry)
+			rule_udf = entry->udf;
+
+		/* Offset must be the same for all rules */
+		if (fd_mgt->udf_cnt > 0 &&
+		    (fd_mgt->udf_offset != udf_offset || fd_mgt->udf_layer != udf_layer) &&
+		    (fd_mgt->udf_cnt != 1 || !rule_udf))
+			return -EINVAL;
+
+		if (udf_offset > 52)
+			return -EINVAL;
+
+		/* For offset, we don't support mask */
+		if (((udf.mask & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S) != 0xFFFFFFFF)
+			return -EINVAL;
+	}
+
+	/* Replacing an existing rule skips the count check; always keep full mode */
+	if (entry)
+		return 0;
+
+	pid = nbl_fd_get_profile_id(param->flow_type, fd_mgt->mode);
+	switch (pid) {
+	case NBL_FD_PROFILE_DEFAULT:
+		if (fd_mgt->cnt[NBL_FD_PROFILE_DEFAULT] >= NBL_FD_RULE_MAX_512)
+			return -EINVAL;
+		break;
+	case NBL_FD_PROFILE_IPV4:
+		if (fd_mgt->mode == NBL_FD_MODE_LITE &&
+		    fd_mgt->cnt[NBL_FD_PROFILE_IPV4] >= NBL_FD_RULE_MAX_1536)
+			return -EINVAL;
+		if (fd_mgt->mode == NBL_FD_MODE_FULL &&
+		    fd_mgt->cnt[NBL_FD_PROFILE_IPV4] >= NBL_FD_RULE_MAX_512 &&
+		    fd_mgt->cnt[NBL_FD_PROFILE_L2_IPV6] > 0)
+			return -EINVAL;
+		break;
+	case NBL_FD_PROFILE_L2_IPV6:
+		/* We will always try to change the mode to FULL, so if we are in LITE now,
+		 * then don't support any IPV6 rules whatsoever.
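+		 * (nbl_fd_del_flow() flips the mode back to FULL once the IPv4
+		 * rule count drops to NBL_FD_RULE_MAX_512 or below.)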
+ */ + if (fd_mgt->mode == NBL_FD_MODE_LITE || + fd_mgt->cnt[NBL_FD_PROFILE_L2_IPV6] >= NBL_FD_RULE_MAX_512) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static struct nbl_flow_direct_entry *nbl_fd_add_flow(struct nbl_flow_direct_mgt *fd_mgt, + struct nbl_flow_direct_info *info, + struct nbl_chan_param_fdir_replace *param) +{ + struct nbl_flow_direct_entry *entry = NULL, *next = NULL; + struct nbl_fd_get_tlv_udf udf = {0}; + u8 pid; + + pid = nbl_fd_get_profile_id(param->flow_type, fd_mgt->mode); + if (pid > NBL_FD_PROFILE_MAX) + return NULL; + + entry = kzalloc(sizeof(*entry) + param->tlv_length, GFP_KERNEL); + if (!entry) + return NULL; + + entry->pid = pid; + memcpy(&entry->param, param, min_t(u32, sizeof(entry->param), param->base_length)); + memcpy(entry->param.tlv, ((u8 *)param + param->base_length), param->tlv_length); + entry->param.base_length = sizeof(entry->param); + + /* Maintain order */ + if (param->order) { + list_for_each_entry(next, &info->list[param->rule_type], node) + if (next->param.location >= entry->param.location) + break; + + if (nbl_list_entry_is_head(next, &info->list[param->rule_type], node)) + list_add(&entry->node, &info->list[param->rule_type]); + else + list_add(&entry->node, &list_prev_entry(next, node)->node); + } else { + list_add_tail(&entry->node, &info->list[param->rule_type]); + } + + info->cnt[param->rule_type]++; + fd_mgt->cnt[entry->pid]++; + + /* We have judged the capacity in validation, so we shouldn't have any trouble now. */ + if (fd_mgt->mode == NBL_FD_MODE_FULL && + fd_mgt->cnt[NBL_FD_PROFILE_IPV4] > NBL_FD_RULE_MAX_512) + fd_mgt->mode = NBL_FD_MODE_LITE; + + nbl_flow_direct_parse_tlv_data(param->tlv, param->tlv_length, nbl_fd_get_udf, &udf); + if (udf.valid) { + entry->udf = 1; + fd_mgt->udf_offset = (udf.val & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S; + fd_mgt->udf_cnt++; + fd_mgt->udf_layer = nbl_fd_get_flow_layer(param->flow_type); + } + + return entry; +} + +static void nbl_fd_del_flow(struct nbl_flow_direct_mgt *fd_mgt, + struct nbl_flow_direct_info *info, + struct nbl_flow_direct_entry *entry) +{ + info->cnt[entry->param.rule_type]--; + fd_mgt->cnt[entry->pid]--; + + if (entry->udf) + fd_mgt->udf_cnt--; + + if (fd_mgt->mode == NBL_FD_MODE_LITE && + fd_mgt->cnt[NBL_FD_PROFILE_IPV4] <= NBL_FD_RULE_MAX_512) + fd_mgt->mode = NBL_FD_MODE_FULL; + + list_del(&entry->node); + kfree(entry); +} + +static int nbl_fd_find_and_del_flow(struct nbl_flow_direct_mgt *fd_mgt, + struct nbl_flow_direct_info *info, + enum nbl_chan_fdir_rule_type rule_type, + u32 loc) +{ + struct nbl_flow_direct_entry *entry = nbl_fd_find_flow(info, rule_type, loc); + + if (!entry) + return -ENOENT; + + nbl_fd_del_flow(fd_mgt, info, entry); + + return 0; +} + +static void nbl_fd_del_flow_all(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_entry *entry = NULL, *entry_safe = NULL; + int i = 0, j; + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) + for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++) + list_for_each_entry_safe(entry, entry_safe, &fd_mgt->info[i].list[j], node) + nbl_fd_del_flow(fd_mgt, &fd_mgt->info[i], entry); +} + +static int nbl_fd_setup_tcam_cfg(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + switch (fd_mgt->mode) { + case NBL_FD_MODE_DEFAULT: + 
phy_ops->set_fd_tcam_cfg_default(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+		break;
+	case NBL_FD_MODE_FULL:
+		phy_ops->set_fd_tcam_cfg_full(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+		break;
+	case NBL_FD_MODE_LITE:
+		phy_ops->set_fd_tcam_cfg_lite(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (fd_mgt->udf_cnt)
+		phy_ops->set_fd_udf(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				    (u8)fd_mgt->udf_layer,
+				    (u8)fd_mgt->udf_offset);
+	else
+		phy_ops->clear_fd_udf(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+
+	return 0;
+}
+
+static int nbl_fd_config_default_key_action(u16 type, u16 length, u8 *val, void *data)
+{
+	struct nbl_fd_tcam_default_entry *tcam_entry = (struct nbl_fd_tcam_default_entry *)data;
+	union nbl_fd_tcam_default_data_u *tcam_data = &tcam_entry->data;
+	union nbl_fd_tcam_default_data_u *tcam_mask = &tcam_entry->mask;
+	struct nbl_flow_direct_entry *entry = tcam_entry->entry;
+	union nbl_action_data action = {{0}};
+	u8 reverse_mac[ETH_ALEN];
+	u64 temp, mask;
+	u32 offset, udf_data, udf_mask;
+
+	switch (type) {
+	case NBL_CHAN_FDIR_KEY_SRC_MAC:
+		nbl_convert_mac(val, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_data->info.src_mac = temp;
+		nbl_convert_mac(val + ETH_ALEN, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_mask->info.src_mac = temp;
+		break;
+	case NBL_CHAN_FDIR_KEY_DST_MAC:
+		nbl_convert_mac(val, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_data->info.dst_mac = temp;
+		nbl_convert_mac(val + ETH_ALEN, reverse_mac);
+		ether_addr_copy((u8 *)&temp, reverse_mac);
+		tcam_mask->info.dst_mac = temp;
+		break;
+	case NBL_CHAN_FDIR_KEY_PROTO:
+		tcam_data->info.ethertype = be16_to_cpu(*(u16 *)val);
+		tcam_mask->info.ethertype = be16_to_cpu(*(u16 *)(val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_SRC_IPv4:
+		tcam_data->info.sip_l = be32_to_cpu(*(u32 *)val);
+		tcam_mask->info.sip_l = be32_to_cpu(*(u32 *)(val + 4));
+		break;
+	case NBL_CHAN_FDIR_KEY_DST_IPv4:
+		tcam_data->info.dip_l = be32_to_cpu(*(u32 *)val);
+		tcam_mask->info.dip_l = be32_to_cpu(*(u32 *)(val + 4));
+		break;
+	case NBL_CHAN_FDIR_KEY_L4PROTO:
+		tcam_data->info.l4_proto = *val;
+		tcam_mask->info.l4_proto = *(val + 1);
+		break;
+	case NBL_CHAN_FDIR_KEY_SRC_IPv6:
+		tcam_data->info.sip_l = be64_to_cpu(*((u64 *)val + 1));
+		tcam_mask->info.sip_l = be64_to_cpu(*((u64 *)val + 3));
+		tcam_data->info.sip_h = be64_to_cpu(*(u64 *)val);
+		tcam_mask->info.sip_h = be64_to_cpu(*((u64 *)val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_DST_IPv6:
+		tcam_data->info.dip_l = be64_to_cpu(*((u64 *)val + 1));
+		tcam_mask->info.dip_l = be64_to_cpu(*((u64 *)val + 3));
+		tcam_data->info.dip_h = be64_to_cpu(*(u64 *)val);
+		tcam_mask->info.dip_h = be64_to_cpu(*((u64 *)val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_SPORT:
+		/* the hw-generated key is little endian */
+		tcam_data->info.l4_sport = be16_to_cpu(*(u16 *)val);
+		tcam_mask->info.l4_sport = be16_to_cpu(*(u16 *)(val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_DPORT:
+		tcam_data->info.l4_dport = be16_to_cpu(*(u16 *)val);
+		tcam_mask->info.l4_dport = be16_to_cpu(*(u16 *)(val + 2));
+		break;
+	case NBL_CHAN_FDIR_KEY_UDF:
+		temp = *(u64 *)val;
+		mask = *(u64 *)(val + 8);
+		offset = (temp & NBL_FD_UDF_FLEX_OFFS_M) >> NBL_FD_UDF_FLEX_OFFS_S;
+		udf_data = temp & NBL_FD_UDF_FLEX_WORD_M;
+		udf_mask = mask & NBL_FD_UDF_FLEX_WORD_M;
+
+		/* data: the high address holds the first payload bytes.
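+		 * Worked example (illustrative): with udf_data 0xAABBCCDD,
+		 * offset % 4 == 1 rotates the low byte to the top, giving
+		 * 0xDDAABBCC; offset % 4 == 3 rotates the other way, giving
+		 * 0xBBCCDDAA.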
*/ + if (offset % 4 == 1) { + udf_data = (u8)udf_data << 24 | udf_data >> 8; + udf_mask = (u8)udf_mask << 24 | udf_mask >> 8; + + } else if (offset % 4 == 3) { + udf_data = udf_data >> 24 | udf_data << 8; + udf_mask = udf_mask >> 24 | udf_mask << 8; + } + + tcam_data->info.udf = udf_data; + tcam_mask->info.udf = udf_mask; + break; + case NBL_CHAN_FDIR_ACTION_QUEUE: + if (entry->param.global_queue_id != 0xFFFF) { + action.dqueue.que_id = entry->param.global_queue_id; + tcam_entry->action = action.data + (NBL_ACT_SET_QUE_IDX << 16); + } else { + action.data = 0xFFF; + tcam_entry->action = action.data + (NBL_ACT_SET_DPORT << 16); + } + break; + case NBL_CHAN_FDIR_ACTION_VSI: + if (entry->param.dport != 0xFFFF) { + action.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + action.dport.up.port_id = entry->param.dport; + action.dport.up.upcall_flag = AUX_KEEP_FWD_TYPE; + action.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + } else { + action.data = 0xFFF; + } + tcam_entry->action = action.data + (NBL_ACT_SET_DPORT << 16); + + break; + default: + break; + } + + return 0; +} + +static int nbl_fd_config_key(struct nbl_flow_direct_entry *entry, struct nbl_acl_tcam_param *data, + struct nbl_acl_tcam_param *mask, u32 *action, u16 vsi_id) +{ + struct nbl_fd_tcam_default_entry tcam_default_entry; + + memset(&tcam_default_entry, 0, sizeof(tcam_default_entry)); + tcam_default_entry.entry = entry; + + switch (entry->pid) { + case NBL_FD_PROFILE_DEFAULT: + nbl_flow_direct_parse_tlv_data(entry->param.tlv, entry->param.tlv_length, + nbl_fd_config_default_key_action, + &tcam_default_entry); + + tcam_default_entry.data.info.dport = (0x2 << 10) + vsi_id; + tcam_default_entry.mask.info.dport = 0xFFFF; + tcam_default_entry.data.info.pid = NBL_FD_PROFILE_DEFAULT; + tcam_default_entry.mask.info.pid = 0xE; + + memcpy(&data->info.data, &tcam_default_entry.data, sizeof(tcam_default_entry.data)); + memcpy(&mask->info.data, &tcam_default_entry.mask, sizeof(tcam_default_entry.mask)); + data->len = sizeof(tcam_default_entry.data); + mask->len = sizeof(tcam_default_entry.mask); + *action = tcam_default_entry.action; + + break; + case NBL_FD_PROFILE_IPV4: + case NBL_FD_PROFILE_L2_IPV6: + default: + return -EINVAL; + } + + return 0; +} + +static int nbl_fd_get_tcam_index(struct nbl_fd_tcam_index_info *info, u8 pid, + u16 *ram_index, u16 *depth_index, int mode) +{ + switch (pid) { + case NBL_FD_PROFILE_DEFAULT: + if (info->default_index[0].depth_index >= NBL_FD_TCAM_DEPTH) + return -EINVAL; + + *ram_index = 0; + *depth_index = info->default_index[0].depth_index++; + + break; + case NBL_FD_PROFILE_IPV4: + if (mode != NBL_FD_MODE_LITE && + (info->v4_cnt > 1 || info->v4[0].depth_index >= NBL_FD_TCAM_DEPTH)) + return -EINVAL; + + if (info->v4[info->v4_cnt].depth_index < NBL_FD_TCAM_DEPTH) { + *ram_index = info->v4_cnt; + *depth_index = info->v4[info->v4_cnt].depth_index++; + } else { + *ram_index = info->v4_cnt++; + *depth_index = info->v4[info->v4_cnt].depth_index++; + } + + break; + case NBL_FD_PROFILE_L2_IPV6: + if (mode == NBL_FD_MODE_LITE || info->v6[0].depth_index >= NBL_FD_TCAM_DEPTH) + return -EINVAL; + + *ram_index = NBL_FD_IPV4_TCAM_WIDTH; + *depth_index = info->v6[0].depth_index++; + + break; + default: + return -EINVAL; + } + + return 0; +} + +static u16 nbl_fd_get_action_index(u16 ram_index) +{ + /* This is a bit tricky... + * + * For DEFAULT mode, ram_index is always 0, so we always use action_ram 0. 
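+	 * (That is, ram_index / NBL_FD_IPV4_TCAM_WIDTH == 0 for any
+	 * ram_index below NBL_FD_IPV4_TCAM_WIDTH, i.e. below 5.)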
+ * + * For FULL mode, IPV4 rules always have ram_index 0, so they use action_ram 0, and + * IPV6 rules always have ram_index equals to NBL_FD_IPV4_TCAM_WIDTH, so they use + * action_ram 1. + * + * For LITE mode, every 512 IPV4 rules use one action_ram. + */ + return ram_index / NBL_FD_IPV4_TCAM_WIDTH; +} + +static int nbl_fd_setup_tcam_for_list(struct nbl_resource_mgt *res_mgt, + struct nbl_fd_tcam_index_info *index_info, + struct list_head *head, u16 vsi_id) +{ + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_acl_tcam_param data, mask; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_flow_direct_entry *entry = NULL; + u16 ram_index = 0, depth_index = 0, action_index = 0; + u32 action = 0; + int ret; + + memset(&data, 0, sizeof(data)); + memset(&mask, 0, sizeof(mask)); + list_for_each_entry(entry, head, node) { + ret = nbl_fd_get_tcam_index(index_info, entry->pid, &ram_index, + &depth_index, fd_mgt->mode); + if (ret) + return ret; + + nbl_fd_config_key(entry, &data, &mask, &action, vsi_id); + action_index = nbl_fd_get_action_index(ram_index); + + entry->action_index = action_index; + entry->depth_index = depth_index; + ret = phy_ops->set_fd_action_ram(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + action, action_index, depth_index); + + ret = phy_ops->set_fd_tcam_ram(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + &data, &mask, ram_index, depth_index); + if (ret) + return ret; + } + + return 0; +} + +static int nbl_fd_setup_tcam(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_fd_tcam_index_info index_info; + u16 vsi_id = 0; + int i = 0, j, ret = 0; + + memset(&index_info, 0, sizeof(index_info)); + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, i, -1, NBL_VSI_DATA); + for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++) { + ret = nbl_fd_setup_tcam_for_list(res_mgt, &index_info, + &fd_mgt->info[i].list[j], vsi_id); + if (ret) + return ret; + } + } + + return 0; +} + +static int nbl_fd_setup_flow(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int ret = 0; + + if (fd_mgt->state != NBL_FD_STATE_ON) + return 0; + + phy_ops->clear_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + ret = nbl_fd_setup_tcam_cfg(res_mgt); + if (ret) + goto fail; + + ret = nbl_fd_setup_tcam(res_mgt); + if (ret) + goto fail; + + return 0; + +fail: + phy_ops->clear_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + return ret; +} + +static void nbl_fd_remove_flow(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->clear_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_fd_handle_queue_update(u16 type, void *event_data, void *callback_data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_event_queue_update_data *data = + (struct nbl_event_queue_update_data *)event_data; + struct nbl_flow_direct_entry *entry = NULL; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + union nbl_action_data action = {{0}}; + int pf_id, vf_id; + u32 action_data; + u16 func_id = data->func_id; + + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pf_id, &vf_id); + + if (pf_id < 0 || pf_id >= NBL_MAX_PF) + return 0; + + vf_id = vf_id + 1; + list_for_each_entry(entry, 
&fd_mgt->info[pf_id].list[NBL_CHAN_FDIR_RULE_NORMAL], node) {
+		if (entry->param.vf != vf_id)
+			continue;
+
+		if (entry->param.ring < data->ring_num) {
+			entry->param.global_queue_id = data->map[entry->param.ring];
+			action.dqueue.que_id = entry->param.global_queue_id;
+			action_data = action.data + (NBL_ACT_SET_QUE_IDX << 16);
+		} else {
+			entry->param.global_queue_id = 0xFFFF;
+			action.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST;
+			action.dport.up.port_id = 0x3FF;
+			action.dport.up.upcall_flag = AUX_KEEP_FWD_TYPE;
+			action.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO;
+			action_data = action.data + (NBL_ACT_SET_DPORT << 16);
+		}
+
+		phy_ops->set_fd_action_ram(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					   action_data, entry->action_index, entry->depth_index);
+	}
+
+	return 0;
+}
+
+static int nbl_fd_handle_state_update(u16 type, void *event_data, void *callback_data)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)callback_data;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_event_acl_state_update_data *data =
+		(struct nbl_event_acl_state_update_data *)event_data;
+
+	if (fd_mgt->state == NBL_FD_STATE_OFF && !data->is_offload) {
+		fd_mgt->state = NBL_FD_STATE_ON;
+		nbl_fd_setup_flow(res_mgt);
+	} else if (fd_mgt->state == NBL_FD_STATE_ON && data->is_offload) {
+		nbl_fd_remove_flow(res_mgt);
+		fd_mgt->state = NBL_FD_STATE_OFF;
+	}
+
+	return 0;
+}
+
+/* --------- Res-layer ops Functions --------- */
+
+static int nbl_fd_get_fd_flow_cnt(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id);
+
+	if (pf_id < 0 || pf_id >= NBL_MAX_PF)
+		return -EINVAL;
+
+	if (rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	return fd_mgt->info[pf_id].cnt[rule_type];
+}
+
+static int nbl_fd_get_fd_flow_all(void *priv, struct nbl_chan_param_get_fd_flow_all *param,
+				  u32 *rule_locs)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_flow_direct_entry *entry = NULL;
+	int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, param->vsi_id), index = 0;
+
+	if (pf_id < 0 || pf_id >= NBL_MAX_PF)
+		return -EINVAL;
+
+	if (param->rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	list_for_each_entry(entry, &fd_mgt->info[pf_id].list[param->rule_type], node) {
+		if (index >= param->start + param->num)
+			break;
+
+		/* advance index even for skipped entries so the window can move */
+		if (index++ < param->start)
+			continue;
+
+		rule_locs[index - 1] = entry->param.location;
+	}
+
+	return 0;
+}
+
+static int nbl_fd_get_fd_flow_max(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+
+	return fd_mgt->max_spec;
+}
+
+static int nbl_fd_config_fd_flow_state(void *priv, enum nbl_chan_fdir_rule_type rule_type,
+					u16 vsi_id, u16 state)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt);
+	struct nbl_flow_direct_entry *entry = NULL, *entry_safe = NULL;
+	int pf_id;
+
+	pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id);
+	if (pf_id < 0 || pf_id >= NBL_MAX_PF)
+		return -EINVAL;
+
+	if (rule_type >= NBL_CHAN_FDIR_RULE_MAX)
+		return -EINVAL;
+
+	if (state == NBL_FD_STATE_OFF || state == NBL_FD_STATE_FLUSH) {
+		list_for_each_entry_safe(entry, entry_safe,
&fd_mgt->info[pf_id].list[rule_type], node) + nbl_fd_del_flow(fd_mgt, &fd_mgt->info[pf_id], entry); + nbl_fd_setup_flow(res_mgt); + } + if (state != NBL_FD_STATE_FLUSH) + fd_mgt->info[pf_id].state[rule_type] = state; + + return 0; +} + +static int nbl_fd_get_fd_flow(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_entry *entry = NULL; + int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + if (location >= fd_mgt->max_spec || pf_id < 0 || pf_id >= NBL_MAX_PF) + return -EINVAL; + + entry = nbl_fd_find_flow(&fd_mgt->info[pf_id], rule_type, location); + if (!entry) + return -ENOENT; + + memcpy(cmd, &entry->param, sizeof(*cmd) + entry->param.tlv_length); + return 0; +} + +static int nbl_fd_replace_fd_flow(void *priv, struct nbl_chan_param_fdir_replace *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_info *info = NULL; + struct nbl_flow_direct_entry *entry = NULL; + int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, param->vsi), ret = 0; + + if (pf_id < 0 || pf_id >= NBL_MAX_PF || param->location >= fd_mgt->max_spec) + return -EINVAL; + + if (param->rule_type == NBL_CHAN_FDIR_RULE_NORMAL && + fd_mgt->info[pf_id].state[param->rule_type] == NBL_FD_STATE_OFF) + return -EINVAL; + + info = &fd_mgt->info[pf_id]; + entry = nbl_fd_find_flow(info, param->rule_type, param->location); + ret = nbl_fd_validate_rule(fd_mgt, param, entry); + if (ret) + return ret; + + if (entry) + nbl_fd_del_flow(fd_mgt, info, entry); + + entry = nbl_fd_add_flow(fd_mgt, info, param); + if (!entry) + goto add_entry_fail; + + ret = nbl_fd_setup_flow(res_mgt); + if (ret) + goto setup_flow_fail; + + return 0; + +setup_flow_fail: + nbl_fd_find_and_del_flow(fd_mgt, info, param->rule_type, param->location); +add_entry_fail: + return ret; +} + +static int nbl_fd_remove_fd_flow(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_info *info = NULL; + int pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + int ret; + + if (pf_id < 0 || pf_id >= NBL_MAX_PF || loc >= fd_mgt->max_spec) + return -EINVAL; + + info = &fd_mgt->info[pf_id]; + ret = nbl_fd_find_and_del_flow(fd_mgt, info, rule_type, loc); + if (ret) + return ret; + + return nbl_fd_setup_flow(res_mgt); +} + +static void nbl_fd_cfg_update_event(void *priv, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_event_callback event_callback = {0}; + + event_callback.callback_data = res_mgt; + + if (enable) { + event_callback.callback = nbl_fd_handle_state_update; + nbl_event_register(NBL_EVENT_ACL_STATE_UPDATE, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + event_callback.callback = nbl_fd_handle_queue_update; + nbl_event_register(NBL_EVENT_QUEUE_ALLOC, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } else { + event_callback.callback = nbl_fd_handle_state_update; + nbl_event_unregister(NBL_EVENT_ACL_STATE_UPDATE, &event_callback, + 
NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + event_callback.callback = nbl_fd_handle_queue_update; + nbl_event_unregister(NBL_EVENT_QUEUE_ALLOC, &event_callback, + NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common)); + } +} + +char flow_type_name[NBL_CHAN_FDIR_FLOW_MAX_TYPE][16] = { + "Full/Isolate", + "ETHER", + "IPV4", + "IPV6", + "TCP_V4", + "TCP_V6", + "UDP_V4", + "UDP_V6", +}; + +char mode_name[NBL_FD_MODE_MAX][16] = { + "DEFAULT", + "FULL", + "LITE", +}; + +static int nbl_fd_dump_entry_tlv(u16 type, u16 length, u8 *val, void *data) +{ + struct seq_file *m = (struct seq_file *)(data); + + switch (type) { + case NBL_CHAN_FDIR_KEY_SRC_MAC: + seq_printf(m, "\tCompo [ SRC-MAC ]: data %02x-%02x-%02x-%02x-%02x-%02x, mask %02x-%02x-%02x-%02x-%02x-%02x\n", + val[0], val[1], val[2], val[3], val[4], val[5], + val[6], val[7], val[8], val[9], val[10], val[11]); + break; + case NBL_CHAN_FDIR_KEY_DST_MAC: + seq_printf(m, "\tCompo [ DST-MAC ]: data %02x-%02x-%02x-%02x-%02x-%02x, mask %02x-%02x-%02x-%02x-%02x-%02x\n", + val[0], val[1], val[2], val[3], val[4], val[5], + val[6], val[7], val[8], val[9], val[10], val[11]); + break; + case NBL_CHAN_FDIR_KEY_PROTO: + seq_printf(m, "\tCompo [ ETHERTYPE ]: data 0x%04x, mask 0x%04x\n", + *(u16 *)val, *(u16 *)(val + 2)); + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv4: + seq_printf(m, "\tCompo [ SRC-IPV4 ]: data %pI4, mask %pI4\n", + (u32 *)val, (u32 *)(val + 4)); + break; + case NBL_CHAN_FDIR_KEY_DST_IPv4: + seq_printf(m, "\tCompo [ DST-IPV4 ]: data %pI4, mask %pI4\n", + (u32 *)val, (u32 *)(val + 4)); + break; + case NBL_CHAN_FDIR_KEY_L4PROTO: + seq_printf(m, "\tCompo [ IPPROTO ]: data 0x%x, mask 0x%x\n", + *(u8 *)val, *(u8 *)(val + 1)); + break; + case NBL_CHAN_FDIR_KEY_SRC_IPv6: + seq_printf(m, "\tCompo [SRC-IPV6 ]: data %pI6, mask %pI6\n", + val, val + 12); + break; + case NBL_CHAN_FDIR_KEY_DST_IPv6: + seq_printf(m, "\tCompo [DST-IPV6 ]: data %pI6, mask %pI6\n", + val, val + 12); + break; + case NBL_CHAN_FDIR_KEY_SPORT: + seq_printf(m, "\tCompo [ L4-SPORT ]: data 0x%x, mask 0x%x\n", + *(u16 *)val, *(u16 *)(val + 2)); + break; + case NBL_CHAN_FDIR_KEY_DPORT: + seq_printf(m, "\tCompo [ L4-DPORT ]: data 0x%x, mask 0x%x\n", + *(u16 *)val, *(u16 *)(val + 2)); + break; + case NBL_CHAN_FDIR_KEY_UDF: + seq_printf(m, "\tCompo [ USER-DEF ]: data 0x%llx, mask 0x%llx\n", + *(u64 *)val, *(u64 *)(val + 8)); + break; + case NBL_CHAN_FDIR_ACTION_QUEUE: + seq_printf(m, "\tCompo [ GLOBAL-QUE ]: data 0x%llx\n", *(u64 *)val); + break; + case NBL_CHAN_FDIR_ACTION_VSI: + seq_printf(m, "\tCompo [ VSI ]: vsi 0x%llx\n", *(u64 *)val); + break; + default: + break; + } + + return 0; +} + +static void nbl_fd_dump_entry(struct seq_file *m, struct nbl_flow_direct_entry *entry) +{ + struct nbl_chan_param_fdir_replace *param = &entry->param; + + seq_printf(m, "\n[ %-10s]: pid %d, location %4d, global queue id %4u\n", + flow_type_name[param->flow_type], entry->pid, + param->location, param->global_queue_id); + + nbl_flow_direct_parse_tlv_data(entry->param.tlv, entry->param.tlv_length, + nbl_fd_dump_entry_tlv, m); +} + +static void nbl_fd_dump_flow(void *priv, struct seq_file *m) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_direct_mgt *fd_mgt = NBL_RES_MGT_TO_FD_MGT(res_mgt); + struct nbl_flow_direct_entry *entry = NULL; + int i, j; + + seq_puts(m, "\n/* ----------------------- Flow Direct ----------------------- */\n\n"); + + seq_printf(m, "[STATE\t\t %-4s\t]\n[MODE\t\t %-4s\t]\n[DEFAULT_CNT\t %-4d]\n[IPV4_CNT\t 
%-4d\t]\n[L2&IPV6_CNT\t %-4d\t]\n[UDF cnt/layer/offset:\t %-4d %-4d %-4d\t]\n", + fd_mgt->state == NBL_FD_STATE_OFF ? "OFF" : "ON", mode_name[fd_mgt->mode], + fd_mgt->cnt[NBL_FD_PROFILE_DEFAULT], fd_mgt->cnt[NBL_FD_PROFILE_IPV4], + fd_mgt->cnt[NBL_FD_PROFILE_L2_IPV6], fd_mgt->udf_cnt, fd_mgt->udf_layer, + fd_mgt->udf_offset); + + for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) { + for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++) { + seq_printf(m, "\nPF %d/%d: %d flows state %-4s -------------------\n", + i, j, fd_mgt->info[i].cnt[j], + fd_mgt->info[i].state[j] == NBL_FD_STATE_OFF ? "OFF" : "ON"); + + list_for_each_entry(entry, &fd_mgt->info[i].list[j], node) + nbl_fd_dump_entry(m, entry); + } + } + + seq_puts(m, "\n"); +} + +/* NBL_FD_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_FD_OPS_TBL \ +do { \ + NBL_FD_SET_OPS(get_fd_flow, nbl_fd_get_fd_flow); \ + NBL_FD_SET_OPS(get_fd_flow_cnt, nbl_fd_get_fd_flow_cnt); \ + NBL_FD_SET_OPS(get_fd_flow_all, nbl_fd_get_fd_flow_all); \ + NBL_FD_SET_OPS(get_fd_flow_max, nbl_fd_get_fd_flow_max); \ + NBL_FD_SET_OPS(config_fd_flow_state, nbl_fd_config_fd_flow_state); \ + NBL_FD_SET_OPS(replace_fd_flow, nbl_fd_replace_fd_flow); \ + NBL_FD_SET_OPS(remove_fd_flow, nbl_fd_remove_fd_flow); \ + NBL_FD_SET_OPS(cfg_fd_update_event, nbl_fd_cfg_update_event); \ + NBL_FD_SET_OPS(dump_fd_flow, nbl_fd_dump_flow); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_fd_setup_mgt(struct device *dev, struct nbl_flow_direct_mgt **fd_mgt) +{ + int i, j; + + *fd_mgt = devm_kzalloc(dev, sizeof(struct nbl_flow_direct_mgt), GFP_KERNEL); + if (!*fd_mgt) + return -ENOMEM; + + for (i = 0; i < NBL_MAX_PF; i++) { + for (j = 0; j < NBL_CHAN_FDIR_RULE_MAX; j++) { + INIT_LIST_HEAD(&(*fd_mgt)->info[i].list[j]); + (*fd_mgt)->info[i].state[j] = NBL_FD_STATE_OFF; + } + } + + (*fd_mgt)->udf_cnt = 0; + (*fd_mgt)->udf_layer = 0; + + (*fd_mgt)->mode = NBL_FD_MODE_DEFAULT; + (*fd_mgt)->max_spec = NBL_FD_RULE_MAX_DEFAULT; + (*fd_mgt)->state = NBL_FD_STATE_ON; + + return 0; +} + +static void nbl_fd_remove_mgt(struct device *dev, struct nbl_flow_direct_mgt **fd_mgt) +{ + devm_kfree(dev, *fd_mgt); + *fd_mgt = NULL; +} + +int nbl_fd_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_flow_direct_mgt **fd_mgt = &NBL_RES_MGT_TO_FD_MGT(res_mgt); + int ret = 0; + + ret = nbl_fd_setup_mgt(dev, fd_mgt); + + return ret; +} + +void nbl_fd_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_flow_direct_mgt **fd_mgt = &NBL_RES_MGT_TO_FD_MGT(res_mgt); + + if (!(*fd_mgt)) + return; + + nbl_fd_remove_flow(res_mgt); + nbl_fd_del_flow_all(res_mgt); + nbl_fd_remove_mgt(dev, fd_mgt); +} + +int nbl_fd_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_FD_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_FD_OPS_TBL; +#undef NBL_FD_SET_OPS + + return 0; +} + +void nbl_fd_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_FD_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_FD_OPS_TBL; +#undef NBL_FD_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h new file mode 100644 index 0000000000000000000000000000000000000000..05020a7cb3e2dd790e83c5458d8f3edab126820a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_fd.h @@ -0,0 
+1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_FD_H_ +#define _NBL_FD_H_ + +#include "nbl_resource.h" + +#define NBL_FD_RULE_MAX_512 (512) +#define NBL_FD_RULE_MAX_1024 (1024) +#define NBL_FD_RULE_MAX_1536 (1536) +#define NBL_FD_RULE_MAX_DEFAULT (NBL_FD_RULE_MAX_512) +#define NBL_FD_RULE_MAX (NBL_FD_RULE_MAX_1536) + +#define NBL_FD_TCAM_DEPTH (512) + +#define NBL_FD_IPV4_TCAM_WIDTH (5) +#define NBL_FD_L2_IPV6_TCAM_WIDTH (10) +#define NBL_FD_DEFAULT_MODE_DEPTH (1) +#define NBL_FD_LITE_MODE_DEPTH (4) +#define NBL_FD_FULL_MODE_DEPTH (1) + +#define NBL_FD_UDF_FLEX_WORD_M GENMASK_ULL(31, 0) +#define NBL_FD_UDF_FLEX_OFFS_S 32 +#define NBL_FD_UDF_FLEX_OFFS_M GENMASK_ULL(63, NBL_FD_UDF_FLEX_OFFS_S) +#define NBL_FD_UDF_FLEX_FLTR_M GENMASK_ULL(63, 0) + +union nbl_fd_tcam_default_data_u { + struct nbl_fd_tcam_default_data { + u64 rsv1:12; + u64 dport:16; + u64 padding:8; + u64 l4_proto:8; + u64 l4_dport:16; + u64 l4_sport:16; + u64 ethertype:16; + u64 src_mac:48; + u64 dst_mac:48; + u64 udf:32; + u64 dip_l:64; + u64 dip_h:64; + u64 sip_l:64; + u64 sip_h:64; + u64 pid:4; + } __packed info; +#define NBL_FD_TCAM_DEFAULT_DATA_TAB_WIDTH (sizeof(struct nbl_fd_tcam_default_data) / sizeof(u32)) + u32 data[NBL_FD_TCAM_DEFAULT_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_fd_tcam_default_data)]; +}; + +union nbl_fd_tcam_ipv4_data_u { + struct nbl_fd_tcam_ipv4_data { + u64 rsv1:28; + u64 dport:16; + u64 padding:8; + u64 l4_proto:8; + u64 l4_dport:16; + u64 l4_sport:16; + u64 udf:32; + u64 dip:32; + u64 sip:32; + u64 pid:4; + } __packed info; +#define NBL_FD_TCAM_IPV4_DATA_TAB_WIDTH (sizeof(struct nbl_fd_tcam_ipv4_data) / sizeof(u32)) + u32 data[NBL_FD_TCAM_IPV4_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_fd_tcam_ipv4_data)]; +}; + +union nbl_fd_tcam_l2_ipv6_data_u { + struct nbl_fd_tcam_l2_ipv6_data { + u64 rsv:28; + u64 dport:16; + u64 padding:8; + u64 l4_proto:8; + u64 l4_dport:16; + u64 l4_sport:16; + u64 ehtertype:16; + u64 udf:32; + u32 dip[NBL_IPV6_U32LEN]; + u32 sip[NBL_IPV6_U32LEN]; + u64 pid:4; + } __packed info; +#define NBL_FD_TCAM_L2_IPV6_DATA_TAB_WIDTH (sizeof(struct nbl_fd_tcam_l2_ipv6_data) / sizeof(u32)) + u32 data[NBL_FD_TCAM_L2_IPV6_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_fd_tcam_l2_ipv6_data)]; +}; + +struct nbl_fd_tcam_index { + u16 depth_index; +}; + +struct nbl_fd_tcam_index_info { + struct nbl_fd_tcam_index default_index[NBL_FD_DEFAULT_MODE_DEPTH]; + struct nbl_fd_tcam_index v4[NBL_FD_LITE_MODE_DEPTH]; + struct nbl_fd_tcam_index v6[NBL_FD_FULL_MODE_DEPTH]; + u8 default_cnt; + u8 v4_cnt; + u8 v6_cnt; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..1ee6260640dec62554324cda168914fe969599cf --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw.h @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_HW_H_ +#define _NBL_HW_H_ + +#include "nbl_include.h" + +#define NBL_MAX_ETHERNET (4) + +#define NBL_PT_PP0 0 +#define NBL_PT_LEN 3 +#define NBL_TCAM_TABLE_LEN (64) +#define NBL_MCC_ID_INVALID U16_MAX +#define NBL_KT_BYTE_LEN 40 +#define NBL_KT_BYTE_HALF_LEN 20 + +#define NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2 0 +#define NBL_EM0_PT_PHY_UP_LLDP_LACP 1 +#define NBL_EM0_PT_PHY_UP_UNICAST_L2 2 +#define NBL_EM0_PT_PHY_DOWN_UNICAST_L2 3 +#define NBL_EM0_PT_PHY_UP_MULTICAST_L2 4 +#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L2 5 +#define NBL_EM0_PT_PHY_UP_MULTICAST_L3 6 +#define NBL_EM0_PT_PHY_DOWN_MULTICAST_L3 7 +#define NBL_EM0_PT_PHY_DPRBAC_IPV4 8 +#define NBL_EM0_PT_PHY_DPRBAC_IPV6 9 +#define NBL_EM0_PT_PHY_UL4S_IPV4 10 +#define NBL_EM0_PT_PHY_UL4S_IPV6 11 +#define NBL_EM0_PT_PMD_ND_UPCALL 12 + +#define NBL_PP0_PROFILE_ID_MIN (0) +#define NBL_PP0_PROFILE_ID_MAX (15) +#define NBL_PP1_PROFILE_ID_MIN (16) +#define NBL_PP1_PROFILE_ID_MAX (31) +#define NBL_PP2_PROFILE_ID_MIN (32) +#define NBL_PP2_PROFILE_ID_MAX (47) +#define NBL_PP_PROFILE_NUM (16) + +#define NBL_QID_MAP_TABLE_ENTRIES (4096) +#define NBL_EPRO_RSS_RET_TBL_DEPTH (8192 * 2) +#define NBL_EPRO_RSS_ENTRY_SIZE_UNIT (16) + +#define NBL_EPRO_RSS_SK_SIZE 40 +#define NBL_EPRO_RSS_PER_KEY_SIZE 8 +#define NBL_EPRO_RSS_KEY_NUM (NBL_EPRO_RSS_SK_SIZE / NBL_EPRO_RSS_PER_KEY_SIZE) + +enum { + NBL_HT0, + NBL_HT1, + NBL_HT_MAX, +}; + +enum { + NBL_KT_HALF_MODE, + NBL_KT_FULL_MODE, +}; + +enum nbl_pp_type { + NBL_PP_TYPE_0, + NBL_PP_TYPE_1, + NBL_PP_TYPE_2, + NBL_PP_TYPE_MAX, +}; + +enum nbl_pp_at_type { + NBL_AT_TYPE_0, + NBL_AT_TYPE_1, + NBL_AT_TYPE_2, + NBL_AT_TYPE_MAX, +}; + +enum nbl_pp_fc_type { + NBL_FC_COMMON_TYPE, + NBL_FC_SPEC_TYPE, + NBL_FC_TYPE_MAX, +}; + +#pragma pack(1) +union nbl_action_data { + struct clear_flag_act { + u16 clear_flag:8; + u16 start_offset:5; + u16 rsv:1; + u16 identify:2; + #define NBL_CLEAR_FLAGS_IDENTIFY (0) + } clear_flag; + + struct set_flag_act { + u16 set_flag:8; + u16 start_offset:5; + u16 rsv:1; + u16 identify:2; + #define NBL_SET_FLAGS_IDENTIFY (1) + } set_flag; + + struct set_fwd_type_act { + u16 next_stg:4; + u16 next_stg_vld:1; + u16 fwd_type:3; + u16 fwd_type_vld:1; + u16 cos:3; + u16 set_cos_vld:1; + u16 rsv:1; + u16 identify:2; + #define NBL_SET_FWD_TYPE_IDENTIFY (2) + } set_fwd_type; + + /* FLOW ACTION */ + struct flow_id_act { + u16 flow_id; + } flow_idx; + + struct rss_id_act { + u16 rss_id:10; + u16 rss_tc_en:1; + u16 rsv:5; + } rss_idx; + + struct port_car_act { + u16 car_id:10; + u16 rsv:6; + } port_car; + + struct flow_car_act { + u16 car_id:12; + u16 rsv:4; + } flow_car; + + struct cascade_act_act { + u16 table_id; + } cascade_act; + + struct mirror_id_act { + u16 mirror_id:4; + u16 mirror_mode:2; + #define NBL_MIRROR_MODE_IN (0) + #define NBL_MIRROR_MODE_FLOW (1) + #define NBL_MIRROR_MODE_OUT (2) + uint32_t rsv:10; + } mirror_idx; + + union dport_act { + struct { + /* port_type = SET_DPORT_TYPE_ETH_LAG, set the eth and lag field. 
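+			 * In this layout, dport_type below selects
+			 * ETH/LAG/VSI and dport_id picks one of the
+			 * FWD_DPORT_ID_* targets.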
*/ + u16 dport_info:10; + u16 dport_type:2; + #define FWD_DPORT_TYPE_ETH (0) + #define FWD_DPORT_TYPE_LAG (1) + #define FWD_DPORT_TYPE_VSI (2) + u16 dport_id:4; + #define FWD_DPORT_ID_HOST_TLS (0) + #define FWD_DPORT_ID_ECPU_TLS (1) + #define FWD_DPORT_ID_HOST_RDMA (2) + #define FWD_DPORT_ID_ECPU_RDMA (3) + #define FWD_DPORT_ID_EMP (4) + #define FWD_DPORT_ID_BMC (5) + #define FWD_DPORT_ID_LOOP_BACK (7) + #define FWD_DPORT_ID_ETH0 (8) + #define FWD_DPORT_ID_ETH1 (9) + #define FWD_DPORT_ID_ETH2 (10) + #define FWD_DPORT_ID_ETH3 (11) + } fwd_dport; + + struct { + /* port_type = SET_DPORT_TYPE_ETH_LAG, set the eth and lag field. */ + u16 eth_id:2; + u16 lag_id:2; + u16 eth_vld:1; + u16 lag_vld:1; + u16 rsv:4; + u16 port_type:2; + u16 next_stg_sel:2; + u16 upcall_flag:2; + } down; + + struct { + /* port_type = SET_DPORT_TYPE_VSI_HOST and SET_DPORT_TYPE_VSI_ECPU, + * set the port_id field as the vsi_id. + * port_type = SET_DPORT_TYPE_SP_PORT, set the port_id as the defined + * PORT_TYPE_SP_*. + */ + u16 port_id:10; + #define PORT_TYPE_SP_DROP (0x3FF) + #define PORT_TYPE_SP_GLB_LB (0x3FE) + #define PORT_TYPE_SP_BMC (0x3FD) + #define PORT_TYPE_SP_EMP (0x3FC) + u16 port_type:2; + #define SET_DPORT_TYPE_VSI_HOST (0) + #define SET_DPORT_TYPE_VSI_ECPU (1) + #define SET_DPORT_TYPE_ETH_LAG (2) + #define SET_DPORT_TYPE_SP_PORT (3) + u16 next_stg_sel:2; + #define NEXT_STG_SEL_NONE (0) + #define NEXT_STG_SEL_ACL_S0 (1) + #define NEXT_STG_SEL_EPRO (2) + #define NEXT_STG_SEL_BYPASS (3) + u16 upcall_flag:2; + #define AUX_KEEP_FWD_TYPE (0) + #define AUX_FWD_TYPE_NML_FWD (1) + #define AUX_FWD_TYPE_UPCALL (2) + } up; + } dport; + + struct dqueue_act { + u16 que_id:11; + u16 rsv:5; + } dqueue; + + struct mcc_id_act { + u16 mcc_id:13; + u16 pri:1; + #define NBL_MCC_PRI_HIGH (0) + #define NBL_MCC_PRI_LOW (1) + uint32_t rsv:2; + } mcc_idx; + + struct vni_id_act { + u16 vni_id; + } vni_idx; + + struct stat_flow_id_act { + u16 stat_flow_id:11; + u16 rsv:5; + } stat_flow_idx; + + struct prbac_id_act { + u16 prbac_id; + } prbac_idx; + + struct dp_hash_act { + u16 dp_hash; + } dp_hash_idx; + + struct pri_mdf_dscp_act { + u16 dscp:6; + u16 i_ip_flag:1; + u16 o_ip_flag:1; + u16 off_sel:1; + #define NBL_DSCP_MDF_OFF_SEL_IPV4 (0) + #define NBL_DSCP_MDF_OFF_SEL_IPV6 (1) + u16 rsv:1; + u16 dscp_flag:1; + u16 rsv1:5; + } pri_mdf_dscp; + + struct pri_mdf_vlan_act { + u16 pri:3; + u16 rsv0:3; + u16 i_cvlan_flag:1; + u16 i_svlan_flag:1; + u16 o_cvlan_flag:1; + u16 o_svlan_flag:1; + u16 rsv1:6; + } pri_mdf_vlan; + + struct ttl_mdf_act { + u16 ttl_value:8; + u16 ttl_sub1_flag:1; + u16 rsv:7; + } ttl_mdf; + + struct vlan_mdf_act { + u16 vlan_value; + } vlan_mdf; + + struct dscp_mdf_act { + u16 ecn_value:2; + u16 dscp_value:6; + u16 ecn_en:1; + u16 dscp_en:1; + u16 rsv:6; + } dscp_mdf; + + struct index_value_act { + u16 index; + } index_value; + + struct set_aux_act { + u16 nstg_val:4; + u16 nstg_vld:1; + u16 ftype_val:3; + u16 ftype_vld:1; + u16 pkt_cos_val:3; + u16 pcos_vld:1; + u16 rsv:1; + #define NBL_SET_AUX_CLR_FLG (0) + #define NBL_SET_AUX_SET_FLG (1) + #define NBL_SET_AUX_SET_AUX (2) + u16 sub_id:2; + } set_aux; + + u16 data; +}; + +#pragma pack() + +enum nbl_chan_flow_rule_type { + NBL_FLOW_EPRO_ECPVPT_REG = 0, + NBL_FLOW_EPRO_ECPIPT_REG, + NBL_FLOW_DPED_TAB_TNL_REG, + NBL_FLOW_DPED_REPLACE, + NBL_FLOW_UPED_REPLACE, + NBL_FLOW_DPED_MIRROR_TABLE, + NBL_FLOW_DPED_MIR_CMD_0_TABLE, + NBL_FLOW_EPRO_MT_REG, + NBL_FLOW_EM0_TCAM_TABLE_REG, + NBL_FLOW_EM1_TCAM_TABLE_REG, + NBL_FLOW_EM2_TCAM_TABLE_REG, + NBL_FLOW_EM0_AD_TABLE_REG, + 
NBL_FLOW_EM1_AD_TABLE_REG, + NBL_FLOW_EM2_AD_TABLE_REG, + NBL_FLOW_IPRO_UDL_PKT_FLT_DMAC_REG, + NBL_FLOW_IPRO_UDL_PKT_FLT_CTRL_REG, + NBL_FLOW_ACTION_RAM_TBL, + NBL_FLOW_MCC_TBL_REG, + NBL_FLOW_EPRO_EPT_REG, + NBL_FLOW_IPRO_UP_SRC_PORT_TBL_REG, + NBL_FLOW_UCAR_FLOW_REG, + NBL_FLOW_EPRO_VPT_REG, + NBL_FLOW_UCAR_FLOW_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_GRP_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_GRP_REG, + NBL_FLOW_DSCH_VN_SHA2GRP_MAP_TBL_REG, + NBL_FLOW_DSCH_VN_GRP2SHA_MAP_TBL_REG, + NBL_FLOW_SHAPING_DPORT_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_DPORT_REG, + NBL_FLOW_DSCH_PSHA_EN_ADDR, + NBL_FLOW_UCAR_FLOW_4K_REG, + NBL_FLOW_UCAR_FLOW_4K_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_NET_TIMMING_ADD_ADDR, + NBL_FLOW_SHAPING_NET_REG, + NBL_FLOW_DSCH_VN_NET2SHA_MAP_TBL_REG, + NBL_FLOW_DSCH_VN_SHA2NET_MAP_TBL_REG, + NBL_FLOW_UCAR_CAR_CTRL_ADDR, + NBL_FLOW_UCAR_GREEN_CELL_ADDR, + NBL_FLOW_UCAR_GREEN_PKT_ADDR, + NBL_FLOW_UPED_VSI_TYPE_REG, + NBL_FLOW_DPED_VSI_TYPE_REG, +}; + +enum nbl_chan_flow_mode { + NBL_FLOW_READ_MODE = 0, + NBL_FLOW_WRITE_MODE, + NBL_FLOW_READ_OR_WRITE_MODE, + NBL_FLOW_READ_AND_WRITE_MODE, + NBL_FLOW_READ_OR_AND_WRITE_MODE, +}; + +#define SFF8636_TRANSMIT_FIBER_850nm_VCSEL (0x0) +#define SFF8636_TRANSMIT_FIBER_1310nm_VCSEL (0x1) +#define SFF8636_TRANSMIT_FIBER_1550nm_VCSEL (0x2) +#define SFF8636_TRANSMIT_FIBER_1310nm_FP (0x3) +#define SFF8636_TRANSMIT_FIBER_1310nm_DFB (0x4) +#define SFF8636_TRANSMIT_FIBER_1550nm_DFB (0x5) +#define SFF8636_TRANSMIT_FIBER_1310nm_EML (0x6) +#define SFF8636_TRANSMIT_FIBER_1550nm_EML (0x7) +#define SFF8636_TRANSMIT_FIBER_OTHER (0x8) +#define SFF8636_TRANSMIT_FIBER_1490nm_DFB (0x9) +#define SFF8636_TRANSMIT_COPPER_UNEQUA (0xa) +#define SFF8636_TRANSMIT_COPPER_PASSIVE_EQUALIZED (0xb) +#define SFF8636_TRANSMIT_COPPER_NEAR_FAR_END (0xc) +#define SFF8636_TRANSMIT_COPPER_FAR_END (0xd) +#define SFF8636_TRANSMIT_COPPER_NEAR_END (0xe) +#define SFF8636_TRANSMIT_COPPER_LINEAR_ACTIVE (0xf) + +#define NBL_SPORT_ETH_OFFSET 8 + +enum { + NBL_FD_PROFILE_IPV4 = 2, + NBL_FD_PROFILE_L2_IPV6 = 4, + NBL_FD_PROFILE_DEFAULT = 6, + NBL_FD_PROFILE_MAX, +}; + +struct nbl_event_link_status_update_data { + u8 num; + u8 eth_id[NBL_MAX_ETHERNET]; +}; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h new file mode 100644 index 0000000000000000000000000000000000000000..105f130a9024e32d857b7cd3fe6f0bc1b4e6c44e --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath.h @@ -0,0 +1,27 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710
+
+#include "nbl_datapath_upa.h"
+#include "nbl_datapath_dpa.h"
+#include "nbl_datapath_uqm.h"
+#include "nbl_datapath_dqm.h"
+#include "nbl_datapath_ustat.h"
+#include "nbl_datapath_dstat.h"
+#include "nbl_datapath_upmem.h"
+#include "nbl_datapath_dpmem.h"
+//#include "nbl_datapath_uvn.h"
+//#include "nbl_datapath_dvn.h"
+#include "nbl_datapath_ucar.h"
+// #include "nbl_datapath_dsch.h"
+//#include "nbl_datapath_shaping.h"
+#include "nbl_datapath_uped.h"
+#include "nbl_datapath_dped.h"
+#include "nbl_datapath_drmux.h"
+#include "nbl_datapath_urmux.h"
+#include "nbl_datapath_ddmux.h"
+//#include "nbl_datapath_ul4s.h"
+//#include "nbl_datapath_dl4s.h"
+//#include "nbl_datapath_ustore.h"
+#include "nbl_datapath_dstore.h"
+//#include "nbl_datapath_ubm.h"
+//#include "nbl_datapath_dbm.h"
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dbm.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dbm.h
new file mode 100644
index 0000000000000000000000000000000000000000..17d82597333c38413e02c96aedaba56bc55e2abb
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dbm.h
@@ -0,0 +1,354 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_DBM_H
+#define NBL_DBM_H 1
+
+#include <linux/types.h>
+
+#define NBL_DBM_BASE (0x0070C000)
+
+#define NBL_DBM_INT_STATUS_ADDR (0x70c000)
+#define NBL_DBM_INT_STATUS_DEPTH (1)
+#define NBL_DBM_INT_STATUS_WIDTH (32)
+#define NBL_DBM_INT_STATUS_DWLEN (1)
+union dbm_int_status_u {
+	struct dbm_int_status {
+		u32 ucor_err:1;		/* [0] Default:0x0 RWC */
+		u32 fifo_w_err:1;	/* [1] Default:0x0 RWC */
+		u32 fifo_r_err:1;	/* [2] Default:0x0 RWC */
+		u32 mif_wr_err:1;	/* [3] Default:0x0 RWC */
+		u32 mif_rd_err:1;	/* [4] Default:0x0 RWC */
+		u32 bitmap_pntr_err:1;	/* [5] Default:0x0 RWC */
+		u32 tail_pntr_err:1;	/* [6] Default:0x0 RWC */
+		u32 weight_pntr_err:1;	/* [7] Default:0x0 RWC */
+		u32 cor_err:1;		/* [8] Default:0x0 RWC */
+		u32 cif_err:1;		/* [9] Default:0x0 RWC */
+		u32 rsv:22;		/* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_DBM_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_DBM_INT_MASK_ADDR (0x70c004)
+#define NBL_DBM_INT_MASK_DEPTH (1)
+#define NBL_DBM_INT_MASK_WIDTH (32)
+#define NBL_DBM_INT_MASK_DWLEN (1)
+union dbm_int_mask_u {
+	struct dbm_int_mask {
+		u32 ucor_err:1;		/* [0] Default:0x0 RW */
+		u32 fifo_w_err:1;	/* [1] Default:0x0 RW */
+		u32 fifo_r_err:1;	/* [2] Default:0x0 RW */
+		u32 mif_wr_err:1;	/* [3] Default:0x0 RW */
+		u32 mif_rd_err:1;	/* [4] Default:0x0 RW */
+		u32 bitmap_pntr_err:1;	/* [5] Default:0x0 RW */
+		u32 tail_pntr_err:1;	/* [6] Default:0x0 RW */
+		u32 weight_pntr_err:1;	/* [7] Default:0x0 RW */
+		u32 cor_err:1;		/* [8] Default:0x0 RW */
+		u32 cif_err:1;		/* [9] Default:0x0 RW */
+		u32 rsv:22;		/* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_DBM_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_DBM_INT_SET_ADDR (0x70c008)
+#define NBL_DBM_INT_SET_DEPTH (1)
+#define NBL_DBM_INT_SET_WIDTH (32)
+#define NBL_DBM_INT_SET_DWLEN (1)
+union dbm_int_set_u {
+	struct dbm_int_set {
+		u32 ucor_err:1;		/* [0] Default:0x0 WO */
+		u32 fifo_w_err:1;	/* [1] Default:0x0 WO */
+		u32 fifo_r_err:1;	/* [2] Default:0x0 WO */
+		u32 mif_wr_err:1;	/* [3] Default:0x0 WO */
+		u32 mif_rd_err:1;	/* [4] Default:0x0 WO */
+		u32 bitmap_pntr_err:1;	/* [5] Default:0x0 WO */
+		u32 tail_pntr_err:1;	/* [6] Default:0x0 WO */
+		u32 weight_pntr_err:1;	/* [7] Default:0x0 WO */
+ u32 cor_err:1; /* [8] Default:0x0 WO */ + u32 cif_err:1; /* [9] Default:0x0 WO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_INT_SET_DWLEN]; +} __packed; + +#define NBL_DBM_UCOR_ERR_INFO_ADDR (0x70c00c) +#define NBL_DBM_UCOR_ERR_INFO_DEPTH (1) +#define NBL_DBM_UCOR_ERR_INFO_WIDTH (32) +#define NBL_DBM_UCOR_ERR_INFO_DWLEN (1) +union dbm_ucor_err_info_u { + struct dbm_ucor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_UCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_MIF_WR_ERR_INFO_ADDR (0x70c02c) +#define NBL_DBM_MIF_WR_ERR_INFO_DEPTH (1) +#define NBL_DBM_MIF_WR_ERR_INFO_WIDTH (32) +#define NBL_DBM_MIF_WR_ERR_INFO_DWLEN (1) +union dbm_mif_wr_err_info_u { + struct dbm_mif_wr_err_info { + u32 sel:1; /* [0] Default:0x0 RO */ + u32 err_type_id:2; /* [2:1] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_MIF_WR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_MIF_RD_ERR_INFO_ADDR (0x70c034) +#define NBL_DBM_MIF_RD_ERR_INFO_DEPTH (1) +#define NBL_DBM_MIF_RD_ERR_INFO_WIDTH (32) +#define NBL_DBM_MIF_RD_ERR_INFO_DWLEN (1) +union dbm_mif_rd_err_info_u { + struct dbm_mif_rd_err_info { + u32 sel:1; /* [0] Default:0x0 RO */ + u32 err_type_id:1; /* [1] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_MIF_RD_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_WEIGHT_PNTR_ERR_INFO_ADDR (0x70c03c) +#define NBL_DBM_WEIGHT_PNTR_ERR_INFO_DEPTH (1) +#define NBL_DBM_WEIGHT_PNTR_ERR_INFO_WIDTH (32) +#define NBL_DBM_WEIGHT_PNTR_ERR_INFO_DWLEN (1) +union dbm_weight_pntr_err_info_u { + struct dbm_weight_pntr_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_WEIGHT_PNTR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_TAIL_PNTR_ERR_INFO_ADDR (0x70c044) +#define NBL_DBM_TAIL_PNTR_ERR_INFO_DEPTH (1) +#define NBL_DBM_TAIL_PNTR_ERR_INFO_WIDTH (32) +#define NBL_DBM_TAIL_PNTR_ERR_INFO_DWLEN (1) +union dbm_tail_pntr_err_info_u { + struct dbm_tail_pntr_err_info { + u32 head_pntr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_TAIL_PNTR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_BITMAP_PNTR_ERR_INFO_ADDR (0x70c04c) +#define NBL_DBM_BITMAP_PNTR_ERR_INFO_DEPTH (1) +#define NBL_DBM_BITMAP_PNTR_ERR_INFO_WIDTH (32) +#define NBL_DBM_BITMAP_PNTR_ERR_INFO_DWLEN (1) +union dbm_bitmap_pntr_err_info_u { + struct dbm_bitmap_pntr_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_BITMAP_PNTR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_PNTR_AGING_INFO_ADDR (0x70c054) +#define NBL_DBM_PNTR_AGING_INFO_DEPTH (1) +#define NBL_DBM_PNTR_AGING_INFO_WIDTH (32) +#define NBL_DBM_PNTR_AGING_INFO_DWLEN (1) +union dbm_pntr_aging_info_u { + struct dbm_pntr_aging_info { + u32 addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_PNTR_AGING_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_COR_ERR_INFO_ADDR (0x70c05c) +#define NBL_DBM_COR_ERR_INFO_DEPTH (1) +#define NBL_DBM_COR_ERR_INFO_WIDTH (32) +#define NBL_DBM_COR_ERR_INFO_DWLEN (1) +union dbm_cor_err_info_u { + struct dbm_cor_err_info { + u32 ram_addr:12; /* [11:0] Default:0x0 RO */ + 
u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_CIF_ERR_INFO_ADDR (0x70c064) +#define NBL_DBM_CIF_ERR_INFO_DEPTH (1) +#define NBL_DBM_CIF_ERR_INFO_WIDTH (32) +#define NBL_DBM_CIF_ERR_INFO_DWLEN (1) +union dbm_cif_err_info_u { + struct dbm_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DBM_CAR_CTRL_ADDR (0x70c100) +#define NBL_DBM_CAR_CTRL_DEPTH (1) +#define NBL_DBM_CAR_CTRL_WIDTH (32) +#define NBL_DBM_CAR_CTRL_DWLEN (1) +union dbm_car_ctrl_u { + struct dbm_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DBM_INIT_START_ADDR (0x70c104) +#define NBL_DBM_INIT_START_DEPTH (1) +#define NBL_DBM_INIT_START_WIDTH (32) +#define NBL_DBM_INIT_START_DWLEN (1) +union dbm_init_start_u { + struct dbm_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_INIT_START_DWLEN]; +} __packed; + +#define NBL_DBM_AGING_EN_ADDR (0x70c120) +#define NBL_DBM_AGING_EN_DEPTH (1) +#define NBL_DBM_AGING_EN_WIDTH (32) +#define NBL_DBM_AGING_EN_DWLEN (1) +union dbm_aging_en_u { + struct dbm_aging_en { + u32 vld:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_AGING_EN_DWLEN]; +} __packed; + +#define NBL_DBM_AGING_TIME_UNIT_ADDR (0x70c124) +#define NBL_DBM_AGING_TIME_UNIT_DEPTH (1) +#define NBL_DBM_AGING_TIME_UNIT_WIDTH (32) +#define NBL_DBM_AGING_TIME_UNIT_DWLEN (1) +union dbm_aging_time_unit_u { + struct dbm_aging_time_unit { + u32 value:32; /* [31:0] Default:0xffff RW */ + } __packed info; + u32 data[NBL_DBM_AGING_TIME_UNIT_DWLEN]; +} __packed; + +#define NBL_DBM_LIST_RAM_RD_ADDR (0x70c128) +#define NBL_DBM_LIST_RAM_RD_DEPTH (1) +#define NBL_DBM_LIST_RAM_RD_WIDTH (32) +#define NBL_DBM_LIST_RAM_RD_DWLEN (1) +union dbm_list_ram_rd_u { + struct dbm_list_ram_rd { + u32 sel:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_LIST_RAM_RD_DWLEN]; +} __packed; + +#define NBL_DBM_INIT_DONE_ADDR (0x70c200) +#define NBL_DBM_INIT_DONE_DEPTH (1) +#define NBL_DBM_INIT_DONE_WIDTH (32) +#define NBL_DBM_INIT_DONE_DWLEN (1) +union dbm_init_done_u { + struct dbm_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DBM_NFULL_HISTORY_ADDR (0x70c218) +#define NBL_DBM_NFULL_HISTORY_DEPTH (1) +#define NBL_DBM_NFULL_HISTORY_WIDTH (32) +#define NBL_DBM_NFULL_HISTORY_DWLEN (1) +union dbm_nfull_history_u { + struct dbm_nfull_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_NFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_DBM_NAFULL_HISTORY_ADDR (0x70c21c) +#define NBL_DBM_NAFULL_HISTORY_DEPTH (1) 
+#define NBL_DBM_NAFULL_HISTORY_WIDTH (32) +#define NBL_DBM_NAFULL_HISTORY_DWLEN (1) +union dbm_nafull_history_u { + struct dbm_nafull_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_NAFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_DBM_WERR_HISTORY_ADDR (0x70c220) +#define NBL_DBM_WERR_HISTORY_DEPTH (1) +#define NBL_DBM_WERR_HISTORY_WIDTH (32) +#define NBL_DBM_WERR_HISTORY_DWLEN (1) +union dbm_werr_history_u { + struct dbm_werr_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_WERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_DBM_RERR_HISTORY_ADDR (0x70c224) +#define NBL_DBM_RERR_HISTORY_DEPTH (1) +#define NBL_DBM_RERR_HISTORY_WIDTH (32) +#define NBL_DBM_RERR_HISTORY_DWLEN (1) +union dbm_rerr_history_u { + struct dbm_rerr_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_RERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_DBM_BITMAP_RAM_STATUS_CURR_ADDR (0x70c400) +#define NBL_DBM_BITMAP_RAM_STATUS_CURR_DEPTH (32) +#define NBL_DBM_BITMAP_RAM_STATUS_CURR_WIDTH (32) +#define NBL_DBM_BITMAP_RAM_STATUS_CURR_DWLEN (1) +union dbm_bitmap_ram_status_curr_u { + struct dbm_bitmap_ram_status_curr { + u32 bitmap_pntr:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_BITMAP_RAM_STATUS_CURR_DWLEN]; +} __packed; +#define NBL_DBM_BITMAP_RAM_STATUS_CURR_REG(r) (NBL_DBM_BITMAP_RAM_STATUS_CURR_ADDR + \ + (NBL_DBM_BITMAP_RAM_STATUS_CURR_DWLEN * 4) * (r)) + +#define NBL_DBM_LIST_RAM_ADDR (0x710000) +#define NBL_DBM_LIST_RAM_DEPTH (1024) +#define NBL_DBM_LIST_RAM_WIDTH (32) +#define NBL_DBM_LIST_RAM_DWLEN (1) +union dbm_list_ram_u { + struct dbm_list_ram { + u32 pntr:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DBM_LIST_RAM_DWLEN]; +} __packed; +#define NBL_DBM_LIST_RAM_REG(r) (NBL_DBM_LIST_RAM_ADDR + \ + (NBL_DBM_LIST_RAM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ddmux.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ddmux.h new file mode 100644 index 0000000000000000000000000000000000000000..d9457b8a4031e3182ea545ba8b29f2fe8a301988 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ddmux.h @@ -0,0 +1,859 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DDMUX_H +#define NBL_DDMUX_H 1 + +#include <linux/types.h> + +#define NBL_DDMUX_BASE (0x00984000) + +#define NBL_DDMUX_INT_STATUS_ADDR (0x984000) +#define NBL_DDMUX_INT_STATUS_DEPTH (1) +#define NBL_DDMUX_INT_STATUS_WIDTH (32) +#define NBL_DDMUX_INT_STATUS_DWLEN (1) +union ddmux_int_status_u { + struct ddmux_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 parity_err:1; /* [4] Default:0x0 RWC */ + u32 cif_err:1; /* [5] Default:0x0 RWC */ + u32 eth0_rdy_err:1; /* [6] Default:0x0 RWC */ + u32 eth1_rdy_err:1; /* [7] Default:0x0 RWC */ + u32 eth2_rdy_err:1; /* [8] Default:0x0 RWC */ + u32 eth3_rdy_err:1; /* [9] Default:0x0 RWC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DDMUX_INT_MASK_ADDR (0x984004) +#define NBL_DDMUX_INT_MASK_DEPTH (1) +#define NBL_DDMUX_INT_MASK_WIDTH (32) +#define NBL_DDMUX_INT_MASK_DWLEN (1) +union ddmux_int_mask_u { + struct ddmux_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 parity_err:1; /* [4] Default:0x0 RW */ + u32 cif_err:1; /* [5] Default:0x0 RW */ + u32 eth0_rdy_err:1; /* [6] Default:0x0 RW */ + u32 eth1_rdy_err:1; /* [7] Default:0x0 RW */ + u32 eth2_rdy_err:1; /* [8] Default:0x0 RW */ + u32 eth3_rdy_err:1; /* [9] Default:0x0 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DDMUX_INT_SET_ADDR (0x984008) +#define NBL_DDMUX_INT_SET_DEPTH (1) +#define NBL_DDMUX_INT_SET_WIDTH (32) +#define NBL_DDMUX_INT_SET_DWLEN (1) +union ddmux_int_set_u { + struct ddmux_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 parity_err:1; /* [4] Default:0x0 WO */ + u32 cif_err:1; /* [5] Default:0x0 WO */ + u32 eth0_rdy_err:1; /* [6] Default:0x0 WO */ + u32 eth1_rdy_err:1; /* [7] Default:0x0 WO */ + u32 eth2_rdy_err:1; /* [8] Default:0x0 WO */ + u32 eth3_rdy_err:1; /* [9] Default:0x0 WO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_INT_SET_DWLEN]; +} __packed; + +#define NBL_DDMUX_UCOR_ERR_INFO_ADDR (0x98400c) +#define NBL_DDMUX_UCOR_ERR_INFO_DEPTH (1) +#define NBL_DDMUX_UCOR_ERR_INFO_WIDTH (32) +#define NBL_DDMUX_UCOR_ERR_INFO_DWLEN (1) +union ddmux_ucor_err_info_u { + struct ddmux_ucor_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_UCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DDMUX_COR_ERR_INFO_ADDR (0x984014) +#define NBL_DDMUX_COR_ERR_INFO_DEPTH (1) +#define NBL_DDMUX_COR_ERR_INFO_WIDTH (32) +#define NBL_DDMUX_COR_ERR_INFO_DWLEN (1) +union ddmux_cor_err_info_u { + struct ddmux_cor_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DDMUX_PARITY_ERR_INFO_ADDR (0x98402c) +#define NBL_DDMUX_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DDMUX_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DDMUX_PARITY_ERR_INFO_DWLEN (1) +union ddmux_parity_err_info_u { + struct
ddmux_parity_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DDMUX_CIF_ERR_INFO_ADDR (0x984044) +#define NBL_DDMUX_CIF_ERR_INFO_DEPTH (1) +#define NBL_DDMUX_CIF_ERR_INFO_WIDTH (32) +#define NBL_DDMUX_CIF_ERR_INFO_DWLEN (1) +union ddmux_cif_err_info_u { + struct ddmux_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DDMUX_CAR_CTRL_ADDR (0x984100) +#define NBL_DDMUX_CAR_CTRL_DEPTH (1) +#define NBL_DDMUX_CAR_CTRL_WIDTH (32) +#define NBL_DDMUX_CAR_CTRL_DWLEN (1) +union ddmux_car_ctrl_u { + struct ddmux_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_FC_TH_ADDR (0x98411c) +#define NBL_DDMUX_ETH0_FC_TH_DEPTH (1) +#define NBL_DDMUX_ETH0_FC_TH_WIDTH (32) +#define NBL_DDMUX_ETH0_FC_TH_DWLEN (1) +union ddmux_eth0_fc_th_u { + struct ddmux_eth0_fc_th { + u32 fc_th:11; /* [10:0] Default:320 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_FC_TH_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_FC_TH_ADDR (0x984120) +#define NBL_DDMUX_ETH1_FC_TH_DEPTH (1) +#define NBL_DDMUX_ETH1_FC_TH_WIDTH (32) +#define NBL_DDMUX_ETH1_FC_TH_DWLEN (1) +union ddmux_eth1_fc_th_u { + struct ddmux_eth1_fc_th { + u32 fc_th:11; /* [10:0] Default:320 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_FC_TH_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_FC_TH_ADDR (0x984124) +#define NBL_DDMUX_ETH2_FC_TH_DEPTH (1) +#define NBL_DDMUX_ETH2_FC_TH_WIDTH (32) +#define NBL_DDMUX_ETH2_FC_TH_DWLEN (1) +union ddmux_eth2_fc_th_u { + struct ddmux_eth2_fc_th { + u32 fc_th:11; /* [10:0] Default:320 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_FC_TH_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_FC_TH_ADDR (0x984128) +#define NBL_DDMUX_ETH3_FC_TH_DEPTH (1) +#define NBL_DDMUX_ETH3_FC_TH_WIDTH (32) +#define NBL_DDMUX_ETH3_FC_TH_DWLEN (1) +union ddmux_eth3_fc_th_u { + struct ddmux_eth3_fc_th { + u32 fc_th:11; /* [10:0] Default:320 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_FC_TH_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH_LINK_FC_EN_ADDR (0x98412c) +#define NBL_DDMUX_ETH_LINK_FC_EN_DEPTH (1) +#define NBL_DDMUX_ETH_LINK_FC_EN_WIDTH (32) +#define NBL_DDMUX_ETH_LINK_FC_EN_DWLEN (1) +union ddmux_eth_link_fc_en_u { + struct ddmux_eth_link_fc_en { + u32 eth0_link_fc_en:1; /* [0] Default:0x1 RW */ + u32 eth1_link_fc_en:1; /* [1] Default:0x1 RW */ + u32 eth2_link_fc_en:1; /* [2] Default:0x1 RW */ + u32 eth3_link_fc_en:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH_LINK_FC_EN_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_RDY_TIMEOUT_ADDR (0x984130) +#define NBL_DDMUX_ETH0_RDY_TIMEOUT_DEPTH (1) +#define NBL_DDMUX_ETH0_RDY_TIMEOUT_WIDTH (32) +#define NBL_DDMUX_ETH0_RDY_TIMEOUT_DWLEN (1) +union ddmux_eth0_rdy_timeout_u { + struct ddmux_eth0_rdy_timeout { + u32 eth0_rdy_timeout:16; /* [15:0] Default:0xffff RW */ + 
u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_RDY_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_RDY_TIMEOUT_ADDR (0x984134) +#define NBL_DDMUX_ETH1_RDY_TIMEOUT_DEPTH (1) +#define NBL_DDMUX_ETH1_RDY_TIMEOUT_WIDTH (32) +#define NBL_DDMUX_ETH1_RDY_TIMEOUT_DWLEN (1) +union ddmux_eth1_rdy_timeout_u { + struct ddmux_eth1_rdy_timeout { + u32 eth1_rdy_timeout:16; /* [15:0] Default:0xffff RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_RDY_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_RDY_TIMEOUT_ADDR (0x984138) +#define NBL_DDMUX_ETH2_RDY_TIMEOUT_DEPTH (1) +#define NBL_DDMUX_ETH2_RDY_TIMEOUT_WIDTH (32) +#define NBL_DDMUX_ETH2_RDY_TIMEOUT_DWLEN (1) +union ddmux_eth2_rdy_timeout_u { + struct ddmux_eth2_rdy_timeout { + u32 eth2_rdy_timeout:16; /* [15:0] Default:0xffff RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_RDY_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_RDY_TIMEOUT_ADDR (0x98413c) +#define NBL_DDMUX_ETH3_RDY_TIMEOUT_DEPTH (1) +#define NBL_DDMUX_ETH3_RDY_TIMEOUT_WIDTH (32) +#define NBL_DDMUX_ETH3_RDY_TIMEOUT_DWLEN (1) +union ddmux_eth3_rdy_timeout_u { + struct ddmux_eth3_rdy_timeout { + u32 eth3_rdy_timeout:16; /* [15:0] Default:0xffff RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_RDY_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH_RDY_MASK_EN_ADDR (0x984140) +#define NBL_DDMUX_ETH_RDY_MASK_EN_DEPTH (1) +#define NBL_DDMUX_ETH_RDY_MASK_EN_WIDTH (32) +#define NBL_DDMUX_ETH_RDY_MASK_EN_DWLEN (1) +union ddmux_eth_rdy_mask_en_u { + struct ddmux_eth_rdy_mask_en { + u32 eth0_rdy_mask_en:1; /* [0] Default:0x0 RW */ + u32 eth1_rdy_mask_en:1; /* [1] Default:0x0 RW */ + u32 eth2_rdy_mask_en:1; /* [2] Default:0x0 RW */ + u32 eth3_rdy_mask_en:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_ETH_RDY_MASK_EN_DWLEN]; +} __packed; + +#define NBL_DDMUX_FWD_DPORT_ADDR (0x984144) +#define NBL_DDMUX_FWD_DPORT_DEPTH (1) +#define NBL_DDMUX_FWD_DPORT_WIDTH (32) +#define NBL_DDMUX_FWD_DPORT_DWLEN (1) +union ddmux_fwd_dport_u { + struct ddmux_fwd_dport { + u32 action_id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DDMUX_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_DDMUX_OUT_FLAG_MASK_ADDR (0x984148) +#define NBL_DDMUX_OUT_FLAG_MASK_DEPTH (1) +#define NBL_DDMUX_OUT_FLAG_MASK_WIDTH (32) +#define NBL_DDMUX_OUT_FLAG_MASK_DWLEN (1) +union ddmux_out_flag_mask_u { + struct ddmux_out_flag_mask { + u32 mask:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DDMUX_OUT_FLAG_MASK_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_PKT_ADDR (0x984800) +#define NBL_DDMUX_ETH0_TX_PKT_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_PKT_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_PKT_DWLEN (1) +union ddmux_eth0_tx_pkt_u { + struct ddmux_eth0_tx_pkt { + u32 eth0_tx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_PKT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_BYTE_ADDR (0x984804) +#define NBL_DDMUX_ETH0_TX_BYTE_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_BYTE_WIDTH (64) +#define NBL_DDMUX_ETH0_TX_BYTE_DWLEN (2) +union ddmux_eth0_tx_byte_u { + struct ddmux_eth0_tx_byte { + u32 eth0_tx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_BYTE_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_UC_ADDR (0x98480c) +#define
NBL_DDMUX_ETH0_TX_UC_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_UC_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_UC_DWLEN (1) +union ddmux_eth0_tx_uc_u { + struct ddmux_eth0_tx_uc { + u32 eth0_tx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_UC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_MC_ADDR (0x984810) +#define NBL_DDMUX_ETH0_TX_MC_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_MC_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_MC_DWLEN (1) +union ddmux_eth0_tx_mc_u { + struct ddmux_eth0_tx_mc { + u32 eth0_tx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_MC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_BC_ADDR (0x984814) +#define NBL_DDMUX_ETH0_TX_BC_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_BC_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_BC_DWLEN (1) +union ddmux_eth0_tx_bc_u { + struct ddmux_eth0_tx_bc { + u32 eth0_tx_bc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_BC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_LESS_60_ADDR (0x984818) +#define NBL_DDMUX_ETH0_TX_LESS_60_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_LESS_60_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_LESS_60_DWLEN (1) +union ddmux_eth0_tx_less_60_u { + struct ddmux_eth0_tx_less_60 { + u32 eth0_tx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_LESS_60_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_61_123_ADDR (0x984820) +#define NBL_DDMUX_ETH0_TX_61_123_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_61_123_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_61_123_DWLEN (1) +union ddmux_eth0_tx_61_123_u { + struct ddmux_eth0_tx_61_123 { + u32 eth0_tx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_61_123_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_124_251_ADDR (0x984824) +#define NBL_DDMUX_ETH0_TX_124_251_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_124_251_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_124_251_DWLEN (1) +union ddmux_eth0_tx_124_251_u { + struct ddmux_eth0_tx_124_251 { + u32 eth0_tx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_124_251_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_252_507_ADDR (0x984828) +#define NBL_DDMUX_ETH0_TX_252_507_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_252_507_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_252_507_DWLEN (1) +union ddmux_eth0_tx_252_507_u { + struct ddmux_eth0_tx_252_507 { + u32 eth0_tx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_252_507_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_508_1019_ADDR (0x98482c) +#define NBL_DDMUX_ETH0_TX_508_1019_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_508_1019_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_508_1019_DWLEN (1) +union ddmux_eth0_tx_508_1019_u { + struct ddmux_eth0_tx_508_1019 { + u32 eth0_tx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_508_1019_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_1020_1514_ADDR (0x984830) +#define NBL_DDMUX_ETH0_TX_1020_1514_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_1020_1514_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_1020_1514_DWLEN (1) +union ddmux_eth0_tx_1020_1514_u { + struct ddmux_eth0_tx_1020_1514 { + u32 eth0_tx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_1020_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_LARGE_1514_ADDR (0x984834) +#define NBL_DDMUX_ETH0_TX_LARGE_1514_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_LARGE_1514_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_LARGE_1514_DWLEN (1) +union 
ddmux_eth0_tx_large_1514_u { + struct ddmux_eth0_tx_large_1514 { + u32 eth0_tx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH0_TX_MAC_ERR_ADDR (0x984838) +#define NBL_DDMUX_ETH0_TX_MAC_ERR_DEPTH (1) +#define NBL_DDMUX_ETH0_TX_MAC_ERR_WIDTH (32) +#define NBL_DDMUX_ETH0_TX_MAC_ERR_DWLEN (1) +union ddmux_eth0_tx_mac_err_u { + struct ddmux_eth0_tx_mac_err { + u32 eth0_tx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH0_TX_MAC_ERR_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_PKT_ADDR (0x984900) +#define NBL_DDMUX_ETH1_TX_PKT_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_PKT_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_PKT_DWLEN (1) +union ddmux_eth1_tx_pkt_u { + struct ddmux_eth1_tx_pkt { + u32 eth1_tx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_PKT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_BYTE_ADDR (0x984904) +#define NBL_DDMUX_ETH1_TX_BYTE_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_BYTE_WIDTH (64) +#define NBL_DDMUX_ETH1_TX_BYTE_DWLEN (2) +union ddmux_eth1_tx_byte_u { + struct ddmux_eth1_tx_byte { + u32 eth1_tx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_BYTE_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_UC_ADDR (0x98490c) +#define NBL_DDMUX_ETH1_TX_UC_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_UC_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_UC_DWLEN (1) +union ddmux_eth1_tx_uc_u { + struct ddmux_eth1_tx_uc { + u32 eth1_tx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_UC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_MC_ADDR (0x984910) +#define NBL_DDMUX_ETH1_TX_MC_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_MC_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_MC_DWLEN (1) +union ddmux_eth1_tx_mc_u { + struct ddmux_eth1_tx_mc { + u32 eth1_tx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_MC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_BC_ADDR (0x984914) +#define NBL_DDMUX_ETH1_TX_BC_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_BC_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_BC_DWLEN (1) +union ddmux_eth1_tx_bc_u { + struct ddmux_eth1_tx_bc { + u32 eth1_tx_bc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_BC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_LESS_60_ADDR (0x984918) +#define NBL_DDMUX_ETH1_TX_LESS_60_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_LESS_60_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_LESS_60_DWLEN (1) +union ddmux_eth1_tx_less_60_u { + struct ddmux_eth1_tx_less_60 { + u32 eth1_tx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_LESS_60_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_61_123_ADDR (0x984920) +#define NBL_DDMUX_ETH1_TX_61_123_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_61_123_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_61_123_DWLEN (1) +union ddmux_eth1_tx_61_123_u { + struct ddmux_eth1_tx_61_123 { + u32 eth1_tx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_61_123_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_124_251_ADDR (0x984924) +#define NBL_DDMUX_ETH1_TX_124_251_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_124_251_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_124_251_DWLEN (1) +union ddmux_eth1_tx_124_251_u { + struct ddmux_eth1_tx_124_251 { + u32 eth1_tx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_124_251_DWLEN]; +} __packed; + +#define 
NBL_DDMUX_ETH1_TX_252_507_ADDR (0x984928) +#define NBL_DDMUX_ETH1_TX_252_507_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_252_507_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_252_507_DWLEN (1) +union ddmux_eth1_tx_252_507_u { + struct ddmux_eth1_tx_252_507 { + u32 eth1_tx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_252_507_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_508_1019_ADDR (0x98492c) +#define NBL_DDMUX_ETH1_TX_508_1019_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_508_1019_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_508_1019_DWLEN (1) +union ddmux_eth1_tx_508_1019_u { + struct ddmux_eth1_tx_508_1019 { + u32 eth1_tx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_508_1019_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_1020_1514_ADDR (0x984930) +#define NBL_DDMUX_ETH1_TX_1020_1514_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_1020_1514_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_1020_1514_DWLEN (1) +union ddmux_eth1_tx_1020_1514_u { + struct ddmux_eth1_tx_1020_1514 { + u32 eth1_tx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_1020_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_LARGE_1514_ADDR (0x984934) +#define NBL_DDMUX_ETH1_TX_LARGE_1514_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_LARGE_1514_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_LARGE_1514_DWLEN (1) +union ddmux_eth1_tx_large_1514_u { + struct ddmux_eth1_tx_large_1514 { + u32 eth1_tx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH1_TX_MAC_ERR_ADDR (0x984938) +#define NBL_DDMUX_ETH1_TX_MAC_ERR_DEPTH (1) +#define NBL_DDMUX_ETH1_TX_MAC_ERR_WIDTH (32) +#define NBL_DDMUX_ETH1_TX_MAC_ERR_DWLEN (1) +union ddmux_eth1_tx_mac_err_u { + struct ddmux_eth1_tx_mac_err { + u32 eth1_tx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH1_TX_MAC_ERR_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_PKT_ADDR (0x984a00) +#define NBL_DDMUX_ETH2_TX_PKT_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_PKT_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_PKT_DWLEN (1) +union ddmux_eth2_tx_pkt_u { + struct ddmux_eth2_tx_pkt { + u32 eth2_tx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_PKT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_BYTE_ADDR (0x984a04) +#define NBL_DDMUX_ETH2_TX_BYTE_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_BYTE_WIDTH (64) +#define NBL_DDMUX_ETH2_TX_BYTE_DWLEN (2) +union ddmux_eth2_tx_byte_u { + struct ddmux_eth2_tx_byte { + u32 eth2_tx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_BYTE_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_UC_ADDR (0x984a0c) +#define NBL_DDMUX_ETH2_TX_UC_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_UC_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_UC_DWLEN (1) +union ddmux_eth2_tx_uc_u { + struct ddmux_eth2_tx_uc { + u32 eth2_tx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_UC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_MC_ADDR (0x984a10) +#define NBL_DDMUX_ETH2_TX_MC_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_MC_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_MC_DWLEN (1) +union ddmux_eth2_tx_mc_u { + struct ddmux_eth2_tx_mc { + u32 eth2_tx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_MC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_BC_ADDR (0x984a14) +#define NBL_DDMUX_ETH2_TX_BC_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_BC_WIDTH (32) +#define 
NBL_DDMUX_ETH2_TX_BC_DWLEN (1) +union ddmux_eth2_tx_bc_u { + struct ddmux_eth2_tx_bc { + u32 eth2_tx_bc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_BC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_LESS_60_ADDR (0x984a18) +#define NBL_DDMUX_ETH2_TX_LESS_60_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_LESS_60_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_LESS_60_DWLEN (1) +union ddmux_eth2_tx_less_60_u { + struct ddmux_eth2_tx_less_60 { + u32 eth2_tx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_LESS_60_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_61_123_ADDR (0x984a20) +#define NBL_DDMUX_ETH2_TX_61_123_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_61_123_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_61_123_DWLEN (1) +union ddmux_eth2_tx_61_123_u { + struct ddmux_eth2_tx_61_123 { + u32 eth2_tx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_61_123_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_124_251_ADDR (0x984a24) +#define NBL_DDMUX_ETH2_TX_124_251_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_124_251_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_124_251_DWLEN (1) +union ddmux_eth2_tx_124_251_u { + struct ddmux_eth2_tx_124_251 { + u32 eth2_tx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_124_251_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_252_507_ADDR (0x984a28) +#define NBL_DDMUX_ETH2_TX_252_507_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_252_507_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_252_507_DWLEN (1) +union ddmux_eth2_tx_252_507_u { + struct ddmux_eth2_tx_252_507 { + u32 eth2_tx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_252_507_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_508_1019_ADDR (0x984a2c) +#define NBL_DDMUX_ETH2_TX_508_1019_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_508_1019_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_508_1019_DWLEN (1) +union ddmux_eth2_tx_508_1019_u { + struct ddmux_eth2_tx_508_1019 { + u32 eth2_tx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_508_1019_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_1020_1514_ADDR (0x984a30) +#define NBL_DDMUX_ETH2_TX_1020_1514_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_1020_1514_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_1020_1514_DWLEN (1) +union ddmux_eth2_tx_1020_1514_u { + struct ddmux_eth2_tx_1020_1514 { + u32 eth2_tx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_1020_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_LARGE_1514_ADDR (0x984a34) +#define NBL_DDMUX_ETH2_TX_LARGE_1514_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_LARGE_1514_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_LARGE_1514_DWLEN (1) +union ddmux_eth2_tx_large_1514_u { + struct ddmux_eth2_tx_large_1514 { + u32 eth2_tx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH2_TX_MAC_ERR_ADDR (0x984a38) +#define NBL_DDMUX_ETH2_TX_MAC_ERR_DEPTH (1) +#define NBL_DDMUX_ETH2_TX_MAC_ERR_WIDTH (32) +#define NBL_DDMUX_ETH2_TX_MAC_ERR_DWLEN (1) +union ddmux_eth2_tx_mac_err_u { + struct ddmux_eth2_tx_mac_err { + u32 eth2_tx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH2_TX_MAC_ERR_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_PKT_ADDR (0x984b00) +#define NBL_DDMUX_ETH3_TX_PKT_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_PKT_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_PKT_DWLEN (1) +union ddmux_eth3_tx_pkt_u 
{ + struct ddmux_eth3_tx_pkt { + u32 eth3_tx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_PKT_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_BYTE_ADDR (0x984b04) +#define NBL_DDMUX_ETH3_TX_BYTE_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_BYTE_WIDTH (64) +#define NBL_DDMUX_ETH3_TX_BYTE_DWLEN (2) +union ddmux_eth3_tx_byte_u { + struct ddmux_eth3_tx_byte { + u32 eth3_tx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_BYTE_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_UC_ADDR (0x984b0c) +#define NBL_DDMUX_ETH3_TX_UC_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_UC_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_UC_DWLEN (1) +union ddmux_eth3_tx_uc_u { + struct ddmux_eth3_tx_uc { + u32 eth3_tx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_UC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_MC_ADDR (0x984b10) +#define NBL_DDMUX_ETH3_TX_MC_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_MC_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_MC_DWLEN (1) +union ddmux_eth3_tx_mc_u { + struct ddmux_eth3_tx_mc { + u32 eth3_tx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_MC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_BC_ADDR (0x984b14) +#define NBL_DDMUX_ETH3_TX_BC_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_BC_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_BC_DWLEN (1) +union ddmux_eth3_tx_bc_u { + struct ddmux_eth3_tx_bc { + u32 eth3_tx_bc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_BC_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_LESS_60_ADDR (0x984b18) +#define NBL_DDMUX_ETH3_TX_LESS_60_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_LESS_60_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_LESS_60_DWLEN (1) +union ddmux_eth3_tx_less_60_u { + struct ddmux_eth3_tx_less_60 { + u32 eth3_tx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_LESS_60_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_61_123_ADDR (0x984b20) +#define NBL_DDMUX_ETH3_TX_61_123_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_61_123_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_61_123_DWLEN (1) +union ddmux_eth3_tx_61_123_u { + struct ddmux_eth3_tx_61_123 { + u32 eth3_tx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_61_123_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_124_251_ADDR (0x984b24) +#define NBL_DDMUX_ETH3_TX_124_251_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_124_251_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_124_251_DWLEN (1) +union ddmux_eth3_tx_124_251_u { + struct ddmux_eth3_tx_124_251 { + u32 eth3_tx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_124_251_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_252_507_ADDR (0x984b28) +#define NBL_DDMUX_ETH3_TX_252_507_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_252_507_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_252_507_DWLEN (1) +union ddmux_eth3_tx_252_507_u { + struct ddmux_eth3_tx_252_507 { + u32 eth3_tx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_252_507_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_508_1019_ADDR (0x984b2c) +#define NBL_DDMUX_ETH3_TX_508_1019_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_508_1019_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_508_1019_DWLEN (1) +union ddmux_eth3_tx_508_1019_u { + struct ddmux_eth3_tx_508_1019 { + u32 eth3_tx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_508_1019_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_1020_1514_ADDR 
(0x984b30) +#define NBL_DDMUX_ETH3_TX_1020_1514_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_1020_1514_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_1020_1514_DWLEN (1) +union ddmux_eth3_tx_1020_1514_u { + struct ddmux_eth3_tx_1020_1514 { + u32 eth3_tx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_1020_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_LARGE_1514_ADDR (0x984b34) +#define NBL_DDMUX_ETH3_TX_LARGE_1514_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_LARGE_1514_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_LARGE_1514_DWLEN (1) +union ddmux_eth3_tx_large_1514_u { + struct ddmux_eth3_tx_large_1514 { + u32 eth3_tx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_DDMUX_ETH3_TX_MAC_ERR_ADDR (0x984b38) +#define NBL_DDMUX_ETH3_TX_MAC_ERR_DEPTH (1) +#define NBL_DDMUX_ETH3_TX_MAC_ERR_WIDTH (32) +#define NBL_DDMUX_ETH3_TX_MAC_ERR_DWLEN (1) +union ddmux_eth3_tx_mac_err_u { + struct ddmux_eth3_tx_mac_err { + u32 eth3_tx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DDMUX_ETH3_TX_MAC_ERR_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dl4s.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dl4s.h new file mode 100644 index 0000000000000000000000000000000000000000..1c675fdcc654d3cb04573992090829c542d893d9 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dl4s.h @@ -0,0 +1,703 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DL4S_H +#define NBL_DL4S_H 1 + +#include <linux/types.h> + +#define NBL_DL4S_BASE (0x00614000) + +#define NBL_DL4S_INT_STATUS_ADDR (0x614000) +#define NBL_DL4S_INT_STATUS_DEPTH (1) +#define NBL_DL4S_INT_STATUS_WIDTH (32) +#define NBL_DL4S_INT_STATUS_DWLEN (1) +union dl4s_int_status_u { + struct dl4s_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 parity_err:1; /* [5] Default:0x0 RWC */ + u32 ce_mod_err:1; /* [6] Default:0x0 RWC */ + u32 rec_len_err:1; /* [7] Default:0x0 RWC */ + u32 rec_ver_err:1; /* [8] Default:0x0 RWC */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DL4S_INT_MASK_ADDR (0x614004) +#define NBL_DL4S_INT_MASK_DEPTH (1) +#define NBL_DL4S_INT_MASK_WIDTH (32) +#define NBL_DL4S_INT_MASK_DWLEN (1) +union dl4s_int_mask_u { + struct dl4s_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 parity_err:1; /* [5] Default:0x0 RW */ + u32 ce_mod_err:1; /* [6] Default:0x0 RW */ + u32 rec_len_err:1; /* [7] Default:0x0 RW */ + u32 rec_ver_err:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DL4S_INT_SET_ADDR (0x614008) +#define NBL_DL4S_INT_SET_DEPTH (1) +#define NBL_DL4S_INT_SET_WIDTH (32) +#define NBL_DL4S_INT_SET_DWLEN (1) +union dl4s_int_set_u { + struct dl4s_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /*
[2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 parity_err:1; /* [5] Default:0x0 WO */ + u32 ce_mod_err:1; /* [6] Default:0x0 WO */ + u32 rec_len_err:1; /* [7] Default:0x0 WO */ + u32 rec_ver_err:1; /* [8] Default:0x0 WO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_INT_SET_DWLEN]; +} __packed; + +#define NBL_DL4S_COR_ERR_INFO_ADDR (0x614010) +#define NBL_DL4S_COR_ERR_INFO_DEPTH (1) +#define NBL_DL4S_COR_ERR_INFO_WIDTH (32) +#define NBL_DL4S_COR_ERR_INFO_DWLEN (1) +union dl4s_cor_err_info_u { + struct dl4s_cor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_UNCOR_ERR_INFO_ADDR (0x614018) +#define NBL_DL4S_UNCOR_ERR_INFO_DEPTH (1) +#define NBL_DL4S_UNCOR_ERR_INFO_WIDTH (32) +#define NBL_DL4S_UNCOR_ERR_INFO_DWLEN (1) +union dl4s_uncor_err_info_u { + struct dl4s_uncor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_UNCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_PARITY_ERR_INFO_ADDR (0x614030) +#define NBL_DL4S_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DL4S_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DL4S_PARITY_ERR_INFO_DWLEN (1) +union dl4s_parity_err_info_u { + struct dl4s_parity_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_CIF_ERR_INFO_ADDR (0x614038) +#define NBL_DL4S_CIF_ERR_INFO_DEPTH (1) +#define NBL_DL4S_CIF_ERR_INFO_WIDTH (32) +#define NBL_DL4S_CIF_ERR_INFO_DWLEN (1) +union dl4s_cif_err_info_u { + struct dl4s_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_CAR_CTRL_ADDR (0x614100) +#define NBL_DL4S_CAR_CTRL_DEPTH (1) +#define NBL_DL4S_CAR_CTRL_WIDTH (32) +#define NBL_DL4S_CAR_CTRL_DWLEN (1) +union dl4s_car_ctrl_u { + struct dl4s_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DL4S_INIT_START_ADDR (0x614104) +#define NBL_DL4S_INIT_START_DEPTH (1) +#define NBL_DL4S_INIT_START_WIDTH (32) +#define NBL_DL4S_INIT_START_DWLEN (1) +union dl4s_init_start_u { + struct dl4s_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_INIT_START_DWLEN]; +} __packed; + +#define NBL_DL4S_INTF_DATA_BUF_TH_ADDR (0x614124) +#define NBL_DL4S_INTF_DATA_BUF_TH_DEPTH (1) +#define NBL_DL4S_INTF_DATA_BUF_TH_WIDTH (32) +#define NBL_DL4S_INTF_DATA_BUF_TH_DWLEN (1) +union dl4s_intf_data_buf_th_u { + struct dl4s_intf_data_buf_th { + u32 aful_high_th:8; /* [7:0] Default:224 RW */ + u32 aful_low_th:8; /* [15:8] Default:200 RW */ + u32 drop_th:8; /* 
[23:16] Default:168 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_INTF_DATA_BUF_TH_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UDP_IV_BUF_TH_ADDR (0x614180) +#define NBL_DL4S_REC_UDP_IV_BUF_TH_DEPTH (1) +#define NBL_DL4S_REC_UDP_IV_BUF_TH_WIDTH (32) +#define NBL_DL4S_REC_UDP_IV_BUF_TH_DWLEN (1) +union dl4s_rec_udp_iv_buf_th_u { + struct dl4s_rec_udp_iv_buf_th { + u32 aful_high_th:8; /* [7:0] Default:32 RW */ + u32 aful_low_th:8; /* [15:8] Default:24 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_UDP_IV_BUF_TH_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UDP_DATA_BUF_TH_ADDR (0x614188) +#define NBL_DL4S_REC_UDP_DATA_BUF_TH_DEPTH (1) +#define NBL_DL4S_REC_UDP_DATA_BUF_TH_WIDTH (32) +#define NBL_DL4S_REC_UDP_DATA_BUF_TH_DWLEN (1) +union dl4s_rec_udp_data_buf_th_u { + struct dl4s_rec_udp_data_buf_th { + u32 aful_high_th:8; /* [7:0] Default:32 RW */ + u32 aful_low_th:8; /* [15:8] Default:24 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_UDP_DATA_BUF_TH_DWLEN]; +} __packed; + +#define NBL_DL4S_INTF_DATA_BUF_STATUS_CURR_ADDR (0x614224) +#define NBL_DL4S_INTF_DATA_BUF_STATUS_CURR_DEPTH (1) +#define NBL_DL4S_INTF_DATA_BUF_STATUS_CURR_WIDTH (32) +#define NBL_DL4S_INTF_DATA_BUF_STATUS_CURR_DWLEN (1) +union dl4s_intf_data_buf_status_curr_u { + struct dl4s_intf_data_buf_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; /* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_INTF_DATA_BUF_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UDP_IV_BUF_STATUS_CURR_ADDR (0x614280) +#define NBL_DL4S_REC_UDP_IV_BUF_STATUS_CURR_DEPTH (1) +#define NBL_DL4S_REC_UDP_IV_BUF_STATUS_CURR_WIDTH (32) +#define NBL_DL4S_REC_UDP_IV_BUF_STATUS_CURR_DWLEN (1) +union dl4s_rec_udp_iv_buf_status_curr_u { + struct dl4s_rec_udp_iv_buf_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; /* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_UDP_IV_BUF_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UDP_INFO_STATUS_CURR_ADDR (0x614284) +#define NBL_DL4S_REC_UDP_INFO_STATUS_CURR_DEPTH (1) +#define NBL_DL4S_REC_UDP_INFO_STATUS_CURR_WIDTH (32) +#define NBL_DL4S_REC_UDP_INFO_STATUS_CURR_DWLEN (1) +union dl4s_rec_udp_info_status_curr_u { + struct dl4s_rec_udp_info_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; /* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_UDP_INFO_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UDP_INFO_DONE_STATUS_CURR_ADDR (0x61428c) +#define NBL_DL4S_REC_UDP_INFO_DONE_STATUS_CURR_DEPTH (1) +#define NBL_DL4S_REC_UDP_INFO_DONE_STATUS_CURR_WIDTH (32) +#define NBL_DL4S_REC_UDP_INFO_DONE_STATUS_CURR_DWLEN (1) +union dl4s_rec_udp_info_done_status_curr_u { + struct dl4s_rec_udp_info_done_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; 
/* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_UDP_INFO_DONE_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UDP_DATA_DONE_STATUS_CURR_ADDR (0x614290) +#define NBL_DL4S_REC_UDP_DATA_DONE_STATUS_CURR_DEPTH (1) +#define NBL_DL4S_REC_UDP_DATA_DONE_STATUS_CURR_WIDTH (32) +#define NBL_DL4S_REC_UDP_DATA_DONE_STATUS_CURR_DWLEN (1) +union dl4s_rec_udp_data_done_status_curr_u { + struct dl4s_rec_udp_data_done_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; /* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_UDP_DATA_DONE_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_DL4S_L4S_RX_SYNC_ADDR (0x614420) +#define NBL_DL4S_L4S_RX_SYNC_DEPTH (1) +#define NBL_DL4S_L4S_RX_SYNC_WIDTH (32) +#define NBL_DL4S_L4S_RX_SYNC_DWLEN (1) +union dl4s_l4s_rx_sync_u { + struct dl4s_l4s_rx_sync { + u32 redun_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_L4S_RX_SYNC_DWLEN]; +} __packed; + +#define NBL_DL4S_L4S_RX_INFO_ADDR (0x614424) +#define NBL_DL4S_L4S_RX_INFO_DEPTH (1) +#define NBL_DL4S_L4S_RX_INFO_WIDTH (32) +#define NBL_DL4S_L4S_RX_INFO_DWLEN (1) +union dl4s_l4s_rx_info_u { + struct dl4s_l4s_rx_info { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 drcnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_L4S_RX_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_L4S_RX_LEN_ADDR (0x614428) +#define NBL_DL4S_L4S_RX_LEN_DEPTH (1) +#define NBL_DL4S_L4S_RX_LEN_WIDTH (32) +#define NBL_DL4S_L4S_RX_LEN_DWLEN (1) +union dl4s_l4s_rx_len_u { + struct dl4s_l4s_rx_len { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_L4S_RX_LEN_DWLEN]; +} __packed; + +#define NBL_DL4S_L4S_RX_CELL_ADDR (0x61442c) +#define NBL_DL4S_L4S_RX_CELL_DEPTH (1) +#define NBL_DL4S_L4S_RX_CELL_WIDTH (32) +#define NBL_DL4S_L4S_RX_CELL_DWLEN (1) +union dl4s_l4s_rx_cell_u { + struct dl4s_l4s_rx_cell { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv1:8; /* [23:16] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_L4S_RX_CELL_DWLEN]; +} __packed; + +#define NBL_DL4S_L4S_TX_INFO_ADDR (0x614430) +#define NBL_DL4S_L4S_TX_INFO_DEPTH (1) +#define NBL_DL4S_L4S_TX_INFO_WIDTH (32) +#define NBL_DL4S_L4S_TX_INFO_DWLEN (1) +union dl4s_l4s_tx_info_u { + struct dl4s_l4s_tx_info { + u32 cell_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 pkt_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_L4S_TX_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_L4S_TX_LEN_ADDR (0x614434) +#define NBL_DL4S_L4S_TX_LEN_DEPTH (1) +#define NBL_DL4S_L4S_TX_LEN_WIDTH (32) +#define NBL_DL4S_L4S_TX_LEN_DWLEN (1) +union dl4s_l4s_tx_len_u { + struct dl4s_l4s_tx_len { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_L4S_TX_LEN_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_PARSE_RX_ADDR (0x614440) +#define NBL_DL4S_REC_PARSE_RX_DEPTH (1) +#define NBL_DL4S_REC_PARSE_RX_WIDTH (32) +#define NBL_DL4S_REC_PARSE_RX_DWLEN (1) +union dl4s_rec_parse_rx_u { + struct dl4s_rec_parse_rx { + u32 ind_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 redun_head_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 redun_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 sync_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + 
u32 data[NBL_DL4S_REC_PARSE_RX_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_PARSE_INFO_ADDR (0x614444) +#define NBL_DL4S_REC_PARSE_INFO_DEPTH (1) +#define NBL_DL4S_REC_PARSE_INFO_WIDTH (32) +#define NBL_DL4S_REC_PARSE_INFO_DWLEN (1) +union dl4s_rec_parse_info_u { + struct dl4s_rec_parse_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 info_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 rcnt:8; /* [23:16] Default:0x0 RCTR */ + u32 wcnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_PARSE_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_PARSE_RES_ADDR (0x614448) +#define NBL_DL4S_REC_PARSE_RES_DEPTH (1) +#define NBL_DL4S_REC_PARSE_RES_WIDTH (32) +#define NBL_DL4S_REC_PARSE_RES_DWLEN (1) +union dl4s_rec_parse_res_u { + struct dl4s_rec_parse_res { + u32 eor_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 sor_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_PARSE_RES_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_PARSE_INI_ADDR (0x61444c) +#define NBL_DL4S_REC_PARSE_INI_DEPTH (1) +#define NBL_DL4S_REC_PARSE_INI_WIDTH (32) +#define NBL_DL4S_REC_PARSE_INI_DWLEN (1) +union dl4s_rec_parse_ini_u { + struct dl4s_rec_parse_ini { + u32 resync_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 init_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 eob_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_PARSE_INI_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEPART_ADDR (0x614450) +#define NBL_DL4S_REC_DEPART_DEPTH (1) +#define NBL_DL4S_REC_DEPART_WIDTH (32) +#define NBL_DL4S_REC_DEPART_DWLEN (1) +union dl4s_rec_depart_u { + struct dl4s_rec_depart { + u32 eob_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 eor_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 sor_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_DEPART_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEPART_INFO_ADDR (0x614454) +#define NBL_DL4S_REC_DEPART_INFO_DEPTH (1) +#define NBL_DL4S_REC_DEPART_INFO_WIDTH (32) +#define NBL_DL4S_REC_DEPART_INFO_DWLEN (1) +union dl4s_rec_depart_info_u { + struct dl4s_rec_depart_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 eicv_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 loss_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_DEPART_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEPART_HEAD_ADDR (0x614458) +#define NBL_DL4S_REC_DEPART_HEAD_DEPTH (1) +#define NBL_DL4S_REC_DEPART_HEAD_WIDTH (32) +#define NBL_DL4S_REC_DEPART_HEAD_DWLEN (1) +union dl4s_rec_depart_head_u { + struct dl4s_rec_depart_head { + u32 cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_DEPART_HEAD_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEP_ADA_ADDR (0x614460) +#define NBL_DL4S_REC_DEP_ADA_DEPTH (1) +#define NBL_DL4S_REC_DEP_ADA_WIDTH (32) +#define NBL_DL4S_REC_DEP_ADA_DWLEN (1) +union dl4s_rec_dep_ada_u { + struct dl4s_rec_dep_ada { + u32 eob_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 eor_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 sor_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_DEP_ADA_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEP_ADA_INFO_ADDR (0x614464) +#define NBL_DL4S_REC_DEP_ADA_INFO_DEPTH (1) +#define NBL_DL4S_REC_DEP_ADA_INFO_WIDTH (32) +#define NBL_DL4S_REC_DEP_ADA_INFO_DWLEN (1) +union 
dl4s_rec_dep_ada_info_u { + struct dl4s_rec_dep_ada_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_DEP_ADA_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UPD_ADA_ADDR (0x614470) +#define NBL_DL4S_REC_UPD_ADA_DEPTH (1) +#define NBL_DL4S_REC_UPD_ADA_WIDTH (32) +#define NBL_DL4S_REC_UPD_ADA_DWLEN (1) +union dl4s_rec_upd_ada_u { + struct dl4s_rec_upd_ada { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 icv_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_UPD_ADA_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UPDATE_ADDR (0x614480) +#define NBL_DL4S_REC_UPDATE_DEPTH (1) +#define NBL_DL4S_REC_UPDATE_WIDTH (32) +#define NBL_DL4S_REC_UPDATE_DWLEN (1) +union dl4s_rec_update_u { + struct dl4s_rec_update { + u32 bp_rcnt:8; /* [7:0] Default:0x0 RCTR */ + u32 bp_wcnt:8; /* [15:8] Default:0x0 RCTR */ + u32 cip_rcnt:8; /* [23:16] Default:0x0 RCTR */ + u32 cip_wcnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_UPDATE_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_UPDATE_INFO_ADDR (0x614484) +#define NBL_DL4S_REC_UPDATE_INFO_DEPTH (1) +#define NBL_DL4S_REC_UPDATE_INFO_WIDTH (32) +#define NBL_DL4S_REC_UPDATE_INFO_DWLEN (1) +union dl4s_rec_update_info_u { + struct dl4s_rec_update_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 icv_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 cell_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_REC_UPDATE_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_UNL4S_RX_INFO_ADDR (0x614490) +#define NBL_DL4S_UNL4S_RX_INFO_DEPTH (1) +#define NBL_DL4S_UNL4S_RX_INFO_WIDTH (32) +#define NBL_DL4S_UNL4S_RX_INFO_DWLEN (1) +union dl4s_unl4s_rx_info_u { + struct dl4s_unl4s_rx_info { + u32 cell_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 pkt_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_UNL4S_RX_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_UNL4S_RX_LEN_ADDR (0x614494) +#define NBL_DL4S_UNL4S_RX_LEN_DEPTH (1) +#define NBL_DL4S_UNL4S_RX_LEN_WIDTH (32) +#define NBL_DL4S_UNL4S_RX_LEN_DWLEN (1) +union dl4s_unl4s_rx_len_u { + struct dl4s_unl4s_rx_len { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_UNL4S_RX_LEN_DWLEN]; +} __packed; + +#define NBL_DL4S_UNL4S_TX_INFO_ADDR (0x6144a0) +#define NBL_DL4S_UNL4S_TX_INFO_DEPTH (1) +#define NBL_DL4S_UNL4S_TX_INFO_WIDTH (32) +#define NBL_DL4S_UNL4S_TX_INFO_DWLEN (1) +union dl4s_unl4s_tx_info_u { + struct dl4s_unl4s_tx_info { + u32 cell_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 pkt_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_UNL4S_TX_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_UNL4S_TX_LEN_ADDR (0x6144a4) +#define NBL_DL4S_UNL4S_TX_LEN_DEPTH (1) +#define NBL_DL4S_UNL4S_TX_LEN_WIDTH (32) +#define NBL_DL4S_UNL4S_TX_LEN_DWLEN (1) +union dl4s_unl4s_tx_len_u { + struct dl4s_unl4s_tx_len { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_UNL4S_TX_LEN_DWLEN]; +} __packed; + +#define NBL_DL4S_RD_INFO_ADDR (0x6144b0) +#define NBL_DL4S_RD_INFO_DEPTH (1) +#define NBL_DL4S_RD_INFO_WIDTH (32) +#define NBL_DL4S_RD_INFO_DWLEN (1) +union dl4s_rd_info_u { + struct dl4s_rd_info { + u32 unl4s_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 l4s_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 
data[NBL_DL4S_RD_INFO_DWLEN]; +} __packed; + +#define NBL_DL4S_INT_TX_ADDR (0x6144b4) +#define NBL_DL4S_INT_TX_DEPTH (1) +#define NBL_DL4S_INT_TX_WIDTH (32) +#define NBL_DL4S_INT_TX_DWLEN (1) +union dl4s_int_tx_u { + struct dl4s_int_tx { + u32 data_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 info_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DL4S_INT_TX_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEP_ADA_IV_ADDR (0x614564) +#define NBL_DL4S_REC_DEP_ADA_IV_DEPTH (1) +#define NBL_DL4S_REC_DEP_ADA_IV_WIDTH (32) +#define NBL_DL4S_REC_DEP_ADA_IV_DWLEN (1) +union dl4s_rec_dep_ada_iv_u { + struct dl4s_rec_dep_ada_iv { + u32 val:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_DEP_ADA_IV_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEP_ADA_SN_ADDR (0x614568) +#define NBL_DL4S_REC_DEP_ADA_SN_DEPTH (1) +#define NBL_DL4S_REC_DEP_ADA_SN_WIDTH (32) +#define NBL_DL4S_REC_DEP_ADA_SN_DWLEN (1) +union dl4s_rec_dep_ada_sn_u { + struct dl4s_rec_dep_ada_sn { + u32 val:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_DEP_ADA_SN_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_DEP_ADA_CE_ADDR (0x61456c) +#define NBL_DL4S_REC_DEP_ADA_CE_DEPTH (1) +#define NBL_DL4S_REC_DEP_ADA_CE_WIDTH (32) +#define NBL_DL4S_REC_DEP_ADA_CE_DWLEN (1) +union dl4s_rec_dep_ada_ce_u { + struct dl4s_rec_dep_ada_ce { + u32 len:11; /* [10:0] Default:0x0 RO */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 sid:10; /* [25:16] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_DEP_ADA_CE_DWLEN]; +} __packed; + +#define NBL_DL4S_INIT_DONE_ADDR (0x614600) +#define NBL_DL4S_INIT_DONE_DEPTH (1) +#define NBL_DL4S_INIT_DONE_WIDTH (32) +#define NBL_DL4S_INIT_DONE_DWLEN (1) +union dl4s_init_done_u { + struct dl4s_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DL4S_VER_YEAR_MONTH_ADDR (0x614604) +#define NBL_DL4S_VER_YEAR_MONTH_DEPTH (1) +#define NBL_DL4S_VER_YEAR_MONTH_WIDTH (32) +#define NBL_DL4S_VER_YEAR_MONTH_DWLEN (1) +union dl4s_ver_year_month_u { + struct dl4s_ver_year_month { + u32 month:8; /* [7:0] Default:0x06 RO */ + u32 year:8; /* [15:8] Default:0x22 RO */ + u32 version:8; /* [23:16] Default:0x21 RO */ + u32 reg_type:8; /* [31:24] Default:0x5a RO */ + } __packed info; + u32 data[NBL_DL4S_VER_YEAR_MONTH_DWLEN]; +} __packed; + +#define NBL_DL4S_REG_TEST_ADDR (0x614608) +#define NBL_DL4S_REG_TEST_DEPTH (1) +#define NBL_DL4S_REG_TEST_WIDTH (32) +#define NBL_DL4S_REG_TEST_DWLEN (1) +union dl4s_reg_test_u { + struct dl4s_reg_test { + u32 reg_test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DL4S_REG_TEST_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_FULL_DROP_ADDR (0x614610) +#define NBL_DL4S_REC_FULL_DROP_DEPTH (1) +#define NBL_DL4S_REC_FULL_DROP_WIDTH (32) +#define NBL_DL4S_REC_FULL_DROP_DWLEN (1) +union dl4s_rec_full_drop_u { + struct dl4s_rec_full_drop { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x00 RO */ + } __packed info; + u32 data[NBL_DL4S_REC_FULL_DROP_DWLEN]; +} __packed; + +#define NBL_DL4S_REC_HEAD_VERSION_ADDR (0x614630) +#define NBL_DL4S_REC_HEAD_VERSION_DEPTH (1) +#define NBL_DL4S_REC_HEAD_VERSION_WIDTH (32) +#define NBL_DL4S_REC_HEAD_VERSION_DWLEN (1) +union dl4s_rec_head_version_u { + struct dl4s_rec_head_version { + u32 v0:16; /* [15:0] Default:0x0302 RW */ + u32 v1:16; /* [31:16] Default:0x0303 RW */ + } __packed info; + 
u32 data[NBL_DL4S_REC_HEAD_VERSION_DWLEN]; +} __packed; + +#define NBL_DL4S_SCH_ADDR (0x6146c0) +#define NBL_DL4S_SCH_DEPTH (1) +#define NBL_DL4S_SCH_WIDTH (32) +#define NBL_DL4S_SCH_DWLEN (1) +union dl4s_sch_u { + struct dl4s_sch { + u32 pri:2; /* [1:0] Default:0x2 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_SCH_DWLEN]; +} __packed; + +#define NBL_DL4S_TAB_KEY_SALT_ADDR (0x624000) +#define NBL_DL4S_TAB_KEY_SALT_DEPTH (1024) +#define NBL_DL4S_TAB_KEY_SALT_WIDTH (512) +#define NBL_DL4S_TAB_KEY_SALT_DWLEN (16) +union dl4s_tab_key_salt_u { + struct dl4s_tab_key_salt { + u32 key_arr[8]; /* [255:0] Default:0x0 RW */ + u32 salt:32; /* [287:256] Default:0x0 RW */ + u32 mode:2; /* [289:288] Default:0x0 RW */ + u32 ena:1; /* [290] Default:0x0 RW */ + u32 rsv:29; /* [319:291] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:320] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DL4S_TAB_KEY_SALT_DWLEN]; +} __packed; +#define NBL_DL4S_TAB_KEY_SALT_REG(r) (NBL_DL4S_TAB_KEY_SALT_ADDR + \ + (NBL_DL4S_TAB_KEY_SALT_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h new file mode 100644 index 0000000000000000000000000000000000000000..3d7e0bf51a59282a3fd8b4d59e345fc5b4b9c52c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpa.h @@ -0,0 +1,760 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DPA_H +#define NBL_DPA_H 1 + +#include <linux/types.h> + +#define NBL_DPA_BASE (0x0085C000) + +#define NBL_DPA_INT_STATUS_ADDR (0x85c000) +#define NBL_DPA_INT_STATUS_DEPTH (1) +#define NBL_DPA_INT_STATUS_WIDTH (32) +#define NBL_DPA_INT_STATUS_DWLEN (1) +union dpa_int_status_u { + struct dpa_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_underflow:1; /* [1] Default:0x0 RWC */ + u32 fifo_overflow:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 RWC */ + u32 ucor_err:1; /* [7] Default:0x0 RWC */ + u32 cor_err:1; /* [8] Default:0x0 RWC */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DPA_INT_MASK_ADDR (0x85c004) +#define NBL_DPA_INT_MASK_DEPTH (1) +#define NBL_DPA_INT_MASK_WIDTH (32) +#define NBL_DPA_INT_MASK_DWLEN (1) +union dpa_int_mask_u { + struct dpa_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_underflow:1; /* [1] Default:0x0 RW */ + u32 fifo_overflow:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 RW */ + u32 ucor_err:1; /* [7] Default:0x0 RW */ + u32 cor_err:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DPA_INT_SET_ADDR (0x85c008) +#define NBL_DPA_INT_SET_DEPTH (1) +#define NBL_DPA_INT_SET_WIDTH (32) +#define NBL_DPA_INT_SET_DWLEN (1) +union dpa_int_set_u { + struct dpa_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_underflow:1; /* [1] Default:0x0 WO */ + u32 fifo_overflow:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 rsv1:1; /* [5] 
Default:0x0 RO */ + u32 cfg_err:1; /* [6] Default:0x0 WO */ + u32 ucor_err:1; /* [7] Default:0x0 WO */ + u32 cor_err:1; /* [8] Default:0x0 WO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INT_SET_DWLEN]; +} __packed; + +#define NBL_DPA_INIT_DONE_ADDR (0x85c00c) +#define NBL_DPA_INIT_DONE_DEPTH (1) +#define NBL_DPA_INIT_DONE_WIDTH (32) +#define NBL_DPA_INIT_DONE_DWLEN (1) +union dpa_init_done_u { + struct dpa_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DPA_CIF_ERR_INFO_ADDR (0x85c040) +#define NBL_DPA_CIF_ERR_INFO_DEPTH (1) +#define NBL_DPA_CIF_ERR_INFO_WIDTH (32) +#define NBL_DPA_CIF_ERR_INFO_DWLEN (1) +union dpa_cif_err_info_u { + struct dpa_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPA_CFG_ERR_INFO_ADDR (0x85c050) +#define NBL_DPA_CFG_ERR_INFO_DEPTH (1) +#define NBL_DPA_CFG_ERR_INFO_WIDTH (32) +#define NBL_DPA_CFG_ERR_INFO_DWLEN (1) +union dpa_cfg_err_info_u { + struct dpa_cfg_err_info { + u32 id0:2; /* [1:0] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPA_CAR_CTRL_ADDR (0x85c100) +#define NBL_DPA_CAR_CTRL_DEPTH (1) +#define NBL_DPA_CAR_CTRL_WIDTH (32) +#define NBL_DPA_CAR_CTRL_DWLEN (1) +union dpa_car_ctrl_u { + struct dpa_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DPA_INIT_START_ADDR (0x85c180) +#define NBL_DPA_INIT_START_DEPTH (1) +#define NBL_DPA_INIT_START_WIDTH (32) +#define NBL_DPA_INIT_START_DWLEN (1) +union dpa_init_start_u { + struct dpa_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_INIT_START_DWLEN]; +} __packed; + +#define NBL_DPA_LAYO_CKSUM0_CTRL_ADDR (0x85c1b0) +#define NBL_DPA_LAYO_CKSUM0_CTRL_DEPTH (4) +#define NBL_DPA_LAYO_CKSUM0_CTRL_WIDTH (32) +#define NBL_DPA_LAYO_CKSUM0_CTRL_DWLEN (1) +union dpa_layo_cksum0_ctrl_u { + struct dpa_layo_cksum0_ctrl { + u32 data:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_DPA_LAYO_CKSUM0_CTRL_DWLEN]; +} __packed; +#define NBL_DPA_LAYO_CKSUM0_CTRL_REG(r) (NBL_DPA_LAYO_CKSUM0_CTRL_ADDR + \ + (NBL_DPA_LAYO_CKSUM0_CTRL_DWLEN * 4) * (r)) + +#define NBL_DPA_FWD_TYPE_STAGE_0_ADDR (0x85c1d0) +#define NBL_DPA_FWD_TYPE_STAGE_0_DEPTH (1) +#define NBL_DPA_FWD_TYPE_STAGE_0_WIDTH (32) +#define NBL_DPA_FWD_TYPE_STAGE_0_DWLEN (1) +union dpa_fwd_type_stage_0_u { + struct dpa_fwd_type_stage_0 { + u32 tbl:32; /* [31:0] Default:0xF3FFFFC2 RW */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_STAGE_0_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_STAGE_1_ADDR (0x85c1d4) +#define NBL_DPA_FWD_TYPE_STAGE_1_DEPTH (1) +#define NBL_DPA_FWD_TYPE_STAGE_1_WIDTH (32) +#define NBL_DPA_FWD_TYPE_STAGE_1_DWLEN (1) +union dpa_fwd_type_stage_1_u { + struct dpa_fwd_type_stage_1 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_STAGE_1_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_STAGE_2_ADDR 
(0x85c1d8) +#define NBL_DPA_FWD_TYPE_STAGE_2_DEPTH (1) +#define NBL_DPA_FWD_TYPE_STAGE_2_WIDTH (32) +#define NBL_DPA_FWD_TYPE_STAGE_2_DWLEN (1) +union dpa_fwd_type_stage_2_u { + struct dpa_fwd_type_stage_2 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_STAGE_2_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_BYPASS_0_ADDR (0x85c1e0) +#define NBL_DPA_FWD_TYPE_BYPASS_0_DEPTH (1) +#define NBL_DPA_FWD_TYPE_BYPASS_0_WIDTH (32) +#define NBL_DPA_FWD_TYPE_BYPASS_0_DWLEN (1) +union dpa_fwd_type_bypass_0_u { + struct dpa_fwd_type_bypass_0 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_BYPASS_0_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_BYPASS_1_ADDR (0x85c1e4) +#define NBL_DPA_FWD_TYPE_BYPASS_1_DEPTH (1) +#define NBL_DPA_FWD_TYPE_BYPASS_1_WIDTH (32) +#define NBL_DPA_FWD_TYPE_BYPASS_1_DWLEN (1) +union dpa_fwd_type_bypass_1_u { + struct dpa_fwd_type_bypass_1 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_BYPASS_1_DWLEN]; +} __packed; + +#define NBL_DPA_FWD_TYPE_BYPASS_2_ADDR (0x85c1e8) +#define NBL_DPA_FWD_TYPE_BYPASS_2_DEPTH (1) +#define NBL_DPA_FWD_TYPE_BYPASS_2_WIDTH (32) +#define NBL_DPA_FWD_TYPE_BYPASS_2_DWLEN (1) +union dpa_fwd_type_bypass_2_u { + struct dpa_fwd_type_bypass_2 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_FWD_TYPE_BYPASS_2_DWLEN]; +} __packed; + +#define NBL_DPA_DPORT_EXTRACT_ADDR (0x85c1ec) +#define NBL_DPA_DPORT_EXTRACT_DEPTH (1) +#define NBL_DPA_DPORT_EXTRACT_WIDTH (32) +#define NBL_DPA_DPORT_EXTRACT_DWLEN (1) +union dpa_dport_extract_u { + struct dpa_dport_extract { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_DPORT_EXTRACT_DWLEN]; +} __packed; + +#define NBL_DPA_LAYO_PHV_ADDR (0x85c1f0) +#define NBL_DPA_LAYO_PHV_DEPTH (1) +#define NBL_DPA_LAYO_PHV_WIDTH (32) +#define NBL_DPA_LAYO_PHV_DWLEN (1) +union dpa_layo_phv_u { + struct dpa_layo_phv { + u32 len:7; /* [6:0] Default:0x5A RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_LAYO_PHV_DWLEN]; +} __packed; + +#define NBL_DPA_L4S_PAD_ADDR (0x85c1f4) +#define NBL_DPA_L4S_PAD_DEPTH (1) +#define NBL_DPA_L4S_PAD_WIDTH (32) +#define NBL_DPA_L4S_PAD_DWLEN (1) +union dpa_l4s_pad_u { + struct dpa_l4s_pad { + u32 p_length:7; /* [6:0] Default:0x3C RW */ + u32 en:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_L4S_PAD_DWLEN]; +} __packed; + +#define NBL_DPA_IP_EXT_PROTOCOL_ADDR (0x85c1fc) +#define NBL_DPA_IP_EXT_PROTOCOL_DEPTH (1) +#define NBL_DPA_IP_EXT_PROTOCOL_WIDTH (32) +#define NBL_DPA_IP_EXT_PROTOCOL_DWLEN (1) +union dpa_ip_ext_protocol_u { + struct dpa_ip_ext_protocol { + u32 tcp:8; /* [7:0] Default:0x6 RW */ + u32 udp:8; /* [15:8] Default:0x11 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_IP_EXT_PROTOCOL_DWLEN]; +} __packed; + +#define NBL_DPA_L3V6_ML_DA_ADDR (0x85c204) +#define NBL_DPA_L3V6_ML_DA_DEPTH (1) +#define NBL_DPA_L3V6_ML_DA_WIDTH (32) +#define NBL_DPA_L3V6_ML_DA_DWLEN (1) +union dpa_l3v6_ml_da_u { + struct dpa_l3v6_ml_da { + u32 ml_da:16; /* [15:0] Default:0x3333 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_L3V6_ML_DA_DWLEN]; +} __packed; + +#define NBL_DPA_NEXT_KEY_ADDR 
(0x85c208) +#define NBL_DPA_NEXT_KEY_DEPTH (1) +#define NBL_DPA_NEXT_KEY_WIDTH (32) +#define NBL_DPA_NEXT_KEY_DWLEN (1) +union dpa_next_key_u { + struct dpa_next_key { + u32 key_b:8; /* [7:0] Default:0x10 RW */ + u32 key_a:8; /* [15:8] Default:0x0C RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_NEXT_KEY_DWLEN]; +} __packed; + +#define NBL_DPA_L3_ML_DA_ADDR (0x85c20c) +#define NBL_DPA_L3_ML_DA_DEPTH (1) +#define NBL_DPA_L3_ML_DA_WIDTH (32) +#define NBL_DPA_L3_ML_DA_DWLEN (1) +union dpa_l3_ml_da_u { + struct dpa_l3_ml_da { + u32 ml_da_0:16; /* [15:0] Default:0x5e00 RW */ + u32 ml_da_1:16; /* [31:16] Default:0x0100 RW */ + } __packed info; + u32 data[NBL_DPA_L3_ML_DA_DWLEN]; +} __packed; + +#define NBL_DPA_CK_CTRL_ADDR (0x85c210) +#define NBL_DPA_CK_CTRL_DEPTH (1) +#define NBL_DPA_CK_CTRL_WIDTH (32) +#define NBL_DPA_CK_CTRL_DWLEN (1) +union dpa_ck_ctrl_u { + struct dpa_ck_ctrl { + u32 tcp_csum_en:1; /* [0] Default:0x1 RW */ + u32 udp_csum_en:1; /* [1] Default:0x1 RW */ + u32 sctp_crc32c_en:1; /* [2] Default:0x1 RW */ + u32 ipv4_ck_en:1; /* [3] Default:0x1 RW */ + u32 ipv6_ck_en:1; /* [4] Default:0x1 RW */ + u32 DA_ck_en:1; /* [5] Default:0x1 RW */ + u32 ipv6_ext_en:1; /* [6] Default:0x0 RW */ + u32 vlan_error_en:1; /* [7] Default:0x1 RW */ + u32 ctrl_p_en:1; /* [8] Default:0x0 RW */ + u32 ip_tlen_ck_en:1; /* [9] Default:0x0 RW */ + u32 not_uc_p_plck_aux_en:1; /* [10] Default:0x0 RW */ + u32 sctp_crc_plck_aux_en:1; /* [11] Default:0x1 RW */ + u32 tcp_csum_offset_id:2; /* [13:12] Default:0x2 RW */ + u32 udp_csum_offset_id:2; /* [15:14] Default:0x2 RW */ + u32 sctp_crc32c_offset_id:2; /* [17:16] Default:0x2 RW */ + u32 ipv4_ck_offset_id:2; /* [19:18] Default:0x1 RW */ + u32 ipv6_ck_offset_id:2; /* [21:20] Default:0x1 RW */ + u32 DA_ck_offset_id:2; /* [23:22] Default:0x0 RW */ + u32 plck_offset_id:2; /* [25:24] Default:0x3 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_CK_CTRL_DWLEN]; +} __packed; + +#define NBL_DPA_MC_INDEX_ADDR (0x85c214) +#define NBL_DPA_MC_INDEX_DEPTH (1) +#define NBL_DPA_MC_INDEX_WIDTH (32) +#define NBL_DPA_MC_INDEX_DWLEN (1) +union dpa_mc_index_u { + struct dpa_mc_index { + u32 l2_mc_index:5; /* [4:0] Default:0x8 RW */ + u32 rsv2:3; /* [7:5] Default:0x00 RO */ + u32 l3_mc_index:5; /* [12:8] Default:0x9 RW */ + u32 rsv1:3; /* [15:13] Default:0x00 RO */ + u32 ctrl_p_index:5; /* [20:16] Default:0xF RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_MC_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_CTRL_P_DA_ADDR (0x85c218) +#define NBL_DPA_CTRL_P_DA_DEPTH (1) +#define NBL_DPA_CTRL_P_DA_WIDTH (32) +#define NBL_DPA_CTRL_P_DA_DWLEN (1) +union dpa_ctrl_p_da_u { + struct dpa_ctrl_p_da { + u32 ctrl_da_0:16; /* [15:0] Default:0xC200 RW */ + u32 ctrl_da_1:16; /* [31:16] Default:0x0180 RW */ + } __packed info; + u32 data[NBL_DPA_CTRL_P_DA_DWLEN]; +} __packed; + +#define NBL_DPA_VLAN_INDEX_ADDR (0x85c220) +#define NBL_DPA_VLAN_INDEX_DEPTH (1) +#define NBL_DPA_VLAN_INDEX_WIDTH (32) +#define NBL_DPA_VLAN_INDEX_DWLEN (1) +union dpa_vlan_index_u { + struct dpa_vlan_index { + u32 o_vlan2_index:5; /* [4:0] Default:0x11 RW */ + u32 rsv1:3; /* [7:5] Default:0x0 RO */ + u32 o_vlan1_index:5; /* [12:8] Default:0x10 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_VLAN_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_VLAN_INDEX_ADDR (0x85c224) +#define NBL_DPA_PRI_VLAN_INDEX_DEPTH (1) +#define NBL_DPA_PRI_VLAN_INDEX_WIDTH (32) +#define 
NBL_DPA_PRI_VLAN_INDEX_DWLEN (1) +union dpa_pri_vlan_index_u { + struct dpa_pri_vlan_index { + u32 ext_vlan2:7; /* [6:0] Default:0x30 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 ext_vlan1:7; /* [14:8] Default:0x2E RW */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PRI_VLAN_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_DSCP_INDEX_ADDR (0x85c228) +#define NBL_DPA_PRI_DSCP_INDEX_DEPTH (1) +#define NBL_DPA_PRI_DSCP_INDEX_WIDTH (32) +#define NBL_DPA_PRI_DSCP_INDEX_DWLEN (1) +union dpa_pri_dscp_index_u { + struct dpa_pri_dscp_index { + u32 ext_dscp:7; /* [6:0] Default:0x32 RW */ + u32 rsv2:9; /* [15:7] Default:0x0 RO */ + u32 ipv4_flag:5; /* [20:16] Default:0x1 RW */ + u32 rsv1:3; /* [23:21] Default:0x0 RO */ + u32 ipv6_flag:5; /* [28:24] Default:0x2 RW */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PRI_DSCP_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_RDMA_INDEX_ADDR (0x85c22c) +#define NBL_DPA_RDMA_INDEX_DEPTH (1) +#define NBL_DPA_RDMA_INDEX_WIDTH (32) +#define NBL_DPA_RDMA_INDEX_DWLEN (1) +union dpa_rdma_index_u { + struct dpa_rdma_index { + u32 rdma_index:5; /* [4:0] Default:0xA RW */ + u32 rsv:27; /* [31:5] Default:0x00 RO */ + } __packed info; + u32 data[NBL_DPA_RDMA_INDEX_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_SEL_CONF_ADDR (0x85c230) +#define NBL_DPA_PRI_SEL_CONF_DEPTH (6) +#define NBL_DPA_PRI_SEL_CONF_WIDTH (32) +#define NBL_DPA_PRI_SEL_CONF_DWLEN (1) +union dpa_pri_sel_conf_u { + struct dpa_pri_sel_conf { + u32 pri_sel:5; /* [4:0] Default:0x0 RW */ + u32 pri_default:3; /* [7:5] Default:0x0 RW */ + u32 pri_disen:1; /* [8] Default:0x1 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PRI_SEL_CONF_DWLEN]; +} __packed; +#define NBL_DPA_PRI_SEL_CONF_REG(r) (NBL_DPA_PRI_SEL_CONF_ADDR + \ + (NBL_DPA_PRI_SEL_CONF_DWLEN * 4) * (r)) + +#define NBL_DPA_ERROR_DROP_ADDR (0x85c248) +#define NBL_DPA_ERROR_DROP_DEPTH (1) +#define NBL_DPA_ERROR_DROP_WIDTH (32) +#define NBL_DPA_ERROR_DROP_DWLEN (1) +union dpa_error_drop_u { + struct dpa_error_drop { + u32 en:7; /* [6:0] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_ERROR_DROP_DWLEN]; +} __packed; + +#define NBL_DPA_ERROR_CODE_ADDR (0x85c24c) +#define NBL_DPA_ERROR_CODE_DEPTH (1) +#define NBL_DPA_ERROR_CODE_WIDTH (32) +#define NBL_DPA_ERROR_CODE_DWLEN (1) +union dpa_error_code_u { + struct dpa_error_code { + u32 no:32; /* [31:0] Default:0x09123456 RW */ + } __packed info; + u32 data[NBL_DPA_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_SCAN_ADDR (0x85c250) +#define NBL_DPA_PTYPE_SCAN_DEPTH (1) +#define NBL_DPA_PTYPE_SCAN_WIDTH (32) +#define NBL_DPA_PTYPE_SCAN_DWLEN (1) +union dpa_ptype_scan_u { + struct dpa_ptype_scan { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_SCAN_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_SCAN_TH_ADDR (0x85c254) +#define NBL_DPA_PTYPE_SCAN_TH_DEPTH (1) +#define NBL_DPA_PTYPE_SCAN_TH_WIDTH (32) +#define NBL_DPA_PTYPE_SCAN_TH_DWLEN (1) +union dpa_ptype_scan_th_u { + struct dpa_ptype_scan_th { + u32 th:32; /* [31:00] Default:0x40 RW */ + } __packed info; + u32 data[NBL_DPA_PTYPE_SCAN_TH_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_SCAN_MASK_ADDR (0x85c258) +#define NBL_DPA_PTYPE_SCAN_MASK_DEPTH (1) +#define NBL_DPA_PTYPE_SCAN_MASK_WIDTH (32) +#define NBL_DPA_PTYPE_SCAN_MASK_DWLEN (1) +union dpa_ptype_scan_mask_u { + struct dpa_ptype_scan_mask { + u32 addr:8; /* [7:0] 
Default:0x0 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_SCAN_MASK_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_ADDR (0x85c25c) +#define NBL_DPA_PTYPE_INSERT_SEARCH_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_DWLEN (1) +union dpa_ptype_insert_search_u { + struct dpa_ptype_insert_search { + u32 ctrl:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_INSERT_SEARCH_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_ADDR (0x85c260) +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_0_DWLEN (1) +union dpa_ptype_insert_search_0_u { + struct dpa_ptype_insert_search_0 { + u32 key0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_PTYPE_INSERT_SEARCH_0_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ADDR (0x85c268) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN (1) +union dpa_ptype_insert_search_result_u { + struct dpa_ptype_insert_search_result { + u32 result:8; /* [7:0] Default:0x0 RO */ + u32 hit:1; /* [8] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN]; +} __packed; + +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_ADDR (0x85c270) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DEPTH (1) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_WIDTH (32) +#define NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN (1) +union dpa_ptype_insert_search_result_ack_u { + struct dpa_ptype_insert_search_result_ack { + u32 vld:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN]; +} __packed; + +#define NBL_DPA_CFG_TEST_ADDR (0x85c80c) +#define NBL_DPA_CFG_TEST_DEPTH (1) +#define NBL_DPA_CFG_TEST_WIDTH (32) +#define NBL_DPA_CFG_TEST_DWLEN (1) +union dpa_cfg_test_u { + struct dpa_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DPA_BP_STATE_ADDR (0x85cb00) +#define NBL_DPA_BP_STATE_DEPTH (1) +#define NBL_DPA_BP_STATE_WIDTH (32) +#define NBL_DPA_BP_STATE_DWLEN (1) +union dpa_bp_state_u { + struct dpa_bp_state { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RO */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RO */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RO */ + u32 store_pa_info_bp:1; /* [3] Default:0x0 RO */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RO */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RO */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RO */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RO */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RO */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_BP_STATE_DWLEN]; +} __packed; + +#define NBL_DPA_BP_HISTORY_ADDR (0x85cb04) +#define NBL_DPA_BP_HISTORY_DEPTH (1) +#define NBL_DPA_BP_HISTORY_WIDTH (32) +#define NBL_DPA_BP_HISTORY_DWLEN (1) +union dpa_bp_history_u { + struct dpa_bp_history { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RC */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RC */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RC */ + u32 
store_pa_info_bp:1; /* [3] Default:0x0 RC */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RC */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RC */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RC */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RC */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RC */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_DPA_PRI_CONF_TABLE_ADDR (0x85e000) +#define NBL_DPA_PRI_CONF_TABLE_DEPTH (48) +#define NBL_DPA_PRI_CONF_TABLE_WIDTH (32) +#define NBL_DPA_PRI_CONF_TABLE_DWLEN (1) +union dpa_pri_conf_table_u { + struct dpa_pri_conf_table { + u32 pri0:4; /* [3:0] Default:0x0 RW */ + u32 pri1:4; /* [7:4] Default:0x0 RW */ + u32 pri2:4; /* [11:8] Default:0x0 RW */ + u32 pri3:4; /* [15:12] Default:0x0 RW */ + u32 pri4:4; /* [19:16] Default:0x0 RW */ + u32 pri5:4; /* [23:20] Default:0x0 RW */ + u32 pri6:4; /* [27:24] Default:0x0 RW */ + u32 pri7:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_PRI_CONF_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_PRI_CONF_TABLE_REG(r) (NBL_DPA_PRI_CONF_TABLE_ADDR + \ + (NBL_DPA_PRI_CONF_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_KEY_TCAM_ADDR (0x85f000) +#define NBL_DPA_KEY_TCAM_DEPTH (128) +#define NBL_DPA_KEY_TCAM_WIDTH (64) +#define NBL_DPA_KEY_TCAM_DWLEN (2) +union dpa_key_tcam_u { + struct dpa_key_tcam { + u32 key_b:16; /* [15:0] Default:0x0 RW */ + u32 key_a:16; /* [31:16] Default:0x0 RW */ + u32 key_valid:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_KEY_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_KEY_TCAM_REG(r) (NBL_DPA_KEY_TCAM_ADDR + \ + (NBL_DPA_KEY_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_MASK_TCAM_ADDR (0x85f800) +#define NBL_DPA_MASK_TCAM_DEPTH (128) +#define NBL_DPA_MASK_TCAM_WIDTH (32) +#define NBL_DPA_MASK_TCAM_DWLEN (1) +union dpa_mask_tcam_u { + struct dpa_mask_tcam { + u32 mask_b:16; /* [15:0] Default:0x0 RW */ + u32 mask_a:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_MASK_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_MASK_TCAM_REG(r) (NBL_DPA_MASK_TCAM_ADDR + \ + (NBL_DPA_MASK_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_ACT_TABLE_ADDR (0x860000) +#define NBL_DPA_ACT_TABLE_DEPTH (128) +#define NBL_DPA_ACT_TABLE_WIDTH (128) +#define NBL_DPA_ACT_TABLE_DWLEN (4) +union dpa_act_table_u { + struct dpa_act_table { + u32 flag_control_0:8; /* [7:0] Default:0x0 RW */ + u32 flag_control_1:8; /* [15:8] Default:0x0 RW */ + u32 flag_control_2:8; /* [23:16] Default:0x0 RW */ + u32 legality_check:8; /* [31:24] Default:0x0 RW */ + u32 nxt_off_B:8; /* [39:32] Default:0x0 RW */ + u32 nxt_off_A:8; /* [47:40] Default:0x0 RW */ + u32 protocol_header_off:8; /* [55:48] Default:0x0 RW */ + u32 payload_length:8; /* [63:56] Default:0x0 RW */ + u32 mask:8; /* [71:64] Default:0x0 RW */ + u32 nxt_stg:4; /* [75:72] Default:0x0 RW */ + u32 rsv_l:32; /* [107:76] Default:0x0 RO */ + u32 rsv_h:20; /* [127:108] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_ACT_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_ACT_TABLE_REG(r) (NBL_DPA_ACT_TABLE_ADDR + \ + (NBL_DPA_ACT_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_CONF_TABLE_ADDR (0x861000) +#define NBL_DPA_EXT_CONF_TABLE_DEPTH (512) +#define NBL_DPA_EXT_CONF_TABLE_WIDTH (32) +#define NBL_DPA_EXT_CONF_TABLE_DWLEN (1) +union dpa_ext_conf_table_u { + struct dpa_ext_conf_table { + u32 dst_offset:8; /* [7:0] Default:0x0 RW */ + u32 source_offset:6; /* 
[13:8] Default:0x0 RW */ + u32 mode_start_off:2; /* [15:14] Default:0x0 RW */ + u32 lx_sel:2; /* [17:16] Default:0x0 RW */ + u32 mode_sel:1; /* [18] Default:0x0 RW */ + u32 op_en:1; /* [19] Default:0x0 RW */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_EXT_CONF_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_EXT_CONF_TABLE_REG(r) (NBL_DPA_EXT_CONF_TABLE_ADDR + \ + (NBL_DPA_EXT_CONF_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_INDEX_TCAM_ADDR (0x862000) +#define NBL_DPA_EXT_INDEX_TCAM_DEPTH (32) +#define NBL_DPA_EXT_INDEX_TCAM_WIDTH (64) +#define NBL_DPA_EXT_INDEX_TCAM_DWLEN (2) +union dpa_ext_index_tcam_u { + struct dpa_ext_index_tcam { + u32 type_index:32; /* [31:0] Default:0x0 RW */ + u32 type_valid:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_EXT_INDEX_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_EXT_INDEX_TCAM_REG(r) (NBL_DPA_EXT_INDEX_TCAM_ADDR + \ + (NBL_DPA_EXT_INDEX_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_INDEX_TCAM_MASK_ADDR (0x862200) +#define NBL_DPA_EXT_INDEX_TCAM_MASK_DEPTH (32) +#define NBL_DPA_EXT_INDEX_TCAM_MASK_WIDTH (32) +#define NBL_DPA_EXT_INDEX_TCAM_MASK_DWLEN (1) +union dpa_ext_index_tcam_mask_u { + struct dpa_ext_index_tcam_mask { + u32 mask:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPA_EXT_INDEX_TCAM_MASK_DWLEN]; +} __packed; +#define NBL_DPA_EXT_INDEX_TCAM_MASK_REG(r) (NBL_DPA_EXT_INDEX_TCAM_MASK_ADDR + \ + (NBL_DPA_EXT_INDEX_TCAM_MASK_DWLEN * 4) * (r)) + +#define NBL_DPA_EXT_INDEX_TABLE_ADDR (0x862300) +#define NBL_DPA_EXT_INDEX_TABLE_DEPTH (32) +#define NBL_DPA_EXT_INDEX_TABLE_WIDTH (32) +#define NBL_DPA_EXT_INDEX_TABLE_DWLEN (1) +union dpa_ext_index_table_u { + struct dpa_ext_index_table { + u32 p_index:3; /* [2:0] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_EXT_INDEX_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_EXT_INDEX_TABLE_REG(r) (NBL_DPA_EXT_INDEX_TABLE_ADDR + \ + (NBL_DPA_EXT_INDEX_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPA_TYPE_INDEX_TCAM_ADDR (0x864000) +#define NBL_DPA_TYPE_INDEX_TCAM_DEPTH (256) +#define NBL_DPA_TYPE_INDEX_TCAM_WIDTH (128) +#define NBL_DPA_TYPE_INDEX_TCAM_DWLEN (4) +union dpa_type_index_tcam_u { + struct dpa_type_index_tcam { + u32 layo_x:32; /* [31:0] Default:0xFFFFFFFF RW */ + u32 layo_y:32; /* [63:32] Default:0xFFFFFFFF RW */ + u32 type_valid:1; /* [64] Default:0x0 RW */ + u32 rsv_l:32; /* [96:65] Default:0x0 RO */ + u32 rsv_h:31; /* [127:97] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_TYPE_INDEX_TCAM_DWLEN]; +} __packed; +#define NBL_DPA_TYPE_INDEX_TCAM_REG(r) (NBL_DPA_TYPE_INDEX_TCAM_ADDR + \ + (NBL_DPA_TYPE_INDEX_TCAM_DWLEN * 4) * (r)) + +#define NBL_DPA_PACKET_TYPE_TABLE_ADDR (0x866000) +#define NBL_DPA_PACKET_TYPE_TABLE_DEPTH (256) +#define NBL_DPA_PACKET_TYPE_TABLE_WIDTH (32) +#define NBL_DPA_PACKET_TYPE_TABLE_DWLEN (1) +union dpa_packet_type_table_u { + struct dpa_packet_type_table { + u32 p_type:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPA_PACKET_TYPE_TABLE_DWLEN]; +} __packed; +#define NBL_DPA_PACKET_TYPE_TABLE_REG(r) (NBL_DPA_PACKET_TYPE_TABLE_ADDR + \ + (NBL_DPA_PACKET_TYPE_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h new file mode 100644 index 
0000000000000000000000000000000000000000..3ca3dbd24e707244b29bd2c6af22b018fad63ab4 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h @@ -0,0 +1,2147 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DPED_H +#define NBL_DPED_H 1 + +#include <linux/types.h> + +#define NBL_DPED_BASE (0x0075C000) + +#define NBL_DPED_INT_STATUS_ADDR (0x75c000) +#define NBL_DPED_INT_STATUS_DEPTH (1) +#define NBL_DPED_INT_STATUS_WIDTH (32) +#define NBL_DPED_INT_STATUS_DWLEN (1) +union dped_int_status_u { + struct dped_int_status { + u32 pkt_length_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 input_err:1; /* [5] Default:0x0 RWC */ + u32 cfg_err:1; /* [6] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [7] Default:0x0 RWC */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 RWC */ + u32 meta_ucor_err:1; /* [9] Default:0x0 RWC */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RWC */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 RWC */ + u32 meta_value_err:1; /* [12] Default:0x0 RWC */ + u32 edit_atnum_err:1; /* [13] Default:0x0 RWC */ + u32 header_oft_ovf:1; /* [14] Default:0x0 RWC */ + u32 edit_pos_err:1; /* [15] Default:0x0 RWC */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 RWC */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 RWC */ + u32 add_head_ovf:1; /* [18] Default:0x0 RWC */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DPED_INT_MASK_ADDR (0x75c004) +#define NBL_DPED_INT_MASK_DEPTH (1) +#define NBL_DPED_INT_MASK_WIDTH (32) +#define NBL_DPED_INT_MASK_DWLEN (1) +union dped_int_mask_u { + struct dped_int_mask { + u32 pkt_length_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 input_err:1; /* [5] Default:0x0 RW */ + u32 cfg_err:1; /* [6] Default:0x0 RW */ + u32 data_ucor_err:1; /* [7] Default:0x0 RW */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 RW */ + u32 meta_ucor_err:1; /* [9] Default:0x0 RW */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RW */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x1 RW */ + u32 meta_value_err:1; /* [12] Default:0x0 RW */ + u32 edit_atnum_err:1; /* [13] Default:0x0 RW */ + u32 header_oft_ovf:1; /* [14] Default:0x0 RW */ + u32 edit_pos_err:1; /* [15] Default:0x0 RW */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 RW */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 RW */ + u32 add_head_ovf:1; /* [18] Default:0x0 RW */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DPED_INT_SET_ADDR (0x75c008) +#define NBL_DPED_INT_SET_DEPTH (1) +#define NBL_DPED_INT_SET_WIDTH (32) +#define NBL_DPED_INT_SET_DWLEN (1) +union dped_int_set_u { + struct dped_int_set { + u32 pkt_length_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 input_err:1; /* [5] Default:0x0 WO */ + u32 cfg_err:1; /* [6] Default:0x0 WO */ + u32 data_ucor_err:1; /* [7] Default:0x0 WO */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 WO */ + u32 meta_ucor_err:1; /* [9] Default:0x0 WO */ + u32 
meta_cor_ecc_err:1; /* [10] Default:0x0 WO */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 WO */ + u32 meta_value_err:1; /* [12] Default:0x0 WO */ + u32 edit_atnum_err:1; /* [13] Default:0x0 WO */ + u32 header_oft_ovf:1; /* [14] Default:0x0 WO */ + u32 edit_pos_err:1; /* [15] Default:0x0 WO */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 WO */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 WO */ + u32 add_head_ovf:1; /* [18] Default:0x0 WO */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INT_SET_DWLEN]; +} __packed; + +#define NBL_DPED_INIT_DONE_ADDR (0x75c00c) +#define NBL_DPED_INIT_DONE_DEPTH (1) +#define NBL_DPED_INIT_DONE_WIDTH (32) +#define NBL_DPED_INIT_DONE_DWLEN (1) +union dped_init_done_u { + struct dped_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_LENGTH_ERR_INFO_ADDR (0x75c020) +#define NBL_DPED_PKT_LENGTH_ERR_INFO_DEPTH (1) +#define NBL_DPED_PKT_LENGTH_ERR_INFO_WIDTH (32) +#define NBL_DPED_PKT_LENGTH_ERR_INFO_DWLEN (1) +union dped_pkt_length_err_info_u { + struct dped_pkt_length_err_info { + u32 ptr_eop:1; /* [0] Default:0x0 RC */ + u32 pkt_eop:1; /* [1] Default:0x0 RC */ + u32 pkt_mod:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PKT_LENGTH_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_CIF_ERR_INFO_ADDR (0x75c040) +#define NBL_DPED_CIF_ERR_INFO_DEPTH (1) +#define NBL_DPED_CIF_ERR_INFO_WIDTH (32) +#define NBL_DPED_CIF_ERR_INFO_DWLEN (1) +union dped_cif_err_info_u { + struct dped_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_INPUT_ERR_INFO_ADDR (0x75c048) +#define NBL_DPED_INPUT_ERR_INFO_DEPTH (1) +#define NBL_DPED_INPUT_ERR_INFO_WIDTH (32) +#define NBL_DPED_INPUT_ERR_INFO_DWLEN (1) +union dped_input_err_info_u { + struct dped_input_err_info { + u32 eoc_miss:1; /* [0] Default:0x0 RC */ + u32 soc_miss:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INPUT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_CFG_ERR_INFO_ADDR (0x75c050) +#define NBL_DPED_CFG_ERR_INFO_DEPTH (1) +#define NBL_DPED_CFG_ERR_INFO_WIDTH (32) +#define NBL_DPED_CFG_ERR_INFO_DWLEN (1) +union dped_cfg_err_info_u { + struct dped_cfg_err_info { + u32 length:1; /* [0] Default:0x0 RC */ + u32 rd_conflict:1; /* [1] Default:0x0 RC */ + u32 rd_addr:8; /* [9:2] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_ADDR (0x75c06c) +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_DEPTH (1) +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_WIDTH (32) +#define NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN (1) +union dped_fwd_atid_nomat_err_info_u { + struct dped_fwd_atid_nomat_err_info { + u32 dport:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_META_VALUE_ERR_INFO_ADDR (0x75c070) +#define NBL_DPED_META_VALUE_ERR_INFO_DEPTH (1) +#define NBL_DPED_META_VALUE_ERR_INFO_WIDTH (32) +#define NBL_DPED_META_VALUE_ERR_INFO_DWLEN (1) +union dped_meta_value_err_info_u { + struct dped_meta_value_err_info { + u32 sport:1; 
/* [0] Default:0x0 RC */ + u32 dport:1; /* [1] Default:0x0 RC */ + u32 dscp_ecn:1; /* [2] Default:0x0 RC */ + u32 tnl:1; /* [3] Default:0x0 RC */ + u32 vni:1; /* [4] Default:0x0 RC */ + u32 vni_one:1; /* [5] Default:0x0 RC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_META_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_ADDR (0x75c078) +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_DEPTH (1) +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_WIDTH (32) +#define NBL_DPED_EDIT_ATNUM_ERR_INFO_DWLEN (1) +union dped_edit_atnum_err_info_u { + struct dped_edit_atnum_err_info { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 del_add:1; /* [1] Default:0x0 RC */ + u32 ttl:1; /* [2] Default:0x0 RC */ + u32 dscp:1; /* [3] Default:0x0 RC */ + u32 tnl:1; /* [4] Default:0x0 RC */ + u32 sport:1; /* [5] Default:0x0 RC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_ATNUM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_HEADER_OFT_OVF_ADDR (0x75c080) +#define NBL_DPED_HEADER_OFT_OVF_DEPTH (1) +#define NBL_DPED_HEADER_OFT_OVF_WIDTH (32) +#define NBL_DPED_HEADER_OFT_OVF_DWLEN (1) +union dped_header_oft_ovf_u { + struct dped_header_oft_ovf { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 rsv2:7; /* [7:1] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 dscp:1; /* [18] Default:0x0 RC */ + u32 pri:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 head:1; /* [28] Default:0x0 RC */ + u32 ck_len0:1; /* [29] Default:0x0 RC */ + u32 ck_len1:1; /* [30] Default:0x0 RC */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HEADER_OFT_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_POS_ERR_ADDR (0x75c088) +#define NBL_DPED_EDIT_POS_ERR_DEPTH (1) +#define NBL_DPED_EDIT_POS_ERR_WIDTH (32) +#define NBL_DPED_EDIT_POS_ERR_DWLEN (1) +union dped_edit_pos_err_u { + struct dped_edit_pos_err { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 cross_level:6; /* [6:1] Default:0x0 RC */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 dscp:1; /* [18] Default:0x0 RC */ + u32 pri:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 ck_len0:1; /* [28] Default:0x0 RC */ + u32 ck_len1:1; /* [29] Default:0x0 RC */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_POS_ERR_DWLEN]; +} __packed; + +#define NBL_DPED_DA_OFT_LEN_OVF_ADDR (0x75c090) +#define NBL_DPED_DA_OFT_LEN_OVF_DEPTH (1) +#define NBL_DPED_DA_OFT_LEN_OVF_WIDTH (32) +#define NBL_DPED_DA_OFT_LEN_OVF_DWLEN 
(1) +union dped_da_oft_len_ovf_u { + struct dped_da_oft_len_ovf { + u32 at0:5; /* [4:0] Default:0x0 RC */ + u32 at1:5; /* [9:5] Default:0x0 RC */ + u32 at2:5; /* [14:10] Default:0x0 RC */ + u32 at3:5; /* [19:15] Default:0x0 RC */ + u32 at4:5; /* [24:20] Default:0x0 RC */ + u32 at5:5; /* [29:25] Default:0x0 RC */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DA_OFT_LEN_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_LXOFFSET_OVF_ADDR (0x75c098) +#define NBL_DPED_LXOFFSET_OVF_DEPTH (1) +#define NBL_DPED_LXOFFSET_OVF_WIDTH (32) +#define NBL_DPED_LXOFFSET_OVF_DWLEN (1) +union dped_lxoffset_ovf_u { + struct dped_lxoffset_ovf { + u32 l2:1; /* [0] Default:0x0 RC */ + u32 l3:1; /* [1] Default:0x0 RC */ + u32 l4:1; /* [2] Default:0x0 RC */ + u32 pld:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LXOFFSET_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_ADD_HEAD_OVF_ADDR (0x75c0a0) +#define NBL_DPED_ADD_HEAD_OVF_DEPTH (1) +#define NBL_DPED_ADD_HEAD_OVF_WIDTH (32) +#define NBL_DPED_ADD_HEAD_OVF_DWLEN (1) +union dped_add_head_ovf_u { + struct dped_add_head_ovf { + u32 tnl_l2:1; /* [0] Default:0x0 RC */ + u32 tnl_pkt:1; /* [1] Default:0x0 RC */ + u32 rsv1:14; /* [15:2] Default:0x0 RO */ + u32 mir_l2:1; /* [16] Default:0x0 RC */ + u32 mir_pkt:1; /* [17] Default:0x0 RC */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_ADD_HEAD_OVF_DWLEN]; +} __packed; + +#define NBL_DPED_CAR_CTRL_ADDR (0x75c100) +#define NBL_DPED_CAR_CTRL_DEPTH (1) +#define NBL_DPED_CAR_CTRL_WIDTH (32) +#define NBL_DPED_CAR_CTRL_DWLEN (1) +union dped_car_ctrl_u { + struct dped_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DPED_INIT_START_ADDR (0x75c10c) +#define NBL_DPED_INIT_START_DEPTH (1) +#define NBL_DPED_INIT_START_WIDTH (32) +#define NBL_DPED_INIT_START_DWLEN (1) +union dped_init_start_u { + struct dped_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INIT_START_DWLEN]; +} __packed; + +#define NBL_DPED_TIMEOUT_CFG_ADDR (0x75c110) +#define NBL_DPED_TIMEOUT_CFG_DEPTH (1) +#define NBL_DPED_TIMEOUT_CFG_WIDTH (32) +#define NBL_DPED_TIMEOUT_CFG_DWLEN (1) +union dped_timeout_cfg_u { + struct dped_timeout_cfg { + u32 fsm_max_num:16; /* [15:00] Default:0xfff RW */ + u32 tab:8; /* [23:16] Default:0x40 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TIMEOUT_CFG_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_MAX_LENGTH_ADDR (0x75c154) +#define NBL_DPED_TNL_MAX_LENGTH_DEPTH (1) +#define NBL_DPED_TNL_MAX_LENGTH_WIDTH (32) +#define NBL_DPED_TNL_MAX_LENGTH_DWLEN (1) +union dped_tnl_max_length_u { + struct dped_tnl_max_length { + u32 th:7; /* [6:0] Default:0x5A RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TNL_MAX_LENGTH_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_DROP_EN_ADDR (0x75c170) +#define NBL_DPED_PKT_DROP_EN_DEPTH (1) +#define NBL_DPED_PKT_DROP_EN_WIDTH (32) +#define NBL_DPED_PKT_DROP_EN_DWLEN (1) +union dped_pkt_drop_en_u { + struct dped_pkt_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DPED_PKT_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_HERR_DROP_EN_ADDR (0x75c174) +#define NBL_DPED_PKT_HERR_DROP_EN_DEPTH (1) +#define NBL_DPED_PKT_HERR_DROP_EN_WIDTH (32) +#define NBL_DPED_PKT_HERR_DROP_EN_DWLEN (1) +union dped_pkt_herr_drop_en_u { + struct dped_pkt_herr_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PKT_HERR_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_PKT_PARITY_DROP_EN_ADDR (0x75c178) +#define NBL_DPED_PKT_PARITY_DROP_EN_DEPTH (1) +#define NBL_DPED_PKT_PARITY_DROP_EN_WIDTH (32) +#define NBL_DPED_PKT_PARITY_DROP_EN_DWLEN (1) +union dped_pkt_parity_drop_en_u { + struct dped_pkt_parity_drop_en { + u32 en0:1; /* [0] Default:0x1 RW */ + u32 en1:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PKT_PARITY_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_TTL_DROP_EN_ADDR (0x75c17c) +#define NBL_DPED_TTL_DROP_EN_DEPTH (1) +#define NBL_DPED_TTL_DROP_EN_WIDTH (32) +#define NBL_DPED_TTL_DROP_EN_DWLEN (1) +union dped_ttl_drop_en_u { + struct dped_ttl_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TTL_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DPED_TTL_ERROR_CODE_ADDR (0x75c188) +#define NBL_DPED_TTL_ERROR_CODE_DEPTH (1) +#define NBL_DPED_TTL_ERROR_CODE_WIDTH (32) +#define NBL_DPED_TTL_ERROR_CODE_DWLEN (1) +union dped_ttl_error_code_u { + struct dped_ttl_error_code { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv1:7; /* [7:1] Default:0x0 RO */ + u32 id:4; /* [11:8] Default:0x6 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TTL_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_DPED_HIGH_PRI_PKT_EN_ADDR (0x75c190) +#define NBL_DPED_HIGH_PRI_PKT_EN_DEPTH (1) +#define NBL_DPED_HIGH_PRI_PKT_EN_WIDTH (32) +#define NBL_DPED_HIGH_PRI_PKT_EN_DWLEN (1) +union dped_high_pri_pkt_en_u { + struct dped_high_pri_pkt_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HIGH_PRI_PKT_EN_DWLEN]; +} __packed; + +#define NBL_DPED_PADDING_CFG_ADDR (0x75c194) +#define NBL_DPED_PADDING_CFG_DEPTH (1) +#define NBL_DPED_PADDING_CFG_WIDTH (32) +#define NBL_DPED_PADDING_CFG_DWLEN (1) +union dped_padding_cfg_u { + struct dped_padding_cfg { + u32 th:6; /* [5:0] Default:0x3B RW */ + u32 rsv1:2; /* [7:6] Default:0x0 RO */ + u32 mode:2; /* [9:8] Default:0x0 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_PADDING_CFG_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL0_ADDR (0x75c204) +#define NBL_DPED_HW_EDIT_FLAG_SEL0_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL0_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL0_DWLEN (1) +union dped_hw_edit_flag_sel0_u { + struct dped_hw_edit_flag_sel0 { + u32 oft:5; /* [4:0] Default:0x1 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL0_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL1_ADDR (0x75c208) +#define NBL_DPED_HW_EDIT_FLAG_SEL1_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL1_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL1_DWLEN (1) +union dped_hw_edit_flag_sel1_u { + struct dped_hw_edit_flag_sel1 { + u32 oft:5; /* [4:0] Default:0x2 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL1_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL2_ADDR (0x75c20c) +#define 
NBL_DPED_HW_EDIT_FLAG_SEL2_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL2_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL2_DWLEN (1) +union dped_hw_edit_flag_sel2_u { + struct dped_hw_edit_flag_sel2 { + u32 oft:5; /* [4:0] Default:0x3 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL2_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL3_ADDR (0x75c210) +#define NBL_DPED_HW_EDIT_FLAG_SEL3_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL3_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL3_DWLEN (1) +union dped_hw_edit_flag_sel3_u { + struct dped_hw_edit_flag_sel3 { + u32 oft:5; /* [4:0] Default:0x4 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL3_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDIT_FLAG_SEL4_ADDR (0x75c214) +#define NBL_DPED_HW_EDIT_FLAG_SEL4_DEPTH (1) +#define NBL_DPED_HW_EDIT_FLAG_SEL4_WIDTH (32) +#define NBL_DPED_HW_EDIT_FLAG_SEL4_DWLEN (1) +union dped_hw_edit_flag_sel4_u { + struct dped_hw_edit_flag_sel4 { + u32 oft:5; /* [4:0] Default:0xe RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDIT_FLAG_SEL4_DWLEN]; +} __packed; + +#define NBL_DPED_RDMA_FLAG_ADDR (0x75c22c) +#define NBL_DPED_RDMA_FLAG_DEPTH (1) +#define NBL_DPED_RDMA_FLAG_WIDTH (32) +#define NBL_DPED_RDMA_FLAG_DWLEN (1) +union dped_rdma_flag_u { + struct dped_rdma_flag { + u32 oft:5; /* [4:0] Default:0xa RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_RDMA_FLAG_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_DPORT_ADDR (0x75c230) +#define NBL_DPED_FWD_DPORT_DEPTH (1) +#define NBL_DPED_FWD_DPORT_WIDTH (32) +#define NBL_DPED_FWD_DPORT_DWLEN (1) +union dped_fwd_dport_u { + struct dped_fwd_dport { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_MIRID_ADDR (0x75c238) +#define NBL_DPED_FWD_MIRID_DEPTH (1) +#define NBL_DPED_FWD_MIRID_WIDTH (32) +#define NBL_DPED_FWD_MIRID_DWLEN (1) +union dped_fwd_mirid_u { + struct dped_fwd_mirid { + u32 id:6; /* [5:0] Default:0x8 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_MIRID_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_VNI0_ADDR (0x75c244) +#define NBL_DPED_FWD_VNI0_DEPTH (1) +#define NBL_DPED_FWD_VNI0_WIDTH (32) +#define NBL_DPED_FWD_VNI0_DWLEN (1) +union dped_fwd_vni0_u { + struct dped_fwd_vni0 { + u32 id:6; /* [5:0] Default:0xe RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_VNI0_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_VNI1_ADDR (0x75c248) +#define NBL_DPED_FWD_VNI1_DEPTH (1) +#define NBL_DPED_FWD_VNI1_WIDTH (32) +#define NBL_DPED_FWD_VNI1_DWLEN (1) +union dped_fwd_vni1_u { + struct dped_fwd_vni1 { + u32 id:6; /* [5:0] Default:0xf RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_VNI1_DWLEN]; +} __packed; + +#define NBL_DPED_FWD_PRI_MDF_ADDR (0x75c250) +#define NBL_DPED_FWD_PRI_MDF_DEPTH (1) +#define NBL_DPED_FWD_PRI_MDF_WIDTH (32) +#define NBL_DPED_FWD_PRI_MDF_DWLEN (1) +union dped_fwd_pri_mdf_u { + struct dped_fwd_pri_mdf { + u32 id:6; /* [5:0] Default:0x15 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_FWD_PRI_MDF_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE0_ADDR (0x75c260) +#define NBL_DPED_VLAN_TYPE0_DEPTH (1) +#define NBL_DPED_VLAN_TYPE0_WIDTH (32) +#define NBL_DPED_VLAN_TYPE0_DWLEN (1) 
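+/* The reset values of the DPED_VLAN_TYPE0..3 registers that follow appear to be the standard VLAN TPID ethertypes: 0x8100 (IEEE 802.1Q C-VLAN), 0x88A8 (IEEE 802.1ad S-VLAN) and 0x9100 (legacy QinQ); VLAN_TYPE3 defaults to 0x0. */ 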
+union dped_vlan_type0_u { + struct dped_vlan_type0 { + u32 vau:16; /* [15:0] Default:0x8100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE0_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE1_ADDR (0x75c264) +#define NBL_DPED_VLAN_TYPE1_DEPTH (1) +#define NBL_DPED_VLAN_TYPE1_WIDTH (32) +#define NBL_DPED_VLAN_TYPE1_DWLEN (1) +union dped_vlan_type1_u { + struct dped_vlan_type1 { + u32 vau:16; /* [15:0] Default:0x88A8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE1_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE2_ADDR (0x75c268) +#define NBL_DPED_VLAN_TYPE2_DEPTH (1) +#define NBL_DPED_VLAN_TYPE2_WIDTH (32) +#define NBL_DPED_VLAN_TYPE2_DWLEN (1) +union dped_vlan_type2_u { + struct dped_vlan_type2 { + u32 vau:16; /* [15:0] Default:0x9100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE2_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_TYPE3_ADDR (0x75c26c) +#define NBL_DPED_VLAN_TYPE3_DEPTH (1) +#define NBL_DPED_VLAN_TYPE3_WIDTH (32) +#define NBL_DPED_VLAN_TYPE3_DWLEN (1) +union dped_vlan_type3_u { + struct dped_vlan_type3 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_TYPE3_DWLEN]; +} __packed; + +#define NBL_DPED_L3_LEN_MDY_CMD_0_ADDR (0x75c300) +#define NBL_DPED_L3_LEN_MDY_CMD_0_DEPTH (1) +#define NBL_DPED_L3_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_DPED_L3_LEN_MDY_CMD_0_DWLEN (1) +union dped_l3_len_mdy_cmd_0_u { + struct dped_l3_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x2 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x2 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L3_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_DPED_L3_LEN_MDY_CMD_1_ADDR (0x75c304) +#define NBL_DPED_L3_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_DPED_L3_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_DPED_L3_LEN_MDY_CMD_1_DWLEN (1) +union dped_l3_len_mdy_cmd_1_u { + struct dped_l3_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x28 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x1 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L3_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_DPED_L4_LEN_MDY_CMD_0_ADDR (0x75c308) +#define NBL_DPED_L4_LEN_MDY_CMD_0_DEPTH (1) +#define NBL_DPED_L4_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_DPED_L4_LEN_MDY_CMD_0_DWLEN (1) +union dped_l4_len_mdy_cmd_0_u { + struct dped_l4_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0xc RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 
data[NBL_DPED_L4_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_DPED_L4_LEN_MDY_CMD_1_ADDR (0x75c30c) +#define NBL_DPED_L4_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_DPED_L4_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_DPED_L4_LEN_MDY_CMD_1_DWLEN (1) +union dped_l4_len_mdy_cmd_1_u { + struct dped_l4_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_DPED_L3_CK_CMD_00_ADDR (0x75c310) +#define NBL_DPED_L3_CK_CMD_00_DEPTH (1) +#define NBL_DPED_L3_CK_CMD_00_WIDTH (32) +#define NBL_DPED_L3_CK_CMD_00_DWLEN (1) +union dped_l3_ck_cmd_00_u { + struct dped_l3_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0xa RW */ + u32 phid:2; /* [27:26] Default:0x2 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L3_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_L3_CK_CMD_01_ADDR (0x75c314) +#define NBL_DPED_L3_CK_CMD_01_DEPTH (1) +#define NBL_DPED_L3_CK_CMD_01_WIDTH (32) +#define NBL_DPED_L3_CK_CMD_01_DWLEN (1) +union dped_l3_ck_cmd_01_u { + struct dped_l3_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L3_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_00_ADDR (0x75c318) +#define NBL_DPED_L4_CK_CMD_00_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_00_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_00_DWLEN (1) +union dped_l4_ck_cmd_00_u { + struct dped_l4_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x6 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_01_ADDR (0x75c31c) +#define NBL_DPED_L4_CK_CMD_01_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_01_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_01_DWLEN (1) +union dped_l4_ck_cmd_01_u { + struct dped_l4_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] 
Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_10_ADDR (0x75c320) +#define NBL_DPED_L4_CK_CMD_10_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_10_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_10_DWLEN (1) +union dped_l4_ck_cmd_10_u { + struct dped_l4_ck_cmd_10 { + u32 value:8; /* [7:0] Default:0x11 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_10_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_11_ADDR (0x75c324) +#define NBL_DPED_L4_CK_CMD_11_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_11_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_11_DWLEN (1) +union dped_l4_ck_cmd_11_u { + struct dped_l4_ck_cmd_11 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_11_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_20_ADDR (0x75c328) +#define NBL_DPED_L4_CK_CMD_20_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_20_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_20_DWLEN (1) +union dped_l4_ck_cmd_20_u { + struct dped_l4_ck_cmd_20 { + u32 value:8; /* [7:0] Default:0x2e RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_20_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_21_ADDR (0x75c32c) +#define NBL_DPED_L4_CK_CMD_21_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_21_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_21_DWLEN (1) +union dped_l4_ck_cmd_21_u { + struct dped_l4_ck_cmd_21 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_21_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_30_ADDR (0x75c330) +#define NBL_DPED_L4_CK_CMD_30_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_30_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_30_DWLEN (1) +union dped_l4_ck_cmd_30_u { + struct dped_l4_ck_cmd_30 { + u32 value:8; /* [7:0] Default:0x39 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] 
Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_30_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_31_ADDR (0x75c334) +#define NBL_DPED_L4_CK_CMD_31_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_31_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_31_DWLEN (1) +union dped_l4_ck_cmd_31_u { + struct dped_l4_ck_cmd_31 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_31_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_40_ADDR (0x75c338) +#define NBL_DPED_L4_CK_CMD_40_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_40_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_40_DWLEN (1) +union dped_l4_ck_cmd_40_u { + struct dped_l4_ck_cmd_40 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x8 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x1 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_40_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_41_ADDR (0x75c33c) +#define NBL_DPED_L4_CK_CMD_41_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_41_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_41_DWLEN (1) +union dped_l4_ck_cmd_41_u { + struct dped_l4_ck_cmd_41 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x0 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_41_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_50_ADDR (0x75c340) +#define NBL_DPED_L4_CK_CMD_50_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_50_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_50_DWLEN (1) +union dped_l4_ck_cmd_50_u { + struct dped_l4_ck_cmd_50 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_50_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_51_ADDR (0x75c344) +#define NBL_DPED_L4_CK_CMD_51_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_51_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_51_DWLEN (1) +union dped_l4_ck_cmd_51_u { + struct dped_l4_ck_cmd_51 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 
ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_51_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_60_ADDR (0x75c348) +#define NBL_DPED_L4_CK_CMD_60_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_60_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_60_DWLEN (1) +union dped_l4_ck_cmd_60_u { + struct dped_l4_ck_cmd_60 { + u32 value:8; /* [7:0] Default:0x62 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_60_DWLEN]; +} __packed; + +#define NBL_DPED_L4_CK_CMD_61_ADDR (0x75c34c) +#define NBL_DPED_L4_CK_CMD_61_DEPTH (1) +#define NBL_DPED_L4_CK_CMD_61_WIDTH (32) +#define NBL_DPED_L4_CK_CMD_61_DWLEN (1) +union dped_l4_ck_cmd_61_u { + struct dped_l4_ck_cmd_61 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x0 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_L4_CK_CMD_61_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L3_CK_CMD_00_ADDR (0x75c350) +#define NBL_DPED_TNL_L3_CK_CMD_00_DEPTH (1) +#define NBL_DPED_TNL_L3_CK_CMD_00_WIDTH (32) +#define NBL_DPED_TNL_L3_CK_CMD_00_DWLEN (1) +union dped_tnl_l3_ck_cmd_00_u { + struct dped_tnl_l3_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0xa RW */ + u32 phid:2; /* [27:26] Default:0x2 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L3_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L3_CK_CMD_01_ADDR (0x75c354) +#define NBL_DPED_TNL_L3_CK_CMD_01_DEPTH (1) +#define NBL_DPED_TNL_L3_CK_CMD_01_WIDTH (32) +#define NBL_DPED_TNL_L3_CK_CMD_01_DWLEN (1) +union dped_tnl_l3_ck_cmd_01_u { + struct dped_tnl_l3_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L3_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_00_ADDR (0x75c360) +#define NBL_DPED_TNL_L4_CK_CMD_00_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_00_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_00_DWLEN (1) +union dped_tnl_l4_ck_cmd_00_u { + struct dped_tnl_l4_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x11 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* 
[16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_01_ADDR (0x75c364) +#define NBL_DPED_TNL_L4_CK_CMD_01_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_01_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_01_DWLEN (1) +union dped_tnl_l4_ck_cmd_01_u { + struct dped_tnl_l4_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_10_ADDR (0x75c368) +#define NBL_DPED_TNL_L4_CK_CMD_10_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_10_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_10_DWLEN (1) +union dped_tnl_l4_ck_cmd_10_u { + struct dped_tnl_l4_ck_cmd_10 { + u32 value:8; /* [7:0] Default:0x39 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_10_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_11_ADDR (0x75c36c) +#define NBL_DPED_TNL_L4_CK_CMD_11_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_11_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_11_DWLEN (1) +union dped_tnl_l4_ck_cmd_11_u { + struct dped_tnl_l4_ck_cmd_11 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_11_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_20_ADDR (0x75c370) +#define NBL_DPED_TNL_L4_CK_CMD_20_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_20_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_20_DWLEN (1) +union dped_tnl_l4_ck_cmd_20_u { + struct dped_tnl_l4_ck_cmd_20 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_20_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_21_ADDR (0x75c374) +#define NBL_DPED_TNL_L4_CK_CMD_21_DEPTH (1) +#define 
NBL_DPED_TNL_L4_CK_CMD_21_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_21_DWLEN (1) +union dped_tnl_l4_ck_cmd_21_u { + struct dped_tnl_l4_ck_cmd_21 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x14 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_21_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_30_ADDR (0x75c378) +#define NBL_DPED_TNL_L4_CK_CMD_30_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_30_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_30_DWLEN (1) +union dped_tnl_l4_ck_cmd_30_u { + struct dped_tnl_l4_ck_cmd_30 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_30_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_31_ADDR (0x75c37c) +#define NBL_DPED_TNL_L4_CK_CMD_31_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_31_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_31_DWLEN (1) +union dped_tnl_l4_ck_cmd_31_u { + struct dped_tnl_l4_ck_cmd_31 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_31_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_40_ADDR (0x75c380) +#define NBL_DPED_TNL_L4_CK_CMD_40_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_40_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_40_DWLEN (1) +union dped_tnl_l4_ck_cmd_40_u { + struct dped_tnl_l4_ck_cmd_40 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_40_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_41_ADDR (0x75c384) +#define NBL_DPED_TNL_L4_CK_CMD_41_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_41_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_41_DWLEN (1) +union dped_tnl_l4_ck_cmd_41_u { + struct dped_tnl_l4_ck_cmd_41 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 
data[NBL_DPED_TNL_L4_CK_CMD_41_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_50_ADDR (0x75c388) +#define NBL_DPED_TNL_L4_CK_CMD_50_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_50_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_50_DWLEN (1) +union dped_tnl_l4_ck_cmd_50_u { + struct dped_tnl_l4_ck_cmd_50 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_50_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_51_ADDR (0x75c38c) +#define NBL_DPED_TNL_L4_CK_CMD_51_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_51_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_51_DWLEN (1) +union dped_tnl_l4_ck_cmd_51_u { + struct dped_tnl_l4_ck_cmd_51 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_51_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_60_ADDR (0x75c390) +#define NBL_DPED_TNL_L4_CK_CMD_60_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_60_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_60_DWLEN (1) +union dped_tnl_l4_ck_cmd_60_u { + struct dped_tnl_l4_ck_cmd_60 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x0 RW */ + u32 phid:2; /* [27:26] Default:0x0 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_60_DWLEN]; +} __packed; + +#define NBL_DPED_TNL_L4_CK_CMD_61_ADDR (0x75c394) +#define NBL_DPED_TNL_L4_CK_CMD_61_DEPTH (1) +#define NBL_DPED_TNL_L4_CK_CMD_61_WIDTH (32) +#define NBL_DPED_TNL_L4_CK_CMD_61_DWLEN (1) +union dped_tnl_l4_ck_cmd_61_u { + struct dped_tnl_l4_ck_cmd_61 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x8 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DPED_TNL_L4_CK_CMD_61_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_00_ADDR (0x75c3a0) +#define NBL_DPED_MIR_CMD_00_DEPTH (1) +#define NBL_DPED_MIR_CMD_00_WIDTH (32) +#define NBL_DPED_MIR_CMD_00_DWLEN (1) +union dped_mir_cmd_00_u { + struct dped_mir_cmd_00 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } 
__packed info; + u32 data[NBL_DPED_MIR_CMD_00_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_01_ADDR (0x75c3a4) +#define NBL_DPED_MIR_CMD_01_DEPTH (1) +#define NBL_DPED_MIR_CMD_01_WIDTH (32) +#define NBL_DPED_MIR_CMD_01_DWLEN (1) +union dped_mir_cmd_01_u { + struct dped_mir_cmd_01 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_01_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_10_ADDR (0x75c3a8) +#define NBL_DPED_MIR_CMD_10_DEPTH (1) +#define NBL_DPED_MIR_CMD_10_WIDTH (32) +#define NBL_DPED_MIR_CMD_10_DWLEN (1) +union dped_mir_cmd_10_u { + struct dped_mir_cmd_10 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_10_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_11_ADDR (0x75c3ac) +#define NBL_DPED_MIR_CMD_11_DEPTH (1) +#define NBL_DPED_MIR_CMD_11_WIDTH (32) +#define NBL_DPED_MIR_CMD_11_DWLEN (1) +union dped_mir_cmd_11_u { + struct dped_mir_cmd_11 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_11_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_20_ADDR (0x75c3b0) +#define NBL_DPED_MIR_CMD_20_DEPTH (1) +#define NBL_DPED_MIR_CMD_20_WIDTH (32) +#define NBL_DPED_MIR_CMD_20_DWLEN (1) +union dped_mir_cmd_20_u { + struct dped_mir_cmd_20 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_20_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_21_ADDR (0x75c3b4) +#define NBL_DPED_MIR_CMD_21_DEPTH (1) +#define NBL_DPED_MIR_CMD_21_WIDTH (32) +#define NBL_DPED_MIR_CMD_21_DWLEN (1) +union dped_mir_cmd_21_u { + struct dped_mir_cmd_21 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_21_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_30_ADDR (0x75c3b8) +#define NBL_DPED_MIR_CMD_30_DEPTH (1) +#define NBL_DPED_MIR_CMD_30_WIDTH (32) +#define NBL_DPED_MIR_CMD_30_DWLEN (1) +union dped_mir_cmd_30_u { + struct dped_mir_cmd_30 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_30_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_31_ADDR (0x75c3bc) +#define NBL_DPED_MIR_CMD_31_DEPTH (1) +#define NBL_DPED_MIR_CMD_31_WIDTH (32) +#define NBL_DPED_MIR_CMD_31_DWLEN (1) +union dped_mir_cmd_31_u { + struct dped_mir_cmd_31 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_31_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_40_ADDR (0x75c3c0) +#define NBL_DPED_MIR_CMD_40_DEPTH (1) 
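+/*
+ * Reader's note (illustrative only, not generator output): every register
+ * in these generated headers follows the same four-macro pattern -- _ADDR
+ * is the register offset, _DEPTH the number of table entries, _WIDTH the
+ * entry width in bits, and _DWLEN the entry length in 32-bit words.  The
+ * matching union overlays the named bit-field view (info) on the raw
+ * dword array (data), so fields can be composed symbolically before the
+ * raw words are written to hardware.  A minimal sketch, assuming a
+ * hypothetical nbl_hw_write32(hw, addr, val) MMIO helper:
+ *
+ *	union dped_mir_cmd_30_u cmd = { .data = { 0 } };
+ *
+ *	cmd.info.len = 16;	// header bytes handled by this command
+ *	cmd.info.oft = 0;	// offset of the edited region
+ *	cmd.info.en = 1;	// enable the command
+ *	nbl_hw_write32(hw, NBL_DPED_MIR_CMD_30_ADDR, cmd.data[0]);
+ */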
+#define NBL_DPED_MIR_CMD_40_WIDTH (32) +#define NBL_DPED_MIR_CMD_40_DWLEN (1) +union dped_mir_cmd_40_u { + struct dped_mir_cmd_40 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_40_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_41_ADDR (0x75c3c4) +#define NBL_DPED_MIR_CMD_41_DEPTH (1) +#define NBL_DPED_MIR_CMD_41_WIDTH (32) +#define NBL_DPED_MIR_CMD_41_DWLEN (1) +union dped_mir_cmd_41_u { + struct dped_mir_cmd_41 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_41_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_50_ADDR (0x75c3c8) +#define NBL_DPED_MIR_CMD_50_DEPTH (1) +#define NBL_DPED_MIR_CMD_50_WIDTH (32) +#define NBL_DPED_MIR_CMD_50_DWLEN (1) +union dped_mir_cmd_50_u { + struct dped_mir_cmd_50 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_50_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_51_ADDR (0x75c3cc) +#define NBL_DPED_MIR_CMD_51_DEPTH (1) +#define NBL_DPED_MIR_CMD_51_WIDTH (32) +#define NBL_DPED_MIR_CMD_51_DWLEN (1) +union dped_mir_cmd_51_u { + struct dped_mir_cmd_51 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_51_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_60_ADDR (0x75c3d0) +#define NBL_DPED_MIR_CMD_60_DEPTH (1) +#define NBL_DPED_MIR_CMD_60_WIDTH (32) +#define NBL_DPED_MIR_CMD_60_DWLEN (1) +union dped_mir_cmd_60_u { + struct dped_mir_cmd_60 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_60_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_61_ADDR (0x75c3d4) +#define NBL_DPED_MIR_CMD_61_DEPTH (1) +#define NBL_DPED_MIR_CMD_61_WIDTH (32) +#define NBL_DPED_MIR_CMD_61_DWLEN (1) +union dped_mir_cmd_61_u { + struct dped_mir_cmd_61 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_61_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_70_ADDR (0x75c3d8) +#define NBL_DPED_MIR_CMD_70_DEPTH (1) +#define NBL_DPED_MIR_CMD_70_WIDTH (32) +#define NBL_DPED_MIR_CMD_70_DWLEN (1) +union dped_mir_cmd_70_u { + struct dped_mir_cmd_70 { + u32 len:7; /* [6:0] Default:0x0 RW */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 oft:7; /* [14:8] Default:0x0 RW */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 mode:1; /* [16] Default:0x0 RW */ + u32 en:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_70_DWLEN]; +} __packed; + +#define NBL_DPED_MIR_CMD_71_ADDR (0x75c3dc) +#define NBL_DPED_MIR_CMD_71_DEPTH (1) 
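+/*
+ * Reader's note (illustrative only, not generator output): the DPED
+ * mirror command registers are laid out as eight _x0/_x1 pairs (x = 0..7):
+ * the _x0 word carries the command proper (len/oft/mode/en) and the _x1
+ * word the value and type select (vau/type_sel) it operates with.  A
+ * sketch for pair 5, again assuming a hypothetical nbl_hw_write32():
+ *
+ *	union dped_mir_cmd_50_u cmd = { .data = { 0 } };
+ *	union dped_mir_cmd_51_u val = { .data = { 0 } };
+ *
+ *	cmd.info.len = 14;	// length of the mirrored edit
+ *	cmd.info.en = 1;
+ *	val.info.vau = 0x8100;	// value consumed by the command
+ *	nbl_hw_write32(hw, NBL_DPED_MIR_CMD_50_ADDR, cmd.data[0]);
+ *	nbl_hw_write32(hw, NBL_DPED_MIR_CMD_51_ADDR, val.data[0]);
+ */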
+#define NBL_DPED_MIR_CMD_71_WIDTH (32) +#define NBL_DPED_MIR_CMD_71_DWLEN (1) +union dped_mir_cmd_71_u { + struct dped_mir_cmd_71 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 type_sel:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIR_CMD_71_DWLEN]; +} __packed; + +#define NBL_DPED_DSCP_CK_EN_ADDR (0x75c3e8) +#define NBL_DPED_DSCP_CK_EN_DEPTH (1) +#define NBL_DPED_DSCP_CK_EN_WIDTH (32) +#define NBL_DPED_DSCP_CK_EN_DWLEN (1) +union dped_dscp_ck_en_u { + struct dped_dscp_ck_en { + u32 l4_en:1; /* [0] Default:0x0 RW */ + u32 l3_en:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DSCP_CK_EN_DWLEN]; +} __packed; + +#define NBL_DPED_RDMA_ECN_REMARK_ADDR (0x75c3f0) +#define NBL_DPED_RDMA_ECN_REMARK_DEPTH (1) +#define NBL_DPED_RDMA_ECN_REMARK_WIDTH (32) +#define NBL_DPED_RDMA_ECN_REMARK_DWLEN (1) +union dped_rdma_ecn_remark_u { + struct dped_rdma_ecn_remark { + u32 vau:2; /* [1:0] Default:0x1 RW */ + u32 rsv1:2; /* [3:2] Default:0x0 RO */ + u32 en:1; /* [4] Default:0x0 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_RDMA_ECN_REMARK_DWLEN]; +} __packed; + +#define NBL_DPED_VLAN_OFFSET_ADDR (0x75c3f4) +#define NBL_DPED_VLAN_OFFSET_DEPTH (1) +#define NBL_DPED_VLAN_OFFSET_WIDTH (32) +#define NBL_DPED_VLAN_OFFSET_DWLEN (1) +union dped_vlan_offset_u { + struct dped_vlan_offset { + u32 oft:8; /* [7:0] Default:0xC RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_VLAN_OFFSET_DWLEN]; +} __packed; + +#define NBL_DPED_DSCP_OFFSET_0_ADDR (0x75c3f8) +#define NBL_DPED_DSCP_OFFSET_0_DEPTH (1) +#define NBL_DPED_DSCP_OFFSET_0_WIDTH (32) +#define NBL_DPED_DSCP_OFFSET_0_DWLEN (1) +union dped_dscp_offset_0_u { + struct dped_dscp_offset_0 { + u32 oft:8; /* [7:0] Default:0x8 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DSCP_OFFSET_0_DWLEN]; +} __packed; + +#define NBL_DPED_DSCP_OFFSET_1_ADDR (0x75c3fc) +#define NBL_DPED_DSCP_OFFSET_1_DEPTH (1) +#define NBL_DPED_DSCP_OFFSET_1_WIDTH (32) +#define NBL_DPED_DSCP_OFFSET_1_DWLEN (1) +union dped_dscp_offset_1_u { + struct dped_dscp_offset_1 { + u32 oft:8; /* [7:0] Default:0x4 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_DSCP_OFFSET_1_DWLEN]; +} __packed; + +#define NBL_DPED_CFG_TEST_ADDR (0x75c600) +#define NBL_DPED_CFG_TEST_DEPTH (1) +#define NBL_DPED_CFG_TEST_WIDTH (32) +#define NBL_DPED_CFG_TEST_DWLEN (1) +union dped_cfg_test_u { + struct dped_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DPED_BP_STATE_ADDR (0x75c608) +#define NBL_DPED_BP_STATE_DEPTH (1) +#define NBL_DPED_BP_STATE_WIDTH (32) +#define NBL_DPED_BP_STATE_DWLEN (1) +union dped_bp_state_u { + struct dped_bp_state { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RO */ + u32 bm_not_rdy:1; /* [1] Default:0x0 RO */ + u32 dprbac_fc:1; /* [2] Default:0x0 RO */ + u32 qm_fc:1; /* [3] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_BP_STATE_DWLEN]; +} __packed; + +#define NBL_DPED_BP_HISTORY_ADDR (0x75c60c) +#define NBL_DPED_BP_HISTORY_DEPTH (1) +#define NBL_DPED_BP_HISTORY_WIDTH (32) +#define NBL_DPED_BP_HISTORY_DWLEN (1) +union dped_bp_history_u { + struct dped_bp_history { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RC */ + u32 bm_not_rdy:1; /* [1] Default:0x0 RC */ + u32 dprbac_fc:1; /* [2] 
Default:0x0 RC */ + u32 qm_fc:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_DPED_MIRID_IND_ADDR (0x75c900) +#define NBL_DPED_MIRID_IND_DEPTH (1) +#define NBL_DPED_MIRID_IND_WIDTH (32) +#define NBL_DPED_MIRID_IND_DWLEN (1) +union dped_mirid_ind_u { + struct dped_mirid_ind { + u32 nomat:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MIRID_IND_DWLEN]; +} __packed; + +#define NBL_DPED_MD_AUX_OFT_ADDR (0x75c904) +#define NBL_DPED_MD_AUX_OFT_DEPTH (1) +#define NBL_DPED_MD_AUX_OFT_WIDTH (32) +#define NBL_DPED_MD_AUX_OFT_DWLEN (1) +union dped_md_aux_oft_u { + struct dped_md_aux_oft { + u32 l2_oft:8; /* [7:0] Default:0x0 RO */ + u32 l3_oft:8; /* [15:8] Default:0x0 RO */ + u32 l4_oft:8; /* [23:16] Default:0x0 RO */ + u32 pld_oft:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_AUX_OFT_DWLEN]; +} __packed; + +#define NBL_DPED_MD_AUX_PKT_LEN_ADDR (0x75c908) +#define NBL_DPED_MD_AUX_PKT_LEN_DEPTH (1) +#define NBL_DPED_MD_AUX_PKT_LEN_WIDTH (32) +#define NBL_DPED_MD_AUX_PKT_LEN_DWLEN (1) +union dped_md_aux_pkt_len_u { + struct dped_md_aux_pkt_len { + u32 len:14; /* [13:0] Default:0x0 RO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_AUX_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_DPED_MD_FWD_MIR_ADDR (0x75c90c) +#define NBL_DPED_MD_FWD_MIR_DEPTH (1) +#define NBL_DPED_MD_FWD_MIR_WIDTH (32) +#define NBL_DPED_MD_FWD_MIR_DWLEN (1) +union dped_md_fwd_mir_u { + struct dped_md_fwd_mir { + u32 id:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_FWD_MIR_DWLEN]; +} __packed; + +#define NBL_DPED_MD_FWD_DPORT_ADDR (0x75c910) +#define NBL_DPED_MD_FWD_DPORT_DEPTH (1) +#define NBL_DPED_MD_FWD_DPORT_WIDTH (32) +#define NBL_DPED_MD_FWD_DPORT_DWLEN (1) +union dped_md_fwd_dport_u { + struct dped_md_fwd_dport { + u32 id:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_DPED_MD_AUX_PLD_CKSUM_ADDR (0x75c914) +#define NBL_DPED_MD_AUX_PLD_CKSUM_DEPTH (1) +#define NBL_DPED_MD_AUX_PLD_CKSUM_WIDTH (32) +#define NBL_DPED_MD_AUX_PLD_CKSUM_DWLEN (1) +union dped_md_aux_pld_cksum_u { + struct dped_md_aux_pld_cksum { + u32 ck:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_AUX_PLD_CKSUM_DWLEN]; +} __packed; + +#define NBL_DPED_INNER_PKT_CKSUM_ADDR (0x75c918) +#define NBL_DPED_INNER_PKT_CKSUM_DEPTH (1) +#define NBL_DPED_INNER_PKT_CKSUM_WIDTH (32) +#define NBL_DPED_INNER_PKT_CKSUM_DWLEN (1) +union dped_inner_pkt_cksum_u { + struct dped_inner_pkt_cksum { + u32 ck:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_INNER_PKT_CKSUM_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_0_ADDR (0x75c920) +#define NBL_DPED_MD_EDIT_0_DEPTH (1) +#define NBL_DPED_MD_EDIT_0_WIDTH (32) +#define NBL_DPED_MD_EDIT_0_DWLEN (1) +union dped_md_edit_0_u { + struct dped_md_edit_0 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_0_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_1_ADDR (0x75c924) +#define NBL_DPED_MD_EDIT_1_DEPTH (1) +#define NBL_DPED_MD_EDIT_1_WIDTH (32) +#define NBL_DPED_MD_EDIT_1_DWLEN (1) +union dped_md_edit_1_u { + struct 
dped_md_edit_1 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_1_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_2_ADDR (0x75c928) +#define NBL_DPED_MD_EDIT_2_DEPTH (1) +#define NBL_DPED_MD_EDIT_2_WIDTH (32) +#define NBL_DPED_MD_EDIT_2_DWLEN (1) +union dped_md_edit_2_u { + struct dped_md_edit_2 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_2_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_3_ADDR (0x75c92c) +#define NBL_DPED_MD_EDIT_3_DEPTH (1) +#define NBL_DPED_MD_EDIT_3_WIDTH (32) +#define NBL_DPED_MD_EDIT_3_DWLEN (1) +union dped_md_edit_3_u { + struct dped_md_edit_3 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_3_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_4_ADDR (0x75c930) +#define NBL_DPED_MD_EDIT_4_DEPTH (1) +#define NBL_DPED_MD_EDIT_4_WIDTH (32) +#define NBL_DPED_MD_EDIT_4_DWLEN (1) +union dped_md_edit_4_u { + struct dped_md_edit_4 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_4_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_5_ADDR (0x75c934) +#define NBL_DPED_MD_EDIT_5_DEPTH (1) +#define NBL_DPED_MD_EDIT_5_WIDTH (32) +#define NBL_DPED_MD_EDIT_5_DWLEN (1) +union dped_md_edit_5_u { + struct dped_md_edit_5 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_5_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_6_ADDR (0x75c938) +#define NBL_DPED_MD_EDIT_6_DEPTH (1) +#define NBL_DPED_MD_EDIT_6_WIDTH (32) +#define NBL_DPED_MD_EDIT_6_DWLEN (1) +union dped_md_edit_6_u { + struct dped_md_edit_6 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_6_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_7_ADDR (0x75c93c) +#define NBL_DPED_MD_EDIT_7_DEPTH (1) +#define NBL_DPED_MD_EDIT_7_WIDTH (32) +#define NBL_DPED_MD_EDIT_7_DWLEN (1) +union dped_md_edit_7_u { + struct dped_md_edit_7 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_7_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_8_ADDR (0x75c940) +#define NBL_DPED_MD_EDIT_8_DEPTH (1) +#define NBL_DPED_MD_EDIT_8_WIDTH (32) +#define NBL_DPED_MD_EDIT_8_DWLEN (1) +union dped_md_edit_8_u { + struct dped_md_edit_8 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_8_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_9_ADDR (0x75c944) +#define NBL_DPED_MD_EDIT_9_DEPTH (1) +#define NBL_DPED_MD_EDIT_9_WIDTH (32) +#define NBL_DPED_MD_EDIT_9_DWLEN (1) +union dped_md_edit_9_u { + struct dped_md_edit_9 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_9_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_10_ADDR (0x75c948) +#define 
NBL_DPED_MD_EDIT_10_DEPTH (1) +#define NBL_DPED_MD_EDIT_10_WIDTH (32) +#define NBL_DPED_MD_EDIT_10_DWLEN (1) +union dped_md_edit_10_u { + struct dped_md_edit_10 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_10_DWLEN]; +} __packed; + +#define NBL_DPED_MD_EDIT_11_ADDR (0x75c94c) +#define NBL_DPED_MD_EDIT_11_DEPTH (1) +#define NBL_DPED_MD_EDIT_11_WIDTH (32) +#define NBL_DPED_MD_EDIT_11_DWLEN (1) +union dped_md_edit_11_u { + struct dped_md_edit_11 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_MD_EDIT_11_DWLEN]; +} __packed; + +#define NBL_DPED_ADD_DEL_LEN_ADDR (0x75c950) +#define NBL_DPED_ADD_DEL_LEN_DEPTH (1) +#define NBL_DPED_ADD_DEL_LEN_WIDTH (32) +#define NBL_DPED_ADD_DEL_LEN_DWLEN (1) +union dped_add_del_len_u { + struct dped_add_del_len { + u32 len:9; /* [8:0] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_ADD_DEL_LEN_DWLEN]; +} __packed; + +#define NBL_DPED_TTL_INFO_ADDR (0x75c970) +#define NBL_DPED_TTL_INFO_DEPTH (1) +#define NBL_DPED_TTL_INFO_WIDTH (32) +#define NBL_DPED_TTL_INFO_DWLEN (1) +union dped_ttl_info_u { + struct dped_ttl_info { + u32 old_ttl:8; /* [7:0] Default:0x0 RO */ + u32 new_ttl:8; /* [15:8] Default:0x0 RO */ + u32 ttl_val:1; /* [16] Default:0x0 RC */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TTL_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_LEN_INFO_VLD_ADDR (0x75c974) +#define NBL_DPED_LEN_INFO_VLD_DEPTH (1) +#define NBL_DPED_LEN_INFO_VLD_WIDTH (32) +#define NBL_DPED_LEN_INFO_VLD_DWLEN (1) +union dped_len_info_vld_u { + struct dped_len_info_vld { + u32 length0:1; /* [0] Default:0x0 RC */ + u32 length1:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LEN_INFO_VLD_DWLEN]; +} __packed; + +#define NBL_DPED_LEN0_INFO_ADDR (0x75c978) +#define NBL_DPED_LEN0_INFO_DEPTH (1) +#define NBL_DPED_LEN0_INFO_WIDTH (32) +#define NBL_DPED_LEN0_INFO_DWLEN (1) +union dped_len0_info_u { + struct dped_len0_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LEN0_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_LEN1_INFO_ADDR (0x75c97c) +#define NBL_DPED_LEN1_INFO_DEPTH (1) +#define NBL_DPED_LEN1_INFO_WIDTH (32) +#define NBL_DPED_LEN1_INFO_DWLEN (1) +union dped_len1_info_u { + struct dped_len1_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_LEN1_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_ATNUM_INFO_ADDR (0x75c980) +#define NBL_DPED_EDIT_ATNUM_INFO_DEPTH (1) +#define NBL_DPED_EDIT_ATNUM_INFO_WIDTH (32) +#define NBL_DPED_EDIT_ATNUM_INFO_DWLEN (1) +union dped_edit_atnum_info_u { + struct dped_edit_atnum_info { + u32 replace:4; /* [3:0] Default:0x0 RO */ + u32 del:4; /* [7:4] Default:0x0 RO */ + u32 add:4; /* [11:8] Default:0x0 RO */ + u32 ttl:4; /* [15:12] Default:0x0 RO */ + u32 dscp:4; /* [19:16] Default:0x0 RO */ + u32 tnl:4; /* [23:20] Default:0x0 RO */ + u32 sport:4; /* [27:24] Default:0x0 RO */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_ATNUM_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_EDIT_NO_AT_INFO_ADDR (0x75c984) +#define NBL_DPED_EDIT_NO_AT_INFO_DEPTH (1) 
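+/*
+ * Reader's note (inferred from the field annotations, not generator
+ * output): the access tags in these comments appear to follow the usual
+ * register-map convention -- RW read/write, RO read-only, WO write-only,
+ * RC clear-on-read (the latched debug/status bits above), and RWC
+ * write-1-to-clear (the interrupt status bits).  The RTL documentation
+ * remains authoritative.
+ */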
+#define NBL_DPED_EDIT_NO_AT_INFO_WIDTH (32) +#define NBL_DPED_EDIT_NO_AT_INFO_DWLEN (1) +union dped_edit_no_at_info_u { + struct dped_edit_no_at_info { + u32 l3_len:1; /* [0] Default:0x0 RC */ + u32 l4_len:1; /* [1] Default:0x0 RC */ + u32 l3_ck:1; /* [2] Default:0x0 RC */ + u32 l4_ck:1; /* [3] Default:0x0 RC */ + u32 sctp_ck:1; /* [4] Default:0x0 RC */ + u32 padding:1; /* [5] Default:0x0 RC */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_EDIT_NO_AT_INFO_DWLEN]; +} __packed; + +#define NBL_DPED_HW_EDT_PROF_ADDR (0x75d000) +#define NBL_DPED_HW_EDT_PROF_DEPTH (32) +#define NBL_DPED_HW_EDT_PROF_WIDTH (32) +#define NBL_DPED_HW_EDT_PROF_DWLEN (1) +union dped_hw_edt_prof_u { + struct dped_hw_edt_prof { + u32 l4_len:2; /* [1:0] Default:0x2 RW */ + u32 l3_len:2; /* [3:2] Default:0x2 RW */ + u32 l4_ck:3; /* [6:4] Default:0x7 RW */ + u32 l3_ck:1; /* [7:7] Default:0x0 RW */ + u32 l4_ck_zero_free:1; /* [8:8] Default:0x1 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_HW_EDT_PROF_DWLEN]; +} __packed; +#define NBL_DPED_HW_EDT_PROF_REG(r) (NBL_DPED_HW_EDT_PROF_ADDR + \ + (NBL_DPED_HW_EDT_PROF_DWLEN * 4) * (r)) + +#define NBL_DPED_OUT_MASK_ADDR (0x75e000) +#define NBL_DPED_OUT_MASK_DEPTH (24) +#define NBL_DPED_OUT_MASK_WIDTH (64) +#define NBL_DPED_OUT_MASK_DWLEN (2) +union dped_out_mask_u { + struct dped_out_mask { + u32 flag:32; /* [31:0] Default:0x0 RW */ + u32 fwd:30; /* [61:32] Default:0x0 RW */ + u32 rsv:2; /* [63:62] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_OUT_MASK_DWLEN]; +} __packed; +#define NBL_DPED_OUT_MASK_REG(r) (NBL_DPED_OUT_MASK_ADDR + \ + (NBL_DPED_OUT_MASK_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_EDIT_CMD_ADDR (0x75f000) +#define NBL_DPED_TAB_EDIT_CMD_DEPTH (32) +#define NBL_DPED_TAB_EDIT_CMD_WIDTH (32) +#define NBL_DPED_TAB_EDIT_CMD_DWLEN (1) +union dped_tab_edit_cmd_u { + struct dped_tab_edit_cmd { + u32 in_offset:8; /* [7:0] Default:0x0 RW */ + u32 phid:2; /* [9:8] Default:0x0 RW */ + u32 len:7; /* [16:10] Default:0x0 RW */ + u32 mode:4; /* [20:17] Default:0xf RW */ + u32 l4_ck_ofld_upt:1; /* [21] Default:0x1 RW */ + u32 l3_ck_ofld_upt:1; /* [22] Default:0x1 RW */ + u32 rsv:9; /* [31:23] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_EDIT_CMD_DWLEN]; +} __packed; +#define NBL_DPED_TAB_EDIT_CMD_REG(r) (NBL_DPED_TAB_EDIT_CMD_ADDR + \ + (NBL_DPED_TAB_EDIT_CMD_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_MIR_ADDR (0x760000) +#define NBL_DPED_TAB_MIR_DEPTH (8) +#define NBL_DPED_TAB_MIR_WIDTH (1024) +#define NBL_DPED_TAB_MIR_DWLEN (32) +union dped_tab_mir_u { + struct dped_tab_mir { + u32 cfg_mir_data:16; /* [719:0] Default:0x0 RW */ + u32 cfg_mir_data_arr[22]; /* [719:0] Default:0x0 RW */ + u32 cfg_mir_info_l:32; /* [755:720] Default:0x0 RW */ + u32 cfg_mir_info_h:4; /* [755:720] Default:0x0 RW */ + u32 rsv:12; /* [1023:756] Default:0x0 RO */ + u32 rsv_arr[8]; /* [1023:756] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_MIR_DWLEN]; +} __packed; +#define NBL_DPED_TAB_MIR_REG(r) (NBL_DPED_TAB_MIR_ADDR + \ + (NBL_DPED_TAB_MIR_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_VSI_TYPE_ADDR (0x761000) +#define NBL_DPED_TAB_VSI_TYPE_DEPTH (1031) +#define NBL_DPED_TAB_VSI_TYPE_WIDTH (32) +#define NBL_DPED_TAB_VSI_TYPE_DWLEN (1) +union dped_tab_vsi_type_u { + struct dped_tab_vsi_type { + u32 sel:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_VSI_TYPE_DWLEN]; +} __packed; +#define NBL_DPED_TAB_VSI_TYPE_REG(r) 
(NBL_DPED_TAB_VSI_TYPE_ADDR + \ + (NBL_DPED_TAB_VSI_TYPE_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_REPLACE_ADDR (0x763000) +#define NBL_DPED_TAB_REPLACE_DEPTH (2048) +#define NBL_DPED_TAB_REPLACE_WIDTH (64) +#define NBL_DPED_TAB_REPLACE_DWLEN (2) +union dped_tab_replace_u { + struct dped_tab_replace { + u32 vau_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPED_TAB_REPLACE_DWLEN]; +} __packed; +#define NBL_DPED_TAB_REPLACE_REG(r) (NBL_DPED_TAB_REPLACE_ADDR + \ + (NBL_DPED_TAB_REPLACE_DWLEN * 4) * (r)) + +#define NBL_DPED_TAB_TNL_ADDR (0x7dc000) +#define NBL_DPED_TAB_TNL_DEPTH (4096) +#define NBL_DPED_TAB_TNL_WIDTH (1024) +#define NBL_DPED_TAB_TNL_DWLEN (32) +union dped_tab_tnl_u { + struct dped_tab_tnl { + u32 cfg_tnl_data:16; /* [719:0] Default:0x0 RW */ + u32 cfg_tnl_data_arr[22]; /* [719:0] Default:0x0 RW */ + u32 cfg_tnl_info:8; /* [791:720] Default:0x0 RW */ + u32 cfg_tnl_info_arr[2]; /* [791:720] Default:0x0 RW */ + u32 rsv_l:32; /* [1023:792] Default:0x0 RO */ + u32 rsv_h:8; /* [1023:792] Default:0x0 RO */ + u32 rsv_arr[6]; /* [1023:792] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPED_TAB_TNL_DWLEN]; +} __packed; +#define NBL_DPED_TAB_TNL_REG(r) (NBL_DPED_TAB_TNL_ADDR + \ + (NBL_DPED_TAB_TNL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpmem.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpmem.h new file mode 100644 index 0000000000000000000000000000000000000000..d161f550addd5ec38d83cd942f802b67a823ee88 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dpmem.h @@ -0,0 +1,195 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DPMEM_H +#define NBL_DPMEM_H 1 + +#include <linux/types.h> + +#define NBL_DPMEM_BASE (0x00708000) + +#define NBL_DPMEM_INT_STATUS_ADDR (0x708000) +#define NBL_DPMEM_INT_STATUS_DEPTH (1) +#define NBL_DPMEM_INT_STATUS_WIDTH (32) +#define NBL_DPMEM_INT_STATUS_DWLEN (1) +union dpmem_int_status_u { + struct dpmem_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 cpu_lgc_hzd:1; /* [2] Default:0x0 RWC */ + u32 parity_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DPMEM_INT_MASK_ADDR (0x708004) +#define NBL_DPMEM_INT_MASK_DEPTH (1) +#define NBL_DPMEM_INT_MASK_WIDTH (32) +#define NBL_DPMEM_INT_MASK_DWLEN (1) +union dpmem_int_mask_u { + struct dpmem_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 cpu_lgc_hzd:1; /* [2] Default:0x0 RW */ + u32 parity_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DPMEM_INT_SET_ADDR (0x708008) +#define NBL_DPMEM_INT_SET_DEPTH (1) +#define NBL_DPMEM_INT_SET_WIDTH (32) +#define NBL_DPMEM_INT_SET_DWLEN (1) +union dpmem_int_set_u { + struct dpmem_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 cpu_lgc_hzd:1; /* [2] Default:0x0 WO */ + u32 parity_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_INT_SET_DWLEN]; +} __packed; + +#define 
NBL_DPMEM_COR_ERR_INFO_ADDR (0x70800c) +#define NBL_DPMEM_COR_ERR_INFO_DEPTH (1) +#define NBL_DPMEM_COR_ERR_INFO_WIDTH (32) +#define NBL_DPMEM_COR_ERR_INFO_DWLEN (1) +union dpmem_cor_err_info_u { + struct dpmem_cor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPMEM_PARITY_ERR_INFO_ADDR (0x708014) +#define NBL_DPMEM_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DPMEM_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DPMEM_PARITY_ERR_INFO_DWLEN (1) +union dpmem_parity_err_info_u { + struct dpmem_parity_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPMEM_CIF_ERR_INFO_ADDR (0x70801c) +#define NBL_DPMEM_CIF_ERR_INFO_DEPTH (1) +#define NBL_DPMEM_CIF_ERR_INFO_WIDTH (32) +#define NBL_DPMEM_CIF_ERR_INFO_DWLEN (1) +union dpmem_cif_err_info_u { + struct dpmem_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPMEM_CAR_CTRL_ADDR (0x708100) +#define NBL_DPMEM_CAR_CTRL_DEPTH (1) +#define NBL_DPMEM_CAR_CTRL_WIDTH (32) +#define NBL_DPMEM_CAR_CTRL_DWLEN (1) +union dpmem_car_ctrl_u { + struct dpmem_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DPMEM_INIT_START_ADDR (0x708104) +#define NBL_DPMEM_INIT_START_DEPTH (1) +#define NBL_DPMEM_INIT_START_WIDTH (32) +#define NBL_DPMEM_INIT_START_DWLEN (1) +union dpmem_init_start_u { + struct dpmem_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_INIT_START_DWLEN]; +} __packed; + +#define NBL_DPMEM_MEM_ACCESS_MODE_ADDR (0x708108) +#define NBL_DPMEM_MEM_ACCESS_MODE_DEPTH (1) +#define NBL_DPMEM_MEM_ACCESS_MODE_WIDTH (32) +#define NBL_DPMEM_MEM_ACCESS_MODE_DWLEN (1) +union dpmem_mem_access_mode_u { + struct dpmem_mem_access_mode { + u32 mode:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_MEM_ACCESS_MODE_DWLEN]; +} __packed; + +#define NBL_DPMEM_MEM_ACCESS_EN_ADDR (0x70810c) +#define NBL_DPMEM_MEM_ACCESS_EN_DEPTH (1) +#define NBL_DPMEM_MEM_ACCESS_EN_WIDTH (32) +#define NBL_DPMEM_MEM_ACCESS_EN_DWLEN (1) +union dpmem_mem_access_en_u { + struct dpmem_mem_access_en { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_MEM_ACCESS_EN_DWLEN]; +} __packed; + +#define NBL_DPMEM_MEM_ACCESS_ADDR_ADDR (0x708110) +#define NBL_DPMEM_MEM_ACCESS_ADDR_DEPTH (1) +#define NBL_DPMEM_MEM_ACCESS_ADDR_WIDTH (32) +#define NBL_DPMEM_MEM_ACCESS_ADDR_DWLEN (1) +union dpmem_mem_access_addr_u { + struct dpmem_mem_access_addr { + u32 row_raddr:10; /* [9:0] Default:0x0 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 col_raddr:7; /* [22:16] Default:0x0 RW */ + u32 rsv:9; /* [31:23] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_MEM_ACCESS_ADDR_DWLEN]; +} __packed; + +#define NBL_DPMEM_CFG_TEST_ADDR (0x708114) +#define NBL_DPMEM_CFG_TEST_DEPTH (1) +#define 
NBL_DPMEM_CFG_TEST_WIDTH (32) +#define NBL_DPMEM_CFG_TEST_DWLEN (1) +union dpmem_cfg_test_u { + struct dpmem_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPMEM_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DPMEM_INIT_DONE_ADDR (0x708400) +#define NBL_DPMEM_INIT_DONE_DEPTH (1) +#define NBL_DPMEM_INIT_DONE_WIDTH (32) +#define NBL_DPMEM_INIT_DONE_DWLEN (1) +union dpmem_init_done_u { + struct dpmem_init_done { + u32 init_done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DPMEM_MEM_ACCESS_RDATA_ADDR (0x708404) +#define NBL_DPMEM_MEM_ACCESS_RDATA_DEPTH (1) +#define NBL_DPMEM_MEM_ACCESS_RDATA_WIDTH (32) +#define NBL_DPMEM_MEM_ACCESS_RDATA_DWLEN (1) +union dpmem_mem_access_rdata_u { + struct dpmem_mem_access_rdata { + u32 rdata:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPMEM_MEM_ACCESS_RDATA_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dqm.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dqm.h new file mode 100644 index 0000000000000000000000000000000000000000..7841de910e8c6f649de4f9ce0bfe43551ce30b39 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dqm.h @@ -0,0 +1,583 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DQM_H +#define NBL_DQM_H 1 + +#include <linux/types.h> + +#define NBL_DQM_BASE (0x00714000) + +#define NBL_DQM_INT_STATUS_ADDR (0x714000) +#define NBL_DQM_INT_STATUS_DEPTH (1) +#define NBL_DQM_INT_STATUS_WIDTH (32) +#define NBL_DQM_INT_STATUS_DWLEN (1) +union dqm_int_status_u { + struct dqm_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_w_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_r_err:1; /* [2] Default:0x0 RWC */ + u32 dport_err:1; /* [3] Default:0x0 RWC */ + u32 weight_err:1; /* [4] Default:0x0 RWC */ + u32 dport_value_err:1; /* [5] Default:0x0 RWC */ + u32 sport_value_err:1; /* [6] Default:0x0 RWC */ + u32 slice_del_overflow:1; /* [7] Default:0x0 RWC */ + u32 cor_err:1; /* [8] Default:0x0 RWC */ + u32 cif_err:1; /* [9] Default:0x0 RWC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DQM_INT_MASK_ADDR (0x714004) +#define NBL_DQM_INT_MASK_DEPTH (1) +#define NBL_DQM_INT_MASK_WIDTH (32) +#define NBL_DQM_INT_MASK_DWLEN (1) +union dqm_int_mask_u { + struct dqm_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 fifo_w_err:1; /* [1] Default:0x0 RW */ + u32 fifo_r_err:1; /* [2] Default:0x0 RW */ + u32 dport_err:1; /* [3] Default:0x0 RW */ + u32 weight_err:1; /* [4] Default:0x0 RW */ + u32 dport_value_err:1; /* [5] Default:0x0 RW */ + u32 sport_value_err:1; /* [6] Default:0x0 RW */ + u32 slice_del_overflow:1; /* [7] Default:0x0 RW */ + u32 cor_err:1; /* [8] Default:0x0 RW */ + u32 cif_err:1; /* [9] Default:0x0 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DQM_INT_SET_ADDR (0x714008) +#define NBL_DQM_INT_SET_DEPTH (1) +#define NBL_DQM_INT_SET_WIDTH (32) +#define NBL_DQM_INT_SET_DWLEN (1) +union dqm_int_set_u { + struct dqm_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 fifo_w_err:1; /* [1] Default:0x0 WO */ + u32 fifo_r_err:1; /* [2] Default:0x0 WO */ + u32 dport_err:1; /* [3] Default:0x0 WO */ + u32 weight_err:1;
/* [4] Default:0x0 WO */ + u32 dport_value_err:1; /* [5] Default:0x0 WO */ + u32 sport_value_err:1; /* [6] Default:0x0 WO */ + u32 slice_del_overflow:1; /* [7] Default:0x0 WO */ + u32 cor_err:1; /* [8] Default:0x0 WO */ + u32 cif_err:1; /* [9] Default:0x0 WO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_INT_SET_DWLEN]; +} __packed; + +#define NBL_DQM_UCOR_ERR_INFO_ADDR (0x71400c) +#define NBL_DQM_UCOR_ERR_INFO_DEPTH (1) +#define NBL_DQM_UCOR_ERR_INFO_WIDTH (32) +#define NBL_DQM_UCOR_ERR_INFO_DWLEN (1) +union dqm_ucor_err_info_u { + struct dqm_ucor_err_info { + u32 ram_addr:11; /* [10:0] Default:0x0 RO */ + u32 ram_id:4; /* [14:11] Default:0x0 RO */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_UCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DQM_DPORT_VALUE_ERR_INFO_ADDR (0x71402c) +#define NBL_DQM_DPORT_VALUE_ERR_INFO_DEPTH (1) +#define NBL_DQM_DPORT_VALUE_ERR_INFO_WIDTH (32) +#define NBL_DQM_DPORT_VALUE_ERR_INFO_DWLEN (1) +union dqm_dport_value_err_info_u { + struct dqm_dport_value_err_info { + u32 id:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_DPORT_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DQM_SPORT_VALUE_ERR_INFO_ADDR (0x714034) +#define NBL_DQM_SPORT_VALUE_ERR_INFO_DEPTH (1) +#define NBL_DQM_SPORT_VALUE_ERR_INFO_WIDTH (32) +#define NBL_DQM_SPORT_VALUE_ERR_INFO_DWLEN (1) +union dqm_sport_value_err_info_u { + struct dqm_sport_value_err_info { + u32 id:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_SPORT_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DQM_COR_ERR_INFO_ADDR (0x714048) +#define NBL_DQM_COR_ERR_INFO_DEPTH (1) +#define NBL_DQM_COR_ERR_INFO_WIDTH (32) +#define NBL_DQM_COR_ERR_INFO_DWLEN (1) +union dqm_cor_err_info_u { + struct dqm_cor_err_info { + u32 ram_addr:11; /* [10:0] Default:0x0 RO */ + u32 ram_id:4; /* [14:11] Default:0x0 RO */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DQM_CIF_ERR_INFO_ADDR (0x714050) +#define NBL_DQM_CIF_ERR_INFO_DEPTH (1) +#define NBL_DQM_CIF_ERR_INFO_WIDTH (32) +#define NBL_DQM_CIF_ERR_INFO_DWLEN (1) +union dqm_cif_err_info_u { + struct dqm_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DQM_CAR_CTRL_ADDR (0x714100) +#define NBL_DQM_CAR_CTRL_DEPTH (1) +#define NBL_DQM_CAR_CTRL_WIDTH (32) +#define NBL_DQM_CAR_CTRL_DWLEN (1) +union dqm_car_ctrl_u { + struct dqm_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DQM_INIT_START_ADDR (0x714104) +#define NBL_DQM_INIT_START_DEPTH (1) +#define NBL_DQM_INIT_START_WIDTH (32) +#define NBL_DQM_INIT_START_DWLEN (1) +union dqm_init_start_u { + struct dqm_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_INIT_START_DWLEN]; +} __packed; + +#define NBL_DQM_ACTION_ID_ADDR (0x714138) +#define NBL_DQM_ACTION_ID_DEPTH (1) +#define NBL_DQM_ACTION_ID_WIDTH (32) +#define NBL_DQM_ACTION_ID_DWLEN (1) +union 
dqm_action_id_u { + struct dqm_action_id { + u32 dport:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_ACTION_ID_DWLEN]; +} __packed; + +#define NBL_DQM_FLAG_OFFSET_ADDR (0x71413c) +#define NBL_DQM_FLAG_OFFSET_DEPTH (1) +#define NBL_DQM_FLAG_OFFSET_WIDTH (32) +#define NBL_DQM_FLAG_OFFSET_DWLEN (1) +union dqm_flag_offset_u { + struct dqm_flag_offset { + u32 rdma:5; /* [4:0] Default:0xA RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_FLAG_OFFSET_DWLEN]; +} __packed; + +#define NBL_DQM_INQUE_SCH_ADDR (0x714140) +#define NBL_DQM_INQUE_SCH_DEPTH (1) +#define NBL_DQM_INQUE_SCH_WIDTH (32) +#define NBL_DQM_INQUE_SCH_DWLEN (1) +union dqm_inque_sch_u { + struct dqm_inque_sch { + u32 epro_ppe:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_INQUE_SCH_DWLEN]; +} __packed; + +#define NBL_DQM_QUE_TYPE_ADDR (0x714144) +#define NBL_DQM_QUE_TYPE_DEPTH (1) +#define NBL_DQM_QUE_TYPE_WIDTH (32) +#define NBL_DQM_QUE_TYPE_DWLEN (1) +union dqm_que_type_u { + struct dqm_que_type { + u32 bp_drop:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_QUE_TYPE_DWLEN]; +} __packed; + +#define NBL_DQM_STAT_TYPE_ADDR (0x714148) +#define NBL_DQM_STAT_TYPE_DEPTH (1) +#define NBL_DQM_STAT_TYPE_WIDTH (32) +#define NBL_DQM_STAT_TYPE_DWLEN (1) +union dqm_stat_type_u { + struct dqm_stat_type { + u32 bp_drop:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_STAT_TYPE_DWLEN]; +} __packed; + +#define NBL_DQM_LB_TTL_DROP_ADDR (0x71414c) +#define NBL_DQM_LB_TTL_DROP_DEPTH (1) +#define NBL_DQM_LB_TTL_DROP_WIDTH (32) +#define NBL_DQM_LB_TTL_DROP_DWLEN (1) +union dqm_lb_ttl_drop_u { + struct dqm_lb_ttl_drop { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_LB_TTL_DROP_DWLEN]; +} __packed; + +#define NBL_DQM_HIGH_PRI_EN_ADDR (0x714154) +#define NBL_DQM_HIGH_PRI_EN_DEPTH (1) +#define NBL_DQM_HIGH_PRI_EN_WIDTH (32) +#define NBL_DQM_HIGH_PRI_EN_DWLEN (1) +union dqm_high_pri_en_u { + struct dqm_high_pri_en { + u32 inque:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_HIGH_PRI_EN_DWLEN]; +} __packed; + +#define NBL_DQM_ERR_DROP_EN_ADDR (0x714160) +#define NBL_DQM_ERR_DROP_EN_DEPTH (1) +#define NBL_DQM_ERR_DROP_EN_WIDTH (32) +#define NBL_DQM_ERR_DROP_EN_DWLEN (1) +union dqm_err_drop_en_u { + struct dqm_err_drop_en { + u32 dport:1; /* [0] Default:0x1 RW */ + u32 sport:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_ERR_DROP_EN_DWLEN]; +} __packed; + +#define NBL_DQM_QUE_LEN_ADDR (0x714200) +#define NBL_DQM_QUE_LEN_DEPTH (40) +#define NBL_DQM_QUE_LEN_WIDTH (32) +#define NBL_DQM_QUE_LEN_DWLEN (1) +union dqm_que_len_u { + struct dqm_que_len { + u32 unshare:12; /* [11:0] Default:0xC RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 share:12; /* [27:16] Default:0x205 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_QUE_LEN_DWLEN]; +} __packed; +#define NBL_DQM_QUE_LEN_REG(r) (NBL_DQM_QUE_LEN_ADDR + \ + (NBL_DQM_QUE_LEN_DWLEN * 4) * (r)) + +#define NBL_DQM_PORT_LEN_ADDR (0x714300) +#define NBL_DQM_PORT_LEN_DEPTH (5) +#define NBL_DQM_PORT_LEN_WIDTH (32) +#define NBL_DQM_PORT_LEN_DWLEN (1) +union dqm_port_len_u { + struct dqm_port_len { + u32 port:12; /* 
[11:0] Default:0x1F9 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_PORT_LEN_DWLEN]; +} __packed; +#define NBL_DQM_PORT_LEN_REG(r) (NBL_DQM_PORT_LEN_ADDR + \ + (NBL_DQM_PORT_LEN_DWLEN * 4) * (r)) + +#define NBL_DQM_COS_LEN_ADDR (0x714360) +#define NBL_DQM_COS_LEN_DEPTH (1) +#define NBL_DQM_COS_LEN_WIDTH (32) +#define NBL_DQM_COS_LEN_DWLEN (1) +union dqm_cos_len_u { + struct dqm_cos_len { + u32 low:12; /* [11:0] Default:0x1F9 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 high:12; /* [27:16] Default:0x1F9 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_COS_LEN_DWLEN]; +} __packed; + +#define NBL_DQM_COS_TSH_ADDR (0x714400) +#define NBL_DQM_COS_TSH_DEPTH (32) +#define NBL_DQM_COS_TSH_WIDTH (32) +#define NBL_DQM_COS_TSH_DWLEN (1) +union dqm_cos_tsh_u { + struct dqm_cos_tsh { + u32 low:12; /* [11:0] Default:0x18 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 high:12; /* [27:16] Default:0x24 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_COS_TSH_DWLEN]; +} __packed; +#define NBL_DQM_COS_TSH_REG(r) (NBL_DQM_COS_TSH_ADDR + \ + (NBL_DQM_COS_TSH_DWLEN * 4) * (r)) + +#define NBL_DQM_PORT_TSH_ADDR (0x714500) +#define NBL_DQM_PORT_TSH_DEPTH (4) +#define NBL_DQM_PORT_TSH_WIDTH (32) +#define NBL_DQM_PORT_TSH_DWLEN (1) +union dqm_port_tsh_u { + struct dqm_port_tsh { + u32 low:12; /* [11:0] Default:0x48 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 high:12; /* [27:16] Default:0x66 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_PORT_TSH_DWLEN]; +} __packed; +#define NBL_DQM_PORT_TSH_REG(r) (NBL_DQM_PORT_TSH_ADDR + \ + (NBL_DQM_PORT_TSH_DWLEN * 4) * (r)) + +#define NBL_DQM_COS_WEIGHT_ADDR (0x714624) +#define NBL_DQM_COS_WEIGHT_DEPTH (1) +#define NBL_DQM_COS_WEIGHT_WIDTH (48) +#define NBL_DQM_COS_WEIGHT_DWLEN (2) +union dqm_cos_weight_u { + struct dqm_cos_weight { + u32 cos_l:32; /* [31:0] Default:0xffff_ffff RW */ + u32 cos_h:16; /* [47:32] Default:0xffff RW */ + } __packed info; + u32 data[NBL_DQM_COS_WEIGHT_DWLEN]; +} __packed; + +#define NBL_DQM_BP_TSH_ADDR (0x714638) +#define NBL_DQM_BP_TSH_DEPTH (1) +#define NBL_DQM_BP_TSH_WIDTH (32) +#define NBL_DQM_BP_TSH_DWLEN (1) +union dqm_bp_tsh_u { + struct dqm_bp_tsh { + u32 timing:32; /* [31:0] Default:0x00ff_ffff RW */ + } __packed info; + u32 data[NBL_DQM_BP_TSH_DWLEN]; +} __packed; + +#define NBL_DQM_PORT_AGING_ADDR (0x71463c) +#define NBL_DQM_PORT_AGING_DEPTH (1) +#define NBL_DQM_PORT_AGING_WIDTH (32) +#define NBL_DQM_PORT_AGING_DWLEN (1) +union dqm_port_aging_u { + struct dqm_port_aging { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_PORT_AGING_DWLEN]; +} __packed; + +#define NBL_DQM_SHAPING_TIMING_ADD_PERIOD_ADDR (0x714648) +#define NBL_DQM_SHAPING_TIMING_ADD_PERIOD_DEPTH (1) +#define NBL_DQM_SHAPING_TIMING_ADD_PERIOD_WIDTH (32) +#define NBL_DQM_SHAPING_TIMING_ADD_PERIOD_DWLEN (1) +union dqm_shaping_timing_add_period_u { + struct dqm_shaping_timing_add_period { + u32 sch:12; /* [11:0] Default:0x3C0 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_SHAPING_TIMING_ADD_PERIOD_DWLEN]; +} __packed; + +#define NBL_DQM_SHAPING_DEPTH_ADDR (0x71464c) +#define NBL_DQM_SHAPING_DEPTH_DEPTH (1) +#define NBL_DQM_SHAPING_DEPTH_WIDTH (32) +#define NBL_DQM_SHAPING_DEPTH_DWLEN (1) +union dqm_shaping_depth_u { + struct dqm_shaping_depth { + u32 sch:4; /* [3:0] Default:0x5 RW 
*/ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_SHAPING_DEPTH_DWLEN]; +} __packed; + +#define NBL_DQM_SHAPING_COLOR_Y_DROP_ADDR (0x714650) +#define NBL_DQM_SHAPING_COLOR_Y_DROP_DEPTH (1) +#define NBL_DQM_SHAPING_COLOR_Y_DROP_WIDTH (32) +#define NBL_DQM_SHAPING_COLOR_Y_DROP_DWLEN (1) +union dqm_shaping_color_y_drop_u { + struct dqm_shaping_color_y_drop { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_SHAPING_COLOR_Y_DROP_DWLEN]; +} __packed; + +#define NBL_DQM_INIT_DONE_ADDR (0x714800) +#define NBL_DQM_INIT_DONE_DEPTH (1) +#define NBL_DQM_INIT_DONE_WIDTH (32) +#define NBL_DQM_INIT_DONE_DWLEN (1) +union dqm_init_done_u { + struct dqm_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DQM_NFULL_HISTORY_ADDR (0x714840) +#define NBL_DQM_NFULL_HISTORY_DEPTH (1) +#define NBL_DQM_NFULL_HISTORY_WIDTH (32) +#define NBL_DQM_NFULL_HISTORY_DWLEN (1) +union dqm_nfull_history_u { + struct dqm_nfull_history { + u32 epro:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 info_buf_0:1; /* [2] Default:0x0 RC */ + u32 info_buf_1:1; /* [3] Default:0x0 RC */ + u32 info_buf_2:1; /* [4] Default:0x0 RC */ + u32 info_buf_3:1; /* [5] Default:0x0 RC */ + u32 info_buf_4:1; /* [6] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [9] Default:0x0 RC */ + u32 pkt_len_buf_3:1; /* [10] Default:0x0 RC */ + u32 pkt_len_buf_4:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_NFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_DQM_NAFULL_HISTORY_ADDR (0x714844) +#define NBL_DQM_NAFULL_HISTORY_DEPTH (1) +#define NBL_DQM_NAFULL_HISTORY_WIDTH (32) +#define NBL_DQM_NAFULL_HISTORY_DWLEN (1) +union dqm_nafull_history_u { + struct dqm_nafull_history { + u32 epro:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 info_buf_0:1; /* [2] Default:0x0 RC */ + u32 info_buf_1:1; /* [3] Default:0x0 RC */ + u32 info_buf_2:1; /* [4] Default:0x0 RC */ + u32 info_buf_3:1; /* [5] Default:0x0 RC */ + u32 info_buf_4:1; /* [6] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [9] Default:0x0 RC */ + u32 pkt_len_buf_3:1; /* [10] Default:0x0 RC */ + u32 pkt_len_buf_4:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_NAFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_DQM_WERR_HISTORY_ADDR (0x714848) +#define NBL_DQM_WERR_HISTORY_DEPTH (1) +#define NBL_DQM_WERR_HISTORY_WIDTH (32) +#define NBL_DQM_WERR_HISTORY_DWLEN (1) +union dqm_werr_history_u { + struct dqm_werr_history { + u32 epro:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 mfifo:1; /* [2] Default:0x0 RC */ + u32 info_buf_0:1; /* [3] Default:0x0 RC */ + u32 info_buf_1:1; /* [4] Default:0x0 RC */ + u32 info_buf_2:1; /* [5] Default:0x0 RC */ + u32 info_buf_3:1; /* [6] Default:0x0 RC */ + u32 info_buf_4:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [9] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [10] Default:0x0 RC */ + u32 pkt_len_buf_3:1; /* [11] Default:0x0 RC */ + u32 pkt_len_buf_4:1; /* [12] Default:0x0 RC */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } 
__packed info; + u32 data[NBL_DQM_WERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_DQM_RERR_HISTORY_ADDR (0x71484c) +#define NBL_DQM_RERR_HISTORY_DEPTH (1) +#define NBL_DQM_RERR_HISTORY_WIDTH (32) +#define NBL_DQM_RERR_HISTORY_DWLEN (1) +union dqm_rerr_history_u { + struct dqm_rerr_history { + u32 epro:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 mfifo:1; /* [2] Default:0x0 RC */ + u32 info_buf_0:1; /* [3] Default:0x0 RC */ + u32 info_buf_1:1; /* [4] Default:0x0 RC */ + u32 info_buf_2:1; /* [5] Default:0x0 RC */ + u32 info_buf_3:1; /* [6] Default:0x0 RC */ + u32 info_buf_4:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [9] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [10] Default:0x0 RC */ + u32 pkt_len_buf_3:1; /* [11] Default:0x0 RC */ + u32 pkt_len_buf_4:1; /* [12] Default:0x0 RC */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_RERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_DQM_QUE_RDY_LOW_ADDR (0x714be0) +#define NBL_DQM_QUE_RDY_LOW_DEPTH (1) +#define NBL_DQM_QUE_RDY_LOW_WIDTH (32) +#define NBL_DQM_QUE_RDY_LOW_DWLEN (1) +union dqm_que_rdy_low_u { + struct dqm_que_rdy_low { + u32 pntr:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_QUE_RDY_LOW_DWLEN]; +} __packed; + +#define NBL_DQM_QUE_RDY_HIGH_ADDR (0x714be4) +#define NBL_DQM_QUE_RDY_HIGH_DEPTH (1) +#define NBL_DQM_QUE_RDY_HIGH_WIDTH (32) +#define NBL_DQM_QUE_RDY_HIGH_DWLEN (1) +union dqm_que_rdy_high_u { + struct dqm_que_rdy_high { + u32 pntr:8; /* [7:0] Default:0x0 RO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_QUE_RDY_HIGH_DWLEN]; +} __packed; + +#define NBL_DQM_LEN_UNDERFLOW_ADDR (0x714be8) +#define NBL_DQM_LEN_UNDERFLOW_DEPTH (1) +#define NBL_DQM_LEN_UNDERFLOW_WIDTH (32) +#define NBL_DQM_LEN_UNDERFLOW_DWLEN (1) +union dqm_len_underflow_u { + struct dqm_len_underflow { + u32 share:1; /* [0] Default:0x0 RO */ + u32 cos_l:1; /* [1] Default:0x0 RO */ + u32 cos_h:1; /* [2] Default:0x0 RO */ + u32 port:5; /* [7:3] Default:0x0 RO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_LEN_UNDERFLOW_DWLEN]; +} __packed; + +#define NBL_DQM_TOTAL_LEN_ADDR (0x714bec) +#define NBL_DQM_TOTAL_LEN_DEPTH (1) +#define NBL_DQM_TOTAL_LEN_WIDTH (32) +#define NBL_DQM_TOTAL_LEN_DWLEN (1) +union dqm_total_len_u { + struct dqm_total_len { + u32 pntr:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_TOTAL_LEN_DWLEN]; +} __packed; + +#define NBL_DQM_SHAPING_TBL_ADDR (0x718000) +#define NBL_DQM_SHAPING_TBL_DEPTH (5) +#define NBL_DQM_SHAPING_TBL_WIDTH (128) +#define NBL_DQM_SHAPING_TBL_DWLEN (4) +union dqm_shaping_tbl_u { + struct dqm_shaping_tbl { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DQM_SHAPING_TBL_DWLEN]; +} __packed; +#define NBL_DQM_SHAPING_TBL_REG(r) (NBL_DQM_SHAPING_TBL_ADDR + \ + (NBL_DQM_SHAPING_TBL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_drmux.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_drmux.h new file mode 100644 index 
0000000000000000000000000000000000000000..06734ac04a0fbb81e0e53fb6365d91caec811e61 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_drmux.h @@ -0,0 +1,269 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DRMUX_H +#define NBL_DRMUX_H 1 + +#include <linux/types.h> + +#define NBL_DRMUX_BASE (0x00654000) + +#define NBL_DRMUX_INT_STATUS_ADDR (0x654000) +#define NBL_DRMUX_INT_STATUS_DEPTH (1) +#define NBL_DRMUX_INT_STATUS_WIDTH (32) +#define NBL_DRMUX_INT_STATUS_DWLEN (1) +union drmux_int_status_u { + struct drmux_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 parity_err:1; /* [4] Default:0x0 RWC */ + u32 cif_err:1; /* [5] Default:0x0 RWC */ + u32 lowp:1; /* [6] Default:0x0 RWC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DRMUX_INT_MASK_ADDR (0x654004) +#define NBL_DRMUX_INT_MASK_DEPTH (1) +#define NBL_DRMUX_INT_MASK_WIDTH (32) +#define NBL_DRMUX_INT_MASK_DWLEN (1) +union drmux_int_mask_u { + struct drmux_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 parity_err:1; /* [4] Default:0x0 RW */ + u32 cif_err:1; /* [5] Default:0x0 RW */ + u32 lowp:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DRMUX_INT_SET_ADDR (0x654008) +#define NBL_DRMUX_INT_SET_DEPTH (1) +#define NBL_DRMUX_INT_SET_WIDTH (32) +#define NBL_DRMUX_INT_SET_DWLEN (1) +union drmux_int_set_u { + struct drmux_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 parity_err:1; /* [4] Default:0x0 WO */ + u32 cif_err:1; /* [5] Default:0x0 WO */ + u32 lowp:1; /* [6] Default:0x0 WO */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_INT_SET_DWLEN]; +} __packed; + +#define NBL_DRMUX_UCOR_ERR_INFO_ADDR (0x65400c) +#define NBL_DRMUX_UCOR_ERR_INFO_DEPTH (1) +#define NBL_DRMUX_UCOR_ERR_INFO_WIDTH (32) +#define NBL_DRMUX_UCOR_ERR_INFO_DWLEN (1) +union drmux_ucor_err_info_u { + struct drmux_ucor_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_UCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DRMUX_COR_ERR_INFO_ADDR (0x654014) +#define NBL_DRMUX_COR_ERR_INFO_DEPTH (1) +#define NBL_DRMUX_COR_ERR_INFO_WIDTH (32) +#define NBL_DRMUX_COR_ERR_INFO_DWLEN (1) +union drmux_cor_err_info_u { + struct drmux_cor_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DRMUX_PARITY_ERR_INFO_ADDR (0x65402c) +#define NBL_DRMUX_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DRMUX_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DRMUX_PARITY_ERR_INFO_DWLEN (1) +union drmux_parity_err_info_u { + struct drmux_parity_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define 
NBL_DRMUX_CIF_ERR_INFO_ADDR (0x654034) +#define NBL_DRMUX_CIF_ERR_INFO_DEPTH (1) +#define NBL_DRMUX_CIF_ERR_INFO_WIDTH (32) +#define NBL_DRMUX_CIF_ERR_INFO_DWLEN (1) +union drmux_cif_err_info_u { + struct drmux_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DRMUX_CAR_CTRL_ADDR (0x654100) +#define NBL_DRMUX_CAR_CTRL_DEPTH (1) +#define NBL_DRMUX_CAR_CTRL_WIDTH (32) +#define NBL_DRMUX_CAR_CTRL_DWLEN (1) +union drmux_car_ctrl_u { + struct drmux_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DRMUX_DL4S_WEIGHT_ADDR (0x654128) +#define NBL_DRMUX_DL4S_WEIGHT_DEPTH (1) +#define NBL_DRMUX_DL4S_WEIGHT_WIDTH (32) +#define NBL_DRMUX_DL4S_WEIGHT_DWLEN (1) +union drmux_dl4s_weight_u { + struct drmux_dl4s_weight { + u32 cfg_dl4s_weight:8; /* [7:0] Default:0x7f RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_DL4S_WEIGHT_DWLEN]; +} __packed; + +#define NBL_DRMUX_DRDMA_WEIGHT_ADDR (0x65412c) +#define NBL_DRMUX_DRDMA_WEIGHT_DEPTH (1) +#define NBL_DRMUX_DRDMA_WEIGHT_WIDTH (32) +#define NBL_DRMUX_DRDMA_WEIGHT_DWLEN (1) +union drmux_drdma_weight_u { + struct drmux_drdma_weight { + u32 cfg_drdma_weight:8; /* [7:0] Default:0x7f RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_DRDMA_WEIGHT_DWLEN]; +} __packed; + +#define NBL_DRMUX_EMP_WEIGHT_ADDR (0x654130) +#define NBL_DRMUX_EMP_WEIGHT_DEPTH (1) +#define NBL_DRMUX_EMP_WEIGHT_WIDTH (32) +#define NBL_DRMUX_EMP_WEIGHT_DWLEN (1) +union drmux_emp_weight_u { + struct drmux_emp_weight { + u32 cfg_emp_weight:8; /* [7:0] Default:0x7f RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_EMP_WEIGHT_DWLEN]; +} __packed; + +#define NBL_DRMUX_ACTION_ID_ADDR (0x65413c) +#define NBL_DRMUX_ACTION_ID_DEPTH (1) +#define NBL_DRMUX_ACTION_ID_WIDTH (32) +#define NBL_DRMUX_ACTION_ID_DWLEN (1) +union drmux_action_id_u { + struct drmux_action_id { + u32 dport_action_id:6; /* [5:0] Default:0x9 RW */ + u32 rsv2:2; /* [7:6] Default:0x0 RO */ + u32 dque_action_id:6; /* [13:8] Default:0xa RW */ + u32 rsv1:2; /* [15:14] Default:0x0 RO */ + u32 prbacidx_action_id:6; /* [21:16] Default:0x11 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_ACTION_ID_DWLEN]; +} __packed; + +#define NBL_DRMUX_INF_PRI_ADDR (0x654140) +#define NBL_DRMUX_INF_PRI_DEPTH (1) +#define NBL_DRMUX_INF_PRI_WIDTH (32) +#define NBL_DRMUX_INF_PRI_DWLEN (1) +union drmux_inf_pri_u { + struct drmux_inf_pri { + u32 dport_pri:2; /* [1:0] Default:0x0 RW */ + u32 rsv2:6; /* [7:2] Default:0x0 RO */ + u32 dque_pri:2; /* [9:8] Default:0x0 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 prbacidx_pri:2; /* [17:16] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_INF_PRI_DWLEN]; +} __packed; + +#define NBL_DRMUX_LOWP_TIME_ADDR (0x654144) +#define NBL_DRMUX_LOWP_TIME_DEPTH (1) +#define NBL_DRMUX_LOWP_TIME_WIDTH (64) +#define NBL_DRMUX_LOWP_TIME_DWLEN (2) +union drmux_lowp_time_u { + struct drmux_lowp_time { + u32 lowp_time_arr[2]; /* [63:0] Default:0x3938700 RW */ + } __packed info; + u32 
data[NBL_DRMUX_LOWP_TIME_DWLEN]; +} __packed; + +#define NBL_DRMUX_LOWP_EN_ADDR (0x65414c) +#define NBL_DRMUX_LOWP_EN_DEPTH (1) +#define NBL_DRMUX_LOWP_EN_WIDTH (32) +#define NBL_DRMUX_LOWP_EN_DWLEN (1) +union drmux_lowp_en_u { + struct drmux_lowp_en { + u32 lowp_en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_LOWP_EN_DWLEN]; +} __packed; + +#define NBL_DRMUX_LOWP_OFF_ADDR (0x654150) +#define NBL_DRMUX_LOWP_OFF_DEPTH (1) +#define NBL_DRMUX_LOWP_OFF_WIDTH (32) +#define NBL_DRMUX_LOWP_OFF_DWLEN (1) +union drmux_lowp_off_u { + struct drmux_lowp_off { + u32 lowp_off:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_LOWP_OFF_DWLEN]; +} __packed; + +#define NBL_DRMUX_SCH_COS_ADDR (0x654154) +#define NBL_DRMUX_SCH_COS_DEPTH (1) +#define NBL_DRMUX_SCH_COS_WIDTH (32) +#define NBL_DRMUX_SCH_COS_DWLEN (1) +union drmux_sch_cos_u { + struct drmux_sch_cos { + u32 emp_sch_cos:3; /* [2:0] Default:0x0 RW */ + u32 bmc_sch_cos:3; /* [5:3] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_SCH_COS_DWLEN]; +} __packed; + +#define NBL_DRMUX_EMP_BG_ID_ADDR (0x654158) +#define NBL_DRMUX_EMP_BG_ID_DEPTH (1) +#define NBL_DRMUX_EMP_BG_ID_WIDTH (32) +#define NBL_DRMUX_EMP_BG_ID_DWLEN (1) +union drmux_emp_bg_id_u { + struct drmux_emp_bg_id { + u32 bg_id:3; /* [2:0] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_EMP_BG_ID_DWLEN]; +} __packed; + +#define NBL_DRMUX_UPED_BG_ID_ADDR (0x65415c) +#define NBL_DRMUX_UPED_BG_ID_DEPTH (1) +#define NBL_DRMUX_UPED_BG_ID_WIDTH (32) +#define NBL_DRMUX_UPED_BG_ID_DWLEN (1) +union drmux_uped_bg_id_u { + struct drmux_uped_bg_id { + u32 bg_id:12; /* [11:0] Default:0x0 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DRMUX_UPED_BG_ID_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dsch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dsch.h new file mode 100644 index 0000000000000000000000000000000000000000..129ccfc35afe2896f44f9d75769ba20e899a1139 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dsch.h @@ -0,0 +1,3466 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DSCH_H +#define NBL_DSCH_H 1 + +#include <linux/types.h> + +#define NBL_DSCH_BASE (0x00404000) + +#define NBL_DSCH_INT_STATUS_ADDR (0x404000) +#define NBL_DSCH_INT_STATUS_DEPTH (1) +#define NBL_DSCH_INT_STATUS_WIDTH (32) +#define NBL_DSCH_INT_STATUS_DWLEN (1) +union dsch_int_status_u { + struct dsch_int_status { + u32 vn_ucor_err:1; /* [0] Default:0x0 RWC */ + u32 vn_cor_err:1; /* [1] Default:0x0 RWC */ + u32 vn_fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 vn_fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 vn_parity_err:1; /* [4] Default:0x0 RWC */ + u32 vn_list_err:1; /* [5] Default:0x0 RWC */ + u32 rdma_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 rdma_cor_err:1; /* [7] Default:0x0 RWC */ + u32 rdma_fifo_uflw_err:1; /* [8] Default:0x0 RWC */ + u32 rdma_fifo_dflw_err:1; /* [9] Default:0x0 RWC */ + u32 rdma_parity_err:1; /* [10] Default:0x0 RWC */ + u32 rdma_list_err:1; /* [11] Default:0x0 RWC */ + u32 cif_err:1; /* [12] Default:0x0 RWC */ + u32 vn_other_abn:1; /* [13] Default:0x0 RWC */ + u32 rdma_other_abn:1; /* [14] Default:0x0 RWC */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DSCH_INT_MASK_ADDR (0x404004) +#define NBL_DSCH_INT_MASK_DEPTH (1) +#define NBL_DSCH_INT_MASK_WIDTH (32) +#define NBL_DSCH_INT_MASK_DWLEN (1) +union dsch_int_mask_u { + struct dsch_int_mask { + u32 vn_ucor_err:1; /* [0] Default:0x0 RW */ + u32 vn_cor_err:1; /* [1] Default:0x0 RW */ + u32 vn_fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 vn_fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 vn_parity_err:1; /* [4] Default:0x0 RW */ + u32 vn_list_err:1; /* [5] Default:0x0 RW */ + u32 rdma_ucor_err:1; /* [6] Default:0x0 RW */ + u32 rdma_cor_err:1; /* [7] Default:0x0 RW */ + u32 rdma_fifo_uflw_err:1; /* [8] Default:0x0 RW */ + u32 rdma_fifo_dflw_err:1; /* [9] Default:0x0 RW */ + u32 rdma_parity_err:1; /* [10] Default:0x0 RW */ + u32 rdma_list_err:1; /* [11] Default:0x0 RW */ + u32 cif_err:1; /* [12] Default:0x0 RW */ + u32 vn_other_abn:1; /* [13] Default:0x0 RW */ + u32 rdma_other_abn:1; /* [14] Default:0x0 RW */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DSCH_INT_SET_ADDR (0x404008) +#define NBL_DSCH_INT_SET_DEPTH (1) +#define NBL_DSCH_INT_SET_WIDTH (32) +#define NBL_DSCH_INT_SET_DWLEN (1) +union dsch_int_set_u { + struct dsch_int_set { + u32 vn_ucor_err:1; /* [0] Default:0x0 WO */ + u32 vn_cor_err:1; /* [1] Default:0x0 WO */ + u32 vn_fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 vn_fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 vn_parity_err:1; /* [4] Default:0x0 WO */ + u32 vn_list_err:1; /* [5] Default:0x0 WO */ + u32 rdma_ucor_err:1; /* [6] Default:0x0 WO */ + u32 rdma_cor_err:1; /* [7] Default:0x0 WO */ + u32 rdma_fifo_uflw_err:1; /* [8] Default:0x0 WO */ + u32 rdma_fifo_dflw_err:1; /* [9] Default:0x0 WO */ + u32 rdma_parity_err:1; /* [10] Default:0x0 WO */ + u32 rdma_list_err:1; /* [11] Default:0x0 WO */ + u32 cif_err:1; /* [12] Default:0x0 WO */ + u32 vn_other_abn:1; /* [13] Default:0x0 WO */ + u32 rdma_other_abn:1; /* [14] Default:0x0 WO */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_INT_SET_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_PARITY_ERR_INFO_ADDR (0x404050) +#define NBL_DSCH_VN_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DSCH_VN_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DSCH_VN_PARITY_ERR_INFO_DWLEN (1) +union dsch_vn_parity_err_info_u { + struct 
dsch_vn_parity_err_info { + u32 ram_addr:16; /* [15:0] Default:0x0 RO */ + u32 ram_id:5; /* [20:16] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_PARITY_ERR_INFO_ADDR (0x404058) +#define NBL_DSCH_RDMA_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DSCH_RDMA_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DSCH_RDMA_PARITY_ERR_INFO_DWLEN (1) +union dsch_rdma_parity_err_info_u { + struct dsch_rdma_parity_err_info { + u32 ram_addr:16; /* [15:0] Default:0x0 RO */ + u32 ram_id:5; /* [20:16] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_LIST_ERR_INFO_ADDR (0x404060) +#define NBL_DSCH_VN_LIST_ERR_INFO_DEPTH (1) +#define NBL_DSCH_VN_LIST_ERR_INFO_WIDTH (32) +#define NBL_DSCH_VN_LIST_ERR_INFO_DWLEN (1) +union dsch_vn_list_err_info_u { + struct dsch_vn_list_err_info { + u32 err_id:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_LIST_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_LIST_ERR_INFO_ADDR (0x404068) +#define NBL_DSCH_RDMA_LIST_ERR_INFO_DEPTH (1) +#define NBL_DSCH_RDMA_LIST_ERR_INFO_WIDTH (32) +#define NBL_DSCH_RDMA_LIST_ERR_INFO_DWLEN (1) +union dsch_rdma_list_err_info_u { + struct dsch_rdma_list_err_info { + u32 err_id:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_LIST_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_CIF_ERR_INFO_ADDR (0x404070) +#define NBL_DSCH_CIF_ERR_INFO_DEPTH (1) +#define NBL_DSCH_CIF_ERR_INFO_WIDTH (32) +#define NBL_DSCH_CIF_ERR_INFO_DWLEN (1) +union dsch_cif_err_info_u { + struct dsch_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_OTHER_ABN_INFO_ADDR (0x404078) +#define NBL_DSCH_VN_OTHER_ABN_INFO_DEPTH (1) +#define NBL_DSCH_VN_OTHER_ABN_INFO_WIDTH (32) +#define NBL_DSCH_VN_OTHER_ABN_INFO_DWLEN (1) +union dsch_vn_other_abn_info_u { + struct dsch_vn_other_abn_info { + u32 err_id:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_OTHER_ABN_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_OTHER_ABN_INFO_ADDR (0x404080) +#define NBL_DSCH_RDMA_OTHER_ABN_INFO_DEPTH (1) +#define NBL_DSCH_RDMA_OTHER_ABN_INFO_WIDTH (32) +#define NBL_DSCH_RDMA_OTHER_ABN_INFO_DWLEN (1) +union dsch_rdma_other_abn_info_u { + struct dsch_rdma_other_abn_info { + u32 err_id:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_OTHER_ABN_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_CAR_CTRL_ADDR (0x404100) +#define NBL_DSCH_CAR_CTRL_DEPTH (1) +#define NBL_DSCH_CAR_CTRL_WIDTH (32) +#define NBL_DSCH_CAR_CTRL_DWLEN (1) +union dsch_car_ctrl_u { + struct dsch_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DSCH_INIT_START_ADDR (0x404104) +#define NBL_DSCH_INIT_START_DEPTH (1) +#define NBL_DSCH_INIT_START_WIDTH (32) +#define NBL_DSCH_INIT_START_DWLEN (1) +union 
dsch_init_start_u { + struct dsch_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_INIT_START_DWLEN]; +} __packed; + +#define NBL_DSCH_CTRL_ADDR (0x404108) +#define NBL_DSCH_CTRL_DEPTH (1) +#define NBL_DSCH_CTRL_WIDTH (32) +#define NBL_DSCH_CTRL_DWLEN (1) +union dsch_ctrl_u { + struct dsch_ctrl { + u32 cnt_clr:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_CTRL_DWLEN]; +} __packed; + +#define NBL_DSCH_BP_RESP_ADDR (0x40410c) +#define NBL_DSCH_BP_RESP_DEPTH (1) +#define NBL_DSCH_BP_RESP_WIDTH (32) +#define NBL_DSCH_BP_RESP_DWLEN (1) +union dsch_bp_resp_u { + struct dsch_bp_resp { + u32 dbm_en:1; /* [0] Default:0x1 RW */ + u32 dqm_dpt0_vn_en:1; /* [1] Default:0x1 RW */ + u32 dqm_dpt1_vn_en:1; /* [2] Default:0x1 RW */ + u32 dqm_dpt2_vn_en:1; /* [3] Default:0x1 RW */ + u32 dqm_dpt3_vn_en:1; /* [4] Default:0x1 RW */ + u32 dqm_dpt0_rdma_en:1; /* [5] Default:0x1 RW */ + u32 dqm_dpt1_rdma_en:1; /* [6] Default:0x1 RW */ + u32 dqm_dpt2_rdma_en:1; /* [7] Default:0x1 RW */ + u32 dqm_dpt3_rdma_en:1; /* [8] Default:0x1 RW */ + u32 dstore_vn_spt0_en:1; /* [9] Default:0x1 RW */ + u32 dstore_vn_spt1_en:1; /* [10] Default:0x1 RW */ + u32 dstore_rdma_spt0_en:1; /* [11] Default:0x1 RW */ + u32 dstore_rdma_spt1_en:1; /* [12] Default:0x1 RW */ + u32 dvn_dsch_info_en:1; /* [13] Default:0x1 RW */ + u32 txp_dsch_info_en:1; /* [14] Default:0x1 RW */ + u32 hdma_dsch_dif_w_info_en:1; /* [15] Default:0x1 RW */ + u32 hdma_dsch_dif_w_data_en:1; /* [16] Default:0x1 RW */ + u32 hdma_dsch_dif_r_info_en:1; /* [17] Default:0x1 RW */ + u32 bg_en:4; /* [21:18] Default:0xF RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_BP_RESP_DWLEN]; +} __packed; + +#define NBL_DSCH_DBM_BUF_AEMPTY_XOFF_TH_ADDR (0x404110) +#define NBL_DSCH_DBM_BUF_AEMPTY_XOFF_TH_DEPTH (1) +#define NBL_DSCH_DBM_BUF_AEMPTY_XOFF_TH_WIDTH (32) +#define NBL_DSCH_DBM_BUF_AEMPTY_XOFF_TH_DWLEN (1) +union dsch_dbm_buf_aempty_xoff_th_u { + struct dsch_dbm_buf_aempty_xoff_th { + u32 xoff_th:11; /* [10:0] Default:0xA RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DBM_BUF_AEMPTY_XOFF_TH_DWLEN]; +} __packed; + +#define NBL_DSCH_DBM_BUF_AEMPTY_XON_TH_ADDR (0x404114) +#define NBL_DSCH_DBM_BUF_AEMPTY_XON_TH_DEPTH (1) +#define NBL_DSCH_DBM_BUF_AEMPTY_XON_TH_WIDTH (32) +#define NBL_DSCH_DBM_BUF_AEMPTY_XON_TH_DWLEN (1) +union dsch_dbm_buf_aempty_xon_th_u { + struct dsch_dbm_buf_aempty_xon_th { + u32 xon_th:11; /* [10:0] Default:0x32 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DBM_BUF_AEMPTY_XON_TH_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_QID_MAX_ADDR (0x404118) +#define NBL_DSCH_VN_HOST_QID_MAX_DEPTH (1) +#define NBL_DSCH_VN_HOST_QID_MAX_WIDTH (32) +#define NBL_DSCH_VN_HOST_QID_MAX_DWLEN (1) +union dsch_vn_host_qid_max_u { + struct dsch_vn_host_qid_max { + u32 host_qid_max:11; /* [10:0] Default:0x7DF RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_QID_MAX_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_Q_NUM_EMPTY_FLAG_DLY_LMT_ADDR (0x40411c) +#define NBL_DSCH_VN_HOST_Q_NUM_EMPTY_FLAG_DLY_LMT_DEPTH (1) +#define NBL_DSCH_VN_HOST_Q_NUM_EMPTY_FLAG_DLY_LMT_WIDTH (32) +#define NBL_DSCH_VN_HOST_Q_NUM_EMPTY_FLAG_DLY_LMT_DWLEN (1) +union dsch_vn_host_q_num_empty_flag_dly_lmt_u { + struct dsch_vn_host_q_num_empty_flag_dly_lmt { + u32 
host_q_num_empty_flag_dly_lmt:16; /* [15:0] Default:0x3E8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_Q_NUM_EMPTY_FLAG_DLY_LMT_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_POLL_SPWRR_MAP_ADDR (0x404120) +#define NBL_DSCH_RDMA_POLL_SPWRR_MAP_DEPTH (1) +#define NBL_DSCH_RDMA_POLL_SPWRR_MAP_WIDTH (32) +#define NBL_DSCH_RDMA_POLL_SPWRR_MAP_DWLEN (1) +union dsch_rdma_poll_spwrr_map_u { + struct dsch_rdma_poll_spwrr_map { + u32 rdma_poll_spwrr_map:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_POLL_SPWRR_MAP_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_CSCH_QLEN_TH_ADDR (0x404124) +#define NBL_DSCH_RDMA_CSCH_QLEN_TH_DEPTH (1) +#define NBL_DSCH_RDMA_CSCH_QLEN_TH_WIDTH (32) +#define NBL_DSCH_RDMA_CSCH_QLEN_TH_DWLEN (1) +union dsch_rdma_csch_qlen_th_u { + struct dsch_rdma_csch_qlen_th { + u32 rdma_csch_qlen_th:13; /* [12:0] Default:0x100 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_CSCH_QLEN_TH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_POLL_WGT_ADDR (0x404128) +#define NBL_DSCH_RDMA_POLL_WGT_DEPTH (1) +#define NBL_DSCH_RDMA_POLL_WGT_WIDTH (32) +#define NBL_DSCH_RDMA_POLL_WGT_DWLEN (1) +union dsch_rdma_poll_wgt_u { + struct dsch_rdma_poll_wgt { + u32 wgt:32; /* [31:0] Default:0x01010104 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_POLL_WGT_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_CSCH_INTERVAL_ADDR (0x404130) +#define NBL_DSCH_RDMA_CSCH_INTERVAL_DEPTH (1) +#define NBL_DSCH_RDMA_CSCH_INTERVAL_WIDTH (32) +#define NBL_DSCH_RDMA_CSCH_INTERVAL_DWLEN (1) +union dsch_rdma_csch_interval_u { + struct dsch_rdma_csch_interval { + u32 rdma_csch_interval:6; /* [5:0] Default:0x8 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_CSCH_INTERVAL_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_QUANTA_ADDR (0x404134) +#define NBL_DSCH_VN_QUANTA_DEPTH (1) +#define NBL_DSCH_VN_QUANTA_WIDTH (32) +#define NBL_DSCH_VN_QUANTA_DWLEN (1) +union dsch_vn_quanta_u { + struct dsch_vn_quanta { + u32 h_qua:16; /* [15:0] Default:0x1000 RW */ + u32 e_qua:16; /* [31:16] Default:0x1000 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_QUANTA_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_QUANTA_ADDR (0x404138) +#define NBL_DSCH_RDMA_QUANTA_DEPTH (1) +#define NBL_DSCH_RDMA_QUANTA_WIDTH (32) +#define NBL_DSCH_RDMA_QUANTA_DWLEN (1) +union dsch_rdma_quanta_u { + struct dsch_rdma_quanta { + u32 h_qua:16; /* [15:0] Default:0x1000 RW */ + u32 e_qua:16; /* [31:16] Default:0x1000 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_QUANTA_DWLEN]; +} __packed; + +#define NBL_DSCH_DPT_PFC_MAP_VNH_ADDR (0x40413c) +#define NBL_DSCH_DPT_PFC_MAP_VNH_DEPTH (1) +#define NBL_DSCH_DPT_PFC_MAP_VNH_WIDTH (32) +#define NBL_DSCH_DPT_PFC_MAP_VNH_DWLEN (1) +union dsch_dpt_pfc_map_vnh_u { + struct dsch_dpt_pfc_map_vnh { + u32 dpt0:4; /* [3:0] Default:0x1 RW */ + u32 dpt1:4; /* [7:4] Default:0x2 RW */ + u32 dpt2:4; /* [11:8] Default:0x4 RW */ + u32 dpt3:4; /* [15:12] Default:0x8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DPT_PFC_MAP_VNH_DWLEN]; +} __packed; + +#define NBL_DSCH_BP_SET_ADDR (0x404140) +#define NBL_DSCH_BP_SET_DEPTH (1) +#define NBL_DSCH_BP_SET_WIDTH (32) +#define NBL_DSCH_BP_SET_DWLEN (1) +union dsch_bp_set_u { + struct dsch_bp_set { + u32 dsch_dvn_db_rdy:1; /* [0] Default:0x0 RW */ + u32 dsch_dvn_txlen_rdy:1; /* [1] Default:0x0 RW */ + u32 dsch_txp_db_rdy:1; /* [2] Default:0x0 RW */ + u32 
dsch_txp_txlen_rdy:1; /* [3] Default:0x0 RW */ + u32 dsch_raqp_db_rdy:1; /* [4] Default:0x0 RW */ + u32 dsch_ceaq_db_rdy:1; /* [5] Default:0x0 RW */ + u32 dsch_tqp_rchk_db_rdy:1; /* [6] Default:0x0 RW */ + u32 dsch_tqp_rnr_db_rdy:1; /* [7] Default:0x0 RW */ + u32 dsch_tqp_rto_db_rdy:1; /* [8] Default:0x0 RW */ + u32 dsch_hdma_dif_r_data_fifo_nafull:1; /* [9] Default:0x0 RW */ + u32 dsch_hdma_dif_r_sync_fifo_nafull:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_BP_SET_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DB_TOCSCH_EN_ADDR (0x404144) +#define NBL_DSCH_RDMA_DB_TOCSCH_EN_DEPTH (1) +#define NBL_DSCH_RDMA_DB_TOCSCH_EN_WIDTH (32) +#define NBL_DSCH_RDMA_DB_TOCSCH_EN_DWLEN (1) +union dsch_rdma_db_tocsch_en_u { + struct dsch_rdma_db_tocsch_en { + u32 en:5; /* [4:0] Default:0x1F RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DB_TOCSCH_EN_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SQ_PRI_MAP_CFG_ADDR (0x404150) +#define NBL_DSCH_RDMA_SQ_PRI_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_SQ_PRI_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_SQ_PRI_MAP_CFG_DWLEN (1) +union dsch_rdma_sq_pri_map_cfg_u { + struct dsch_rdma_sq_pri_map_cfg { + u32 pri0:3; /* [2:0] Default:0x0 RW */ + u32 pri1:3; /* [5:3] Default:0x1 RW */ + u32 pri2:3; /* [8:6] Default:0x2 RW */ + u32 pri3:3; /* [11:9] Default:0x3 RW */ + u32 pri4:3; /* [14:12] Default:0x4 RW */ + u32 pri5:3; /* [17:15] Default:0x5 RW */ + u32 pri6:3; /* [20:18] Default:0x6 RW */ + u32 pri7:3; /* [23:21] Default:0x7 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SQ_PRI_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RAQ_PRI_MAP_CFG_ADDR (0x404154) +#define NBL_DSCH_RDMA_RAQ_PRI_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_RAQ_PRI_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_RAQ_PRI_MAP_CFG_DWLEN (1) +union dsch_rdma_raq_pri_map_cfg_u { + struct dsch_rdma_raq_pri_map_cfg { + u32 pri0:3; /* [2:0] Default:0x0 RW */ + u32 pri1:3; /* [5:3] Default:0x1 RW */ + u32 pri2:3; /* [8:6] Default:0x2 RW */ + u32 pri3:3; /* [11:9] Default:0x3 RW */ + u32 pri4:3; /* [14:12] Default:0x4 RW */ + u32 pri5:3; /* [17:15] Default:0x5 RW */ + u32 pri6:3; /* [20:18] Default:0x6 RW */ + u32 pri7:3; /* [23:21] Default:0x7 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RAQ_PRI_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_PRI03_MAP_CFG_ADDR (0x404158) +#define NBL_DSCH_RDMA_PRI03_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_PRI03_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_PRI03_MAP_CFG_DWLEN (1) +union dsch_rdma_pri03_map_cfg_u { + struct dsch_rdma_pri03_map_cfg { + u32 pri0_map:8; /* [7:0] Default:0x1 RW */ + u32 pri1_map:8; /* [15:8] Default:0x2 RW */ + u32 pri2_map:8; /* [23:16] Default:0x4 RW */ + u32 pri3_map:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_PRI03_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_PRI47_MAP_CFG_ADDR (0x40415c) +#define NBL_DSCH_RDMA_PRI47_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_PRI47_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_PRI47_MAP_CFG_DWLEN (1) +union dsch_rdma_pri47_map_cfg_u { + struct dsch_rdma_pri47_map_cfg { + u32 pri4_map:8; /* [7:0] Default:0x10 RW */ + u32 pri5_map:8; /* [15:8] Default:0x20 RW */ + u32 pri6_map:8; /* [23:16] Default:0x40 RW */ + u32 pri7_map:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_PRI47_MAP_CFG_DWLEN]; +} __packed; + +#define 
NBL_DSCH_RDMA_PRI_IMAP_CFG_ADDR (0x404160) +#define NBL_DSCH_RDMA_PRI_IMAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_PRI_IMAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_PRI_IMAP_CFG_DWLEN (1) +union dsch_rdma_pri_imap_cfg_u { + struct dsch_rdma_pri_imap_cfg { + u32 pri0:3; /* [2:0] Default:0x0 RW */ + u32 pri1:3; /* [5:3] Default:0x1 RW */ + u32 pri2:3; /* [8:6] Default:0x2 RW */ + u32 pri3:3; /* [11:9] Default:0x3 RW */ + u32 pri4:3; /* [14:12] Default:0x4 RW */ + u32 pri5:3; /* [17:15] Default:0x5 RW */ + u32 pri6:3; /* [20:18] Default:0x6 RW */ + u32 pri7:3; /* [23:21] Default:0x7 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_PRI_IMAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_SELF_NOTIFY_STATE_ADDR (0x404180) +#define NBL_DSCH_VN_SELF_NOTIFY_STATE_DEPTH (1) +#define NBL_DSCH_VN_SELF_NOTIFY_STATE_WIDTH (32) +#define NBL_DSCH_VN_SELF_NOTIFY_STATE_DWLEN (1) +union dsch_vn_self_notify_state_u { + struct dsch_vn_self_notify_state { + u32 avail:1; /* [0] Default:0x1 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_SELF_NOTIFY_STATE_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_SELF_NOTIFY_CFG_ADDR (0x404184) +#define NBL_DSCH_VN_SELF_NOTIFY_CFG_DEPTH (1) +#define NBL_DSCH_VN_SELF_NOTIFY_CFG_WIDTH (32) +#define NBL_DSCH_VN_SELF_NOTIFY_CFG_DWLEN (1) +union dsch_vn_self_notify_cfg_u { + struct dsch_vn_self_notify_cfg { + u32 sel:1; /* [0] Default:0x0 WO */ + u32 en:1; /* [1] Default:0x0 WO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_SELF_NOTIFY_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_IN_CSCH_TH_ADDR (0x404188) +#define NBL_DSCH_RDMA_SW_DB_IN_CSCH_TH_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_IN_CSCH_TH_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_IN_CSCH_TH_DWLEN (1) +union dsch_rdma_sw_db_in_csch_th_u { + struct dsch_rdma_sw_db_in_csch_th { + u32 th:16; /* [15:0] Default:0x80 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_IN_CSCH_TH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP0_PRI03_P2S_MAP_CFG_ADDR (0x404190) +#define NBL_DSCH_RDMA_DP0_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP0_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP0_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp0_pri03_p2s_map_cfg_u { + struct dsch_rdma_dp0_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP0_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP0_PRI47_P2S_MAP_CFG_ADDR (0x404194) +#define NBL_DSCH_RDMA_DP0_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP0_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP0_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp0_pri47_p2s_map_cfg_u { + struct dsch_rdma_dp0_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP0_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP1_PRI03_P2S_MAP_CFG_ADDR (0x404198) +#define NBL_DSCH_RDMA_DP1_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP1_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP1_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp1_pri03_p2s_map_cfg_u { + struct dsch_rdma_dp1_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] 
Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP1_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP1_PRI47_P2S_MAP_CFG_ADDR (0x40419c) +#define NBL_DSCH_RDMA_DP1_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP1_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP1_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp1_pri47_p2s_map_cfg_u { + struct dsch_rdma_dp1_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP1_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP2_PRI03_P2S_MAP_CFG_ADDR (0x4041a0) +#define NBL_DSCH_RDMA_DP2_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP2_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP2_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp2_pri03_p2s_map_cfg_u { + struct dsch_rdma_dp2_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP2_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP2_PRI47_P2S_MAP_CFG_ADDR (0x4041a4) +#define NBL_DSCH_RDMA_DP2_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP2_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP2_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp2_pri47_p2s_map_cfg_u { + struct dsch_rdma_dp2_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP2_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP3_PRI03_P2S_MAP_CFG_ADDR (0x4041a8) +#define NBL_DSCH_RDMA_DP3_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP3_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP3_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp3_pri03_p2s_map_cfg_u { + struct dsch_rdma_dp3_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP3_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DP3_PRI47_P2S_MAP_CFG_ADDR (0x4041ac) +#define NBL_DSCH_RDMA_DP3_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_RDMA_DP3_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_RDMA_DP3_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_rdma_dp3_pri47_p2s_map_cfg_u { + struct dsch_rdma_dp3_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DP3_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP0_PRI03_P2S_MAP_CFG_ADDR (0x4041b0) +#define NBL_DSCH_VN_HOST_DP0_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP0_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP0_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp0_pri03_p2s_map_cfg_u { + struct dsch_vn_host_dp0_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 
RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP0_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP0_PRI47_P2S_MAP_CFG_ADDR (0x4041b4) +#define NBL_DSCH_VN_HOST_DP0_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP0_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP0_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp0_pri47_p2s_map_cfg_u { + struct dsch_vn_host_dp0_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP0_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP1_PRI03_P2S_MAP_CFG_ADDR (0x4041b8) +#define NBL_DSCH_VN_HOST_DP1_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP1_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP1_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp1_pri03_p2s_map_cfg_u { + struct dsch_vn_host_dp1_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP1_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP1_PRI47_P2S_MAP_CFG_ADDR (0x4041bc) +#define NBL_DSCH_VN_HOST_DP1_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP1_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP1_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp1_pri47_p2s_map_cfg_u { + struct dsch_vn_host_dp1_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP1_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP2_PRI03_P2S_MAP_CFG_ADDR (0x4041c0) +#define NBL_DSCH_VN_HOST_DP2_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP2_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP2_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp2_pri03_p2s_map_cfg_u { + struct dsch_vn_host_dp2_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP2_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP2_PRI47_P2S_MAP_CFG_ADDR (0x4041c4) +#define NBL_DSCH_VN_HOST_DP2_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP2_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP2_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp2_pri47_p2s_map_cfg_u { + struct dsch_vn_host_dp2_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP2_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP3_PRI03_P2S_MAP_CFG_ADDR (0x4041c8) +#define NBL_DSCH_VN_HOST_DP3_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP3_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP3_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp3_pri03_p2s_map_cfg_u { + struct dsch_vn_host_dp3_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] 
Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP3_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_HOST_DP3_PRI47_P2S_MAP_CFG_ADDR (0x4041cc) +#define NBL_DSCH_VN_HOST_DP3_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_HOST_DP3_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_HOST_DP3_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_host_dp3_pri47_p2s_map_cfg_u { + struct dsch_vn_host_dp3_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_HOST_DP3_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP0_PRI03_P2S_MAP_CFG_ADDR (0x4041d0) +#define NBL_DSCH_VN_ECPU_DP0_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP0_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP0_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp0_pri03_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp0_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP0_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP0_PRI47_P2S_MAP_CFG_ADDR (0x4041d4) +#define NBL_DSCH_VN_ECPU_DP0_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP0_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP0_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp0_pri47_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp0_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP0_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP1_PRI03_P2S_MAP_CFG_ADDR (0x4041d8) +#define NBL_DSCH_VN_ECPU_DP1_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP1_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP1_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp1_pri03_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp1_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP1_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP1_PRI47_P2S_MAP_CFG_ADDR (0x4041dc) +#define NBL_DSCH_VN_ECPU_DP1_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP1_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP1_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp1_pri47_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp1_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP1_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP2_PRI03_P2S_MAP_CFG_ADDR (0x4041e0) +#define NBL_DSCH_VN_ECPU_DP2_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP2_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP2_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp2_pri03_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp2_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; 
/* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP2_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP2_PRI47_P2S_MAP_CFG_ADDR (0x4041e4) +#define NBL_DSCH_VN_ECPU_DP2_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP2_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP2_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp2_pri47_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp2_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP2_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP3_PRI03_P2S_MAP_CFG_ADDR (0x4041e8) +#define NBL_DSCH_VN_ECPU_DP3_PRI03_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP3_PRI03_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP3_PRI03_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp3_pri03_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp3_pri03_p2s_map_cfg { + u32 pri0:8; /* [7:0] Default:0x1 RW */ + u32 pri1:8; /* [15:8] Default:0x2 RW */ + u32 pri2:8; /* [23:16] Default:0x4 RW */ + u32 pri3:8; /* [31:24] Default:0x8 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP3_PRI03_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ECPU_DP3_PRI47_P2S_MAP_CFG_ADDR (0x4041ec) +#define NBL_DSCH_VN_ECPU_DP3_PRI47_P2S_MAP_CFG_DEPTH (1) +#define NBL_DSCH_VN_ECPU_DP3_PRI47_P2S_MAP_CFG_WIDTH (32) +#define NBL_DSCH_VN_ECPU_DP3_PRI47_P2S_MAP_CFG_DWLEN (1) +union dsch_vn_ecpu_dp3_pri47_p2s_map_cfg_u { + struct dsch_vn_ecpu_dp3_pri47_p2s_map_cfg { + u32 pri4:8; /* [7:0] Default:0x10 RW */ + u32 pri5:8; /* [15:8] Default:0x20 RW */ + u32 pri6:8; /* [23:16] Default:0x40 RW */ + u32 pri7:8; /* [31:24] Default:0x80 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_ECPU_DP3_PRI47_P2S_MAP_CFG_DWLEN]; +} __packed; + +#define NBL_DSCH_DPT_PFC_MAP_VNE_ADDR (0x4041f0) +#define NBL_DSCH_DPT_PFC_MAP_VNE_DEPTH (1) +#define NBL_DSCH_DPT_PFC_MAP_VNE_WIDTH (32) +#define NBL_DSCH_DPT_PFC_MAP_VNE_DWLEN (1) +union dsch_dpt_pfc_map_vne_u { + struct dsch_dpt_pfc_map_vne { + u32 dpt0:4; /* [3:0] Default:0x1 RW */ + u32 dpt1:4; /* [7:4] Default:0x2 RW */ + u32 dpt2:4; /* [11:8] Default:0x4 RW */ + u32 dpt3:4; /* [15:12] Default:0x8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DPT_PFC_MAP_VNE_DWLEN]; +} __packed; + +#define NBL_DSCH_DPT_PFC_MAP_RDMA_ADDR (0x4041f4) +#define NBL_DSCH_DPT_PFC_MAP_RDMA_DEPTH (1) +#define NBL_DSCH_DPT_PFC_MAP_RDMA_WIDTH (32) +#define NBL_DSCH_DPT_PFC_MAP_RDMA_DWLEN (1) +union dsch_dpt_pfc_map_rdma_u { + struct dsch_dpt_pfc_map_rdma { + u32 dpt0:4; /* [3:0] Default:0x1 RW */ + u32 dpt1:4; /* [7:4] Default:0x2 RW */ + u32 dpt2:4; /* [11:8] Default:0x4 RW */ + u32 dpt3:4; /* [15:12] Default:0x8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DPT_PFC_MAP_RDMA_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_LNET_EN_ADDR (0x404300) +#define NBL_DSCH_RDMA_LNET_EN_DEPTH (1) +#define NBL_DSCH_RDMA_LNET_EN_WIDTH (32) +#define NBL_DSCH_RDMA_LNET_EN_DWLEN (1) +union dsch_rdma_lnet_en_u { + struct dsch_rdma_lnet_en { + u32 en:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_LNET_EN_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_HNET_EN_ADDR (0x404304) +#define NBL_DSCH_RDMA_HNET_EN_DEPTH (1) +#define NBL_DSCH_RDMA_HNET_EN_WIDTH (32) +#define NBL_DSCH_RDMA_HNET_EN_DWLEN (1) +union 
dsch_rdma_hnet_en_u { + struct dsch_rdma_hnet_en { + u32 en:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_HNET_EN_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_PAGE_OCU_CLR_ADDR (0x404310) +#define NBL_DSCH_RDMA_PAGE_OCU_CLR_DEPTH (1) +#define NBL_DSCH_RDMA_PAGE_OCU_CLR_WIDTH (32) +#define NBL_DSCH_RDMA_PAGE_OCU_CLR_DWLEN (1) +union dsch_rdma_page_ocu_clr_u { + struct dsch_rdma_page_ocu_clr { + u32 clr:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_PAGE_OCU_CLR_DWLEN]; +} __packed; + +#define NBL_DSCH_PSHA_EN_ADDR (0x404314) +#define NBL_DSCH_PSHA_EN_DEPTH (1) +#define NBL_DSCH_PSHA_EN_WIDTH (32) +#define NBL_DSCH_PSHA_EN_DWLEN (1) +union dsch_psha_en_u { + struct dsch_psha_en { + u32 en:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_PSHA_EN_DWLEN]; +} __packed; + +#define NBL_DSCH_DPT2BG_MAP_ADDR (0x404320) +#define NBL_DSCH_DPT2BG_MAP_DEPTH (1) +#define NBL_DSCH_DPT2BG_MAP_WIDTH (32) +#define NBL_DSCH_DPT2BG_MAP_DWLEN (1) +union dsch_dpt2bg_map_u { + struct dsch_dpt2bg_map { + u32 map:8; /* [7:0] Default:0xE4 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DPT2BG_MAP_DWLEN]; +} __packed; + +#define NBL_DSCH_DPT2BG_IMAP_ADDR (0x404324) +#define NBL_DSCH_DPT2BG_IMAP_DEPTH (1) +#define NBL_DSCH_DPT2BG_IMAP_WIDTH (32) +#define NBL_DSCH_DPT2BG_IMAP_DWLEN (1) +union dsch_dpt2bg_imap_u { + struct dsch_dpt2bg_imap { + u32 imap:16; /* [15:0] Default:0x8421 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DPT2BG_IMAP_DWLEN]; +} __packed; + +#define NBL_DSCH_DPT2SID_MAP_ADDR (0x404328) +#define NBL_DSCH_DPT2SID_MAP_DEPTH (1) +#define NBL_DSCH_DPT2SID_MAP_WIDTH (32) +#define NBL_DSCH_DPT2SID_MAP_DWLEN (1) +union dsch_dpt2sid_map_u { + struct dsch_dpt2sid_map { + u32 map:8; /* [7:0] Default:0xE4 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DPT2SID_MAP_DWLEN]; +} __packed; + +#define NBL_DSCH_DPT2SID_IMAP_ADDR (0x40432c) +#define NBL_DSCH_DPT2SID_IMAP_DEPTH (1) +#define NBL_DSCH_DPT2SID_IMAP_WIDTH (32) +#define NBL_DSCH_DPT2SID_IMAP_DWLEN (1) +union dsch_dpt2sid_imap_u { + struct dsch_dpt2sid_imap { + u32 imap:16; /* [15:0] Default:0x8421 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DPT2SID_IMAP_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_SPT_ERR_CHK_ADDR (0x404330) +#define NBL_DSCH_VN_SPT_ERR_CHK_DEPTH (1) +#define NBL_DSCH_VN_SPT_ERR_CHK_WIDTH (32) +#define NBL_DSCH_VN_SPT_ERR_CHK_DWLEN (1) +union dsch_vn_spt_err_chk_u { + struct dsch_vn_spt_err_chk { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_SPT_ERR_CHK_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_Q2TC_CFG_TBL_ADDR (0x414000) +#define NBL_DSCH_VN_Q2TC_CFG_TBL_DEPTH (2048) +#define NBL_DSCH_VN_Q2TC_CFG_TBL_WIDTH (32) +#define NBL_DSCH_VN_Q2TC_CFG_TBL_DWLEN (1) +union dsch_vn_q2tc_cfg_tbl_u { + struct dsch_vn_q2tc_cfg_tbl { + u32 tcid:13; /* [12:0] Default:0x0 RW */ + u32 reserve:18; /* [30:13] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_Q2TC_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_Q2TC_CFG_TBL_REG(r) (NBL_DSCH_VN_Q2TC_CFG_TBL_ADDR + \ + (NBL_DSCH_VN_Q2TC_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_N2G_CFG_TBL_ADDR (0x464000) +#define NBL_DSCH_VN_N2G_CFG_TBL_DEPTH (520) +#define 
NBL_DSCH_VN_N2G_CFG_TBL_WIDTH (32) +#define NBL_DSCH_VN_N2G_CFG_TBL_DWLEN (1) +union dsch_vn_n2g_cfg_tbl_u { + struct dsch_vn_n2g_cfg_tbl { + u32 grpid:8; /* [7:0] Default:0x0 RW */ + u32 reserve:23; /* [30:8] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_N2G_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_N2G_CFG_TBL_REG(r) (NBL_DSCH_VN_N2G_CFG_TBL_ADDR + \ + (NBL_DSCH_VN_N2G_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_G2P_CFG_TBL_ADDR (0x468000) +#define NBL_DSCH_VN_G2P_CFG_TBL_DEPTH (256) +#define NBL_DSCH_VN_G2P_CFG_TBL_WIDTH (32) +#define NBL_DSCH_VN_G2P_CFG_TBL_DWLEN (1) +union dsch_vn_g2p_cfg_tbl_u { + struct dsch_vn_g2p_cfg_tbl { + u32 port:3; /* [2:0] Default:0x0 RW */ + u32 reserve:28; /* [30:3] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_G2P_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_G2P_CFG_TBL_REG(r) (NBL_DSCH_VN_G2P_CFG_TBL_ADDR + \ + (NBL_DSCH_VN_G2P_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_TC_WGT_CFG_TBL_ADDR (0x46c000) +#define NBL_DSCH_VN_TC_WGT_CFG_TBL_DEPTH (520) +#define NBL_DSCH_VN_TC_WGT_CFG_TBL_WIDTH (64) +#define NBL_DSCH_VN_TC_WGT_CFG_TBL_DWLEN (2) +union dsch_vn_tc_wgt_cfg_tbl_u { + struct dsch_vn_tc_wgt_cfg_tbl { + u32 tc0_wgt:8; /* [7:0] Default:0x0 RW */ + u32 tc1_wgt:8; /* [15:8] Default:0x0 RW */ + u32 tc2_wgt:8; /* [23:16] Default:0x0 RW */ + u32 tc3_wgt:8; /* [31:24] Default:0x0 RW */ + u32 tc4_wgt:8; /* [39:32] Default:0x0 RW */ + u32 tc5_wgt:8; /* [47:40] Default:0x0 RW */ + u32 tc6_wgt:8; /* [55:48] Default:0x0 RW */ + u32 tc7_wgt:8; /* [63:56] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_VN_TC_WGT_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_TC_WGT_CFG_TBL_REG(r) (NBL_DSCH_VN_TC_WGT_CFG_TBL_ADDR + \ + (NBL_DSCH_VN_TC_WGT_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_TC_SPWRR_CFG_TBL_ADDR (0x470000) +#define NBL_DSCH_VN_TC_SPWRR_CFG_TBL_DEPTH (520) +#define NBL_DSCH_VN_TC_SPWRR_CFG_TBL_WIDTH (32) +#define NBL_DSCH_VN_TC_SPWRR_CFG_TBL_DWLEN (1) +union dsch_vn_tc_spwrr_cfg_tbl_u { + struct dsch_vn_tc_spwrr_cfg_tbl { + u32 tc_spwrr:8; /* [7:0] Default:0x0 RW */ + u32 reserve:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_TC_SPWRR_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_TC_SPWRR_CFG_TBL_REG(r) (NBL_DSCH_VN_TC_SPWRR_CFG_TBL_ADDR + \ + (NBL_DSCH_VN_TC_SPWRR_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_ADDR (0x474000) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_DEPTH (520) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_WIDTH (32) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN (1) +union dsch_vn_sha2net_map_tbl_u { + struct dsch_vn_sha2net_map_tbl { + u32 vld:1; /* [0] Default:0x0 RW */ + u32 reserve:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_REG(r) (NBL_DSCH_VN_SHA2NET_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR (0x478000) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_DEPTH (520) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_WIDTH (32) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN (1) +union dsch_vn_net2sha_map_tbl_u { + struct dsch_vn_net2sha_map_tbl { + u32 vld:1; /* [0] Default:0x0 RW */ + u32 reserve:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN 
* 4) * (r)) + +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_ADDR (0x47c000) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_DEPTH (256) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_WIDTH (32) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_DWLEN (1) +union dsch_vn_sha2grp_map_tbl_u { + struct dsch_vn_sha2grp_map_tbl { + u32 vld:1; /* [0] Default:0x0 RW */ + u32 reserve:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_SHA2GRP_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_REG(r) (NBL_DSCH_VN_SHA2GRP_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_SHA2GRP_MAP_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_ADDR (0x480000) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_DEPTH (256) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_WIDTH (32) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_DWLEN (1) +union dsch_vn_grp2sha_map_tbl_u { + struct dsch_vn_grp2sha_map_tbl { + u32 vld:1; /* [0] Default:0x0 RW */ + u32 reserve:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_GRP2SHA_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_GRP2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_GRP2SHA_MAP_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_N2G_CFG_TBL_ADDR (0x484000) +#define NBL_DSCH_RDMA_N2G_CFG_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_N2G_CFG_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_N2G_CFG_TBL_DWLEN (1) +union dsch_rdma_n2g_cfg_tbl_u { + struct dsch_rdma_n2g_cfg_tbl { + u32 grpid:6; /* [5:0] Default:0x0 RW */ + u32 reserve:25; /* [30:6] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_N2G_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_N2G_CFG_TBL_REG(r) (NBL_DSCH_RDMA_N2G_CFG_TBL_ADDR + \ + (NBL_DSCH_RDMA_N2G_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_G2P_CFG_TBL_ADDR (0x488000) +#define NBL_DSCH_RDMA_G2P_CFG_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_G2P_CFG_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_G2P_CFG_TBL_DWLEN (1) +union dsch_rdma_g2p_cfg_tbl_u { + struct dsch_rdma_g2p_cfg_tbl { + u32 port:3; /* [2:0] Default:0x0 RW */ + u32 reserve:28; /* [30:3] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_G2P_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_G2P_CFG_TBL_REG(r) (NBL_DSCH_RDMA_G2P_CFG_TBL_ADDR + \ + (NBL_DSCH_RDMA_G2P_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_TC_WGT_CFG_TBL_ADDR (0x48c000) +#define NBL_DSCH_RDMA_TC_WGT_CFG_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_TC_WGT_CFG_TBL_WIDTH (64) +#define NBL_DSCH_RDMA_TC_WGT_CFG_TBL_DWLEN (2) +union dsch_rdma_tc_wgt_cfg_tbl_u { + struct dsch_rdma_tc_wgt_cfg_tbl { + u32 tc0_wgt:8; /* [7:0] Default:0x0 RW */ + u32 tc1_wgt:8; /* [15:8] Default:0x0 RW */ + u32 tc2_wgt:8; /* [23:16] Default:0x0 RW */ + u32 tc3_wgt:8; /* [31:24] Default:0x0 RW */ + u32 tc4_wgt:8; /* [39:32] Default:0x0 RW */ + u32 tc5_wgt:8; /* [47:40] Default:0x0 RW */ + u32 tc6_wgt:8; /* [55:48] Default:0x0 RW */ + u32 tc7_wgt:8; /* [63:56] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TC_WGT_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_TC_WGT_CFG_TBL_REG(r) (NBL_DSCH_RDMA_TC_WGT_CFG_TBL_ADDR + \ + (NBL_DSCH_RDMA_TC_WGT_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_ADDR (0x490000) +#define NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_DWLEN (1) +union dsch_rdma_tc_spwrr_cfg_tbl_u { + struct dsch_rdma_tc_spwrr_cfg_tbl { + u32 tc_spwrr:8; /* [7:0] Default:0x0 RW */ + u32 reserve:24; /* [31:8] Default:0x0 RO */ + } __packed 
info; + u32 data[NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_REG(r) (NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_ADDR + \ + (NBL_DSCH_RDMA_TC_SPWRR_CFG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_ADDR (0x494000) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DWLEN (1) +union dsch_rdma_net2sha_map_tbl_u { + struct dsch_rdma_net2sha_map_tbl { + u32 net_shaping_id:10; /* [9:0] Default:0x0 RW */ + u32 reserve:21; /* [30:10] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_RDMA_NET2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_NET2SHA_MAP_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_ADDR (0x498000) +#define NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_DWLEN (1) +union dsch_rdma_grp2sha_map_tbl_u { + struct dsch_rdma_grp2sha_map_tbl { + u32 grp_shaping_id:8; /* [7:0] Default:0x0 RW */ + u32 reserve:23; /* [30:8] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_REG(r) (NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_GRP2SHA_MAP_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_ADDR (0x49c000) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DEPTH (520) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DWLEN (1) +union dsch_rdma_sha2net_map_tbl_u { + struct dsch_rdma_sha2net_map_tbl { + u32 rdma_vf_id:6; /* [5:0] Default:0x0 RW */ + u32 reserve:25; /* [30:6] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_SHA2NET_MAP_TBL_REG(r) (NBL_DSCH_RDMA_SHA2NET_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_SHA2NET_MAP_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_ADDR (0x4a0000) +#define NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_DEPTH (256) +#define NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_DWLEN (1) +union dsch_rdma_sha2grp_map_tbl_u { + struct dsch_rdma_sha2grp_map_tbl { + u32 rdma_grp_id:6; /* [5:0] Default:0x0 RW */ + u32 reserve:25; /* [30:6] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_REG(r) (NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_ADDR + \ + (NBL_DSCH_RDMA_SHA2GRP_MAP_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_INIT_DONE_ADDR (0x404800) +#define NBL_DSCH_INIT_DONE_DEPTH (1) +#define NBL_DSCH_INIT_DONE_WIDTH (32) +#define NBL_DSCH_INIT_DONE_DWLEN (1) +union dsch_init_done_u { + struct dsch_init_done { + u32 init_done:1; /* [0] Default:0x0 RO */ + u32 host_q_clear_state:1; /* [1] Default:0x1 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_FREE_PTR_NUM_ADDR (0x404804) +#define NBL_DSCH_RDMA_FREE_PTR_NUM_DEPTH (1) +#define NBL_DSCH_RDMA_FREE_PTR_NUM_WIDTH (32) +#define NBL_DSCH_RDMA_FREE_PTR_NUM_DWLEN (1) +union dsch_rdma_free_ptr_num_u { + struct dsch_rdma_free_ptr_num { + u32 num:12; /* [11:0] Default:0x800 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DSCH_RDMA_FREE_PTR_NUM_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_FREE_PTR_HDR_ADDR (0x404808) +#define NBL_DSCH_RDMA_FREE_PTR_HDR_DEPTH (1) +#define NBL_DSCH_RDMA_FREE_PTR_HDR_WIDTH (32) +#define NBL_DSCH_RDMA_FREE_PTR_HDR_DWLEN (1) +union dsch_rdma_free_ptr_hdr_u { + struct dsch_rdma_free_ptr_hdr { + u32 hdr:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_FREE_PTR_HDR_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_FREE_PTR_TAIL_ADDR (0x40480c) +#define NBL_DSCH_RDMA_FREE_PTR_TAIL_DEPTH (1) +#define NBL_DSCH_RDMA_FREE_PTR_TAIL_WIDTH (32) +#define NBL_DSCH_RDMA_FREE_PTR_TAIL_DWLEN (1) +union dsch_rdma_free_ptr_tail_u { + struct dsch_rdma_free_ptr_tail { + u32 tail:11; /* [10:0] Default:0x7FF RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_FREE_PTR_TAIL_DWLEN]; +} __packed; + +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_ADDR (0x404810) +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_DEPTH (1) +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_WIDTH (32) +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_DWLEN (1) +union dsch_dstore_dsch_link_fc_u { + struct dsch_dstore_dsch_link_fc { + u32 info:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DSTORE_DSCH_LINK_FC_DWLEN]; +} __packed; + +#define NBL_DSCH_DSTORE_DSCH_PFC_ADDR (0x404814) +#define NBL_DSCH_DSTORE_DSCH_PFC_DEPTH (1) +#define NBL_DSCH_DSTORE_DSCH_PFC_WIDTH (32) +#define NBL_DSCH_DSTORE_DSCH_PFC_DWLEN (1) +union dsch_dstore_dsch_pfc_u { + struct dsch_dstore_dsch_pfc { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DSTORE_DSCH_PFC_DWLEN]; +} __packed; + +#define NBL_DSCH_DQM_DSCH_LINK_FC_ADDR (0x404818) +#define NBL_DSCH_DQM_DSCH_LINK_FC_DEPTH (1) +#define NBL_DSCH_DQM_DSCH_LINK_FC_WIDTH (32) +#define NBL_DSCH_DQM_DSCH_LINK_FC_DWLEN (1) +union dsch_dqm_dsch_link_fc_u { + struct dsch_dqm_dsch_link_fc { + u32 info:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DQM_DSCH_LINK_FC_DWLEN]; +} __packed; + +#define NBL_DSCH_DQM_DSCH_PFC_ADDR (0x40481c) +#define NBL_DSCH_DQM_DSCH_PFC_DEPTH (1) +#define NBL_DSCH_DQM_DSCH_PFC_WIDTH (32) +#define NBL_DSCH_DQM_DSCH_PFC_DWLEN (1) +union dsch_dqm_dsch_pfc_u { + struct dsch_dqm_dsch_pfc { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DQM_DSCH_PFC_DWLEN]; +} __packed; + +#define NBL_DSCH_DBM_FREE_PTR_ADDR (0x404820) +#define NBL_DSCH_DBM_FREE_PTR_DEPTH (1) +#define NBL_DSCH_DBM_FREE_PTR_WIDTH (32) +#define NBL_DSCH_DBM_FREE_PTR_DWLEN (1) +union dsch_dbm_free_ptr_u { + struct dsch_dbm_free_ptr { + u32 num:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DBM_FREE_PTR_DWLEN]; +} __packed; + +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_HISTORY_ADDR (0x4048b0) +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_HISTORY_DEPTH (1) +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_HISTORY_WIDTH (32) +#define NBL_DSCH_DSTORE_DSCH_LINK_FC_HISTORY_DWLEN (1) +union dsch_dstore_dsch_link_fc_history_u { + struct dsch_dstore_dsch_link_fc_history { + u32 vn_h:1; /* [0] Default:0x0 RC */ + u32 vn_e:1; /* [1] Default:0x0 RC */ + u32 rdma_h:1; /* [2] Default:0x0 RC */ + u32 rdma_e:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DSTORE_DSCH_LINK_FC_HISTORY_DWLEN]; +} __packed; + +#define NBL_DSCH_DSTORE_DSCH_PFC_HISTORY_ADDR (0x4048b4) +#define 
NBL_DSCH_DSTORE_DSCH_PFC_HISTORY_DEPTH (1) +#define NBL_DSCH_DSTORE_DSCH_PFC_HISTORY_WIDTH (32) +#define NBL_DSCH_DSTORE_DSCH_PFC_HISTORY_DWLEN (1) +union dsch_dstore_dsch_pfc_history_u { + struct dsch_dstore_dsch_pfc_history { + u32 vn_h_pri0:1; /* [0] Default:0x0 RC */ + u32 vn_h_pri1:1; /* [1] Default:0x0 RC */ + u32 vn_h_pri2:1; /* [2] Default:0x0 RC */ + u32 vn_h_pri3:1; /* [3] Default:0x0 RC */ + u32 vn_h_pri4:1; /* [4] Default:0x0 RC */ + u32 vn_h_pri5:1; /* [5] Default:0x0 RC */ + u32 vn_h_pri6:1; /* [6] Default:0x0 RC */ + u32 vn_h_pri7:1; /* [7] Default:0x0 RC */ + u32 vn_e_pri0:1; /* [8] Default:0x0 RC */ + u32 vn_e_pri1:1; /* [9] Default:0x0 RC */ + u32 vn_e_pri2:1; /* [10] Default:0x0 RC */ + u32 vn_e_pri3:1; /* [11] Default:0x0 RC */ + u32 vn_e_pri4:1; /* [12] Default:0x0 RC */ + u32 vn_e_pri5:1; /* [13] Default:0x0 RC */ + u32 vn_e_pri6:1; /* [14] Default:0x0 RC */ + u32 vn_e_pri7:1; /* [15] Default:0x0 RC */ + u32 rdma_h_pri0:1; /* [16] Default:0x0 RC */ + u32 rdma_h_pri1:1; /* [17] Default:0x0 RC */ + u32 rdma_h_pri2:1; /* [18] Default:0x0 RC */ + u32 rdma_h_pri3:1; /* [19] Default:0x0 RC */ + u32 rdma_h_pri4:1; /* [20] Default:0x0 RC */ + u32 rdma_h_pri5:1; /* [21] Default:0x0 RC */ + u32 rdma_h_pri6:1; /* [22] Default:0x0 RC */ + u32 rdma_h_pri7:1; /* [23] Default:0x0 RC */ + u32 rdma_e_pri0:1; /* [24] Default:0x0 RC */ + u32 rdma_e_pri1:1; /* [25] Default:0x0 RC */ + u32 rdma_e_pri2:1; /* [26] Default:0x0 RC */ + u32 rdma_e_pri3:1; /* [27] Default:0x0 RC */ + u32 rdma_e_pri4:1; /* [28] Default:0x0 RC */ + u32 rdma_e_pri5:1; /* [29] Default:0x0 RC */ + u32 rdma_e_pri6:1; /* [30] Default:0x0 RC */ + u32 rdma_e_pri7:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DSCH_DSTORE_DSCH_PFC_HISTORY_DWLEN]; +} __packed; + +#define NBL_DSCH_DQM_DSCH_LINK_FC_HISTORY_ADDR (0x4048b8) +#define NBL_DSCH_DQM_DSCH_LINK_FC_HISTORY_DEPTH (1) +#define NBL_DSCH_DQM_DSCH_LINK_FC_HISTORY_WIDTH (32) +#define NBL_DSCH_DQM_DSCH_LINK_FC_HISTORY_DWLEN (1) +union dsch_dqm_dsch_link_fc_history_u { + struct dsch_dqm_dsch_link_fc_history { + u32 eth0:1; /* [0] Default:0x0 RC */ + u32 eth1:1; /* [1] Default:0x0 RC */ + u32 eth2:1; /* [2] Default:0x0 RC */ + u32 eth3:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_DQM_DSCH_LINK_FC_HISTORY_DWLEN]; +} __packed; + +#define NBL_DSCH_DQM_DSCH_PFC_HISTORY_ADDR (0x4048bc) +#define NBL_DSCH_DQM_DSCH_PFC_HISTORY_DEPTH (1) +#define NBL_DSCH_DQM_DSCH_PFC_HISTORY_WIDTH (32) +#define NBL_DSCH_DQM_DSCH_PFC_HISTORY_DWLEN (1) +union dsch_dqm_dsch_pfc_history_u { + struct dsch_dqm_dsch_pfc_history { + u32 eth0_pri0:1; /* [0] Default:0x0 RC */ + u32 eth0_pri1:1; /* [1] Default:0x0 RC */ + u32 eth0_pri2:1; /* [2] Default:0x0 RC */ + u32 eth0_pri3:1; /* [3] Default:0x0 RC */ + u32 eth0_pri4:1; /* [4] Default:0x0 RC */ + u32 eth0_pri5:1; /* [5] Default:0x0 RC */ + u32 eth0_pri6:1; /* [6] Default:0x0 RC */ + u32 eth0_pri7:1; /* [7] Default:0x0 RC */ + u32 eth1_pri0:1; /* [8] Default:0x0 RC */ + u32 eth1_pri1:1; /* [9] Default:0x0 RC */ + u32 eth1_pri2:1; /* [10] Default:0x0 RC */ + u32 eth1_pri3:1; /* [11] Default:0x0 RC */ + u32 eth1_pri4:1; /* [12] Default:0x0 RC */ + u32 eth1_pri5:1; /* [13] Default:0x0 RC */ + u32 eth1_pri6:1; /* [14] Default:0x0 RC */ + u32 eth1_pri7:1; /* [15] Default:0x0 RC */ + u32 eth2_pri0:1; /* [16] Default:0x0 RC */ + u32 eth2_pri1:1; /* [17] Default:0x0 RC */ + u32 eth2_pri2:1; /* [18] Default:0x0 RC */ + u32 eth2_pri3:1; /* [19] Default:0x0 RC */ + u32 eth2_pri4:1; /* 
[20] Default:0x0 RC */ + u32 eth2_pri5:1; /* [21] Default:0x0 RC */ + u32 eth2_pri6:1; /* [22] Default:0x0 RC */ + u32 eth2_pri7:1; /* [23] Default:0x0 RC */ + u32 eth3_pri0:1; /* [24] Default:0x0 RC */ + u32 eth3_pri1:1; /* [25] Default:0x0 RC */ + u32 eth3_pri2:1; /* [26] Default:0x0 RC */ + u32 eth3_pri3:1; /* [27] Default:0x0 RC */ + u32 eth3_pri4:1; /* [28] Default:0x0 RC */ + u32 eth3_pri5:1; /* [29] Default:0x0 RC */ + u32 eth3_pri6:1; /* [30] Default:0x0 RC */ + u32 eth3_pri7:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DSCH_DQM_DSCH_PFC_HISTORY_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_PAGE_OCU_MAX_ADDR (0x4048d0) +#define NBL_DSCH_RDMA_PAGE_OCU_MAX_DEPTH (1) +#define NBL_DSCH_RDMA_PAGE_OCU_MAX_WIDTH (32) +#define NBL_DSCH_RDMA_PAGE_OCU_MAX_DWLEN (1) +union dsch_rdma_page_ocu_max_u { + struct dsch_rdma_page_ocu_max { + u32 val:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_PAGE_OCU_MAX_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_SW_H_DB_VLD_ADDR (0x404900) +#define NBL_DSCH_VN_SW_H_DB_VLD_DEPTH (1) +#define NBL_DSCH_VN_SW_H_DB_VLD_WIDTH (32) +#define NBL_DSCH_VN_SW_H_DB_VLD_DWLEN (1) +union dsch_vn_sw_h_db_vld_u { + struct dsch_vn_sw_h_db_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_SW_H_DB_VLD_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_SW_E_DB_VLD_ADDR (0x404904) +#define NBL_DSCH_VN_SW_E_DB_VLD_DEPTH (1) +#define NBL_DSCH_VN_SW_E_DB_VLD_WIDTH (32) +#define NBL_DSCH_VN_SW_E_DB_VLD_DWLEN (1) +union dsch_vn_sw_e_db_vld_u { + struct dsch_vn_sw_e_db_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_SW_E_DB_VLD_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_ENQ_ADDR (0x404910) +#define NBL_DSCH_VN_ENQ_DEPTH (1) +#define NBL_DSCH_VN_ENQ_WIDTH (32) +#define NBL_DSCH_VN_ENQ_DWLEN (1) +union dsch_vn_enq_u { + struct dsch_vn_enq { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_ENQ_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_H_ENQ_ADDR (0x404914) +#define NBL_DSCH_VN_H_ENQ_DEPTH (1) +#define NBL_DSCH_VN_H_ENQ_WIDTH (32) +#define NBL_DSCH_VN_H_ENQ_DWLEN (1) +union dsch_vn_h_enq_u { + struct dsch_vn_h_enq { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_H_ENQ_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_E_ENQ_ADDR (0x404918) +#define NBL_DSCH_VN_E_ENQ_DEPTH (1) +#define NBL_DSCH_VN_E_ENQ_WIDTH (32) +#define NBL_DSCH_VN_E_ENQ_DWLEN (1) +union dsch_vn_e_enq_u { + struct dsch_vn_e_enq { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_E_ENQ_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_DEQ_ADDR (0x404920) +#define NBL_DSCH_VN_DEQ_DEPTH (1) +#define NBL_DSCH_VN_DEQ_WIDTH (32) +#define NBL_DSCH_VN_DEQ_DWLEN (1) +union dsch_vn_deq_u { + struct dsch_vn_deq { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_DEQ_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_H_DEQ_ADDR (0x404924) +#define NBL_DSCH_VN_H_DEQ_DEPTH (1) +#define NBL_DSCH_VN_H_DEQ_WIDTH (32) +#define NBL_DSCH_VN_H_DEQ_DWLEN (1) +union dsch_vn_h_deq_u { + struct dsch_vn_h_deq { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_H_DEQ_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_E_DEQ_ADDR (0x404928) +#define NBL_DSCH_VN_E_DEQ_DEPTH (1) +#define NBL_DSCH_VN_E_DEQ_WIDTH (32) +#define NBL_DSCH_VN_E_DEQ_DWLEN (1) +union dsch_vn_e_deq_u { + struct dsch_vn_e_deq { + u32 cnt:32; /* [31:0] 
Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_E_DEQ_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_H_DEQ_IVLD_ADDR (0x404930) +#define NBL_DSCH_VN_H_DEQ_IVLD_DEPTH (1) +#define NBL_DSCH_VN_H_DEQ_IVLD_WIDTH (32) +#define NBL_DSCH_VN_H_DEQ_IVLD_DWLEN (1) +union dsch_vn_h_deq_ivld_u { + struct dsch_vn_h_deq_ivld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_H_DEQ_IVLD_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_E_DEQ_IVLD_ADDR (0x404934) +#define NBL_DSCH_VN_E_DEQ_IVLD_DEPTH (1) +#define NBL_DSCH_VN_E_DEQ_IVLD_WIDTH (32) +#define NBL_DSCH_VN_E_DEQ_IVLD_DWLEN (1) +union dsch_vn_e_deq_ivld_u { + struct dsch_vn_e_deq_ivld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_E_DEQ_IVLD_DWLEN]; +} __packed; + +#define NBL_DSCH_DVN_HDB_H_ADDR (0x404938) +#define NBL_DSCH_DVN_HDB_H_DEPTH (1) +#define NBL_DSCH_DVN_HDB_H_WIDTH (32) +#define NBL_DSCH_DVN_HDB_H_DWLEN (1) +union dsch_dvn_hdb_h_u { + struct dsch_dvn_hdb_h { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_DVN_HDB_H_DWLEN]; +} __packed; + +#define NBL_DSCH_DVN_HDB_E_ADDR (0x40493c) +#define NBL_DSCH_DVN_HDB_E_DEPTH (1) +#define NBL_DSCH_DVN_HDB_E_WIDTH (32) +#define NBL_DSCH_DVN_HDB_E_DWLEN (1) +union dsch_dvn_hdb_e_u { + struct dsch_dvn_hdb_e { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_DVN_HDB_E_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_NET_SHA_INTO_PIR_ADDR (0x404940) +#define NBL_DSCH_VN_NET_SHA_INTO_PIR_DEPTH (1) +#define NBL_DSCH_VN_NET_SHA_INTO_PIR_WIDTH (32) +#define NBL_DSCH_VN_NET_SHA_INTO_PIR_DWLEN (1) +union dsch_vn_net_sha_into_pir_u { + struct dsch_vn_net_sha_into_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_NET_SHA_INTO_PIR_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_NET_SHA_OUTOF_PIR_ADDR (0x404944) +#define NBL_DSCH_VN_NET_SHA_OUTOF_PIR_DEPTH (1) +#define NBL_DSCH_VN_NET_SHA_OUTOF_PIR_WIDTH (32) +#define NBL_DSCH_VN_NET_SHA_OUTOF_PIR_DWLEN (1) +union dsch_vn_net_sha_outof_pir_u { + struct dsch_vn_net_sha_outof_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_NET_SHA_OUTOF_PIR_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_GRP_SHA_INTO_PIR_ADDR (0x404948) +#define NBL_DSCH_VN_GRP_SHA_INTO_PIR_DEPTH (1) +#define NBL_DSCH_VN_GRP_SHA_INTO_PIR_WIDTH (32) +#define NBL_DSCH_VN_GRP_SHA_INTO_PIR_DWLEN (1) +union dsch_vn_grp_sha_into_pir_u { + struct dsch_vn_grp_sha_into_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_GRP_SHA_INTO_PIR_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_GRP_SHA_OUTOF_PIR_ADDR (0x40494c) +#define NBL_DSCH_VN_GRP_SHA_OUTOF_PIR_DEPTH (1) +#define NBL_DSCH_VN_GRP_SHA_OUTOF_PIR_WIDTH (32) +#define NBL_DSCH_VN_GRP_SHA_OUTOF_PIR_DWLEN (1) +union dsch_vn_grp_sha_outof_pir_u { + struct dsch_vn_grp_sha_outof_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_GRP_SHA_OUTOF_PIR_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_DSCH_SHA_NET_REQ_ADDR (0x404950) +#define NBL_DSCH_VN_DSCH_SHA_NET_REQ_DEPTH (1) +#define NBL_DSCH_VN_DSCH_SHA_NET_REQ_WIDTH (32) +#define NBL_DSCH_VN_DSCH_SHA_NET_REQ_DWLEN (1) +union dsch_vn_dsch_sha_net_req_u { + struct dsch_vn_dsch_sha_net_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_DSCH_SHA_NET_REQ_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_DSCH_SHA_GRP_REQ_ADDR (0x404954) +#define NBL_DSCH_VN_DSCH_SHA_GRP_REQ_DEPTH 
(1) +#define NBL_DSCH_VN_DSCH_SHA_GRP_REQ_WIDTH (32) +#define NBL_DSCH_VN_DSCH_SHA_GRP_REQ_DWLEN (1) +union dsch_vn_dsch_sha_grp_req_u { + struct dsch_vn_dsch_sha_grp_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_DSCH_SHA_GRP_REQ_DWLEN]; +} __packed; + +#define NBL_DSCH_SHA_DSCH_NET_RESP_ADDR (0x404958) +#define NBL_DSCH_SHA_DSCH_NET_RESP_DEPTH (1) +#define NBL_DSCH_SHA_DSCH_NET_RESP_WIDTH (32) +#define NBL_DSCH_SHA_DSCH_NET_RESP_DWLEN (1) +union dsch_sha_dsch_net_resp_u { + struct dsch_sha_dsch_net_resp { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_SHA_DSCH_NET_RESP_DWLEN]; +} __packed; + +#define NBL_DSCH_SHA_DSCH_GRP_RESP_ADDR (0x40495c) +#define NBL_DSCH_SHA_DSCH_GRP_RESP_DEPTH (1) +#define NBL_DSCH_SHA_DSCH_GRP_RESP_WIDTH (32) +#define NBL_DSCH_SHA_DSCH_GRP_RESP_DWLEN (1) +union dsch_sha_dsch_grp_resp_u { + struct dsch_sha_dsch_grp_resp { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_SHA_DSCH_GRP_RESP_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_VNQM_DB_UPD_TCIVLD_ADDR (0x404960) +#define NBL_DSCH_VN_VNQM_DB_UPD_TCIVLD_DEPTH (1) +#define NBL_DSCH_VN_VNQM_DB_UPD_TCIVLD_WIDTH (32) +#define NBL_DSCH_VN_VNQM_DB_UPD_TCIVLD_DWLEN (1) +union dsch_vn_vnqm_db_upd_tcivld_u { + struct dsch_vn_vnqm_db_upd_tcivld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_VNQM_DB_UPD_TCIVLD_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_SCH_DB_UPD_NGIVLD_ADDR (0x404964) +#define NBL_DSCH_VN_SCH_DB_UPD_NGIVLD_DEPTH (1) +#define NBL_DSCH_VN_SCH_DB_UPD_NGIVLD_WIDTH (32) +#define NBL_DSCH_VN_SCH_DB_UPD_NGIVLD_DWLEN (1) +union dsch_vn_sch_db_upd_ngivld_u { + struct dsch_vn_sch_db_upd_ngivld { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_SCH_DB_UPD_NGIVLD_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_MERGE_SW_DB_CONF_ADDR (0x404970) +#define NBL_DSCH_VN_MERGE_SW_DB_CONF_DEPTH (1) +#define NBL_DSCH_VN_MERGE_SW_DB_CONF_WIDTH (32) +#define NBL_DSCH_VN_MERGE_SW_DB_CONF_DWLEN (1) +union dsch_vn_merge_sw_db_conf_u { + struct dsch_vn_merge_sw_db_conf { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSCH_VN_MERGE_SW_DB_CONF_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_ADDR (0x404a00) +#define NBL_DSCH_RDMA_SW_DB_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_DWLEN (1) +union dsch_rdma_sw_db_u { + struct dsch_rdma_sw_db { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_H_ADDR (0x404a04) +#define NBL_DSCH_RDMA_SW_DB_H_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_H_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_H_DWLEN (1) +union dsch_rdma_sw_db_h_u { + struct dsch_rdma_sw_db_h { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_H_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_E_ADDR (0x404a08) +#define NBL_DSCH_RDMA_SW_DB_E_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_E_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_E_DWLEN (1) +union dsch_rdma_sw_db_e_u { + struct dsch_rdma_sw_db_e { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_E_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_H_LOST_ADDR (0x404a0c) +#define NBL_DSCH_RDMA_SW_DB_H_LOST_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_H_LOST_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_H_LOST_DWLEN (1) +union dsch_rdma_sw_db_h_lost_u { + struct 
dsch_rdma_sw_db_h_lost { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_H_LOST_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_E_LOST_ADDR (0x404a10) +#define NBL_DSCH_RDMA_SW_DB_E_LOST_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_E_LOST_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_E_LOST_DWLEN (1) +union dsch_rdma_sw_db_e_lost_u { + struct dsch_rdma_sw_db_e_lost { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_E_LOST_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_TOSCH_ADDR (0x404a14) +#define NBL_DSCH_RDMA_SW_DB_TOSCH_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_TOSCH_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_TOSCH_DWLEN (1) +union dsch_rdma_sw_db_tosch_u { + struct dsch_rdma_sw_db_tosch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_TOSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_TOCSCH_ADDR (0x404a18) +#define NBL_DSCH_RDMA_SW_DB_TOCSCH_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_TOCSCH_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_TOCSCH_DWLEN (1) +union dsch_rdma_sw_db_tocsch_u { + struct dsch_rdma_sw_db_tocsch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_TOCSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_TXP_HDB_ADDR (0x404a1c) +#define NBL_DSCH_RDMA_TXP_HDB_DEPTH (1) +#define NBL_DSCH_RDMA_TXP_HDB_WIDTH (32) +#define NBL_DSCH_RDMA_TXP_HDB_DWLEN (1) +union dsch_rdma_txp_hdb_u { + struct dsch_rdma_txp_hdb { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TXP_HDB_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_TXP_HDB_TOSCH_ADDR (0x404a20) +#define NBL_DSCH_RDMA_TXP_HDB_TOSCH_DEPTH (1) +#define NBL_DSCH_RDMA_TXP_HDB_TOSCH_WIDTH (32) +#define NBL_DSCH_RDMA_TXP_HDB_TOSCH_DWLEN (1) +union dsch_rdma_txp_hdb_tosch_u { + struct dsch_rdma_txp_hdb_tosch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TXP_HDB_TOSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_TXP_HDB_TOCSCH_ADDR (0x404a24) +#define NBL_DSCH_RDMA_TXP_HDB_TOCSCH_DEPTH (1) +#define NBL_DSCH_RDMA_TXP_HDB_TOCSCH_WIDTH (32) +#define NBL_DSCH_RDMA_TXP_HDB_TOCSCH_DWLEN (1) +union dsch_rdma_txp_hdb_tocsch_u { + struct dsch_rdma_txp_hdb_tocsch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TXP_HDB_TOCSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RAQP_HDB_ADDR (0x404a28) +#define NBL_DSCH_RDMA_RAQP_HDB_DEPTH (1) +#define NBL_DSCH_RDMA_RAQP_HDB_WIDTH (32) +#define NBL_DSCH_RDMA_RAQP_HDB_DWLEN (1) +union dsch_rdma_raqp_hdb_u { + struct dsch_rdma_raqp_hdb { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RAQP_HDB_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RAQP_HDB_TOSCH_ADDR (0x404a2c) +#define NBL_DSCH_RDMA_RAQP_HDB_TOSCH_DEPTH (1) +#define NBL_DSCH_RDMA_RAQP_HDB_TOSCH_WIDTH (32) +#define NBL_DSCH_RDMA_RAQP_HDB_TOSCH_DWLEN (1) +union dsch_rdma_raqp_hdb_tosch_u { + struct dsch_rdma_raqp_hdb_tosch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RAQP_HDB_TOSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RAQP_HDB_TOCSCH_ADDR (0x404a30) +#define NBL_DSCH_RDMA_RAQP_HDB_TOCSCH_DEPTH (1) +#define NBL_DSCH_RDMA_RAQP_HDB_TOCSCH_WIDTH (32) +#define NBL_DSCH_RDMA_RAQP_HDB_TOCSCH_DWLEN (1) +union dsch_rdma_raqp_hdb_tocsch_u { + struct dsch_rdma_raqp_hdb_tocsch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 
data[NBL_DSCH_RDMA_RAQP_HDB_TOCSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_CEAQ_HDB_ADDR (0x404a34) +#define NBL_DSCH_RDMA_CEAQ_HDB_DEPTH (1) +#define NBL_DSCH_RDMA_CEAQ_HDB_WIDTH (32) +#define NBL_DSCH_RDMA_CEAQ_HDB_DWLEN (1) +union dsch_rdma_ceaq_hdb_u { + struct dsch_rdma_ceaq_hdb { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_CEAQ_HDB_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_CEAQ_HDB_TOSCH_ADDR (0x404a38) +#define NBL_DSCH_RDMA_CEAQ_HDB_TOSCH_DEPTH (1) +#define NBL_DSCH_RDMA_CEAQ_HDB_TOSCH_WIDTH (32) +#define NBL_DSCH_RDMA_CEAQ_HDB_TOSCH_DWLEN (1) +union dsch_rdma_ceaq_hdb_tosch_u { + struct dsch_rdma_ceaq_hdb_tosch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_CEAQ_HDB_TOSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_CEAQ_HDB_TOCSCH_ADDR (0x404a3c) +#define NBL_DSCH_RDMA_CEAQ_HDB_TOCSCH_DEPTH (1) +#define NBL_DSCH_RDMA_CEAQ_HDB_TOCSCH_WIDTH (32) +#define NBL_DSCH_RDMA_CEAQ_HDB_TOCSCH_DWLEN (1) +union dsch_rdma_ceaq_hdb_tocsch_u { + struct dsch_rdma_ceaq_hdb_tocsch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_CEAQ_HDB_TOCSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DBQM_HDB_LOST_ADDR (0x404a40) +#define NBL_DSCH_RDMA_DBQM_HDB_LOST_DEPTH (1) +#define NBL_DSCH_RDMA_DBQM_HDB_LOST_WIDTH (32) +#define NBL_DSCH_RDMA_DBQM_HDB_LOST_DWLEN (1) +union dsch_rdma_dbqm_hdb_lost_u { + struct dsch_rdma_dbqm_hdb_lost { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DBQM_HDB_LOST_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RCHK_HDB_TOSCH_ADDR (0x404a44) +#define NBL_DSCH_RDMA_RCHK_HDB_TOSCH_DEPTH (1) +#define NBL_DSCH_RDMA_RCHK_HDB_TOSCH_WIDTH (32) +#define NBL_DSCH_RDMA_RCHK_HDB_TOSCH_DWLEN (1) +union dsch_rdma_rchk_hdb_tosch_u { + struct dsch_rdma_rchk_hdb_tosch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RCHK_HDB_TOSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RCHK_HDB_TOCSCH_ADDR (0x404a48) +#define NBL_DSCH_RDMA_RCHK_HDB_TOCSCH_DEPTH (1) +#define NBL_DSCH_RDMA_RCHK_HDB_TOCSCH_WIDTH (32) +#define NBL_DSCH_RDMA_RCHK_HDB_TOCSCH_DWLEN (1) +union dsch_rdma_rchk_hdb_tocsch_u { + struct dsch_rdma_rchk_hdb_tocsch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RCHK_HDB_TOCSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RCHK_HDB_ADDR (0x404a4c) +#define NBL_DSCH_RDMA_RCHK_HDB_DEPTH (1) +#define NBL_DSCH_RDMA_RCHK_HDB_WIDTH (32) +#define NBL_DSCH_RDMA_RCHK_HDB_DWLEN (1) +union dsch_rdma_rchk_hdb_u { + struct dsch_rdma_rchk_hdb { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RCHK_HDB_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RNR_HDB_ADDR (0x404a50) +#define NBL_DSCH_RDMA_RNR_HDB_DEPTH (1) +#define NBL_DSCH_RDMA_RNR_HDB_WIDTH (32) +#define NBL_DSCH_RDMA_RNR_HDB_DWLEN (1) +union dsch_rdma_rnr_hdb_u { + struct dsch_rdma_rnr_hdb { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RNR_HDB_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_RTO_HDB_ADDR (0x404a54) +#define NBL_DSCH_RDMA_RTO_HDB_DEPTH (1) +#define NBL_DSCH_RDMA_RTO_HDB_WIDTH (32) +#define NBL_DSCH_RDMA_RTO_HDB_DWLEN (1) +union dsch_rdma_rto_hdb_u { + struct dsch_rdma_rto_hdb { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_RTO_HDB_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_TXP_TXLEN_ADDR (0x404a58) +#define NBL_DSCH_RDMA_TXP_TXLEN_DEPTH (1) 
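+/*
+ * Usage sketch (illustrative only, not part of the driver API): every
+ * register block in this file follows one pattern: _ADDR is the byte
+ * offset in the BAR, _DEPTH the number of table entries, _WIDTH the
+ * entry width in bits, _DWLEN the entry width in dwords, and the
+ * _REG(r) helpers compute the byte offset of table entry r. The access
+ * codes in the field comments are assumed to mean RW read-write, RO
+ * read-only, WO write-only, RC clear-on-read, and RCTR/SCTR hardware
+ * counters. A read-modify-write of one queue-to-TC entry might look
+ * like this, with 'hw_addr' a hypothetical mapped BAR base:
+ *
+ *	union dsch_vn_q2tc_cfg_tbl_u q2tc;
+ *
+ *	q2tc.data[0] = readl(hw_addr + NBL_DSCH_VN_Q2TC_CFG_TBL_REG(qid));
+ *	q2tc.info.tcid = tcid;
+ *	q2tc.info.vld = 1;
+ *	writel(q2tc.data[0], hw_addr + NBL_DSCH_VN_Q2TC_CFG_TBL_REG(qid));
+ *
+ * The per-VF NBL_DSCH_RDMA_SW_DB_BUF_VF<n> blocks below appear to
+ * repeat at a fixed 0x40 stride (0x408008 + n * 0x40), so an indexed
+ * accessor could be derived the same way.
+ */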
+#define NBL_DSCH_RDMA_TXP_TXLEN_WIDTH (32) +#define NBL_DSCH_RDMA_TXP_TXLEN_DWLEN (1) +union dsch_rdma_txp_txlen_u { + struct dsch_rdma_txp_txlen { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TXP_TXLEN_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_ADDR (0x404a70) +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_DEPTH (1) +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_WIDTH (32) +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_DWLEN (1) +union dsch_rdma_dif_w_db_lost_u { + struct dsch_rdma_dif_w_db_lost { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DIF_W_DB_LOST_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_INFO_ADDR (0x404a74) +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_INFO_DEPTH (1) +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_INFO_WIDTH (32) +#define NBL_DSCH_RDMA_DIF_W_DB_LOST_INFO_DWLEN (1) +union dsch_rdma_dif_w_db_lost_info_u { + struct dsch_rdma_dif_w_db_lost_info { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DIF_W_DB_LOST_INFO_DWLEN]; +} __packed; + +#define NBL_DSCH_POLL_DBQM_GET_ADDR (0x404a7c) +#define NBL_DSCH_POLL_DBQM_GET_DEPTH (1) +#define NBL_DSCH_POLL_DBQM_GET_WIDTH (32) +#define NBL_DSCH_POLL_DBQM_GET_DWLEN (1) +union dsch_poll_dbqm_get_u { + struct dsch_poll_dbqm_get { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_POLL_DBQM_GET_DWLEN]; +} __packed; + +#define NBL_DSCH_POLL_CSCH_GET_ADDR (0x404a80) +#define NBL_DSCH_POLL_CSCH_GET_DEPTH (1) +#define NBL_DSCH_POLL_CSCH_GET_WIDTH (32) +#define NBL_DSCH_POLL_CSCH_GET_DWLEN (1) +union dsch_poll_csch_get_u { + struct dsch_poll_csch_get { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_POLL_CSCH_GET_DWLEN]; +} __packed; + +#define NBL_DSCH_POLL_RNR_GET_ADDR (0x404a88) +#define NBL_DSCH_POLL_RNR_GET_DEPTH (1) +#define NBL_DSCH_POLL_RNR_GET_WIDTH (32) +#define NBL_DSCH_POLL_RNR_GET_DWLEN (1) +union dsch_poll_rnr_get_u { + struct dsch_poll_rnr_get { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_POLL_RNR_GET_DWLEN]; +} __packed; + +#define NBL_DSCH_POLL_RTO_GET_ADDR (0x404a8c) +#define NBL_DSCH_POLL_RTO_GET_DEPTH (1) +#define NBL_DSCH_POLL_RTO_GET_WIDTH (32) +#define NBL_DSCH_POLL_RTO_GET_DWLEN (1) +union dsch_poll_rto_get_u { + struct dsch_poll_rto_get { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_POLL_RTO_GET_DWLEN]; +} __packed; + +#define NBL_DSCH_DSCH_TXP_INFO_VLD_ADDR (0x404a90) +#define NBL_DSCH_DSCH_TXP_INFO_VLD_DEPTH (1) +#define NBL_DSCH_DSCH_TXP_INFO_VLD_WIDTH (32) +#define NBL_DSCH_DSCH_TXP_INFO_VLD_DWLEN (1) +union dsch_dsch_txp_info_vld_u { + struct dsch_dsch_txp_info_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_DSCH_TXP_INFO_VLD_DWLEN]; +} __packed; + +#define NBL_DSCH_OUT_FROM_DBQM_ADDR (0x404a94) +#define NBL_DSCH_OUT_FROM_DBQM_DEPTH (1) +#define NBL_DSCH_OUT_FROM_DBQM_WIDTH (32) +#define NBL_DSCH_OUT_FROM_DBQM_DWLEN (1) +union dsch_out_from_dbqm_u { + struct dsch_out_from_dbqm { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_OUT_FROM_DBQM_DWLEN]; +} __packed; + +#define NBL_DSCH_OUT_FROM_CSCH_ADDR (0x404a98) +#define NBL_DSCH_OUT_FROM_CSCH_DEPTH (1) +#define NBL_DSCH_OUT_FROM_CSCH_WIDTH (32) +#define NBL_DSCH_OUT_FROM_CSCH_DWLEN (1) +union dsch_out_from_csch_u { + struct dsch_out_from_csch { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 
data[NBL_DSCH_OUT_FROM_CSCH_DWLEN]; +} __packed; + +#define NBL_DSCH_OUT_FROM_CSCH_SHAN_ADDR (0x404a9c) +#define NBL_DSCH_OUT_FROM_CSCH_SHAN_DEPTH (1) +#define NBL_DSCH_OUT_FROM_CSCH_SHAN_WIDTH (32) +#define NBL_DSCH_OUT_FROM_CSCH_SHAN_DWLEN (1) +union dsch_out_from_csch_shan_u { + struct dsch_out_from_csch_shan { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_OUT_FROM_CSCH_SHAN_DWLEN]; +} __packed; + +#define NBL_DSCH_OUT_FROM_CSCH_PFC_ADDR (0x404aa0) +#define NBL_DSCH_OUT_FROM_CSCH_PFC_DEPTH (1) +#define NBL_DSCH_OUT_FROM_CSCH_PFC_WIDTH (32) +#define NBL_DSCH_OUT_FROM_CSCH_PFC_DWLEN (1) +union dsch_out_from_csch_pfc_u { + struct dsch_out_from_csch_pfc { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_OUT_FROM_CSCH_PFC_DWLEN]; +} __packed; + +#define NBL_DSCH_OUT_FROM_TQ_ADDR (0x404aa4) +#define NBL_DSCH_OUT_FROM_TQ_DEPTH (1) +#define NBL_DSCH_OUT_FROM_TQ_WIDTH (32) +#define NBL_DSCH_OUT_FROM_TQ_DWLEN (1) +union dsch_out_from_tq_u { + struct dsch_out_from_tq { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_OUT_FROM_TQ_DWLEN]; +} __packed; + +#define NBL_DSCH_OUT_FROM_TQ_SHAN_ADDR (0x404aa8) +#define NBL_DSCH_OUT_FROM_TQ_SHAN_DEPTH (1) +#define NBL_DSCH_OUT_FROM_TQ_SHAN_WIDTH (32) +#define NBL_DSCH_OUT_FROM_TQ_SHAN_DWLEN (1) +union dsch_out_from_tq_shan_u { + struct dsch_out_from_tq_shan { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_OUT_FROM_TQ_SHAN_DWLEN]; +} __packed; + +#define NBL_DSCH_OUT_FROM_TQ_PFC_ADDR (0x404aac) +#define NBL_DSCH_OUT_FROM_TQ_PFC_DEPTH (1) +#define NBL_DSCH_OUT_FROM_TQ_PFC_WIDTH (32) +#define NBL_DSCH_OUT_FROM_TQ_PFC_DWLEN (1) +union dsch_out_from_tq_pfc_u { + struct dsch_out_from_tq_pfc { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_OUT_FROM_TQ_PFC_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_NET_SHA_INTO_PIR_ADDR (0x404ab0) +#define NBL_DSCH_RDMA_NET_SHA_INTO_PIR_DEPTH (1) +#define NBL_DSCH_RDMA_NET_SHA_INTO_PIR_WIDTH (32) +#define NBL_DSCH_RDMA_NET_SHA_INTO_PIR_DWLEN (1) +union dsch_rdma_net_sha_into_pir_u { + struct dsch_rdma_net_sha_into_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_NET_SHA_INTO_PIR_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_NET_SHA_OUTOF_PIR_ADDR (0x404ab4) +#define NBL_DSCH_RDMA_NET_SHA_OUTOF_PIR_DEPTH (1) +#define NBL_DSCH_RDMA_NET_SHA_OUTOF_PIR_WIDTH (32) +#define NBL_DSCH_RDMA_NET_SHA_OUTOF_PIR_DWLEN (1) +union dsch_rdma_net_sha_outof_pir_u { + struct dsch_rdma_net_sha_outof_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_NET_SHA_OUTOF_PIR_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_GRP_SHA_INTO_PIR_ADDR (0x404ab8) +#define NBL_DSCH_RDMA_GRP_SHA_INTO_PIR_DEPTH (1) +#define NBL_DSCH_RDMA_GRP_SHA_INTO_PIR_WIDTH (32) +#define NBL_DSCH_RDMA_GRP_SHA_INTO_PIR_DWLEN (1) +union dsch_rdma_grp_sha_into_pir_u { + struct dsch_rdma_grp_sha_into_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_GRP_SHA_INTO_PIR_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_GRP_SHA_OUTOF_PIR_ADDR (0x404abc) +#define NBL_DSCH_RDMA_GRP_SHA_OUTOF_PIR_DEPTH (1) +#define NBL_DSCH_RDMA_GRP_SHA_OUTOF_PIR_WIDTH (32) +#define NBL_DSCH_RDMA_GRP_SHA_OUTOF_PIR_DWLEN (1) +union dsch_rdma_grp_sha_outof_pir_u { + struct dsch_rdma_grp_sha_outof_pir { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_GRP_SHA_OUTOF_PIR_DWLEN]; 
+} __packed; + +#define NBL_DSCH_RDMA_DSCH_SHA_NET_REQ_ADDR (0x404ac0) +#define NBL_DSCH_RDMA_DSCH_SHA_NET_REQ_DEPTH (1) +#define NBL_DSCH_RDMA_DSCH_SHA_NET_REQ_WIDTH (32) +#define NBL_DSCH_RDMA_DSCH_SHA_NET_REQ_DWLEN (1) +union dsch_rdma_dsch_sha_net_req_u { + struct dsch_rdma_dsch_sha_net_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DSCH_SHA_NET_REQ_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_DSCH_SHA_GRP_REQ_ADDR (0x404ac4) +#define NBL_DSCH_RDMA_DSCH_SHA_GRP_REQ_DEPTH (1) +#define NBL_DSCH_RDMA_DSCH_SHA_GRP_REQ_WIDTH (32) +#define NBL_DSCH_RDMA_DSCH_SHA_GRP_REQ_DWLEN (1) +union dsch_rdma_dsch_sha_grp_req_u { + struct dsch_rdma_dsch_sha_grp_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DSCH_SHA_GRP_REQ_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SCH_DB_UPD_NGIVLD_ADDR (0x404ac8) +#define NBL_DSCH_RDMA_SCH_DB_UPD_NGIVLD_DEPTH (1) +#define NBL_DSCH_RDMA_SCH_DB_UPD_NGIVLD_WIDTH (32) +#define NBL_DSCH_RDMA_SCH_DB_UPD_NGIVLD_DWLEN (1) +union dsch_rdma_sch_db_upd_ngivld_u { + struct dsch_rdma_sch_db_upd_ngivld { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SCH_DB_UPD_NGIVLD_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_OUTP_NG_TBL_IVLD_ADDR (0x404acc) +#define NBL_DSCH_RDMA_OUTP_NG_TBL_IVLD_DEPTH (1) +#define NBL_DSCH_RDMA_OUTP_NG_TBL_IVLD_WIDTH (32) +#define NBL_DSCH_RDMA_OUTP_NG_TBL_IVLD_DWLEN (1) +union dsch_rdma_outp_ng_tbl_ivld_u { + struct dsch_rdma_outp_ng_tbl_ivld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_OUTP_NG_TBL_IVLD_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_OUT_FROM_DBQM_PFC_ADDR (0x404ad0) +#define NBL_DSCH_RDMA_OUT_FROM_DBQM_PFC_DEPTH (1) +#define NBL_DSCH_RDMA_OUT_FROM_DBQM_PFC_WIDTH (32) +#define NBL_DSCH_RDMA_OUT_FROM_DBQM_PFC_DWLEN (1) +union dsch_rdma_out_from_dbqm_pfc_u { + struct dsch_rdma_out_from_dbqm_pfc { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSCH_RDMA_OUT_FROM_DBQM_PFC_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF0_ADDR (0x408008) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF0_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF0_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF0_DWLEN (1) +union dsch_rdma_sw_db_buf_vf0_u { + struct dsch_rdma_sw_db_buf_vf0 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF0_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF1_ADDR (0x408048) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF1_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF1_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF1_DWLEN (1) +union dsch_rdma_sw_db_buf_vf1_u { + struct dsch_rdma_sw_db_buf_vf1 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF1_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF2_ADDR (0x408088) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF2_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF2_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF2_DWLEN (1) +union dsch_rdma_sw_db_buf_vf2_u { + struct dsch_rdma_sw_db_buf_vf2 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF2_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF3_ADDR (0x4080c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF3_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF3_WIDTH (32) +#define 
NBL_DSCH_RDMA_SW_DB_BUF_VF3_DWLEN (1) +union dsch_rdma_sw_db_buf_vf3_u { + struct dsch_rdma_sw_db_buf_vf3 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF3_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF4_ADDR (0x408108) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF4_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF4_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF4_DWLEN (1) +union dsch_rdma_sw_db_buf_vf4_u { + struct dsch_rdma_sw_db_buf_vf4 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF4_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF5_ADDR (0x408148) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF5_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF5_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF5_DWLEN (1) +union dsch_rdma_sw_db_buf_vf5_u { + struct dsch_rdma_sw_db_buf_vf5 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF5_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF6_ADDR (0x408188) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF6_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF6_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF6_DWLEN (1) +union dsch_rdma_sw_db_buf_vf6_u { + struct dsch_rdma_sw_db_buf_vf6 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF6_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF7_ADDR (0x4081c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF7_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF7_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF7_DWLEN (1) +union dsch_rdma_sw_db_buf_vf7_u { + struct dsch_rdma_sw_db_buf_vf7 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF7_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF8_ADDR (0x408208) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF8_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF8_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF8_DWLEN (1) +union dsch_rdma_sw_db_buf_vf8_u { + struct dsch_rdma_sw_db_buf_vf8 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF8_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF9_ADDR (0x408248) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF9_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF9_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF9_DWLEN (1) +union dsch_rdma_sw_db_buf_vf9_u { + struct dsch_rdma_sw_db_buf_vf9 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF9_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF10_ADDR (0x408288) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF10_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF10_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF10_DWLEN (1) +union dsch_rdma_sw_db_buf_vf10_u { + struct dsch_rdma_sw_db_buf_vf10 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF10_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF11_ADDR (0x4082c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF11_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF11_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF11_DWLEN (1) +union dsch_rdma_sw_db_buf_vf11_u { + struct 
dsch_rdma_sw_db_buf_vf11 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF11_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF12_ADDR (0x408308) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF12_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF12_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF12_DWLEN (1) +union dsch_rdma_sw_db_buf_vf12_u { + struct dsch_rdma_sw_db_buf_vf12 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF12_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF13_ADDR (0x408348) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF13_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF13_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF13_DWLEN (1) +union dsch_rdma_sw_db_buf_vf13_u { + struct dsch_rdma_sw_db_buf_vf13 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF13_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF14_ADDR (0x408388) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF14_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF14_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF14_DWLEN (1) +union dsch_rdma_sw_db_buf_vf14_u { + struct dsch_rdma_sw_db_buf_vf14 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF14_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF15_ADDR (0x4083c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF15_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF15_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF15_DWLEN (1) +union dsch_rdma_sw_db_buf_vf15_u { + struct dsch_rdma_sw_db_buf_vf15 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF15_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF16_ADDR (0x408408) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF16_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF16_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF16_DWLEN (1) +union dsch_rdma_sw_db_buf_vf16_u { + struct dsch_rdma_sw_db_buf_vf16 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF16_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF17_ADDR (0x408448) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF17_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF17_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF17_DWLEN (1) +union dsch_rdma_sw_db_buf_vf17_u { + struct dsch_rdma_sw_db_buf_vf17 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF17_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF18_ADDR (0x408488) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF18_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF18_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF18_DWLEN (1) +union dsch_rdma_sw_db_buf_vf18_u { + struct dsch_rdma_sw_db_buf_vf18 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF18_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF19_ADDR (0x4084c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF19_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF19_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF19_DWLEN (1) +union dsch_rdma_sw_db_buf_vf19_u { + struct dsch_rdma_sw_db_buf_vf19 { + u32 cnt:11; /* [10:0] 
Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF19_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF20_ADDR (0x408508) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF20_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF20_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF20_DWLEN (1) +union dsch_rdma_sw_db_buf_vf20_u { + struct dsch_rdma_sw_db_buf_vf20 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF20_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF21_ADDR (0x408548) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF21_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF21_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF21_DWLEN (1) +union dsch_rdma_sw_db_buf_vf21_u { + struct dsch_rdma_sw_db_buf_vf21 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF21_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF22_ADDR (0x408588) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF22_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF22_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF22_DWLEN (1) +union dsch_rdma_sw_db_buf_vf22_u { + struct dsch_rdma_sw_db_buf_vf22 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF22_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF23_ADDR (0x4085c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF23_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF23_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF23_DWLEN (1) +union dsch_rdma_sw_db_buf_vf23_u { + struct dsch_rdma_sw_db_buf_vf23 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF23_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF24_ADDR (0x408608) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF24_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF24_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF24_DWLEN (1) +union dsch_rdma_sw_db_buf_vf24_u { + struct dsch_rdma_sw_db_buf_vf24 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF24_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF25_ADDR (0x408648) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF25_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF25_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF25_DWLEN (1) +union dsch_rdma_sw_db_buf_vf25_u { + struct dsch_rdma_sw_db_buf_vf25 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF25_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF26_ADDR (0x408688) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF26_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF26_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF26_DWLEN (1) +union dsch_rdma_sw_db_buf_vf26_u { + struct dsch_rdma_sw_db_buf_vf26 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF26_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF27_ADDR (0x4086c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF27_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF27_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF27_DWLEN (1) +union dsch_rdma_sw_db_buf_vf27_u { + struct dsch_rdma_sw_db_buf_vf27 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF27_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF28_ADDR (0x408708) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF28_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF28_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF28_DWLEN (1) +union dsch_rdma_sw_db_buf_vf28_u { + struct dsch_rdma_sw_db_buf_vf28 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF28_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF29_ADDR (0x408748) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF29_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF29_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF29_DWLEN (1) +union dsch_rdma_sw_db_buf_vf29_u { + struct dsch_rdma_sw_db_buf_vf29 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF29_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF30_ADDR (0x408788) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF30_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF30_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF30_DWLEN (1) +union dsch_rdma_sw_db_buf_vf30_u { + struct dsch_rdma_sw_db_buf_vf30 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF30_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF31_ADDR (0x4087c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF31_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF31_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF31_DWLEN (1) +union dsch_rdma_sw_db_buf_vf31_u { + struct dsch_rdma_sw_db_buf_vf31 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF31_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF32_ADDR (0x408808) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF32_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF32_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF32_DWLEN (1) +union dsch_rdma_sw_db_buf_vf32_u { + struct dsch_rdma_sw_db_buf_vf32 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF32_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF33_ADDR (0x408848) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF33_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF33_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF33_DWLEN (1) +union dsch_rdma_sw_db_buf_vf33_u { + struct dsch_rdma_sw_db_buf_vf33 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF33_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF34_ADDR (0x408888) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF34_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF34_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF34_DWLEN (1) +union dsch_rdma_sw_db_buf_vf34_u { + struct dsch_rdma_sw_db_buf_vf34 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF34_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF35_ADDR (0x4088c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF35_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF35_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF35_DWLEN (1) +union dsch_rdma_sw_db_buf_vf35_u { + struct dsch_rdma_sw_db_buf_vf35 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DSCH_RDMA_SW_DB_BUF_VF35_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF36_ADDR (0x408908) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF36_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF36_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF36_DWLEN (1) +union dsch_rdma_sw_db_buf_vf36_u { + struct dsch_rdma_sw_db_buf_vf36 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF36_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF37_ADDR (0x408948) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF37_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF37_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF37_DWLEN (1) +union dsch_rdma_sw_db_buf_vf37_u { + struct dsch_rdma_sw_db_buf_vf37 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF37_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF38_ADDR (0x408988) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF38_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF38_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF38_DWLEN (1) +union dsch_rdma_sw_db_buf_vf38_u { + struct dsch_rdma_sw_db_buf_vf38 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF38_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF39_ADDR (0x4089c8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF39_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF39_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF39_DWLEN (1) +union dsch_rdma_sw_db_buf_vf39_u { + struct dsch_rdma_sw_db_buf_vf39 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF39_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF40_ADDR (0x408a08) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF40_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF40_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF40_DWLEN (1) +union dsch_rdma_sw_db_buf_vf40_u { + struct dsch_rdma_sw_db_buf_vf40 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF40_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF41_ADDR (0x408a48) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF41_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF41_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF41_DWLEN (1) +union dsch_rdma_sw_db_buf_vf41_u { + struct dsch_rdma_sw_db_buf_vf41 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF41_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF42_ADDR (0x408a88) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF42_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF42_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF42_DWLEN (1) +union dsch_rdma_sw_db_buf_vf42_u { + struct dsch_rdma_sw_db_buf_vf42 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF42_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF43_ADDR (0x408ac8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF43_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF43_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF43_DWLEN (1) +union dsch_rdma_sw_db_buf_vf43_u { + struct dsch_rdma_sw_db_buf_vf43 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF43_DWLEN]; +} 
__packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF44_ADDR (0x408b08) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF44_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF44_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF44_DWLEN (1) +union dsch_rdma_sw_db_buf_vf44_u { + struct dsch_rdma_sw_db_buf_vf44 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF44_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF45_ADDR (0x408b48) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF45_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF45_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF45_DWLEN (1) +union dsch_rdma_sw_db_buf_vf45_u { + struct dsch_rdma_sw_db_buf_vf45 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF45_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF46_ADDR (0x408b88) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF46_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF46_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF46_DWLEN (1) +union dsch_rdma_sw_db_buf_vf46_u { + struct dsch_rdma_sw_db_buf_vf46 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF46_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF47_ADDR (0x408bc8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF47_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF47_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF47_DWLEN (1) +union dsch_rdma_sw_db_buf_vf47_u { + struct dsch_rdma_sw_db_buf_vf47 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF47_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF48_ADDR (0x408c08) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF48_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF48_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF48_DWLEN (1) +union dsch_rdma_sw_db_buf_vf48_u { + struct dsch_rdma_sw_db_buf_vf48 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF48_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF49_ADDR (0x408c48) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF49_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF49_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF49_DWLEN (1) +union dsch_rdma_sw_db_buf_vf49_u { + struct dsch_rdma_sw_db_buf_vf49 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF49_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF50_ADDR (0x408c88) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF50_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF50_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF50_DWLEN (1) +union dsch_rdma_sw_db_buf_vf50_u { + struct dsch_rdma_sw_db_buf_vf50 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF50_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF51_ADDR (0x408cc8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF51_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF51_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF51_DWLEN (1) +union dsch_rdma_sw_db_buf_vf51_u { + struct dsch_rdma_sw_db_buf_vf51 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF51_DWLEN]; +} __packed; + +#define 
NBL_DSCH_RDMA_SW_DB_BUF_VF52_ADDR (0x408d08) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF52_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF52_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF52_DWLEN (1) +union dsch_rdma_sw_db_buf_vf52_u { + struct dsch_rdma_sw_db_buf_vf52 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF52_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF53_ADDR (0x408d48) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF53_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF53_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF53_DWLEN (1) +union dsch_rdma_sw_db_buf_vf53_u { + struct dsch_rdma_sw_db_buf_vf53 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF53_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF54_ADDR (0x408d88) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF54_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF54_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF54_DWLEN (1) +union dsch_rdma_sw_db_buf_vf54_u { + struct dsch_rdma_sw_db_buf_vf54 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF54_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF55_ADDR (0x408dc8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF55_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF55_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF55_DWLEN (1) +union dsch_rdma_sw_db_buf_vf55_u { + struct dsch_rdma_sw_db_buf_vf55 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF55_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF56_ADDR (0x408e08) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF56_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF56_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF56_DWLEN (1) +union dsch_rdma_sw_db_buf_vf56_u { + struct dsch_rdma_sw_db_buf_vf56 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF56_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF57_ADDR (0x408e48) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF57_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF57_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF57_DWLEN (1) +union dsch_rdma_sw_db_buf_vf57_u { + struct dsch_rdma_sw_db_buf_vf57 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF57_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF58_ADDR (0x408e88) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF58_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF58_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF58_DWLEN (1) +union dsch_rdma_sw_db_buf_vf58_u { + struct dsch_rdma_sw_db_buf_vf58 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF58_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF59_ADDR (0x408ec8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF59_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF59_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF59_DWLEN (1) +union dsch_rdma_sw_db_buf_vf59_u { + struct dsch_rdma_sw_db_buf_vf59 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF59_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF60_ADDR (0x408f08) 
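/*
 * Example (editor's sketch, not part of the generated register map): the
 * NBL_DSCH_RDMA_SW_DB_BUF_VF* registers are emitted one block per VF with an
 * identical layout and a fixed 0x40-byte stride (0x408308, 0x408348, ...),
 * so a driver can index them instead of naming each define. `hw_base` (an
 * ioremap()ed BAR pointer) and the helper name are assumptions for
 * illustration only; readl() comes from <linux/io.h>.
 */
static inline u32 nbl_dsch_rdma_sw_db_buf_cnt(void __iomem *hw_base, int vf)
{
	union dsch_rdma_sw_db_buf_vf12_u reg;	/* same layout for every VF */

	/* VF12 sits at 0x408308; consecutive VFs step by 0x40 bytes. */
	reg.data[0] = readl(hw_base + NBL_DSCH_RDMA_SW_DB_BUF_VF12_ADDR +
			    0x40 * (vf - 12));
	return reg.info.cnt;	/* 11-bit cnt field, bits [10:0] */
}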
+#define NBL_DSCH_RDMA_SW_DB_BUF_VF60_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF60_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF60_DWLEN (1) +union dsch_rdma_sw_db_buf_vf60_u { + struct dsch_rdma_sw_db_buf_vf60 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF60_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF61_ADDR (0x408f48) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF61_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF61_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF61_DWLEN (1) +union dsch_rdma_sw_db_buf_vf61_u { + struct dsch_rdma_sw_db_buf_vf61 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF61_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF62_ADDR (0x408f88) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF62_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF62_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF62_DWLEN (1) +union dsch_rdma_sw_db_buf_vf62_u { + struct dsch_rdma_sw_db_buf_vf62 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF62_DWLEN]; +} __packed; + +#define NBL_DSCH_RDMA_SW_DB_BUF_VF63_ADDR (0x408fc8) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF63_DEPTH (1) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF63_WIDTH (32) +#define NBL_DSCH_RDMA_SW_DB_BUF_VF63_DWLEN (1) +union dsch_rdma_sw_db_buf_vf63_u { + struct dsch_rdma_sw_db_buf_vf63 { + u32 cnt:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SW_DB_BUF_VF63_DWLEN]; +} __packed; + +#define NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_ADDR (0x424000) +#define NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_DEPTH (4160) +#define NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_WIDTH (64) +#define NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_DWLEN (2) +union dsch_vn_tc_q_list_attr_tbl_u { + struct dsch_vn_tc_q_list_attr_tbl { + u32 head:11; /* [10:0] Default:0x0 RO */ + u32 reserve3:5; /* [15:11] Default:0x0 RO */ + u32 tail:11; /* [26:16] Default:0x0 RO */ + u32 reserve2:5; /* [31:27] Default:0x0 RO */ + u32 len:12; /* [43:32] Default:0x0 RO */ + u32 reserve1:20; /* [63:44] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_REG(r) (NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_VN_TC_Q_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_TC_Q_LIST_TBL_ADDR (0x444000) +#define NBL_DSCH_VN_TC_Q_LIST_TBL_DEPTH (2048) +#define NBL_DSCH_VN_TC_Q_LIST_TBL_WIDTH (32) +#define NBL_DSCH_VN_TC_Q_LIST_TBL_DWLEN (1) +union dsch_vn_tc_q_list_tbl_u { + struct dsch_vn_tc_q_list_tbl { + u32 nxt:11; /* [10:0] Default:0x0 RO */ + u32 reserve:18; /* [28:11] Default:0x0 RO */ + u32 regi:1; /* [29] Default:0x0 RO */ + u32 fly:1; /* [30] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_TC_Q_LIST_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_TC_Q_LIST_TBL_REG(r) (NBL_DSCH_VN_TC_Q_LIST_TBL_ADDR + \ + (NBL_DSCH_VN_TC_Q_LIST_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_PAGE_LIST_TBL_ADDR (0x454000) +#define NBL_DSCH_RDMA_PAGE_LIST_TBL_DEPTH (2048) +#define NBL_DSCH_RDMA_PAGE_LIST_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_PAGE_LIST_TBL_DWLEN (1) +union dsch_rdma_page_list_tbl_u { + struct dsch_rdma_page_list_tbl { + u32 nxt:11; /* [10:0] Default:0x0 RO */ + u32 reserve:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DSCH_RDMA_PAGE_LIST_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_PAGE_LIST_TBL_REG(r) (NBL_DSCH_RDMA_PAGE_LIST_TBL_ADDR + \ + (NBL_DSCH_RDMA_PAGE_LIST_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_TC_BM_TBL_ADDR (0x4a4000) +#define NBL_DSCH_VN_TC_BM_TBL_DEPTH (520) +#define NBL_DSCH_VN_TC_BM_TBL_WIDTH (32) +#define NBL_DSCH_VN_TC_BM_TBL_DWLEN (1) +union dsch_vn_tc_bm_tbl_u { + struct dsch_vn_tc_bm_tbl { + u32 tc_q_avail:8; /* [7:0] Default:0x0 RO */ + u32 tc_wgt_map:8; /* [15:8] Default:0x0 RO */ + u32 tc_rr_rmap:8; /* [23:16] Default:0x0 RO */ + u32 reserve:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_TC_BM_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_TC_BM_TBL_REG(r) (NBL_DSCH_VN_TC_BM_TBL_ADDR + \ + (NBL_DSCH_VN_TC_BM_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_TC_WGT_INFO_TBL_ADDR (0x4a8000) +#define NBL_DSCH_VN_TC_WGT_INFO_TBL_DEPTH (520) +#define NBL_DSCH_VN_TC_WGT_INFO_TBL_WIDTH (64) +#define NBL_DSCH_VN_TC_WGT_INFO_TBL_DWLEN (2) +union dsch_vn_tc_wgt_info_tbl_u { + struct dsch_vn_tc_wgt_info_tbl { + u32 tc0_wgt:8; /* [7:0] Default:0x1 RO */ + u32 tc1_wgt:8; /* [15:8] Default:0x1 RO */ + u32 tc2_wgt:8; /* [23:16] Default:0x1 RO */ + u32 tc3_wgt:8; /* [31:24] Default:0x1 RO */ + u32 tc4_wgt:8; /* [39:32] Default:0x1 RO */ + u32 tc5_wgt:8; /* [47:40] Default:0x1 RO */ + u32 tc6_wgt:8; /* [55:48] Default:0x1 RO */ + u32 tc7_wgt:8; /* [63:56] Default:0x1 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_TC_WGT_INFO_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_TC_WGT_INFO_TBL_REG(r) (NBL_DSCH_VN_TC_WGT_INFO_TBL_ADDR + \ + (NBL_DSCH_VN_TC_WGT_INFO_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_ADDR (0x4ac000) +#define NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_DEPTH (8) +#define NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_WIDTH (64) +#define NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_DWLEN (2) +union dsch_vn_spt_grp_list_attr_tbl_u { + struct dsch_vn_spt_grp_list_attr_tbl { + u32 chdr:8; /* [7:0] Default:0x0 RO */ + u32 reserve6:2; /* [9:8] Default:0x0 RO */ + u32 ctail:8; /* [17:10] Default:0x0 RO */ + u32 reserve5:2; /* [19:18] Default:0x0 RO */ + u32 clen:9; /* [28:20] Default:0x0 RO */ + u32 reserve4:3; /* [31:29] Default:0x0 RO */ + u32 ehdr:8; /* [39:32] Default:0x0 RO */ + u32 reserve3:2; /* [41:40] Default:0x0 RO */ + u32 etail:8; /* [49:42] Default:0x0 RO */ + u32 reserve2:2; /* [51:50] Default:0x0 RO */ + u32 elen:9; /* [60:52] Default:0x0 RO */ + u32 reserve1:3; /* [63:61] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_REG(r) (NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_VN_SPT_GRP_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_SPT_GRP_LIST_TBL_ADDR (0x4b0000) +#define NBL_DSCH_VN_SPT_GRP_LIST_TBL_DEPTH (256) +#define NBL_DSCH_VN_SPT_GRP_LIST_TBL_WIDTH (32) +#define NBL_DSCH_VN_SPT_GRP_LIST_TBL_DWLEN (1) +union dsch_vn_spt_grp_list_tbl_u { + struct dsch_vn_spt_grp_list_tbl { + u32 nxt:8; /* [7:0] Default:0x0 RO */ + u32 reserve3:4; /* [11:8] Default:0x0 RO */ + u32 pre:8; /* [19:12] Default:0x0 RO */ + u32 reserve2:4; /* [23:20] Default:0x0 RO */ + u32 sst:2; /* [25:24] Default:0x0 RO */ + u32 vld:1; /* [26] Default:0x0 RO */ + u32 reserve1:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_SPT_GRP_LIST_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_SPT_GRP_LIST_TBL_REG(r) (NBL_DSCH_VN_SPT_GRP_LIST_TBL_ADDR + \ + (NBL_DSCH_VN_SPT_GRP_LIST_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_ADDR 
(0x4b4000) +#define NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_DEPTH (256) +#define NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_WIDTH (64) +#define NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_DWLEN (2) +union dsch_vn_grp_net_list_attr_tbl_u { + struct dsch_vn_grp_net_list_attr_tbl { + u32 chdr:10; /* [9:0] Default:0x0 RO */ + u32 ctail:10; /* [19:10] Default:0x0 RO */ + u32 clen:11; /* [30:20] Default:0x0 RO */ + u32 reserve2:1; /* [31] Default:0x0 RO */ + u32 ehdr:10; /* [41:32] Default:0x0 RO */ + u32 etail:10; /* [51:42] Default:0x0 RO */ + u32 elen:11; /* [62:52] Default:0x0 RO */ + u32 reserve1:1; /* [63] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_REG(r) (NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_VN_GRP_NET_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_GRP_NET_LIST_TBL_ADDR (0x4b8000) +#define NBL_DSCH_VN_GRP_NET_LIST_TBL_DEPTH (520) +#define NBL_DSCH_VN_GRP_NET_LIST_TBL_WIDTH (32) +#define NBL_DSCH_VN_GRP_NET_LIST_TBL_DWLEN (1) +union dsch_vn_grp_net_list_tbl_u { + struct dsch_vn_grp_net_list_tbl { + u32 nxt:10; /* [9:0] Default:0x0 RO */ + u32 reserve3:2; /* [11:10] Default:0x0 RO */ + u32 pre:10; /* [21:12] Default:0x0 RO */ + u32 reserve2:2; /* [23:22] Default:0x0 RO */ + u32 sst:2; /* [25:24] Default:0x0 RO */ + u32 pfc:1; /* [26] Default:0x0 RO */ + u32 vld:1; /* [27] Default:0x0 RO */ + u32 reserve1:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_GRP_NET_LIST_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_GRP_NET_LIST_TBL_REG(r) (NBL_DSCH_VN_GRP_NET_LIST_TBL_ADDR + \ + (NBL_DSCH_VN_GRP_NET_LIST_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_ADDR (0x4bc000) +#define NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_DEPTH (8) +#define NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_WIDTH (32) +#define NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_DWLEN (1) +union dsch_vn_pfc_net_list_attr_tbl_u { + struct dsch_vn_pfc_net_list_attr_tbl { + u32 head:10; /* [9:0] Default:0x0 RO */ + u32 tail:10; /* [19:10] Default:0x0 RO */ + u32 len:11; /* [30:20] Default:0x0 RO */ + u32 reserve1:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_REG(r) (NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_VN_PFC_NET_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_NSHA_ST_REG_TBL_ADDR (0x4c0000) +#define NBL_DSCH_VN_NSHA_ST_REG_TBL_DEPTH (520) +#define NBL_DSCH_VN_NSHA_ST_REG_TBL_WIDTH (32) +#define NBL_DSCH_VN_NSHA_ST_REG_TBL_DWLEN (1) +union dsch_vn_nsha_st_reg_tbl_u { + struct dsch_vn_nsha_st_reg_tbl { + u32 sst:2; /* [1:0] Default:0x0 RO */ + u32 reserve:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_NSHA_ST_REG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_NSHA_ST_REG_TBL_REG(r) (NBL_DSCH_VN_NSHA_ST_REG_TBL_ADDR + \ + (NBL_DSCH_VN_NSHA_ST_REG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_VN_GSHA_ST_REG_TBL_ADDR (0x4c4000) +#define NBL_DSCH_VN_GSHA_ST_REG_TBL_DEPTH (256) +#define NBL_DSCH_VN_GSHA_ST_REG_TBL_WIDTH (32) +#define NBL_DSCH_VN_GSHA_ST_REG_TBL_DWLEN (1) +union dsch_vn_gsha_st_reg_tbl_u { + struct dsch_vn_gsha_st_reg_tbl { + u32 sst:2; /* [1:0] Default:0x0 RO */ + u32 reserve:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_VN_GSHA_ST_REG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_VN_GSHA_ST_REG_TBL_REG(r) (NBL_DSCH_VN_GSHA_ST_REG_TBL_ADDR + \ + (NBL_DSCH_VN_GSHA_ST_REG_TBL_DWLEN * 4) * (r)) + +#define 
NBL_DSCH_RDMA_DBQ_ATTR_TBL_ADDR (0x4c8000) +#define NBL_DSCH_RDMA_DBQ_ATTR_TBL_DEPTH (512) +#define NBL_DSCH_RDMA_DBQ_ATTR_TBL_WIDTH (64) +#define NBL_DSCH_RDMA_DBQ_ATTR_TBL_DWLEN (2) +union dsch_rdma_dbq_attr_tbl_u { + struct dsch_rdma_dbq_attr_tbl { + u32 ptr_hdr_oft:8; /* [7:0] Default:0x0 RO */ + u32 reserve4:8; /* [15:8] Default:0x0 RO */ + u32 ptr_hdr:11; /* [26:16] Default:0x0 RO */ + u32 reserve3:4; /* [30:27] Default:0x0 RO */ + u32 nempty:1; /* [31] Default:0x0 RO */ + u32 ptr_tail_oft:8; /* [39:32] Default:0x0 RO */ + u32 reserve2:8; /* [47:40] Default:0x0 RO */ + u32 ptr_tail:11; /* [58:48] Default:0x0 RO */ + u32 reserve1:5; /* [63:59] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_DBQ_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_DBQ_ATTR_TBL_REG(r) (NBL_DSCH_RDMA_DBQ_ATTR_TBL_ADDR + \ + (NBL_DSCH_RDMA_DBQ_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_ADDR (0x4cc000) +#define NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_DEPTH (512) +#define NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_DWLEN (1) +union dsch_rdma_tc_q_list_attr_tbl_u { + struct dsch_rdma_tc_q_list_attr_tbl { + u32 rptr:6; /* [5:0] Default:0x0 RO */ + u32 reserve4:2; /* [7:6] Default:0x0 RO */ + u32 rlen:7; /* [14:8] Default:0x0 RO */ + u32 reserve3:1; /* [15] Default:0x0 RO */ + u32 wptr:3; /* [18:16] Default:0x0 RO */ + u32 reserve2:5; /* [23:19] Default:0x0 RO */ + u32 wlen:4; /* [27:24] Default:0x0 RO */ + u32 fly:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_REG(r) (NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_RDMA_TC_Q_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_TC_BM_TBL_ADDR (0x4d0000) +#define NBL_DSCH_RDMA_TC_BM_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_TC_BM_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_TC_BM_TBL_DWLEN (1) +union dsch_rdma_tc_bm_tbl_u { + struct dsch_rdma_tc_bm_tbl { + u32 tc_q_avail:8; /* [7:0] Default:0x0 RO */ + u32 tc_wgt_map:8; /* [15:8] Default:0x0 RO */ + u32 tc_rr_rmap:8; /* [23:16] Default:0x0 RO */ + u32 reserve:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TC_BM_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_TC_BM_TBL_REG(r) (NBL_DSCH_RDMA_TC_BM_TBL_ADDR + \ + (NBL_DSCH_RDMA_TC_BM_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_TC_WGT_INFO_TBL_ADDR (0x4d4000) +#define NBL_DSCH_RDMA_TC_WGT_INFO_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_TC_WGT_INFO_TBL_WIDTH (64) +#define NBL_DSCH_RDMA_TC_WGT_INFO_TBL_DWLEN (2) +union dsch_rdma_tc_wgt_info_tbl_u { + struct dsch_rdma_tc_wgt_info_tbl { + u32 tc0_wgt:8; /* [7:0] Default:0x1 RO */ + u32 tc1_wgt:8; /* [15:8] Default:0x1 RO */ + u32 tc2_wgt:8; /* [23:16] Default:0x1 RO */ + u32 tc3_wgt:8; /* [31:24] Default:0x1 RO */ + u32 tc4_wgt:8; /* [39:32] Default:0x1 RO */ + u32 tc5_wgt:8; /* [47:40] Default:0x1 RO */ + u32 tc6_wgt:8; /* [55:48] Default:0x1 RO */ + u32 tc7_wgt:8; /* [63:56] Default:0x1 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_TC_WGT_INFO_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_TC_WGT_INFO_TBL_REG(r) (NBL_DSCH_RDMA_TC_WGT_INFO_TBL_ADDR + \ + (NBL_DSCH_RDMA_TC_WGT_INFO_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_ADDR (0x4d8000) +#define NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_DEPTH (8) +#define NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_WIDTH (64) +#define NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_DWLEN (2) +union dsch_rdma_spt_grp_list_attr_tbl_u { + struct 
dsch_rdma_spt_grp_list_attr_tbl { + u32 chdr:6; /* [5:0] Default:0x0 RO */ + u32 reserve6:4; /* [9:6] Default:0x0 RO */ + u32 ctail:6; /* [15:10] Default:0x0 RO */ + u32 reserve5:4; /* [19:16] Default:0x0 RO */ + u32 clen:7; /* [26:20] Default:0x0 RO */ + u32 reserve4:5; /* [31:27] Default:0x0 RO */ + u32 ehdr:6; /* [37:32] Default:0x0 RO */ + u32 reserve3:4; /* [41:38] Default:0x0 RO */ + u32 etail:6; /* [47:42] Default:0x0 RO */ + u32 reserve2:4; /* [51:48] Default:0x0 RO */ + u32 elen:7; /* [58:52] Default:0x0 RO */ + u32 reserve1:5; /* [63:59] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_REG(r) (NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_RDMA_SPT_GRP_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_ADDR (0x4dc000) +#define NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_DWLEN (1) +union dsch_rdma_spt_grp_list_tbl_u { + struct dsch_rdma_spt_grp_list_tbl { + u32 nxt:6; /* [5:0] Default:0x0 RO */ + u32 reserve3:6; /* [11:6] Default:0x0 RO */ + u32 pre:6; /* [17:12] Default:0x0 RO */ + u32 reserve2:6; /* [23:18] Default:0x0 RO */ + u32 sst:2; /* [25:24] Default:0x0 RO */ + u32 vld:1; /* [26] Default:0x0 RO */ + u32 reserve1:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_REG(r) (NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_ADDR + \ + (NBL_DSCH_RDMA_SPT_GRP_LIST_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_ADDR (0x4e0000) +#define NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_WIDTH (64) +#define NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_DWLEN (2) +union dsch_rdma_grp_net_list_attr_tbl_u { + struct dsch_rdma_grp_net_list_attr_tbl { + u32 chdr:6; /* [5:0] Default:0x0 RO */ + u32 reserve6:4; /* [9:6] Default:0x0 RO */ + u32 ctail:6; /* [15:10] Default:0x0 RO */ + u32 reserve5:4; /* [19:16] Default:0x0 RO */ + u32 clen:7; /* [26:20] Default:0x0 RO */ + u32 reserve4:5; /* [31:27] Default:0x0 RO */ + u32 ehdr:6; /* [37:32] Default:0x0 RO */ + u32 reserve3:4; /* [41:38] Default:0x0 RO */ + u32 etail:6; /* [47:42] Default:0x0 RO */ + u32 reserve2:4; /* [51:48] Default:0x0 RO */ + u32 elen:7; /* [58:52] Default:0x0 RO */ + u32 reserve1:5; /* [63:59] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_REG(r) (NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_RDMA_GRP_NET_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_GRP_NET_LIST_TBL_ADDR (0x4e4000) +#define NBL_DSCH_RDMA_GRP_NET_LIST_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_GRP_NET_LIST_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_GRP_NET_LIST_TBL_DWLEN (1) +union dsch_rdma_grp_net_list_tbl_u { + struct dsch_rdma_grp_net_list_tbl { + u32 nxt:6; /* [5:0] Default:0x0 RO */ + u32 reserve3:6; /* [11:6] Default:0x0 RO */ + u32 pre:6; /* [17:12] Default:0x0 RO */ + u32 reserve2:6; /* [23:18] Default:0x0 RO */ + u32 sst:2; /* [25:24] Default:0x0 RO */ + u32 pfc:1; /* [26] Default:0x0 RO */ + u32 vld:1; /* [27] Default:0x0 RO */ + u32 reserve1:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_GRP_NET_LIST_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_GRP_NET_LIST_TBL_REG(r) (NBL_DSCH_RDMA_GRP_NET_LIST_TBL_ADDR + \ + 
(NBL_DSCH_RDMA_GRP_NET_LIST_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_ADDR (0x4e8000) +#define NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_DEPTH (8) +#define NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_DWLEN (1) +union dsch_rdma_pfc_net_list_attr_tbl_u { + struct dsch_rdma_pfc_net_list_attr_tbl { + u32 head:6; /* [5:0] Default:0x0 RO */ + u32 reserve3:4; /* [9:6] Default:0x0 RO */ + u32 tail:6; /* [15:10] Default:0x0 RO */ + u32 reserve2:4; /* [19:16] Default:0x0 RO */ + u32 len:7; /* [26:20] Default:0x0 RO */ + u32 reserve1:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_REG(r) (NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_ADDR + \ + (NBL_DSCH_RDMA_PFC_NET_LIST_ATTR_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_NSHA_ST_REG_TBL_ADDR (0x4ec000) +#define NBL_DSCH_RDMA_NSHA_ST_REG_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_NSHA_ST_REG_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_NSHA_ST_REG_TBL_DWLEN (1) +union dsch_rdma_nsha_st_reg_tbl_u { + struct dsch_rdma_nsha_st_reg_tbl { + u32 sst:2; /* [1:0] Default:0x0 RO */ + u32 reserve:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_NSHA_ST_REG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_NSHA_ST_REG_TBL_REG(r) (NBL_DSCH_RDMA_NSHA_ST_REG_TBL_ADDR + \ + (NBL_DSCH_RDMA_NSHA_ST_REG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_GSHA_ST_REG_TBL_ADDR (0x4f0000) +#define NBL_DSCH_RDMA_GSHA_ST_REG_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_GSHA_ST_REG_TBL_WIDTH (32) +#define NBL_DSCH_RDMA_GSHA_ST_REG_TBL_DWLEN (1) +union dsch_rdma_gsha_st_reg_tbl_u { + struct dsch_rdma_gsha_st_reg_tbl { + u32 sst:2; /* [1:0] Default:0x0 RO */ + u32 reserve:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_GSHA_ST_REG_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_GSHA_ST_REG_TBL_REG(r) (NBL_DSCH_RDMA_GSHA_ST_REG_TBL_ADDR + \ + (NBL_DSCH_RDMA_GSHA_ST_REG_TBL_DWLEN * 4) * (r)) + +#define NBL_DSCH_RDMA_VF_STAT_TBL_ADDR (0x4f4000) +#define NBL_DSCH_RDMA_VF_STAT_TBL_DEPTH (64) +#define NBL_DSCH_RDMA_VF_STAT_TBL_WIDTH (64) +#define NBL_DSCH_RDMA_VF_STAT_TBL_DWLEN (2) +union dsch_rdma_vf_stat_tbl_u { + struct dsch_rdma_vf_stat_tbl { + u32 db_num_onchip:8; /* [7:0] Default:0x0 RO */ + u32 reserve3:8; /* [15:8] Default:0x0 RO */ + u32 page_num:12; /* [27:16] Default:0x0 RO */ + u32 reserve2:4; /* [31:28] Default:0x0 RO */ + u32 db_num_inddr:22; /* [53:32] Default:0x0 RO */ + u32 reserve1:10; /* [63:54] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSCH_RDMA_VF_STAT_TBL_DWLEN]; +} __packed; +#define NBL_DSCH_RDMA_VF_STAT_TBL_REG(r) (NBL_DSCH_RDMA_VF_STAT_TBL_ADDR + \ + (NBL_DSCH_RDMA_VF_STAT_TBL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstat.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstat.h new file mode 100644 index 0000000000000000000000000000000000000000..db5c3dc129f72ac8ed7dc3ece257938499e9f1a8 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstat.h @@ -0,0 +1,378 @@ +// Code generated by interstellar. DO NOT EDIT. 
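/*
 * Example (editor's sketch): every table in these generated headers pairs a
 * layout union with a NBL_*_REG(r) macro that yields the byte address of
 * entry r (base address + DWLEN * 4 * r). Reading an entry is therefore a
 * DWLEN-long dword loop into the union's data[] view; wide counters such as
 * the dstat_vsi_stat table defined at the bottom of this header are then
 * reassembled from their low/high words. `hw_base` and the helper name are
 * illustrative assumptions, not the driver's real accessors.
 */
static inline u64 nbl_dstat_vsi_fwd_bytes(void __iomem *hw_base, int vsi)
{
	union dstat_vsi_stat_u ent;
	int i;

	for (i = 0; i < NBL_DSTAT_VSI_STAT_DWLEN; i++)
		ent.data[i] = readl(hw_base + NBL_DSTAT_VSI_STAT_REG(vsi) +
				    4 * i);
	/* 64-bit forwarded-byte count split across two 32-bit words. */
	return ((u64)ent.info.fwd_byte_cnt_high << 32) |
	       ent.info.fwd_byte_cnt_low;
}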
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DSTAT_H +#define NBL_DSTAT_H 1 + +#include <linux/types.h> + +#define NBL_DSTAT_BASE (0x0071C000) + +#define NBL_DSTAT_INT_STATUS_ADDR (0x71c000) +#define NBL_DSTAT_INT_STATUS_DEPTH (1) +#define NBL_DSTAT_INT_STATUS_WIDTH (32) +#define NBL_DSTAT_INT_STATUS_DWLEN (1) +union dstat_int_status_u { + struct dstat_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 parity_err:1; /* [5] Default:0x0 RWC */ + u32 stat_type_err:1; /* [6] Default:0x0 RWC */ + u32 tbl_conflict_err:1; /* [7] Default:0x0 RWC */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DSTAT_INT_MASK_ADDR (0x71c004) +#define NBL_DSTAT_INT_MASK_DEPTH (1) +#define NBL_DSTAT_INT_MASK_WIDTH (32) +#define NBL_DSTAT_INT_MASK_DWLEN (1) +union dstat_int_mask_u { + struct dstat_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 parity_err:1; /* [5] Default:0x0 RW */ + u32 stat_type_err:1; /* [6] Default:0x0 RW */ + u32 tbl_conflict_err:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DSTAT_INT_SET_ADDR (0x71c008) +#define NBL_DSTAT_INT_SET_DEPTH (1) +#define NBL_DSTAT_INT_SET_WIDTH (32) +#define NBL_DSTAT_INT_SET_DWLEN (1) +union dstat_int_set_u { + struct dstat_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 parity_err:1; /* [5] Default:0x0 WO */ + u32 stat_type_err:1; /* [6] Default:0x0 WO */ + u32 tbl_conflict_err:1; /* [7] Default:0x0 WO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_INT_SET_DWLEN]; +} __packed; + +#define NBL_DSTAT_COR_ERR_INFO_ADDR (0x71c00c) +#define NBL_DSTAT_COR_ERR_INFO_DEPTH (1) +#define NBL_DSTAT_COR_ERR_INFO_WIDTH (32) +#define NBL_DSTAT_COR_ERR_INFO_DWLEN (1) +union dstat_cor_err_info_u { + struct dstat_cor_err_info { + u32 addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTAT_PARITY_ERR_INFO_ADDR (0x71c01c) +#define NBL_DSTAT_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DSTAT_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DSTAT_PARITY_ERR_INFO_DWLEN (1) +union dstat_parity_err_info_u { + struct dstat_parity_err_info { + u32 ram_id:4; /* [3:0] Default:0x0 RO */ + u32 addr:10; /* [13:4] Default:0x0 RO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTAT_CIF_ERR_INFO_ADDR (0x71c024) +#define NBL_DSTAT_CIF_ERR_INFO_DEPTH (1) +#define NBL_DSTAT_CIF_ERR_INFO_WIDTH (32) +#define NBL_DSTAT_CIF_ERR_INFO_DWLEN (1) +union dstat_cif_err_info_u { + struct dstat_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define
NBL_DSTAT_TBL_CONFLICT_ERR_INFO_ADDR (0x71c030) +#define NBL_DSTAT_TBL_CONFLICT_ERR_INFO_DEPTH (1) +#define NBL_DSTAT_TBL_CONFLICT_ERR_INFO_WIDTH (32) +#define NBL_DSTAT_TBL_CONFLICT_ERR_INFO_DWLEN (1) +union dstat_tbl_conflict_err_info_u { + struct dstat_tbl_conflict_err_info { + u32 tbl_id:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_TBL_CONFLICT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTAT_CAR_CTRL_ADDR (0x71c100) +#define NBL_DSTAT_CAR_CTRL_DEPTH (1) +#define NBL_DSTAT_CAR_CTRL_WIDTH (32) +#define NBL_DSTAT_CAR_CTRL_DWLEN (1) +union dstat_car_ctrl_u { + struct dstat_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x0 RW */ + u32 rctr_car:1; /* [1] Default:0x0 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DSTAT_INIT_START_ADDR (0x71c104) +#define NBL_DSTAT_INIT_START_DEPTH (1) +#define NBL_DSTAT_INIT_START_WIDTH (32) +#define NBL_DSTAT_INIT_START_DWLEN (1) +union dstat_init_start_u { + struct dstat_init_start { + u32 init_start:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_INIT_START_DWLEN]; +} __packed; + +#define NBL_DSTAT_CPU_READ_CLR_ADDR (0x71c108) +#define NBL_DSTAT_CPU_READ_CLR_DEPTH (1) +#define NBL_DSTAT_CPU_READ_CLR_WIDTH (32) +#define NBL_DSTAT_CPU_READ_CLR_DWLEN (1) +union dstat_cpu_read_clr_u { + struct dstat_cpu_read_clr { + u32 cpu_clr:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_CPU_READ_CLR_DWLEN]; +} __packed; + +#define NBL_DSTAT_GLB_CLR_ADDR (0x71c10c) +#define NBL_DSTAT_GLB_CLR_DEPTH (1) +#define NBL_DSTAT_GLB_CLR_WIDTH (32) +#define NBL_DSTAT_GLB_CLR_DWLEN (1) +union dstat_glb_clr_u { + struct dstat_glb_clr { + u32 glb_clr:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_GLB_CLR_DWLEN]; +} __packed; + +#define NBL_DSTAT_SEP_CLR_ADDR (0x71c110) +#define NBL_DSTAT_SEP_CLR_DEPTH (1) +#define NBL_DSTAT_SEP_CLR_WIDTH (32) +#define NBL_DSTAT_SEP_CLR_DWLEN (1) +union dstat_sep_clr_u { + struct dstat_sep_clr { + u32 vsi_tbl_clr:1; /* [0] Default:0x0 RW */ + u32 ptype_tbl_clr:1; /* [1] Default:0x0 RW */ + u32 err_code_tbl_clr:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_SEP_CLR_DWLEN]; +} __packed; + +#define NBL_DSTAT_CFG_TEST_ADDR (0x71c118) +#define NBL_DSTAT_CFG_TEST_DEPTH (1) +#define NBL_DSTAT_CFG_TEST_WIDTH (32) +#define NBL_DSTAT_CFG_TEST_DWLEN (1) +union dstat_cfg_test_u { + struct dstat_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTAT_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DSTAT_INIT_DONE_ADDR (0x71c200) +#define NBL_DSTAT_INIT_DONE_DEPTH (1) +#define NBL_DSTAT_INIT_DONE_WIDTH (32) +#define NBL_DSTAT_INIT_DONE_DWLEN (1) +union dstat_init_done_u { + struct dstat_init_done { + u32 init_done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DSTAT_GLB_CLR_DONE_ADDR (0x71c204) +#define NBL_DSTAT_GLB_CLR_DONE_DEPTH (1) +#define NBL_DSTAT_GLB_CLR_DONE_WIDTH (32) +#define NBL_DSTAT_GLB_CLR_DONE_DWLEN (1) +union dstat_glb_clr_done_u { + struct dstat_glb_clr_done { + u32 glb_clr_done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* 
[31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_GLB_CLR_DONE_DWLEN]; +} __packed; + +#define NBL_DSTAT_SEP_CLR_DONE_ADDR (0x71c208) +#define NBL_DSTAT_SEP_CLR_DONE_DEPTH (1) +#define NBL_DSTAT_SEP_CLR_DONE_WIDTH (32) +#define NBL_DSTAT_SEP_CLR_DONE_DWLEN (1) +union dstat_sep_clr_done_u { + struct dstat_sep_clr_done { + u32 vsi_tbl_done:1; /* [0] Default:0x0 RO */ + u32 ptype_tbl_done:1; /* [1] Default:0x0 RO */ + u32 err_code_tbl_done:1; /* [2] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_SEP_CLR_DONE_DWLEN]; +} __packed; + +#define NBL_DSTAT_ERR_CODE_STAT_ETH0_ADDR (0x71c400) +#define NBL_DSTAT_ERR_CODE_STAT_ETH0_DEPTH (16) +#define NBL_DSTAT_ERR_CODE_STAT_ETH0_WIDTH (32) +#define NBL_DSTAT_ERR_CODE_STAT_ETH0_DWLEN (1) +union dstat_err_code_stat_eth0_u { + struct dstat_err_code_stat_eth0 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_ERR_CODE_STAT_ETH0_DWLEN]; +} __packed; +#define NBL_DSTAT_ERR_CODE_STAT_ETH0_REG(r) (NBL_DSTAT_ERR_CODE_STAT_ETH0_ADDR + \ + (NBL_DSTAT_ERR_CODE_STAT_ETH0_DWLEN * 4) * (r)) + +#define NBL_DSTAT_ERR_CODE_STAT_ETH1_ADDR (0x71c500) +#define NBL_DSTAT_ERR_CODE_STAT_ETH1_DEPTH (16) +#define NBL_DSTAT_ERR_CODE_STAT_ETH1_WIDTH (32) +#define NBL_DSTAT_ERR_CODE_STAT_ETH1_DWLEN (1) +union dstat_err_code_stat_eth1_u { + struct dstat_err_code_stat_eth1 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_ERR_CODE_STAT_ETH1_DWLEN]; +} __packed; +#define NBL_DSTAT_ERR_CODE_STAT_ETH1_REG(r) (NBL_DSTAT_ERR_CODE_STAT_ETH1_ADDR + \ + (NBL_DSTAT_ERR_CODE_STAT_ETH1_DWLEN * 4) * (r)) + +#define NBL_DSTAT_ERR_CODE_STAT_ETH2_ADDR (0x71c600) +#define NBL_DSTAT_ERR_CODE_STAT_ETH2_DEPTH (16) +#define NBL_DSTAT_ERR_CODE_STAT_ETH2_WIDTH (32) +#define NBL_DSTAT_ERR_CODE_STAT_ETH2_DWLEN (1) +union dstat_err_code_stat_eth2_u { + struct dstat_err_code_stat_eth2 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_ERR_CODE_STAT_ETH2_DWLEN]; +} __packed; +#define NBL_DSTAT_ERR_CODE_STAT_ETH2_REG(r) (NBL_DSTAT_ERR_CODE_STAT_ETH2_ADDR + \ + (NBL_DSTAT_ERR_CODE_STAT_ETH2_DWLEN * 4) * (r)) + +#define NBL_DSTAT_ERR_CODE_STAT_ETH3_ADDR (0x71c700) +#define NBL_DSTAT_ERR_CODE_STAT_ETH3_DEPTH (16) +#define NBL_DSTAT_ERR_CODE_STAT_ETH3_WIDTH (32) +#define NBL_DSTAT_ERR_CODE_STAT_ETH3_DWLEN (1) +union dstat_err_code_stat_eth3_u { + struct dstat_err_code_stat_eth3 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_ERR_CODE_STAT_ETH3_DWLEN]; +} __packed; +#define NBL_DSTAT_ERR_CODE_STAT_ETH3_REG(r) (NBL_DSTAT_ERR_CODE_STAT_ETH3_ADDR + \ + (NBL_DSTAT_ERR_CODE_STAT_ETH3_DWLEN * 4) * (r)) + +#define NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_ADDR (0x71c800) +#define NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_DEPTH (16) +#define NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_WIDTH (32) +#define NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_DWLEN (1) +union dstat_err_code_stat_loopback_u { + struct dstat_err_code_stat_loopback { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_DWLEN]; +} __packed; +#define NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_REG(r) (NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_ADDR + \ + (NBL_DSTAT_ERR_CODE_STAT_LOOPBACK_DWLEN * 4) * (r)) + +#define NBL_DSTAT_PTYPE_STAT_ETH0_ADDR (0x71d000) +#define NBL_DSTAT_PTYPE_STAT_ETH0_DEPTH (256) +#define NBL_DSTAT_PTYPE_STAT_ETH0_WIDTH (32) +#define NBL_DSTAT_PTYPE_STAT_ETH0_DWLEN (1) +union dstat_ptype_stat_eth0_u { 
+ struct dstat_ptype_stat_eth0 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_PTYPE_STAT_ETH0_DWLEN]; +} __packed; +#define NBL_DSTAT_PTYPE_STAT_ETH0_REG(r) (NBL_DSTAT_PTYPE_STAT_ETH0_ADDR + \ + (NBL_DSTAT_PTYPE_STAT_ETH0_DWLEN * 4) * (r)) + +#define NBL_DSTAT_PTYPE_STAT_ETH1_ADDR (0x71d400) +#define NBL_DSTAT_PTYPE_STAT_ETH1_DEPTH (256) +#define NBL_DSTAT_PTYPE_STAT_ETH1_WIDTH (32) +#define NBL_DSTAT_PTYPE_STAT_ETH1_DWLEN (1) +union dstat_ptype_stat_eth1_u { + struct dstat_ptype_stat_eth1 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_PTYPE_STAT_ETH1_DWLEN]; +} __packed; +#define NBL_DSTAT_PTYPE_STAT_ETH1_REG(r) (NBL_DSTAT_PTYPE_STAT_ETH1_ADDR + \ + (NBL_DSTAT_PTYPE_STAT_ETH1_DWLEN * 4) * (r)) + +#define NBL_DSTAT_PTYPE_STAT_ETH2_ADDR (0x71d800) +#define NBL_DSTAT_PTYPE_STAT_ETH2_DEPTH (256) +#define NBL_DSTAT_PTYPE_STAT_ETH2_WIDTH (32) +#define NBL_DSTAT_PTYPE_STAT_ETH2_DWLEN (1) +union dstat_ptype_stat_eth2_u { + struct dstat_ptype_stat_eth2 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_PTYPE_STAT_ETH2_DWLEN]; +} __packed; +#define NBL_DSTAT_PTYPE_STAT_ETH2_REG(r) (NBL_DSTAT_PTYPE_STAT_ETH2_ADDR + \ + (NBL_DSTAT_PTYPE_STAT_ETH2_DWLEN * 4) * (r)) + +#define NBL_DSTAT_PTYPE_STAT_ETH3_ADDR (0x71dc00) +#define NBL_DSTAT_PTYPE_STAT_ETH3_DEPTH (256) +#define NBL_DSTAT_PTYPE_STAT_ETH3_WIDTH (32) +#define NBL_DSTAT_PTYPE_STAT_ETH3_DWLEN (1) +union dstat_ptype_stat_eth3_u { + struct dstat_ptype_stat_eth3 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_PTYPE_STAT_ETH3_DWLEN]; +} __packed; +#define NBL_DSTAT_PTYPE_STAT_ETH3_REG(r) (NBL_DSTAT_PTYPE_STAT_ETH3_ADDR + \ + (NBL_DSTAT_PTYPE_STAT_ETH3_DWLEN * 4) * (r)) + +#define NBL_DSTAT_PTYPE_STAT_LOOPBACK_ADDR (0x71e000) +#define NBL_DSTAT_PTYPE_STAT_LOOPBACK_DEPTH (256) +#define NBL_DSTAT_PTYPE_STAT_LOOPBACK_WIDTH (32) +#define NBL_DSTAT_PTYPE_STAT_LOOPBACK_DWLEN (1) +union dstat_ptype_stat_loopback_u { + struct dstat_ptype_stat_loopback { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_PTYPE_STAT_LOOPBACK_DWLEN]; +} __packed; +#define NBL_DSTAT_PTYPE_STAT_LOOPBACK_REG(r) (NBL_DSTAT_PTYPE_STAT_LOOPBACK_ADDR + \ + (NBL_DSTAT_PTYPE_STAT_LOOPBACK_DWLEN * 4) * (r)) + +#define NBL_DSTAT_VSI_STAT_ADDR (0x724000) +#define NBL_DSTAT_VSI_STAT_DEPTH (1024) +#define NBL_DSTAT_VSI_STAT_WIDTH (128) +#define NBL_DSTAT_VSI_STAT_DWLEN (4) +union dstat_vsi_stat_u { + struct dstat_vsi_stat { + u32 fwd_byte_cnt_low:32; /* [31:0] Default:0x0 RO */ + u32 fwd_byte_cnt_high:32; /* [63:32] Default:0x0 RO */ + u32 fwd_pkt_cnt_low:32; /* [95:64] Default:0x0 RO */ + u32 fwd_pkt_cnt_high:32; /* [127:96] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTAT_VSI_STAT_DWLEN]; +} __packed; +#define NBL_DSTAT_VSI_STAT_REG(r) (NBL_DSTAT_VSI_STAT_ADDR + \ + (NBL_DSTAT_VSI_STAT_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h new file mode 100644 index 0000000000000000000000000000000000000000..f9fe9a6177002e3d8789e876e0a970755424249d --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dstore.h @@ -0,0 +1,952 @@ +// Code generated by interstellar. DO NOT EDIT. 
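/*
 * Example (editor's sketch): the *_INT_STATUS bits defined just below are
 * RWC (write-1-to-clear), paired with an RW mask and a WO set register for
 * test injection. A minimal service routine reads the status and writes the
 * same value back to acknowledge it. `hw_base` and the helper are
 * illustrative assumptions, not the driver's real interrupt path.
 */
static inline u32 nbl_dstore_ack_int(void __iomem *hw_base)
{
	union dstore_int_status_u sts;

	sts.data[0] = readl(hw_base + NBL_DSTORE_INT_STATUS_ADDR);
	if (sts.data[0])	/* write 1s back to clear the latched bits */
		writel(sts.data[0], hw_base + NBL_DSTORE_INT_STATUS_ADDR);
	return sts.data[0];	/* caller inspects sts.info.* for the cause */
}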
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DSTORE_H +#define NBL_DSTORE_H 1 + +#include <linux/types.h> + +#define NBL_DSTORE_BASE (0x00704000) + +#define NBL_DSTORE_INT_STATUS_ADDR (0x704000) +#define NBL_DSTORE_INT_STATUS_DEPTH (1) +#define NBL_DSTORE_INT_STATUS_WIDTH (32) +#define NBL_DSTORE_INT_STATUS_DWLEN (1) +union dstore_int_status_u { + struct dstore_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 parity_err:1; /* [5] Default:0x0 RWC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DSTORE_INT_MASK_ADDR (0x704004) +#define NBL_DSTORE_INT_MASK_DEPTH (1) +#define NBL_DSTORE_INT_MASK_WIDTH (32) +#define NBL_DSTORE_INT_MASK_DWLEN (1) +union dstore_int_mask_u { + struct dstore_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 parity_err:1; /* [5] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DSTORE_INT_SET_ADDR (0x704008) +#define NBL_DSTORE_INT_SET_DEPTH (1) +#define NBL_DSTORE_INT_SET_WIDTH (32) +#define NBL_DSTORE_INT_SET_DWLEN (1) +union dstore_int_set_u { + struct dstore_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 parity_err:1; /* [5] Default:0x0 WO */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INT_SET_DWLEN]; +} __packed; + +#define NBL_DSTORE_COR_ERR_INFO_ADDR (0x70400c) +#define NBL_DSTORE_COR_ERR_INFO_DEPTH (1) +#define NBL_DSTORE_COR_ERR_INFO_WIDTH (32) +#define NBL_DSTORE_COR_ERR_INFO_DWLEN (1) +union dstore_cor_err_info_u { + struct dstore_cor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTORE_PARITY_ERR_INFO_ADDR (0x704014) +#define NBL_DSTORE_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DSTORE_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DSTORE_PARITY_ERR_INFO_DWLEN (1) +union dstore_parity_err_info_u { + struct dstore_parity_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTORE_CIF_ERR_INFO_ADDR (0x70401c) +#define NBL_DSTORE_CIF_ERR_INFO_DEPTH (1) +#define NBL_DSTORE_CIF_ERR_INFO_WIDTH (32) +#define NBL_DSTORE_CIF_ERR_INFO_DWLEN (1) +union dstore_cif_err_info_u { + struct dstore_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DSTORE_CAR_CTRL_ADDR (0x704100) +#define NBL_DSTORE_CAR_CTRL_DEPTH (1) +#define NBL_DSTORE_CAR_CTRL_WIDTH (32) +#define
NBL_DSTORE_CAR_CTRL_DWLEN (1) +union dstore_car_ctrl_u { + struct dstore_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DSTORE_INIT_START_ADDR (0x704104) +#define NBL_DSTORE_INIT_START_DEPTH (1) +#define NBL_DSTORE_INIT_START_WIDTH (32) +#define NBL_DSTORE_INIT_START_DWLEN (1) +union dstore_init_start_u { + struct dstore_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INIT_START_DWLEN]; +} __packed; + +#define NBL_DSTORE_PKT_LEN_ADDR (0x704108) +#define NBL_DSTORE_PKT_LEN_DEPTH (1) +#define NBL_DSTORE_PKT_LEN_WIDTH (32) +#define NBL_DSTORE_PKT_LEN_DWLEN (1) +union dstore_pkt_len_u { + struct dstore_pkt_len { + u32 min:7; /* [6:0] Default:60 RW */ + u32 rsv1:8; /* [14:7] Default:0x0 RO */ + u32 min_chk_en:1; /* [15] Default:0x0 RW */ + u32 max:14; /* [29:16] Default:9600 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 max_chk_en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_DSTORE_SCH_PD_BUFFER_TH_ADDR (0x704128) +#define NBL_DSTORE_SCH_PD_BUFFER_TH_DEPTH (1) +#define NBL_DSTORE_SCH_PD_BUFFER_TH_WIDTH (32) +#define NBL_DSTORE_SCH_PD_BUFFER_TH_DWLEN (1) +union dstore_sch_pd_buffer_th_u { + struct dstore_sch_pd_buffer_th { + u32 aful_th:9; /* [8:0] Default:500 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_PD_BUFFER_TH_DWLEN]; +} __packed; + +#define NBL_DSTORE_GLB_FC_TH_ADDR (0x70412c) +#define NBL_DSTORE_GLB_FC_TH_DEPTH (1) +#define NBL_DSTORE_GLB_FC_TH_WIDTH (32) +#define NBL_DSTORE_GLB_FC_TH_DWLEN (1) +union dstore_glb_fc_th_u { + struct dstore_glb_fc_th { + u32 xoff_th:10; /* [9:0] Default:900 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 xon_th:10; /* [25:16] Default:850 RW */ + u32 rsv:5; /* [30:26] Default:0x0 RO */ + u32 fc_en:1; /* [31:31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_GLB_FC_TH_DWLEN]; +} __packed; + +#define NBL_DSTORE_GLB_DROP_TH_ADDR (0x704130) +#define NBL_DSTORE_GLB_DROP_TH_DEPTH (1) +#define NBL_DSTORE_GLB_DROP_TH_WIDTH (32) +#define NBL_DSTORE_GLB_DROP_TH_DWLEN (1) +union dstore_glb_drop_th_u { + struct dstore_glb_drop_th { + u32 disc_th:10; /* [9:0] Default:985 RW */ + u32 rsv:21; /* [30:10] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_GLB_DROP_TH_DWLEN]; +} __packed; + +#define NBL_DSTORE_PORT_FC_TH_ADDR (0x704134) +#define NBL_DSTORE_PORT_FC_TH_DEPTH (6) +#define NBL_DSTORE_PORT_FC_TH_WIDTH (32) +#define NBL_DSTORE_PORT_FC_TH_DWLEN (1) +union dstore_port_fc_th_u { + struct dstore_port_fc_th { + u32 xoff_th:10; /* [9:0] Default:400 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 xon_th:10; /* [25:16] Default:400 RW */ + u32 rsv:4; /* [29:26] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_PORT_FC_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_PORT_FC_TH_REG(r) (NBL_DSTORE_PORT_FC_TH_ADDR + \ + (NBL_DSTORE_PORT_FC_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_PORT_DROP_TH_ADDR (0x704150) +#define NBL_DSTORE_PORT_DROP_TH_DEPTH (6) +#define NBL_DSTORE_PORT_DROP_TH_WIDTH (32) +#define NBL_DSTORE_PORT_DROP_TH_DWLEN (1) +union 
dstore_port_drop_th_u { + struct dstore_port_drop_th { + u32 disc_th:10; /* [9:0] Default:800 RW */ + u32 rsv:21; /* [30:10] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_DSTORE_PORT_DROP_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_PORT_DROP_TH_REG(r) (NBL_DSTORE_PORT_DROP_TH_ADDR + \ + (NBL_DSTORE_PORT_DROP_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_CFG_TEST_ADDR (0x704170) +#define NBL_DSTORE_CFG_TEST_DEPTH (1) +#define NBL_DSTORE_CFG_TEST_WIDTH (32) +#define NBL_DSTORE_CFG_TEST_DWLEN (1) +union dstore_cfg_test_u { + struct dstore_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_DSTORE_HIGH_PRI_PKT_ADDR (0x70417c) +#define NBL_DSTORE_HIGH_PRI_PKT_DEPTH (1) +#define NBL_DSTORE_HIGH_PRI_PKT_WIDTH (32) +#define NBL_DSTORE_HIGH_PRI_PKT_DWLEN (1) +union dstore_high_pri_pkt_u { + struct dstore_high_pri_pkt { + u32 en:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_HIGH_PRI_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_COS_FC_TH_ADDR (0x704200) +#define NBL_DSTORE_COS_FC_TH_DEPTH (48) +#define NBL_DSTORE_COS_FC_TH_WIDTH (32) +#define NBL_DSTORE_COS_FC_TH_DWLEN (1) +union dstore_cos_fc_th_u { + struct dstore_cos_fc_th { + u32 xoff_th:10; /* [9:0] Default:100 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 xon_th:10; /* [25:16] Default:100 RW */ + u32 rsv:4; /* [29:26] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_COS_FC_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_COS_FC_TH_REG(r) (NBL_DSTORE_COS_FC_TH_ADDR + \ + (NBL_DSTORE_COS_FC_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_COS_DROP_TH_ADDR (0x704300) +#define NBL_DSTORE_COS_DROP_TH_DEPTH (48) +#define NBL_DSTORE_COS_DROP_TH_WIDTH (32) +#define NBL_DSTORE_COS_DROP_TH_DWLEN (1) +union dstore_cos_drop_th_u { + struct dstore_cos_drop_th { + u32 disc_th:10; /* [9:0] Default:120 RW */ + u32 rsv:21; /* [30:10] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_COS_DROP_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_COS_DROP_TH_REG(r) (NBL_DSTORE_COS_DROP_TH_ADDR + \ + (NBL_DSTORE_COS_DROP_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_SCH_PD_WRR_WGT_ADDR (0x704400) +#define NBL_DSTORE_SCH_PD_WRR_WGT_DEPTH (36) +#define NBL_DSTORE_SCH_PD_WRR_WGT_WIDTH (32) +#define NBL_DSTORE_SCH_PD_WRR_WGT_DWLEN (1) +union dstore_sch_pd_wrr_wgt_u { + struct dstore_sch_pd_wrr_wgt { + u32 wgt_cos:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_PD_WRR_WGT_DWLEN]; +} __packed; +#define NBL_DSTORE_SCH_PD_WRR_WGT_REG(r) (NBL_DSTORE_SCH_PD_WRR_WGT_ADDR + \ + (NBL_DSTORE_SCH_PD_WRR_WGT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_COS7_FORCE_ADDR (0x704504) +#define NBL_DSTORE_COS7_FORCE_DEPTH (1) +#define NBL_DSTORE_COS7_FORCE_WIDTH (32) +#define NBL_DSTORE_COS7_FORCE_DWLEN (1) +union dstore_cos7_force_u { + struct dstore_cos7_force { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_COS7_FORCE_DWLEN]; +} __packed; + +#define NBL_DSTORE_D_DPORT_FC_TH_ADDR (0x704600) +#define NBL_DSTORE_D_DPORT_FC_TH_DEPTH (5) +#define NBL_DSTORE_D_DPORT_FC_TH_WIDTH (32) +#define NBL_DSTORE_D_DPORT_FC_TH_DWLEN (1) +union dstore_d_dport_fc_th_u { + struct dstore_d_dport_fc_th { + u32 xoff_th:11; /* [10:0] Default:200 RW 
*/ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 xon_th:11; /* [26:16] Default:100 RW */ + u32 rsv:3; /* [29:27] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DSTORE_D_DPORT_FC_TH_DWLEN]; +} __packed; +#define NBL_DSTORE_D_DPORT_FC_TH_REG(r) (NBL_DSTORE_D_DPORT_FC_TH_ADDR + \ + (NBL_DSTORE_D_DPORT_FC_TH_DWLEN * 4) * (r)) + +#define NBL_DSTORE_INIT_DONE_ADDR (0x704800) +#define NBL_DSTORE_INIT_DONE_DEPTH (1) +#define NBL_DSTORE_INIT_DONE_WIDTH (32) +#define NBL_DSTORE_INIT_DONE_DWLEN (1) +union dstore_init_done_u { + struct dstore_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_ADDR (0x70481c) +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_DEPTH (1) +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_WIDTH (32) +#define NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_DWLEN (1) +union dstore_sch_idle_list_status_curr_u { + struct dstore_sch_idle_list_status_curr { + u32 empt:1; /* [0] Default:0x0 RO */ + u32 full:1; /* [1] Default:0x1 RO */ + u32 cnt:10; /* [11:2] Default:0x200 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_IDLE_LIST_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_ADDR (0x704820) +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_DEPTH (48) +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_WIDTH (32) +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_DWLEN (1) +union dstore_sch_que_list_status_u { + struct dstore_sch_que_list_status { + u32 curr_empt:1; /* [0] Default:0x1 RO */ + u32 curr_cnt:10; /* [10:1] Default:0x0 RO */ + u32 history_udf:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_SCH_QUE_LIST_STATUS_DWLEN]; +} __packed; +#define NBL_DSTORE_SCH_QUE_LIST_STATUS_REG(r) (NBL_DSTORE_SCH_QUE_LIST_STATUS_ADDR + \ + (NBL_DSTORE_SCH_QUE_LIST_STATUS_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_TOTAL_PKT_ADDR (0x705050) +#define NBL_DSTORE_RCV_TOTAL_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_PKT_DWLEN (1) +union dstore_rcv_total_pkt_u { + struct dstore_rcv_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_BYTE_ADDR (0x705054) +#define NBL_DSTORE_RCV_TOTAL_BYTE_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_BYTE_WIDTH (48) +#define NBL_DSTORE_RCV_TOTAL_BYTE_DWLEN (2) +union dstore_rcv_total_byte_u { + struct dstore_rcv_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_ADDR (0x70505c) +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_DWLEN (1) +union dstore_rcv_total_right_pkt_u { + struct dstore_rcv_total_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_ADDR (0x705060) +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_WRONG_PKT_DWLEN (1) +union dstore_rcv_total_wrong_pkt_u { + struct dstore_rcv_total_wrong_pkt { + 
u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_ADDR (0x705064) +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_FWD_RIGHT_PKT_DWLEN (1) +union dstore_rcv_fwd_right_pkt_u { + struct dstore_rcv_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_FWD_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_ADDR (0x705068) +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_FWD_WRONG_PKT_DWLEN (1) +union dstore_rcv_fwd_wrong_pkt_u { + struct dstore_rcv_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_FWD_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_ADDR (0x70506c) +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_HERR_RIGHT_PKT_DWLEN (1) +union dstore_rcv_herr_right_pkt_u { + struct dstore_rcv_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_HERR_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_ADDR (0x705070) +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_HERR_WRONG_PKT_DWLEN (1) +union dstore_rcv_herr_wrong_pkt_u { + struct dstore_rcv_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_HERR_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_TOTAL_PKT_ADDR (0x705074) +#define NBL_DSTORE_IPRO_TOTAL_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_TOTAL_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_TOTAL_PKT_DWLEN (1) +union dstore_ipro_total_pkt_u { + struct dstore_ipro_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_TOTAL_BYTE_ADDR (0x705078) +#define NBL_DSTORE_IPRO_TOTAL_BYTE_DEPTH (1) +#define NBL_DSTORE_IPRO_TOTAL_BYTE_WIDTH (48) +#define NBL_DSTORE_IPRO_TOTAL_BYTE_DWLEN (2) +union dstore_ipro_total_byte_u { + struct dstore_ipro_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_ADDR (0x705080) +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_FWD_RIGHT_PKT_DWLEN (1) +union dstore_ipro_fwd_right_pkt_u { + struct dstore_ipro_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_FWD_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_ADDR (0x705084) +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_FWD_WRONG_PKT_DWLEN (1) +union dstore_ipro_fwd_wrong_pkt_u { + struct dstore_ipro_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_FWD_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_ADDR (0x705088) +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_HERR_RIGHT_PKT_DWLEN (1) 
+union dstore_ipro_herr_right_pkt_u { + struct dstore_ipro_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_HERR_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_ADDR (0x70508c) +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_DEPTH (1) +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_HERR_WRONG_PKT_DWLEN (1) +union dstore_ipro_herr_wrong_pkt_u { + struct dstore_ipro_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_HERR_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_PMEM_TOTAL_PKT_ADDR (0x705090) +#define NBL_DSTORE_PMEM_TOTAL_PKT_DEPTH (1) +#define NBL_DSTORE_PMEM_TOTAL_PKT_WIDTH (32) +#define NBL_DSTORE_PMEM_TOTAL_PKT_DWLEN (1) +union dstore_pmem_total_pkt_u { + struct dstore_pmem_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_PMEM_TOTAL_BYTE_ADDR (0x705094) +#define NBL_DSTORE_PMEM_TOTAL_BYTE_DEPTH (1) +#define NBL_DSTORE_PMEM_TOTAL_BYTE_WIDTH (48) +#define NBL_DSTORE_PMEM_TOTAL_BYTE_DWLEN (2) +union dstore_pmem_total_byte_u { + struct dstore_pmem_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_ADDR (0x70509c) +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_DWLEN (1) +union dstore_rcv_total_err_drop_pkt_u { + struct dstore_rcv_total_err_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_ERR_DROP_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_ADDR (0x7050a0) +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_SHORT_PKT_DWLEN (1) +union dstore_rcv_total_short_pkt_u { + struct dstore_rcv_total_short_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_SHORT_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_ADDR (0x7050a4) +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_DEPTH (1) +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_TOTAL_LONG_PKT_DWLEN (1) +union dstore_rcv_total_long_pkt_u { + struct dstore_rcv_total_long_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_TOTAL_LONG_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_ADDR (0x7050a8) +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_DEPTH (1) +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_TOTAL_DROP_PKT_DWLEN (1) +union dstore_buf_total_drop_pkt_u { + struct dstore_buf_total_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_TOTAL_DROP_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_ADDR (0x7050ac) +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_DEPTH (1) +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_TOTAL_TRUN_PKT_DWLEN (1) +union dstore_buf_total_trun_pkt_u { + struct dstore_buf_total_trun_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_TOTAL_TRUN_PKT_DWLEN]; +} __packed; + +#define NBL_DSTORE_RCV_PORT_PKT_ADDR (0x706000) +#define 
NBL_DSTORE_RCV_PORT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_PKT_DWLEN (1) +union dstore_rcv_port_pkt_u { + struct dstore_rcv_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_BYTE_ADDR (0x706040) +#define NBL_DSTORE_RCV_PORT_BYTE_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_BYTE_WIDTH (48) +#define NBL_DSTORE_RCV_PORT_BYTE_DWLEN (2) +union dstore_rcv_port_byte_u { + struct dstore_rcv_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_BYTE_REG(r) (NBL_DSTORE_RCV_PORT_BYTE_ADDR + \ + (NBL_DSTORE_RCV_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_ADDR (0x7060c0) +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN (1) +union dstore_rcv_port_total_right_pkt_u { + struct dstore_rcv_port_total_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_ADDR (0x706100) +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN (1) +union dstore_rcv_port_total_wrong_pkt_u { + struct dstore_rcv_port_total_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_ADDR (0x706140) +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN (1) +union dstore_rcv_port_fwd_right_pkt_u { + struct dstore_rcv_port_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_ADDR (0x706180) +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN (1) +union dstore_rcv_port_fwd_wrong_pkt_u { + struct dstore_rcv_port_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_ADDR (0x7061c0) +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN 
(1) +union dstore_rcv_port_herr_right_pkt_u { + struct dstore_rcv_port_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_REG(r) (NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_ADDR (0x706200) +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN (1) +union dstore_rcv_port_herr_wrong_pkt_u { + struct dstore_rcv_port_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_IPRO_PORT_PKT_ADDR (0x706240) +#define NBL_DSTORE_IPRO_PORT_PKT_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_PORT_PKT_DWLEN (1) +union dstore_ipro_port_pkt_u { + struct dstore_ipro_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_PKT_REG(r) (NBL_DSTORE_IPRO_PORT_PKT_ADDR + \ + (NBL_DSTORE_IPRO_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_IPRO_PORT_BYTE_ADDR (0x706280) +#define NBL_DSTORE_IPRO_PORT_BYTE_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_BYTE_WIDTH (48) +#define NBL_DSTORE_IPRO_PORT_BYTE_DWLEN (2) +union dstore_ipro_port_byte_u { + struct dstore_ipro_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_BYTE_REG(r) (NBL_DSTORE_IPRO_PORT_BYTE_ADDR + \ + (NBL_DSTORE_IPRO_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_ADDR (0x706300) +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN (1) +union dstore_ipro_port_fwd_right_pkt_u { + struct dstore_ipro_port_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_REG(r) (NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_ADDR + \ + (NBL_DSTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_ADDR (0x706340) +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DEPTH (12) +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_WIDTH (32) +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN (1) +union dstore_ipro_port_fwd_wrong_pkt_u { + struct dstore_ipro_port_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_REG(r) (NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_ADDR + \ + (NBL_DSTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_PMEM_PORT_PKT_ADDR (0x706380) +#define NBL_DSTORE_PMEM_PORT_PKT_DEPTH (12) +#define NBL_DSTORE_PMEM_PORT_PKT_WIDTH (32) +#define NBL_DSTORE_PMEM_PORT_PKT_DWLEN (1) +union dstore_pmem_port_pkt_u { + struct dstore_pmem_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_PORT_PKT_DWLEN]; +} 
__packed; +#define NBL_DSTORE_PMEM_PORT_PKT_REG(r) (NBL_DSTORE_PMEM_PORT_PKT_ADDR + \ + (NBL_DSTORE_PMEM_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_PMEM_PORT_BYTE_ADDR (0x7063c0) +#define NBL_DSTORE_PMEM_PORT_BYTE_DEPTH (12) +#define NBL_DSTORE_PMEM_PORT_BYTE_WIDTH (48) +#define NBL_DSTORE_PMEM_PORT_BYTE_DWLEN (2) +union dstore_pmem_port_byte_u { + struct dstore_pmem_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_DSTORE_PMEM_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_DSTORE_PMEM_PORT_BYTE_REG(r) (NBL_DSTORE_PMEM_PORT_BYTE_ADDR + \ + (NBL_DSTORE_PMEM_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_ADDR (0x706440) +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DWLEN (1) +union dstore_rcv_err_port_drop_pkt_u { + struct dstore_rcv_err_port_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_REG(r) (NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_ADDR + \ + (NBL_DSTORE_RCV_ERR_PORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_ADDR (0x706480) +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN (1) +union dstore_rcv_port_short_drop_pkt_u { + struct dstore_rcv_port_short_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_REG(r) (NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_RCV_PORT_LONG_PKT_ADDR (0x7064c0) +#define NBL_DSTORE_RCV_PORT_LONG_PKT_DEPTH (12) +#define NBL_DSTORE_RCV_PORT_LONG_PKT_WIDTH (32) +#define NBL_DSTORE_RCV_PORT_LONG_PKT_DWLEN (1) +union dstore_rcv_port_long_pkt_u { + struct dstore_rcv_port_long_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_RCV_PORT_LONG_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_RCV_PORT_LONG_PKT_REG(r) (NBL_DSTORE_RCV_PORT_LONG_PKT_ADDR + \ + (NBL_DSTORE_RCV_PORT_LONG_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BUF_PORT_DROP_PKT_ADDR (0x706500) +#define NBL_DSTORE_BUF_PORT_DROP_PKT_DEPTH (12) +#define NBL_DSTORE_BUF_PORT_DROP_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_PORT_DROP_PKT_DWLEN (1) +union dstore_buf_port_drop_pkt_u { + struct dstore_buf_port_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_PORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_BUF_PORT_DROP_PKT_REG(r) (NBL_DSTORE_BUF_PORT_DROP_PKT_ADDR + \ + (NBL_DSTORE_BUF_PORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_ADDR (0x706540) +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_DEPTH (12) +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_WIDTH (32) +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_DWLEN (1) +union dstore_buf_port_trun_pkt_u { + struct dstore_buf_port_trun_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_DSTORE_BUF_PORT_TRUN_PKT_DWLEN]; +} __packed; +#define NBL_DSTORE_BUF_PORT_TRUN_PKT_REG(r) (NBL_DSTORE_BUF_PORT_TRUN_PKT_ADDR + \ + (NBL_DSTORE_BUF_PORT_TRUN_PKT_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BP_CUR_1ST_ADDR (0x706580) +#define NBL_DSTORE_BP_CUR_1ST_DEPTH (1) +#define 
NBL_DSTORE_BP_CUR_1ST_WIDTH (32) +#define NBL_DSTORE_BP_CUR_1ST_DWLEN (1) +union dstore_bp_cur_1st_u { + struct dstore_bp_cur_1st { + u32 link_fc:6; /* [5:0] Default:0x0 RO */ + u32 rsv:2; /* [7:6] Default:0x0 RO */ + u32 pfc:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_CUR_1ST_DWLEN]; +} __packed; + +#define NBL_DSTORE_BP_CUR_2ND_ADDR (0x706584) +#define NBL_DSTORE_BP_CUR_2ND_DEPTH (1) +#define NBL_DSTORE_BP_CUR_2ND_WIDTH (32) +#define NBL_DSTORE_BP_CUR_2ND_DWLEN (1) +union dstore_bp_cur_2nd_u { + struct dstore_bp_cur_2nd { + u32 pfc:24; /* [23:0] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_CUR_2ND_DWLEN]; +} __packed; + +#define NBL_DSTORE_BP_HISTORY_LINK_ADDR (0x706590) +#define NBL_DSTORE_BP_HISTORY_LINK_DEPTH (6) +#define NBL_DSTORE_BP_HISTORY_LINK_WIDTH (32) +#define NBL_DSTORE_BP_HISTORY_LINK_DWLEN (1) +union dstore_bp_history_link_u { + struct dstore_bp_history_link { + u32 fc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_HISTORY_LINK_DWLEN]; +} __packed; +#define NBL_DSTORE_BP_HISTORY_LINK_REG(r) (NBL_DSTORE_BP_HISTORY_LINK_ADDR + \ + (NBL_DSTORE_BP_HISTORY_LINK_DWLEN * 4) * (r)) + +#define NBL_DSTORE_BP_HISTORY_ADDR (0x7065b0) +#define NBL_DSTORE_BP_HISTORY_DEPTH (48) +#define NBL_DSTORE_BP_HISTORY_WIDTH (32) +#define NBL_DSTORE_BP_HISTORY_DWLEN (1) +union dstore_bp_history_u { + struct dstore_bp_history { + u32 pfc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_BP_HISTORY_DWLEN]; +} __packed; +#define NBL_DSTORE_BP_HISTORY_REG(r) (NBL_DSTORE_BP_HISTORY_ADDR + \ + (NBL_DSTORE_BP_HISTORY_DWLEN * 4) * (r)) + +#define NBL_DSTORE_WRR_CUR_ADDR (0x706800) +#define NBL_DSTORE_WRR_CUR_DEPTH (36) +#define NBL_DSTORE_WRR_CUR_WIDTH (32) +#define NBL_DSTORE_WRR_CUR_DWLEN (1) +union dstore_wrr_cur_u { + struct dstore_wrr_cur { + u32 wgt_cos:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_WRR_CUR_DWLEN]; +} __packed; +#define NBL_DSTORE_WRR_CUR_REG(r) (NBL_DSTORE_WRR_CUR_ADDR + \ + (NBL_DSTORE_WRR_CUR_DWLEN * 4) * (r)) + +#define NBL_DSTORE_DDPORT_CUR_ADDR (0x707018) +#define NBL_DSTORE_DDPORT_CUR_DEPTH (1) +#define NBL_DSTORE_DDPORT_CUR_WIDTH (32) +#define NBL_DSTORE_DDPORT_CUR_DWLEN (1) +union dstore_ddport_cur_u { + struct dstore_ddport_cur { + u32 link_fc:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_DDPORT_CUR_DWLEN]; +} __packed; + +#define NBL_DSTORE_DDPORT_HISTORY_ADDR (0x70701c) +#define NBL_DSTORE_DDPORT_HISTORY_DEPTH (5) +#define NBL_DSTORE_DDPORT_HISTORY_WIDTH (32) +#define NBL_DSTORE_DDPORT_HISTORY_DWLEN (1) +union dstore_ddport_history_u { + struct dstore_ddport_history { + u32 link_fc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DSTORE_DDPORT_HISTORY_DWLEN]; +} __packed; +#define NBL_DSTORE_DDPORT_HISTORY_REG(r) (NBL_DSTORE_DDPORT_HISTORY_ADDR + \ + (NBL_DSTORE_DDPORT_HISTORY_DWLEN * 4) * (r)) + +#define NBL_DSTORE_DDPORT_RSC_ADD_ADDR (0x707050) +#define NBL_DSTORE_DDPORT_RSC_ADD_DEPTH (5) +#define NBL_DSTORE_DDPORT_RSC_ADD_WIDTH (32) +#define NBL_DSTORE_DDPORT_RSC_ADD_DWLEN (1) +union dstore_ddport_rsc_add_u { + struct dstore_ddport_rsc_add { + u32 cnt:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DSTORE_DDPORT_RSC_ADD_DWLEN]; +} __packed; +#define NBL_DSTORE_DDPORT_RSC_ADD_REG(r) (NBL_DSTORE_DDPORT_RSC_ADD_ADDR + \ + (NBL_DSTORE_DDPORT_RSC_ADD_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dvn.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dvn.h new file mode 100644 index 0000000000000000000000000000000000000000..b4665f15469ab102980f5b2f4376313f79107943 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_dvn.h @@ -0,0 +1,1188 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DVN_H +#define NBL_DVN_H 1 + +#include <linux/types.h> + +#define NBL_DVN_BASE (0x00514000) + +#define NBL_DVN_INT_STATUS_ADDR (0x514000) +#define NBL_DVN_INT_STATUS_DEPTH (1) +#define NBL_DVN_INT_STATUS_WIDTH (32) +#define NBL_DVN_INT_STATUS_DWLEN (1) +union dvn_int_status_u { + struct dvn_int_status { + u32 ecc_ucor_fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 queue_config_err:1; /* [3] Default:0x0 RWC */ + u32 pkt_dif_err:1; /* [4] Default:0x0 RWC */ + u32 desc_dif_err:1; /* [5] Default:0x0 RWC */ + u32 lcmg_rc_err:1; /* [6] Default:0x0 RWC */ + u32 ecc_ucor_normal_err:1; /* [7] Default:0x0 RWC */ + u32 ecc_cor_err:1; /* [8] Default:0x0 RWC */ + u32 parity_err:1; /* [9] Default:0x0 RWC */ + u32 cif_err:1; /* [10] Default:0x0 RWC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DVN_INT_MASK_ADDR (0x514004) +#define NBL_DVN_INT_MASK_DEPTH (1) +#define NBL_DVN_INT_MASK_WIDTH (32) +#define NBL_DVN_INT_MASK_DWLEN (1) +union dvn_int_mask_u { + struct dvn_int_mask { + u32 ecc_ucor_fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 queue_config_err:1; /* [3] Default:0x0 RW */ + u32 pkt_dif_err:1; /* [4] Default:0x0 RW */ + u32 desc_dif_err:1; /* [5] Default:0x0 RW */ + u32 lcmg_rc_err:1; /* [6] Default:0x0 RW */ + u32 ecc_ucor_normal_err:1; /* [7] Default:0x0 RW */ + u32 ecc_cor_err:1; /* [8] Default:0x0 RW */ + u32 parity_err:1; /* [9] Default:0x0 RW */ + u32 cif_err:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DVN_INT_SET_ADDR (0x514008) +#define NBL_DVN_INT_SET_DEPTH (1) +#define NBL_DVN_INT_SET_WIDTH (32) +#define NBL_DVN_INT_SET_DWLEN (1) +union dvn_int_set_u { + struct dvn_int_set { + u32 ecc_ucor_fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 queue_config_err:1; /* [3] Default:0x0 WO */ + u32 pkt_dif_err:1; /* [4] Default:0x0 WO */ + u32 desc_dif_err:1; /* [5] Default:0x0 WO */ + u32 lcmg_rc_err:1; /* [6] Default:0x0 WO */ + u32 ecc_ucor_normal_err:1; /* [7] Default:0x0 WO */ + u32 ecc_cor_err:1; /* [8] Default:0x0 WO */ + u32 parity_err:1; /* [9] Default:0x0 WO */ + u32 cif_err:1; /* [10] Default:0x0 WO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_INT_SET_DWLEN]; +} __packed; + +#define NBL_DVN_INIT_DONE_ADDR (0x51400c) +#define NBL_DVN_INIT_DONE_DEPTH (1) +#define NBL_DVN_INIT_DONE_WIDTH (32) +#define NBL_DVN_INIT_DONE_DWLEN (1) +union dvn_init_done_u { + struct dvn_init_done { + u32 done:1; /* [0] 
Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DVN_QUEUE_CONFIG_ERR_INFO_ADDR (0x514028) +#define NBL_DVN_QUEUE_CONFIG_ERR_INFO_DEPTH (1) +#define NBL_DVN_QUEUE_CONFIG_ERR_INFO_WIDTH (32) +#define NBL_DVN_QUEUE_CONFIG_ERR_INFO_DWLEN (1) +union dvn_queue_config_err_info_u { + struct dvn_queue_config_err_info { + u32 queue_id:16; /* [15:0] Default:0x0 RO */ + u32 err_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUE_CONFIG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DVN_PKT_DIF_ERR_INFO_ADDR (0x514030) +#define NBL_DVN_PKT_DIF_ERR_INFO_DEPTH (1) +#define NBL_DVN_PKT_DIF_ERR_INFO_WIDTH (32) +#define NBL_DVN_PKT_DIF_ERR_INFO_DWLEN (1) +union dvn_pkt_dif_err_info_u { + struct dvn_pkt_dif_err_info { + u32 queue_id:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PKT_DIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DVN_DESC_DIF_ERR_INFO_ADDR (0x514038) +#define NBL_DVN_DESC_DIF_ERR_INFO_DEPTH (1) +#define NBL_DVN_DESC_DIF_ERR_INFO_WIDTH (32) +#define NBL_DVN_DESC_DIF_ERR_INFO_DWLEN (1) +union dvn_desc_dif_err_info_u { + struct dvn_desc_dif_err_info { + u32 queue_id:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESC_DIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DVN_PARITY_ERR_INFO_ADDR (0x514054) +#define NBL_DVN_PARITY_ERR_INFO_DEPTH (1) +#define NBL_DVN_PARITY_ERR_INFO_WIDTH (32) +#define NBL_DVN_PARITY_ERR_INFO_DWLEN (1) +union dvn_parity_err_info_u { + struct dvn_parity_err_info { + u32 ram_addr:16; /* [15:0] Default:0x0 RO */ + u32 ram_id:8; /* [23:16] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DVN_CIF_ERR_INFO_ADDR (0x51405c) +#define NBL_DVN_CIF_ERR_INFO_DEPTH (1) +#define NBL_DVN_CIF_ERR_INFO_WIDTH (32) +#define NBL_DVN_CIF_ERR_INFO_DWLEN (1) +union dvn_cif_err_info_u { + struct dvn_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DVN_INT_DEBUG_ADDR (0x514064) +#define NBL_DVN_INT_DEBUG_DEPTH (1) +#define NBL_DVN_INT_DEBUG_WIDTH (32) +#define NBL_DVN_INT_DEBUG_DWLEN (1) +union dvn_int_debug_u { + struct dvn_int_debug { + u32 int_flag:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_INT_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_CAR_CTRL_ADDR (0x514100) +#define NBL_DVN_CAR_CTRL_DEPTH (1) +#define NBL_DVN_CAR_CTRL_WIDTH (32) +#define NBL_DVN_CAR_CTRL_DWLEN (1) +union dvn_car_ctrl_u { + struct dvn_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DVN_INIT_START_ADDR (0x514104) +#define NBL_DVN_INIT_START_DEPTH (1) +#define NBL_DVN_INIT_START_WIDTH (32) +#define NBL_DVN_INIT_START_DWLEN (1) +union dvn_init_start_u { + struct dvn_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_INIT_START_DWLEN]; +} __packed; + 
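+/*
+ * Usage sketch: every register in this file pairs a raw dword view,
+ * .data[], with a decoded bit-field view, .info. A minimal init
+ * handshake against the two registers above might look as follows;
+ * `hw_addr` is a hypothetical ioremap()ed BAR pointer used here for
+ * illustration, and the driver's real register accessor may differ.
+ *
+ *	union dvn_init_start_u start = { .info.init_start = 1 };
+ *	union dvn_init_done_u done;
+ *
+ *	writel(start.data[0], hw_addr + NBL_DVN_INIT_START_ADDR);
+ *	do {
+ *		done.data[0] = readl(hw_addr + NBL_DVN_INIT_DONE_ADDR);
+ *	} while (!done.info.done);
+ */
+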
+#define NBL_DVN_QUEUE_RESET_ADDR (0x514400) +#define NBL_DVN_QUEUE_RESET_DEPTH (1) +#define NBL_DVN_QUEUE_RESET_WIDTH (32) +#define NBL_DVN_QUEUE_RESET_DWLEN (1) +union dvn_queue_reset_u { + struct dvn_queue_reset { + u32 dvn_queue_index:11; /* [10:0] Default:0x0 RW */ + u32 vld:1; /* [11] Default:0x0 WO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUE_RESET_DWLEN]; +} __packed; + +#define NBL_DVN_QUEUE_RESET_DONE_ADDR (0x514404) +#define NBL_DVN_QUEUE_RESET_DONE_DEPTH (1) +#define NBL_DVN_QUEUE_RESET_DONE_WIDTH (32) +#define NBL_DVN_QUEUE_RESET_DONE_DWLEN (1) +union dvn_queue_reset_done_u { + struct dvn_queue_reset_done { + u32 flag:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUE_RESET_DONE_DWLEN]; +} __packed; + +#define NBL_DVN_CONTROL_STATUS_ADDR (0x514408) +#define NBL_DVN_CONTROL_STATUS_DEPTH (1) +#define NBL_DVN_CONTROL_STATUS_WIDTH (32) +#define NBL_DVN_CONTROL_STATUS_DWLEN (1) +union dvn_control_status_u { + struct dvn_control_status { + u32 dvn_stat_initdone:1; /* [0] Default:0x0 RO */ + u32 dvn_qcmg_initdone:1; /* [1] Default:0x0 RO */ + u32 dvn_lcmg_initdone:1; /* [2] Default:0x0 RO */ + u32 dvn_dblen_initdone:1; /* [3] Default:0x0 RO */ + u32 dvn_pr2_cfg_init_done:1; /* [4] Default:0x0 RO */ + u32 dvn_pr1_cfg_init_done:1; /* [5] Default:0x0 RO */ + u32 descrd_init_done:1; /* [6] Default:0x0 RO */ + u32 descwr_init_done:1; /* [7] Default:0x0 RO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_CONTROL_STATUS_DWLEN]; +} __packed; + +#define NBL_DVN_ERR_QUEUE_ID_GET_ADDR (0x51440c) +#define NBL_DVN_ERR_QUEUE_ID_GET_DEPTH (1) +#define NBL_DVN_ERR_QUEUE_ID_GET_WIDTH (32) +#define NBL_DVN_ERR_QUEUE_ID_GET_DWLEN (1) +union dvn_err_queue_id_get_u { + struct dvn_err_queue_id_get { + u32 pkt_flag:1; /* [0] Default:0x0 WO */ + u32 desc_flag:1; /* [1] Default:0x0 WO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_ERR_QUEUE_ID_GET_DWLEN]; +} __packed; + +#define NBL_DVN_QUEUEID_DEBUG_ADDR (0x514414) +#define NBL_DVN_QUEUEID_DEBUG_DEPTH (1) +#define NBL_DVN_QUEUEID_DEBUG_WIDTH (32) +#define NBL_DVN_QUEUEID_DEBUG_DWLEN (1) +union dvn_queueid_debug_u { + struct dvn_queueid_debug { + u32 dvn_queueid_debug:11; /* [10:0] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUEID_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_QUEUE_STAT_CLR_ADDR (0x514418) +#define NBL_DVN_QUEUE_STAT_CLR_DEPTH (1) +#define NBL_DVN_QUEUE_STAT_CLR_WIDTH (32) +#define NBL_DVN_QUEUE_STAT_CLR_DWLEN (1) +union dvn_queue_stat_clr_u { + struct dvn_queue_stat_clr { + u32 queue_id:11; /* [10:0] Default:0x0 RW */ + u32 vld:1; /* [11] Default:0x0 WO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUE_STAT_CLR_DWLEN]; +} __packed; + +#define NBL_DVN_ECPU_QUEUE_NUM_ADDR (0x51441c) +#define NBL_DVN_ECPU_QUEUE_NUM_DEPTH (1) +#define NBL_DVN_ECPU_QUEUE_NUM_WIDTH (32) +#define NBL_DVN_ECPU_QUEUE_NUM_DWLEN (1) +union dvn_ecpu_queue_num_u { + struct dvn_ecpu_queue_num { + u32 ecpu_queue_number:9; /* [8:0] Default:0x20 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_ECPU_QUEUE_NUM_DWLEN]; +} __packed; + +#define NBL_DVN_PKT_LEN_CFG_ADDR (0x514420) +#define NBL_DVN_PKT_LEN_CFG_DEPTH (1) +#define NBL_DVN_PKT_LEN_CFG_WIDTH (32) +#define NBL_DVN_PKT_LEN_CFG_DWLEN (1) +union dvn_pkt_len_cfg_u { + struct dvn_pkt_len_cfg { + u32 max_pkt_len:17; /* 
[16:0] Default:0x10049 RW */ + u32 rsv:7; /* [23:17] Default:0x0 RO */ + u32 min_pkt_len:8; /* [31:24] Default:0xd RW */ + } __packed info; + u32 data[NBL_DVN_PKT_LEN_CFG_DWLEN]; +} __packed; + +#define NBL_DVN_MTU_CFG_ADDR (0x514424) +#define NBL_DVN_MTU_CFG_DEPTH (1) +#define NBL_DVN_MTU_CFG_WIDTH (32) +#define NBL_DVN_MTU_CFG_DWLEN (1) +union dvn_mtu_cfg_u { + struct dvn_mtu_cfg { + u32 mtu_cfg:16; /* [15:0] Default:0x259a RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_MTU_CFG_DWLEN]; +} __packed; + +#define NBL_DVN_TCP_FLAG_MASK1_ADDR (0x514428) +#define NBL_DVN_TCP_FLAG_MASK1_DEPTH (1) +#define NBL_DVN_TCP_FLAG_MASK1_WIDTH (32) +#define NBL_DVN_TCP_FLAG_MASK1_DWLEN (1) +union dvn_tcp_flag_mask1_u { + struct dvn_tcp_flag_mask1 { + u32 tcpflag_first_mask:12; /* [11:0] Default:0xff6 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 tcpflag_mid_mask:12; /* [27:16] Default:0xff6 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_TCP_FLAG_MASK1_DWLEN]; +} __packed; + +#define NBL_DVN_TCP_FLAG_MASK2_ADDR (0x51442c) +#define NBL_DVN_TCP_FLAG_MASK2_DEPTH (1) +#define NBL_DVN_TCP_FLAG_MASK2_WIDTH (32) +#define NBL_DVN_TCP_FLAG_MASK2_DWLEN (1) +union dvn_tcp_flag_mask2_u { + struct dvn_tcp_flag_mask2 { + u32 tcpflag_last_mask:12; /* [11:0] Default:0xf7f RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_TCP_FLAG_MASK2_DWLEN]; +} __packed; + +#define NBL_DVN_DESCREQ_NUM_CFG_ADDR (0x514430) +#define NBL_DVN_DESCREQ_NUM_CFG_DEPTH (1) +#define NBL_DVN_DESCREQ_NUM_CFG_WIDTH (32) +#define NBL_DVN_DESCREQ_NUM_CFG_DWLEN (1) +union dvn_descreq_num_cfg_u { + struct dvn_descreq_num_cfg { + u32 avring_cfg_num:1; /* [0] Default:0x0 RW */ + u32 rsv1:3; /* [3:1] Default:0x0 RO */ + u32 packed_l1_num:3; /* [6:4] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESCREQ_NUM_CFG_DWLEN]; +} __packed; + +#define NBL_DVN_DESC_PADPT_ERR_INFO_ADDR (0x514438) +#define NBL_DVN_DESC_PADPT_ERR_INFO_DEPTH (1) +#define NBL_DVN_DESC_PADPT_ERR_INFO_WIDTH (32) +#define NBL_DVN_DESC_PADPT_ERR_INFO_DWLEN (1) +union dvn_desc_padpt_err_info_u { + struct dvn_desc_padpt_err_info { + u32 desc_padpt_err_queue_id_reg:11; /* [10:0] Default:0x0 RO */ + u32 desc_padpt_err_queue_id_vld:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESC_PADPT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DVN_PKT_PADPT_ERR_INFO_ADDR (0x51443c) +#define NBL_DVN_PKT_PADPT_ERR_INFO_DEPTH (1) +#define NBL_DVN_PKT_PADPT_ERR_INFO_WIDTH (32) +#define NBL_DVN_PKT_PADPT_ERR_INFO_DWLEN (1) +union dvn_pkt_padpt_err_info_u { + struct dvn_pkt_padpt_err_info { + u32 pkt_padpt_err_queue_id_reg:11; /* [10:0] Default:0x0 RO */ + u32 pkt_padpt_err_queue_id_vld:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PKT_PADPT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DVN_QUEUE_TOTAL_NUM_ADDR (0x514444) +#define NBL_DVN_QUEUE_TOTAL_NUM_DEPTH (1) +#define NBL_DVN_QUEUE_TOTAL_NUM_WIDTH (32) +#define NBL_DVN_QUEUE_TOTAL_NUM_DWLEN (1) +union dvn_queue_total_num_u { + struct dvn_queue_total_num { + u32 cfg_queue_num:12; /* [11:0] Default:0x800 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUE_TOTAL_NUM_DWLEN]; +} __packed; + +#define NBL_DVN_DIF_REQ_RD_DESC_ADDR (0x514448) +#define NBL_DVN_DIF_REQ_RD_DESC_DEPTH (1) +#define NBL_DVN_DIF_REQ_RD_DESC_WIDTH (32) 
+#define NBL_DVN_DIF_REQ_RD_DESC_DWLEN (1) +union dvn_dif_req_rd_desc_u { + struct dvn_dif_req_rd_desc { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DIF_REQ_RD_DESC_DWLEN]; +} __packed; + +#define NBL_DVN_DIF_REQ_WR_DESC_ADDR (0x51444c) +#define NBL_DVN_DIF_REQ_WR_DESC_DEPTH (1) +#define NBL_DVN_DIF_REQ_WR_DESC_WIDTH (32) +#define NBL_DVN_DIF_REQ_WR_DESC_DWLEN (1) +union dvn_dif_req_wr_desc_u { + struct dvn_dif_req_wr_desc { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DIF_REQ_WR_DESC_DWLEN]; +} __packed; + +#define NBL_DVN_DIF_REQ_RD_DATA_ADDR (0x514450) +#define NBL_DVN_DIF_REQ_RD_DATA_DEPTH (1) +#define NBL_DVN_DIF_REQ_RD_DATA_WIDTH (32) +#define NBL_DVN_DIF_REQ_RD_DATA_DWLEN (1) +union dvn_dif_req_rd_data_u { + struct dvn_dif_req_rd_data { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DIF_REQ_RD_DATA_DWLEN]; +} __packed; + +#define NBL_DVN_DIF_REQ_WR_DATA_ADDR (0x514454) +#define NBL_DVN_DIF_REQ_WR_DATA_DEPTH (1) +#define NBL_DVN_DIF_REQ_WR_DATA_WIDTH (32) +#define NBL_DVN_DIF_REQ_WR_DATA_DWLEN (1) +union dvn_dif_req_wr_data_u { + struct dvn_dif_req_wr_data { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DIF_REQ_WR_DATA_DWLEN]; +} __packed; + +#define NBL_DVN_QUEUE_STAT_CLR_DONE_ADDR (0x514458) +#define NBL_DVN_QUEUE_STAT_CLR_DONE_DEPTH (1) +#define NBL_DVN_QUEUE_STAT_CLR_DONE_WIDTH (32) +#define NBL_DVN_QUEUE_STAT_CLR_DONE_DWLEN (1) +union dvn_queue_stat_clr_done_u { + struct dvn_queue_stat_clr_done { + u32 flag:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUE_STAT_CLR_DONE_DWLEN]; +} __packed; + +#define NBL_DVN_DIF_REQ_RD_RO_FLAG_ADDR (0x51445c) +#define NBL_DVN_DIF_REQ_RD_RO_FLAG_DEPTH (1) +#define NBL_DVN_DIF_REQ_RD_RO_FLAG_WIDTH (32) +#define NBL_DVN_DIF_REQ_RD_RO_FLAG_DWLEN (1) +union dvn_dif_req_rd_ro_flag_u { + struct dvn_dif_req_rd_ro_flag { + u32 rd_desc_ro_en:1; /* [0] Default:0x0 RW */ + u32 rd_data_ro_en:1; /* [1] Default:0x0 RW */ + u32 rd_avring_ro_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DIF_REQ_RD_RO_FLAG_DWLEN]; +} __packed; + +#define NBL_DVN_SPLIT_MODE_CFG_PARA_ADDR (0x514460) +#define NBL_DVN_SPLIT_MODE_CFG_PARA_DEPTH (1) +#define NBL_DVN_SPLIT_MODE_CFG_PARA_WIDTH (32) +#define NBL_DVN_SPLIT_MODE_CFG_PARA_DWLEN (1) +union dvn_split_mode_cfg_para_u { + struct dvn_split_mode_cfg_para { + u32 req_one_desc_flag:1; /* [0] Default:0x0 RW */ + u32 req_entry_num:5; /* [5:1] Default:0x1 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_SPLIT_MODE_CFG_PARA_DWLEN]; +} __packed; + +#define NBL_DVN_BACK_PRESSURE_MASK_ADDR (0x514464) +#define NBL_DVN_BACK_PRESSURE_MASK_DEPTH (1) +#define NBL_DVN_BACK_PRESSURE_MASK_WIDTH (32) +#define NBL_DVN_BACK_PRESSURE_MASK_DWLEN (1) +union dvn_back_pressure_mask_u { + struct dvn_back_pressure_mask { + u32 l4s_flag:1; /* [0] Default:0x0 RW */ + u32 dsch_flag:1; /* [1] Default:0x0 RW */ + u32 dstore_port0_flag:1; /* [2] Default:0x0 RW */ + u32 dstore_port1_flag:1; /* [3] Default:0x0 RW */ + u32 dstore_port2_flag:1; 
/* [4] Default:0x0 RW */ + u32 dstore_port3_flag:1; /* [5] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_BACK_PRESSURE_MASK_DWLEN]; +} __packed; + +#define NBL_DVN_PKT_COMPENSATION_ADDR (0x514468) +#define NBL_DVN_PKT_COMPENSATION_DEPTH (1) +#define NBL_DVN_PKT_COMPENSATION_WIDTH (32) +#define NBL_DVN_PKT_COMPENSATION_DWLEN (1) +union dvn_pkt_compensation_u { + struct dvn_pkt_compensation { + u32 cfg_byte:6; /* [5:0] Default:0x0 RW */ + u32 cfg_sign_flag:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PKT_COMPENSATION_DWLEN]; +} __packed; + +#define NBL_DVN_ETH_TYPE1_ADDR (0x51446c) +#define NBL_DVN_ETH_TYPE1_DEPTH (1) +#define NBL_DVN_ETH_TYPE1_WIDTH (32) +#define NBL_DVN_ETH_TYPE1_DWLEN (1) +union dvn_eth_type1_u { + struct dvn_eth_type1 { + u32 cfg_vlan_type_1:17; /* [16:0] Default:0x18100 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_ETH_TYPE1_DWLEN]; +} __packed; + +#define NBL_DVN_ETH_TYPE2_ADDR (0x514470) +#define NBL_DVN_ETH_TYPE2_DEPTH (1) +#define NBL_DVN_ETH_TYPE2_WIDTH (32) +#define NBL_DVN_ETH_TYPE2_DWLEN (1) +union dvn_eth_type2_u { + struct dvn_eth_type2 { + u32 cfg_vlan_type_2:17; /* [16:0] Default:0x19100 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_ETH_TYPE2_DWLEN]; +} __packed; + +#define NBL_DVN_ETH_TYPE3_ADDR (0x514474) +#define NBL_DVN_ETH_TYPE3_DEPTH (1) +#define NBL_DVN_ETH_TYPE3_WIDTH (32) +#define NBL_DVN_ETH_TYPE3_DWLEN (1) +union dvn_eth_type3_u { + struct dvn_eth_type3 { + u32 cfg_vlan_type_3:17; /* [16:0] Default:0x188a8 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_ETH_TYPE3_DWLEN]; +} __packed; + +#define NBL_DVN_ETH_TYPE4_ADDR (0x514478) +#define NBL_DVN_ETH_TYPE4_DEPTH (1) +#define NBL_DVN_ETH_TYPE4_WIDTH (32) +#define NBL_DVN_ETH_TYPE4_DWLEN (1) +union dvn_eth_type4_u { + struct dvn_eth_type4 { + u32 cfg_vlan_type_4:17; /* [16:0] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_ETH_TYPE4_DWLEN]; +} __packed; + +#define NBL_DVN_PKT_ANALYSE_SHAPING_ADDR (0x51447c) +#define NBL_DVN_PKT_ANALYSE_SHAPING_DEPTH (1) +#define NBL_DVN_PKT_ANALYSE_SHAPING_WIDTH (32) +#define NBL_DVN_PKT_ANALYSE_SHAPING_DWLEN (1) +union dvn_pkt_analyse_shaping_u { + struct dvn_pkt_analyse_shaping { + u32 cfg_bytes:12; /* [11:0] Default:0x2c0 RW */ + u32 mask:1; /* [12] Default:0x0 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PKT_ANALYSE_SHAPING_DWLEN]; +} __packed; + +#define NBL_DVN_DESC_WR_MERGE_TIMEOUT_ADDR (0x514480) +#define NBL_DVN_DESC_WR_MERGE_TIMEOUT_DEPTH (1) +#define NBL_DVN_DESC_WR_MERGE_TIMEOUT_WIDTH (32) +#define NBL_DVN_DESC_WR_MERGE_TIMEOUT_DWLEN (1) +union dvn_desc_wr_merge_timeout_u { + struct dvn_desc_wr_merge_timeout { + u32 cfg_cycle:10; /* [9:0] Default:0x73 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESC_WR_MERGE_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_DVN_SEG_PKT_DIF_ERR_SET_QUEUE_ERR_ADDR (0x514488) +#define NBL_DVN_SEG_PKT_DIF_ERR_SET_QUEUE_ERR_DEPTH (1) +#define NBL_DVN_SEG_PKT_DIF_ERR_SET_QUEUE_ERR_WIDTH (32) +#define NBL_DVN_SEG_PKT_DIF_ERR_SET_QUEUE_ERR_DWLEN (1) +union dvn_seg_pkt_dif_err_set_queue_err_u { + struct dvn_seg_pkt_dif_err_set_queue_err { + u32 flag:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DVN_SEG_PKT_DIF_ERR_SET_QUEUE_ERR_DWLEN]; +} __packed; + +#define NBL_DVN_PR1_CFG_DEBUG_REG0_ADDR (0x514800) +#define NBL_DVN_PR1_CFG_DEBUG_REG0_DEPTH (1) +#define NBL_DVN_PR1_CFG_DEBUG_REG0_WIDTH (32) +#define NBL_DVN_PR1_CFG_DEBUG_REG0_DWLEN (1) +union dvn_pr1_cfg_debug_reg0_u { + struct dvn_pr1_cfg_debug_reg0 { + u32 dvn_pr1_cfg_debug_reg0:32; /* [31:0] Default:0x800 RO */ + } __packed info; + u32 data[NBL_DVN_PR1_CFG_DEBUG_REG0_DWLEN]; +} __packed; + +#define NBL_DVN_PR1_CFG_DEBUG_REG1_ADDR (0x514804) +#define NBL_DVN_PR1_CFG_DEBUG_REG1_DEPTH (1) +#define NBL_DVN_PR1_CFG_DEBUG_REG1_WIDTH (32) +#define NBL_DVN_PR1_CFG_DEBUG_REG1_DWLEN (1) +union dvn_pr1_cfg_debug_reg1_u { + struct dvn_pr1_cfg_debug_reg1 { + u32 dvn_pr1_cfg_debug_reg1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PR1_CFG_DEBUG_REG1_DWLEN]; +} __packed; + +#define NBL_DVN_PR1_CFG_DEBUG_REG2_ADDR (0x514808) +#define NBL_DVN_PR1_CFG_DEBUG_REG2_DEPTH (1) +#define NBL_DVN_PR1_CFG_DEBUG_REG2_WIDTH (32) +#define NBL_DVN_PR1_CFG_DEBUG_REG2_DWLEN (1) +union dvn_pr1_cfg_debug_reg2_u { + struct dvn_pr1_cfg_debug_reg2 { + u32 dvn_pr1_cfg_debug_reg2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PR1_CFG_DEBUG_REG2_DWLEN]; +} __packed; + +#define NBL_DVN_PR2_CFG_DEBUG_REG0_ADDR (0x51480c) +#define NBL_DVN_PR2_CFG_DEBUG_REG0_DEPTH (1) +#define NBL_DVN_PR2_CFG_DEBUG_REG0_WIDTH (32) +#define NBL_DVN_PR2_CFG_DEBUG_REG0_DWLEN (1) +union dvn_pr2_cfg_debug_reg0_u { + struct dvn_pr2_cfg_debug_reg0 { + u32 dvn_pr2_cfg_debug_reg0:32; /* [31:0] Default:0x40 RO */ + } __packed info; + u32 data[NBL_DVN_PR2_CFG_DEBUG_REG0_DWLEN]; +} __packed; + +#define NBL_DVN_PR2_CFG_DEBUG_REG1_ADDR (0x514810) +#define NBL_DVN_PR2_CFG_DEBUG_REG1_DEPTH (1) +#define NBL_DVN_PR2_CFG_DEBUG_REG1_WIDTH (32) +#define NBL_DVN_PR2_CFG_DEBUG_REG1_DWLEN (1) +union dvn_pr2_cfg_debug_reg1_u { + struct dvn_pr2_cfg_debug_reg1 { + u32 dvn_pr2_cfg_debug_reg1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_PR2_CFG_DEBUG_REG1_DWLEN]; +} __packed; + +#define NBL_DVN_AVRING_DEBUG1_ADDR (0x514814) +#define NBL_DVN_AVRING_DEBUG1_DEPTH (1) +#define NBL_DVN_AVRING_DEBUG1_WIDTH (32) +#define NBL_DVN_AVRING_DEBUG1_DWLEN (1) +union dvn_avring_debug1_u { + struct dvn_avring_debug1 { + u32 dvn_avring_debug1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_AVRING_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_QCRD_DEBUG1_ADDR (0x514818) +#define NBL_DVN_QCRD_DEBUG1_DEPTH (1) +#define NBL_DVN_QCRD_DEBUG1_WIDTH (32) +#define NBL_DVN_QCRD_DEBUG1_DWLEN (1) +union dvn_qcrd_debug1_u { + struct dvn_qcrd_debug1 { + u32 dvn_qcrd_debug1:32; /* [31:0] Default:0x4 RO */ + } __packed info; + u32 data[NBL_DVN_QCRD_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_DESCRDL1_DEBUG1_ADDR (0x51481c) +#define NBL_DVN_DESCRDL1_DEBUG1_DEPTH (1) +#define NBL_DVN_DESCRDL1_DEBUG1_WIDTH (32) +#define NBL_DVN_DESCRDL1_DEBUG1_DWLEN (1) +union dvn_descrdl1_debug1_u { + struct dvn_descrdl1_debug1 { + u32 dvn_descrdl1_debug1:32; /* [31:0] Default:0x20 RO */ + } __packed info; + u32 data[NBL_DVN_DESCRDL1_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_DESCRDL2_DEBUG1_ADDR (0x514820) +#define NBL_DVN_DESCRDL2_DEBUG1_DEPTH (1) +#define NBL_DVN_DESCRDL2_DEBUG1_WIDTH (32) +#define NBL_DVN_DESCRDL2_DEBUG1_DWLEN (1) +union dvn_descrdl2_debug1_u { + struct dvn_descrdl2_debug1 { + u32 dvn_descrdl2_debug1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESCRDL2_DEBUG1_DWLEN]; +} __packed; + +#define 
NBL_DVN_DESCRDL2_DEBUG2_ADDR (0x514824) +#define NBL_DVN_DESCRDL2_DEBUG2_DEPTH (1) +#define NBL_DVN_DESCRDL2_DEBUG2_WIDTH (32) +#define NBL_DVN_DESCRDL2_DEBUG2_DWLEN (1) +union dvn_descrdl2_debug2_u { + struct dvn_descrdl2_debug2 { + u32 dvn_descrdl2_debug2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESCRDL2_DEBUG2_DWLEN]; +} __packed; + +#define NBL_DVN_DESCRDL3_DEBUG1_ADDR (0x514828) +#define NBL_DVN_DESCRDL3_DEBUG1_DEPTH (1) +#define NBL_DVN_DESCRDL3_DEBUG1_WIDTH (32) +#define NBL_DVN_DESCRDL3_DEBUG1_DWLEN (1) +union dvn_descrdl3_debug1_u { + struct dvn_descrdl3_debug1 { + u32 dvn_descrdl3_debug1:32; /* [31:0] Default:0xe0 RO */ + } __packed info; + u32 data[NBL_DVN_DESCRDL3_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_PKTAN_DEBUG1_ADDR (0x51482c) +#define NBL_DVN_PKTAN_DEBUG1_DEPTH (1) +#define NBL_DVN_PKTAN_DEBUG1_WIDTH (32) +#define NBL_DVN_PKTAN_DEBUG1_DWLEN (1) +union dvn_pktan_debug1_u { + struct dvn_pktan_debug1 { + u32 dvn_pktan_debug1:32; /* [31:0] Default:0x4008_0008 RO */ + } __packed info; + u32 data[NBL_DVN_PKTAN_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_DATRQ_DEBUG1_ADDR (0x514830) +#define NBL_DVN_DATRQ_DEBUG1_DEPTH (1) +#define NBL_DVN_DATRQ_DEBUG1_WIDTH (32) +#define NBL_DVN_DATRQ_DEBUG1_DWLEN (1) +union dvn_datrq_debug1_u { + struct dvn_datrq_debug1 { + u32 dvn_datrq_debug1:32; /* [31:0] Default:0x13 RO */ + } __packed info; + u32 data[NBL_DVN_DATRQ_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_PKTS_DEBUG1_ADDR (0x514834) +#define NBL_DVN_PKTS_DEBUG1_DEPTH (1) +#define NBL_DVN_PKTS_DEBUG1_WIDTH (32) +#define NBL_DVN_PKTS_DEBUG1_DWLEN (1) +union dvn_pkts_debug1_u { + struct dvn_pkts_debug1 { + u32 dvn_pkts_debug1:32; /* [31:0] Default:0x40 RO */ + } __packed info; + u32 data[NBL_DVN_PKTS_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_DESCWR_DEBUG1_ADDR (0x514838) +#define NBL_DVN_DESCWR_DEBUG1_DEPTH (1) +#define NBL_DVN_DESCWR_DEBUG1_WIDTH (32) +#define NBL_DVN_DESCWR_DEBUG1_DWLEN (1) +union dvn_descwr_debug1_u { + struct dvn_descwr_debug1 { + u32 dvn_descwr_debug1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESCWR_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_SEG_DEBUG1_ADDR (0x51483c) +#define NBL_DVN_SEG_DEBUG1_DEPTH (1) +#define NBL_DVN_SEG_DEBUG1_WIDTH (32) +#define NBL_DVN_SEG_DEBUG1_DWLEN (1) +union dvn_seg_debug1_u { + struct dvn_seg_debug1 { + u32 dvn_seg_debug1:32; /* [31:0] Default:0x80200a0 RO */ + } __packed info; + u32 data[NBL_DVN_SEG_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_PKTS_JOINT_DEBUG1_ADDR (0x514840) +#define NBL_DVN_PKTS_JOINT_DEBUG1_DEPTH (1) +#define NBL_DVN_PKTS_JOINT_DEBUG1_WIDTH (32) +#define NBL_DVN_PKTS_JOINT_DEBUG1_DWLEN (1) +union dvn_pkts_joint_debug1_u { + struct dvn_pkts_joint_debug1 { + u32 dvn_pkts_joint_debug1:32; /* [31:0] Default:0x18e8 RO */ + } __packed info; + u32 data[NBL_DVN_PKTS_JOINT_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_WR_MERGE_DEBUG1_ADDR (0x514844) +#define NBL_DVN_WR_MERGE_DEBUG1_DEPTH (1) +#define NBL_DVN_WR_MERGE_DEBUG1_WIDTH (32) +#define NBL_DVN_WR_MERGE_DEBUG1_DWLEN (1) +union dvn_wr_merge_debug1_u { + struct dvn_wr_merge_debug1 { + u32 dvn_wr_merge_debug1:32; /* [31:0] Default:0x30 RO */ + } __packed info; + u32 data[NBL_DVN_WR_MERGE_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_MUX_DEBUG1_ADDR (0x514848) +#define NBL_DVN_MUX_DEBUG1_DEPTH (1) +#define NBL_DVN_MUX_DEBUG1_WIDTH (32) +#define NBL_DVN_MUX_DEBUG1_DWLEN (1) +union dvn_mux_debug1_u { + struct dvn_mux_debug1 { + u32 dvn_mux_debug1:32; /* [31:0] Default:0x21f4 RO */ + } 
__packed info; + u32 data[NBL_DVN_MUX_DEBUG1_DWLEN]; +} __packed; + +#define NBL_DVN_PKTAN_DEBUG2_ADDR (0x51484c) +#define NBL_DVN_PKTAN_DEBUG2_DEPTH (1) +#define NBL_DVN_PKTAN_DEBUG2_WIDTH (32) +#define NBL_DVN_PKTAN_DEBUG2_DWLEN (1) +union dvn_pktan_debug2_u { + struct dvn_pktan_debug2 { + u32 dvn_pktan_debug2:32; /* [31:0] Default:0x200 RO */ + } __packed info; + u32 data[NBL_DVN_PKTAN_DEBUG2_DWLEN]; +} __packed; + +#define NBL_DVN_SEG_DEBUG2_ADDR (0x514850) +#define NBL_DVN_SEG_DEBUG2_DEPTH (1) +#define NBL_DVN_SEG_DEBUG2_WIDTH (32) +#define NBL_DVN_SEG_DEBUG2_DWLEN (1) +union dvn_seg_debug2_u { + struct dvn_seg_debug2 { + u32 dvn_seg_debug2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_SEG_DEBUG2_DWLEN]; +} __packed; + +#define NBL_DVN_SEG_DEBUG3_ADDR (0x514854) +#define NBL_DVN_SEG_DEBUG3_DEPTH (1) +#define NBL_DVN_SEG_DEBUG3_WIDTH (32) +#define NBL_DVN_SEG_DEBUG3_DWLEN (1) +union dvn_seg_debug3_u { + struct dvn_seg_debug3 { + u32 dvn_seg_debug3:32; /* [31:0] Default:0x8000_0000 RO */ + } __packed info; + u32 data[NBL_DVN_SEG_DEBUG3_DWLEN]; +} __packed; + +#define NBL_DVN_QCMG_QUEUE_PARA_RAM_DEBUG_ADDR (0x514910) +#define NBL_DVN_QCMG_QUEUE_PARA_RAM_DEBUG_DEPTH (1) +#define NBL_DVN_QCMG_QUEUE_PARA_RAM_DEBUG_WIDTH (32) +#define NBL_DVN_QCMG_QUEUE_PARA_RAM_DEBUG_DWLEN (1) +union dvn_qcmg_queue_para_ram_debug_u { + struct dvn_qcmg_queue_para_ram_debug { + u32 dvn_qcmg_queue_para_ram_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_QCMG_QUEUE_PARA_RAM_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM0_DEBUG_ADDR (0x514914) +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM0_DEBUG_DEPTH (1) +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM0_DEBUG_WIDTH (32) +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM0_DEBUG_DWLEN (1) +union dvn_qcmg_queue_context_ram0_debug_u { + struct dvn_qcmg_queue_context_ram0_debug { + u32 dvn_qcmg_queue_context_ram0_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_QCMG_QUEUE_CONTEXT_RAM0_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM1_DEBUG_ADDR (0x514918) +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM1_DEBUG_DEPTH (1) +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM1_DEBUG_WIDTH (32) +#define NBL_DVN_QCMG_QUEUE_CONTEXT_RAM1_DEBUG_DWLEN (1) +union dvn_qcmg_queue_context_ram1_debug_u { + struct dvn_qcmg_queue_context_ram1_debug { + u32 dvn_qcmg_queue_context_ram1_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_QCMG_QUEUE_CONTEXT_RAM1_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_LCMG_CONTEXT_RAM_DEBUG_ADDR (0x51491c) +#define NBL_DVN_LCMG_CONTEXT_RAM_DEBUG_DEPTH (1) +#define NBL_DVN_LCMG_CONTEXT_RAM_DEBUG_WIDTH (32) +#define NBL_DVN_LCMG_CONTEXT_RAM_DEBUG_DWLEN (1) +union dvn_lcmg_context_ram_debug_u { + struct dvn_lcmg_context_ram_debug { + u32 dvn_lcmg_context_ram_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_LCMG_CONTEXT_RAM_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_LCMG_HEADER_RAM_DEBUG_ADDR (0x514920) +#define NBL_DVN_LCMG_HEADER_RAM_DEBUG_DEPTH (1) +#define NBL_DVN_LCMG_HEADER_RAM_DEBUG_WIDTH (32) +#define NBL_DVN_LCMG_HEADER_RAM_DEBUG_DWLEN (1) +union dvn_lcmg_header_ram_debug_u { + struct dvn_lcmg_header_ram_debug { + u32 dvn_lcmg_header_ram_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_LCMG_HEADER_RAM_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_CFG_DESCWR_DEBUG_RAM_DEBUG_ADDR (0x514928) +#define NBL_DVN_CFG_DESCWR_DEBUG_RAM_DEBUG_DEPTH (1) +#define NBL_DVN_CFG_DESCWR_DEBUG_RAM_DEBUG_WIDTH 
(32) +#define NBL_DVN_CFG_DESCWR_DEBUG_RAM_DEBUG_DWLEN (1) +union dvn_cfg_descwr_debug_ram_debug_u { + struct dvn_cfg_descwr_debug_ram_debug { + u32 dvn_cfg_descwr_debug_ram_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_CFG_DESCWR_DEBUG_RAM_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_CFG_DESCRD_DEBUG_RAM_DEBUG_ADDR (0x51492c) +#define NBL_DVN_CFG_DESCRD_DEBUG_RAM_DEBUG_DEPTH (1) +#define NBL_DVN_CFG_DESCRD_DEBUG_RAM_DEBUG_WIDTH (32) +#define NBL_DVN_CFG_DESCRD_DEBUG_RAM_DEBUG_DWLEN (1) +union dvn_cfg_descrd_debug_ram_debug_u { + struct dvn_cfg_descrd_debug_ram_debug { + u32 dvn_cfg_descrd_debug_ram_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_CFG_DESCRD_DEBUG_RAM_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_DBLEN_TXLEN_RAM_DEBUG_ADDR (0x5149e4) +#define NBL_DVN_DBLEN_TXLEN_RAM_DEBUG_DEPTH (1) +#define NBL_DVN_DBLEN_TXLEN_RAM_DEBUG_WIDTH (32) +#define NBL_DVN_DBLEN_TXLEN_RAM_DEBUG_DWLEN (1) +union dvn_dblen_txlen_ram_debug_u { + struct dvn_dblen_txlen_ram_debug { + u32 dvn_dblen_txlen_ram_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_DBLEN_TXLEN_RAM_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_STAT_CACHE_RAM_DEBUG_ADDR (0x5149ec) +#define NBL_DVN_STAT_CACHE_RAM_DEBUG_DEPTH (1) +#define NBL_DVN_STAT_CACHE_RAM_DEBUG_WIDTH (32) +#define NBL_DVN_STAT_CACHE_RAM_DEBUG_DWLEN (1) +union dvn_stat_cache_ram_debug_u { + struct dvn_stat_cache_ram_debug { + u32 dvn_stat_cache_ram_debug:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_STAT_CACHE_RAM_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_MUX_PKTDATA_RAM_DEBUG2_ADDR (0x5149f4) +#define NBL_DVN_MUX_PKTDATA_RAM_DEBUG2_DEPTH (1) +#define NBL_DVN_MUX_PKTDATA_RAM_DEBUG2_WIDTH (32) +#define NBL_DVN_MUX_PKTDATA_RAM_DEBUG2_DWLEN (1) +union dvn_mux_pktdata_ram_debug2_u { + struct dvn_mux_pktdata_ram_debug2 { + u32 dvn_mux_pktdata_ram_debug2:32; /* [31:0] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_MUX_PKTDATA_RAM_DEBUG2_DWLEN]; +} __packed; + +#define NBL_DVN_EXTEND_HEADER_REG0_ADDR (0x514f00) +#define NBL_DVN_EXTEND_HEADER_REG0_DEPTH (1) +#define NBL_DVN_EXTEND_HEADER_REG0_WIDTH (32) +#define NBL_DVN_EXTEND_HEADER_REG0_DWLEN (1) +union dvn_extend_header_reg0_u { + struct dvn_extend_header_reg0 { + u32 dvn_extend_header_reg0:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_EXTEND_HEADER_REG0_DWLEN]; +} __packed; + +#define NBL_DVN_EXTEND_HEADER_REG1_ADDR (0x514f04) +#define NBL_DVN_EXTEND_HEADER_REG1_DEPTH (1) +#define NBL_DVN_EXTEND_HEADER_REG1_WIDTH (32) +#define NBL_DVN_EXTEND_HEADER_REG1_DWLEN (1) +union dvn_extend_header_reg1_u { + struct dvn_extend_header_reg1 { + u32 dvn_extend_header_reg1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_EXTEND_HEADER_REG1_DWLEN]; +} __packed; + +#define NBL_DVN_EXTEND_HEADER_REG2_ADDR (0x514f08) +#define NBL_DVN_EXTEND_HEADER_REG2_DEPTH (1) +#define NBL_DVN_EXTEND_HEADER_REG2_WIDTH (32) +#define NBL_DVN_EXTEND_HEADER_REG2_DWLEN (1) +union dvn_extend_header_reg2_u { + struct dvn_extend_header_reg2 { + u32 dvn_extend_header_reg2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_EXTEND_HEADER_REG2_DWLEN]; +} __packed; + +#define NBL_DVN_WARNING_STATUS_ADDR (0x515000) +#define NBL_DVN_WARNING_STATUS_DEPTH (1) +#define NBL_DVN_WARNING_STATUS_WIDTH (32) +#define NBL_DVN_WARNING_STATUS_DWLEN (1) +union dvn_warning_status_u { + struct dvn_warning_status { + u32 dvn_warning_status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_DVN_WARNING_STATUS_DWLEN]; +} __packed; + +#define NBL_DVN_RAM_ERR1_ADDR (0x515004) +#define NBL_DVN_RAM_ERR1_DEPTH (1) +#define NBL_DVN_RAM_ERR1_WIDTH (32) +#define NBL_DVN_RAM_ERR1_DWLEN (1) +union dvn_ram_err1_u { + struct dvn_ram_err1 { + u32 u_header1_fifo_ram_err:1; /* [0] Default:0x0 RC */ + u32 u_pkts_pktheader_fifo_ram_err:1; /* [1] Default:0x0 RC */ + u32 u_pktdata_fifo_ram_err:1; /* [2] Default:0x0 RC */ + u32 u_pkts_seg_pktinfo_fifo_err:1; /* [3] Default:0x0 RC */ + u32 u_desc_wrback_fifo_ram_err:1; /* [4] Default:0x0 RC */ + u32 u_baddr_descwr_fifo_ram_err:1; /* [5] Default:0x0 RC */ + u32 u_descrd_debug_mem_ram_err:1; /* [6] Default:0x0 RC */ + u32 u_descwr_debug_mem_ram_err:1; /* [7] Default:0x0 RC */ + u32 u_sub_burst_txlen_ram_err:1; /* [8] Default:0x0 RC */ + u32 u_desc_pre_statis_fifo_ram_err:1; /* [9] Default:0x0 RC */ + u32 u_cache_ram_cnt_ram_err:1; /* [10] Default:0x0 RC */ + u32 u_dvn_rc_fifo_ram_err:1; /* [11] Default:0x0 RC */ + u32 u_dvn_header_mem_ram_err:1; /* [12] Default:0x0 RC */ + u32 u_queue_context_mem_ram_err:1; /* [13] Default:0x0 RC */ + u32 u_queue_context_mem_0_ram_err:1; /* [14] Default:0x0 RC */ + u32 u_queue_context_mem_1_ram_err:1; /* [15] Default:0x0 RC */ + u32 u_queue_para_mem_0_ram_err:1; /* [16] Default:0x0 RC */ + u32 u_wr_merge_data_fifo_ram_err:1; /* [17] Default:0x0 RC */ + u32 u_wr_merge_info_fifo_ram_err:1; /* [18] Default:0x0 RC */ + u32 u_host_mux_pktinfo_fifo_ram_err:1; /* [19] Default:0x0 RC */ + u32 u_host_mux_pktdata_ram_err:1; /* [20] Default:0x0 RC */ + u32 u_descv10_pr1_req_info_fifo_ram_err:1; /* [21] Default:0x0 RC */ + u32 u_pktan_split_fc_status_ram_err:1; /* [22] Default:0x0 RC */ + u32 rsv:9; /* [31:23] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_RAM_ERR1_DWLEN]; +} __packed; + +#define NBL_DVN_RAM_ERR2_ADDR (0x515008) +#define NBL_DVN_RAM_ERR2_DEPTH (1) +#define NBL_DVN_RAM_ERR2_WIDTH (32) +#define NBL_DVN_RAM_ERR2_DWLEN (1) +union dvn_ram_err2_u { + struct dvn_ram_err2 { + u32 dsch_dvn_info_fifo_ram_err:1; /* [0] Default:0x0 RC */ + u32 qcrd_avring_fifo_ram_err:1; /* [1] Default:0x0 RC */ + u32 qcrd_descrdl1_fifo_ram_err:1; /* [2] Default:0x0 RC */ + u32 inner_info1_fifo_ram_err:1; /* [3] Default:0x0 RC */ + u32 inner_info2_fifo_ram_err:1; /* [4] Default:0x0 RC */ + u32 avail_idx_fifo1_ram_err:1; /* [5] Default:0x0 RC */ + u32 avail_idx_fifo2_ram_err:1; /* [6] Default:0x0 RC */ + u32 avail_ring_fifo_ram_err:1; /* [7] Default:0x0 RC */ + u32 avring_l1_ring_fifo_ram_err:1; /* [8] Default:0x0 RC */ + u32 avring_l1_burst_info_fifo_ram_err:1; /* [9] Default:0x0 RC */ + u32 u_burinf_fifo_ram_err:1; /* [10] Default:0x0 RC */ + u32 u_reqinf_fifo_ram_err:1; /* [11] Default:0x0 RC */ + u32 u_desc_fifo_ram_err:1; /* [12] Default:0x0 RC */ + u32 u_burst_infifo_ram_err:1; /* [13] Default:0x0 RC */ + u32 u_infifo_ram_err:1; /* [14] Default:0x0 RC */ + u32 u_desfifo_ram_err:1; /* [15] Default:0x0 RC */ + u32 u_desc_mfifo_ram_err:1; /* [16] Default:0x0 RC */ + u32 descrdl1_l2_desc_fifo_ram_err:1; /* [17] Default:0x0 RC */ + u32 descrdl1_l2_burst_info_fifo_ram_err:1; /* [18] Default:0x0 RC */ + u32 descrdl2_l3_indesc_fifo_ram_err:1; /* [19] Default:0x0 RC */ + u32 descrdl2_l3_alldesc_fifo_ram_err:1; /* [20] Default:0x0 RC */ + u32 descrdl2_l3_burst_info_fifo_ram_err:1; /* [21] Default:0x0 RC */ + u32 indesc_req_fifo_ram_err:1; /* [22] Default:0x0 RC */ + u32 descrdl3_pktrq_desc_fifo_ram_err:1; /* [23] Default:0x0 RC */ + u32 descrdl3_pktrq_pktinfo_fifo_ram_err:1; /* [24] Default:0x0 RC */ + u32 
descrdl3_pktrq_burst_info_fifo_ram_err:1; /* [25] Default:0x0 RC */ + u32 u_datrq_desc_info_fifo_ram_err:1; /* [26] Default:0x0 RC */ + u32 u_pktan_seg_burstinfo_info_fifo_ram_err:1; /* [27] Default:0x0 RC */ + u32 u_pktan_pkts_pktinfo_info_fifo_ram_err:1; /* [28] Default:0x0 RC */ + u32 u_pktan_seg_header_fifo_ram_err:1; /* [29] Default:0x0 RC */ + u32 u_pktrq_pkts_cellinfo_fifo_ram_err:1; /* [30] Default:0x0 RC */ + u32 u_pktrq_pkts_pktdata_fifo_ram_err:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_DVN_RAM_ERR2_DWLEN]; +} __packed; + +#define NBL_DVN_STATUS_ERR_ADDR (0x51504c) +#define NBL_DVN_STATUS_ERR_DEPTH (1) +#define NBL_DVN_STATUS_ERR_WIDTH (32) +#define NBL_DVN_STATUS_ERR_DWLEN (1) +union dvn_status_err_u { + struct dvn_status_err { + u32 sp_id_err:1; /* [0] Default:0x0 RO */ + u32 pktdata1_fifo_err:1; /* [1] Default:0x0 RO */ + u32 cellinfo1_fifo_err:1; /* [2] Default:0x0 RO */ + u32 pkts_header_fifo_err:1; /* [3] Default:0x0 RO */ + u32 pkts_pktdata_fifo_err:1; /* [4] Default:0x0 RO */ + u32 desc_wrback_fifo_err:1; /* [5] Default:0x0 RO */ + u32 burstinfo_fifo_err:1; /* [6] Default:0x0 RO */ + u32 pktinfo_fifo_err:1; /* [7] Default:0x0 RO */ + u32 descinfo_fifo_err:1; /* [8] Default:0x0 RO */ + u32 descrdl3_pktrq_burst_info_fifo_err:1; /* [9] Default:0x0 RO */ + u32 descrdl3_pktrq_pktinfo_fifo_err:1; /* [10] Default:0x0 RO */ + u32 descrdl3_pktrq_desc_fifo_err:1; /* [11] Default:0x0 RO */ + u32 descrdl2_l3_burst_info_fifo_err:1; /* [12] Default:0x0 RO */ + u32 descrdl2_l3_alldesc_fifo_err:1; /* [13] Default:0x0 RO */ + u32 descrdl2_l3_indesc_fifo1_err:1; /* [14] Default:0x0 RO */ + u32 descrdl1_l2_burst_info_fifo_err:1; /* [15] Default:0x0 RO */ + u32 padpt_dvn_desc_rerr_fifo_err:1; /* [16] Default:0x0 RO */ + u32 descrdl1_l2_desc_fifo1_err:1; /* [17] Default:0x0 RO */ + u32 dsch_dvn_info_fifo_err:1; /* [18] Default:0x0 RO */ + u32 qcrd_fifo_err:1; /* [19] Default:0x0 RO */ + u32 qcrd_avring_fifo_err:1; /* [20] Default:0x0 RO */ + u32 pr1_cfg_fifo_err:1; /* [21] Default:0x0 RO */ + u32 pr2_cfg_fifo_err:1; /* [22] Default:0x0 RO */ + u32 avail_idx_fifo_err:1; /* [23] Default:0x0 RO */ + u32 avail_ring_fifo_err:1; /* [24] Default:0x0 RO */ + u32 avring_l1_burst_info_fifo_err:1; /* [25] Default:0x0 RO */ + u32 inner_info_fifo_err:1; /* [26] Default:0x0 RO */ + u32 pr2_cfg_interrupt:1; /* [27] Default:0x0 RO */ + u32 pr1_cfg_interrupt:1; /* [28] Default:0x0 RO */ + u32 pkt_padpt_err_queue_id_vld:1; /* [29] Default:0x0 RO */ + u32 desc_padpt_err_queue_id_vld:1; /* [30] Default:0x0 RO */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_STATUS_ERR_DWLEN]; +} __packed; + +#define NBL_DVN_LCMG_RC_ERR_DEBUG_ADDR (0x515110) +#define NBL_DVN_LCMG_RC_ERR_DEBUG_DEPTH (1) +#define NBL_DVN_LCMG_RC_ERR_DEBUG_WIDTH (32) +#define NBL_DVN_LCMG_RC_ERR_DEBUG_DWLEN (1) +union dvn_lcmg_rc_err_debug_u { + struct dvn_lcmg_rc_err_debug { + u32 rc_err_debug:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_LCMG_RC_ERR_DEBUG_DWLEN]; +} __packed; + +#define NBL_DVN_QUEUE_TABLE_ADDR (0x534000) +#define NBL_DVN_QUEUE_TABLE_DEPTH (2048) +#define NBL_DVN_QUEUE_TABLE_WIDTH (256) +#define NBL_DVN_QUEUE_TABLE_DWLEN (8) +union dvn_queue_table_u { + struct dvn_queue_table { + u32 dvn_used_baddr_arr[2]; /* [63:0] Default:0x0 RW */ + u32 dvn_avail_baddr_arr[2]; /* [127:64] Default:0x0 RW */ + u32 dvn_queue_baddr_arr[2]; /* [191:128] Default:0x0 RW */ + u32 dvn_queue_size:4; /* [195:192] Default:0x0 RW */ + u32 dvn_queue_type:1; /* [196] Default:0x0 RW */ 
+ u32 dvn_queue_en:1; /* [197] Default:0x0 RW */ + u32 dvn_extend_header_en:1; /* [198] Default:0x0 RW */ + u32 dvn_interleave_seg_disable:1; /* [199] Default:0x0 RW */ + u32 dvn_seg_disable:1; /* [200] Default:0x0 RW */ + u32 reserve_l:32; /* [255:201] Default:0x0 RO */ + u32 reserve_h:23; /* [255:201] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_QUEUE_TABLE_DWLEN]; +} __packed; +#define NBL_DVN_QUEUE_TABLE_REG(r) (NBL_DVN_QUEUE_TABLE_ADDR + \ + (NBL_DVN_QUEUE_TABLE_DWLEN * 4) * (r)) + +#define NBL_DVN_DESCRD_DEBUG_ADDR (0x564000) +#define NBL_DVN_DESCRD_DEBUG_DEPTH (2048) +#define NBL_DVN_DESCRD_DEBUG_WIDTH (256) +#define NBL_DVN_DESCRD_DEBUG_DWLEN (8) +union dvn_descrd_debug_u { + struct dvn_descrd_debug { + u32 dvn_descrd_data_debug0:32; /* [31:0] Default:0x0 RO */ + u32 dvn_descrd_data_debug1:32; /* [63:32] Default:0x0 RO */ + u32 dvn_descrd_data_debug2:32; /* [95:64] Default:0x0 RO */ + u32 dvn_descrd_data_debug3:32; /* [127:96] Default:0x0 RO */ + u32 dvn_descrd_addr_debug:32; /* [159:128] Default:0x0 RO */ + u32 reserve:32; /* [255:160] Default:0x0 RO */ + u32 reserve_arr[2]; /* [255:160] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESCRD_DEBUG_DWLEN]; +} __packed; +#define NBL_DVN_DESCRD_DEBUG_REG(r) (NBL_DVN_DESCRD_DEBUG_ADDR + \ + (NBL_DVN_DESCRD_DEBUG_DWLEN * 4) * (r)) + +#define NBL_DVN_DESCWR_DEBUG_ADDR (0x574000) +#define NBL_DVN_DESCWR_DEBUG_DEPTH (2048) +#define NBL_DVN_DESCWR_DEBUG_WIDTH (32) +#define NBL_DVN_DESCWR_DEBUG_DWLEN (1) +union dvn_descwr_debug_u { + struct dvn_descwr_debug { + u32 dvn_descwr_data_debug:16; /* [15:0] Default:0x0 RO */ + u32 dvn_descwr_addr_debug:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_DESCWR_DEBUG_DWLEN]; +} __packed; +#define NBL_DVN_DESCWR_DEBUG_REG(r) (NBL_DVN_DESCWR_DEBUG_ADDR + \ + (NBL_DVN_DESCWR_DEBUG_DWLEN * 4) * (r)) + +#define NBL_DVN_LSO_PKT_HEADER_ADDR (0x584000) +#define NBL_DVN_LSO_PKT_HEADER_DEPTH (1536) +#define NBL_DVN_LSO_PKT_HEADER_WIDTH (512) +#define NBL_DVN_LSO_PKT_HEADER_DWLEN (16) +union dvn_lso_pkt_header_u { + struct dvn_lso_pkt_header { + u32 head_data_arr[16]; /* [511:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DVN_LSO_PKT_HEADER_DWLEN]; +} __packed; +#define NBL_DVN_LSO_PKT_HEADER_REG(r) (NBL_DVN_LSO_PKT_HEADER_ADDR + \ + (NBL_DVN_LSO_PKT_HEADER_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_shaping.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_shaping.h new file mode 100644 index 0000000000000000000000000000000000000000..ef1f4db4ee9a8fad9deb9091534fd52096fb8a4f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_shaping.h @@ -0,0 +1,560 @@ +// Code generated by interstellar. DO NOT EDIT. 
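These generated layouts share one pattern: an entry is read as raw dwords through the union's `data[]` member and decoded through the `__packed` bitfield view in `info`, and the `*_REG(r)` macros convert an entry index into a byte offset (the stride is `DWLEN * 4` bytes). A minimal sketch of reading one DVN queue-table entry, assuming `hw` is the driver's `ioremap()`ed register BAR and using the kernel's standard `readl()` accessor; the helper itself is illustrative and not part of this patch:

  #include <linux/io.h>
  #include <linux/printk.h>

  static void nbl_dump_dvn_queue(void __iomem *hw, u16 qid)
  {
  	union dvn_queue_table_u entry;
  	u32 off = NBL_DVN_QUEUE_TABLE_REG(qid);	/* byte offset of entry qid */
  	int i;

  	/* One entry is NBL_DVN_QUEUE_TABLE_DWLEN (8) dwords, i.e. 256 bits. */
  	for (i = 0; i < NBL_DVN_QUEUE_TABLE_DWLEN; i++)
  		entry.data[i] = readl(hw + off + i * 4);

  	/* Decode the same storage through the bitfield view. */
  	pr_info("dvn queue %u: en=%u type=%u size=%u\n", qid,
  		entry.info.dvn_queue_en, entry.info.dvn_queue_type,
  		entry.info.dvn_queue_size);
  }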
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_SHAPING_H +#define NBL_SHAPING_H 1 + +#include <linux/types.h> + +#define NBL_SHAPING_BASE (0x00504000) + +#define NBL_SHAPING_INT_STATUS_ADDR (0x504000) +#define NBL_SHAPING_INT_STATUS_DEPTH (1) +#define NBL_SHAPING_INT_STATUS_WIDTH (32) +#define NBL_SHAPING_INT_STATUS_DWLEN (1) +union shaping_int_status_u { + struct shaping_int_status { + u32 net_parity_err:1; /* [0] Default:0x0 RWC */ + u32 grp_parity_err:1; /* [1] Default:0x0 RWC */ + u32 dflw_err:1; /* [2] Default:0x0 RWC */ + u32 uflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_SHAPING_INT_MASK_ADDR (0x504004) +#define NBL_SHAPING_INT_MASK_DEPTH (1) +#define NBL_SHAPING_INT_MASK_WIDTH (32) +#define NBL_SHAPING_INT_MASK_DWLEN (1) +union shaping_int_mask_u { + struct shaping_int_mask { + u32 net_parity_err:1; /* [0] Default:0x0 RW */ + u32 grp_parity_err:1; /* [1] Default:0x0 RW */ + u32 dflw_err:1; /* [2] Default:0x0 RW */ + u32 uflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_INT_MASK_DWLEN]; +} __packed; + +#define NBL_SHAPING_INT_SET_ADDR (0x504008) +#define NBL_SHAPING_INT_SET_DEPTH (1) +#define NBL_SHAPING_INT_SET_WIDTH (32) +#define NBL_SHAPING_INT_SET_DWLEN (1) +union shaping_int_set_u { + struct shaping_int_set { + u32 net_parity_err:1; /* [0] Default:0x0 WO */ + u32 grp_parity_err:1; /* [1] Default:0x0 WO */ + u32 dflw_err:1; /* [2] Default:0x0 WO */ + u32 uflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_INT_SET_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_PARITY_ERR_INFO_ADDR (0x504100) +#define NBL_SHAPING_NET_PARITY_ERR_INFO_DEPTH (1) +#define NBL_SHAPING_NET_PARITY_ERR_INFO_WIDTH (32) +#define NBL_SHAPING_NET_PARITY_ERR_INFO_DWLEN (1) +union shaping_net_parity_err_info_u { + struct shaping_net_parity_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_NET_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_SHAPING_DFLW_ERR_INFO_ADDR (0x504108) +#define NBL_SHAPING_DFLW_ERR_INFO_DEPTH (1) +#define NBL_SHAPING_DFLW_ERR_INFO_WIDTH (32) +#define NBL_SHAPING_DFLW_ERR_INFO_DWLEN (1) +union shaping_dflw_err_info_u { + struct shaping_dflw_err_info { + u32 id:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_DFLW_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_SHAPING_UFLW_ERR_INFO_ADDR (0x504110) +#define NBL_SHAPING_UFLW_ERR_INFO_DEPTH (1) +#define NBL_SHAPING_UFLW_ERR_INFO_WIDTH (32) +#define NBL_SHAPING_UFLW_ERR_INFO_DWLEN (1) +union shaping_uflw_err_info_u { + struct shaping_uflw_err_info { + u32 id:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_UFLW_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_SHAPING_CIF_ERR_INFO_ADDR (0x504118) +#define NBL_SHAPING_CIF_ERR_INFO_DEPTH (1) +#define NBL_SHAPING_CIF_ERR_INFO_WIDTH (32) +#define NBL_SHAPING_CIF_ERR_INFO_DWLEN (1) +union shaping_cif_err_info_u { + struct shaping_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32
data[NBL_SHAPING_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_SHAPING_CAR_CTRL_ADDR (0x504120) +#define NBL_SHAPING_CAR_CTRL_DEPTH (1) +#define NBL_SHAPING_CAR_CTRL_WIDTH (32) +#define NBL_SHAPING_CAR_CTRL_DWLEN (1) +union shaping_car_ctrl_u { + struct shaping_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_INIT_START_ADDR (0x504124) +#define NBL_SHAPING_NET_INIT_START_DEPTH (1) +#define NBL_SHAPING_NET_INIT_START_WIDTH (32) +#define NBL_SHAPING_NET_INIT_START_DWLEN (1) +union shaping_net_init_start_u { + struct shaping_net_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_NET_INIT_START_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_PARITY_ERR_INFO_ADDR (0x504200) +#define NBL_SHAPING_GRP_PARITY_ERR_INFO_DEPTH (1) +#define NBL_SHAPING_GRP_PARITY_ERR_INFO_WIDTH (32) +#define NBL_SHAPING_GRP_PARITY_ERR_INFO_DWLEN (1) +union shaping_grp_parity_err_info_u { + struct shaping_grp_parity_err_info { + u32 ram_addr:8; /* [7:0] Default:0x0 RO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_GRP_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_INIT_START_ADDR (0x504224) +#define NBL_SHAPING_GRP_INIT_START_DEPTH (1) +#define NBL_SHAPING_GRP_INIT_START_WIDTH (32) +#define NBL_SHAPING_GRP_INIT_START_DWLEN (1) +union shaping_grp_init_start_u { + struct shaping_grp_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_GRP_INIT_START_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_TIMMING_ADD_ADDR (0x504300) +#define NBL_SHAPING_NET_TIMMING_ADD_DEPTH (1) +#define NBL_SHAPING_NET_TIMMING_ADD_WIDTH (32) +#define NBL_SHAPING_NET_TIMMING_ADD_DWLEN (1) +union shaping_net_timming_add_u { + struct shaping_net_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x8 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 depth:12; /* [27:16] Default:0x258 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_NET_TIMMING_ADD_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_INIT_DONE_ADDR (0x504304) +#define NBL_SHAPING_NET_INIT_DONE_DEPTH (1) +#define NBL_SHAPING_NET_INIT_DONE_WIDTH (32) +#define NBL_SHAPING_NET_INIT_DONE_DWLEN (1) +union shaping_net_init_done_u { + struct shaping_net_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_NET_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_PKT_VLD_ADDR (0x504308) +#define NBL_SHAPING_NET_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_NET_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_NET_PKT_VLD_DWLEN (1) +union shaping_net_pkt_vld_u { + struct shaping_net_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_STATE_VLD_ADDR (0x50430c) +#define NBL_SHAPING_NET_STATE_VLD_DEPTH (1) +#define NBL_SHAPING_NET_STATE_VLD_WIDTH (32) +#define NBL_SHAPING_NET_STATE_VLD_DWLEN (1) +union shaping_net_state_vld_u { + struct shaping_net_state_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_STATE_VLD_DWLEN]; +} __packed; + +#define 
NBL_SHAPING_NET_NOSHA_PKT_VLD_ADDR (0x504310) +#define NBL_SHAPING_NET_NOSHA_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_NET_NOSHA_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_NET_NOSHA_PKT_VLD_DWLEN (1) +union shaping_net_nosha_pkt_vld_u { + struct shaping_net_nosha_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_NOSHA_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_GREEN_PKT_VLD_ADDR (0x504314) +#define NBL_SHAPING_NET_GREEN_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_NET_GREEN_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_NET_GREEN_PKT_VLD_DWLEN (1) +union shaping_net_green_pkt_vld_u { + struct shaping_net_green_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_GREEN_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_YELLOW_PKT_VLD_ADDR (0x504318) +#define NBL_SHAPING_NET_YELLOW_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_NET_YELLOW_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_NET_YELLOW_PKT_VLD_DWLEN (1) +union shaping_net_yellow_pkt_vld_u { + struct shaping_net_yellow_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_YELLOW_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_RED_PKT_VLD_ADDR (0x50431c) +#define NBL_SHAPING_NET_RED_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_NET_RED_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_NET_RED_PKT_VLD_DWLEN (1) +union shaping_net_red_pkt_vld_u { + struct shaping_net_red_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_RED_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_NOSHA_PKT_LEN_ADDR (0x504320) +#define NBL_SHAPING_NET_NOSHA_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_NET_NOSHA_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_NET_NOSHA_PKT_LEN_DWLEN (2) +union shaping_net_nosha_pkt_len_u { + struct shaping_net_nosha_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_NOSHA_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_GREEN_PKT_LEN_ADDR (0x504328) +#define NBL_SHAPING_NET_GREEN_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_NET_GREEN_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_NET_GREEN_PKT_LEN_DWLEN (2) +union shaping_net_green_pkt_len_u { + struct shaping_net_green_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_GREEN_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_YELLOW_PKT_LEN_ADDR (0x504330) +#define NBL_SHAPING_NET_YELLOW_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_NET_YELLOW_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_NET_YELLOW_PKT_LEN_DWLEN (2) +union shaping_net_yellow_pkt_len_u { + struct shaping_net_yellow_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_YELLOW_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_NET_RED_PKT_LEN_ADDR (0x504338) +#define NBL_SHAPING_NET_RED_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_NET_RED_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_NET_RED_PKT_LEN_DWLEN (2) +union shaping_net_red_pkt_len_u { + struct shaping_net_red_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_NET_RED_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_TIMMING_ADD_ADDR (0x504400) +#define NBL_SHAPING_GRP_TIMMING_ADD_DEPTH (1) +#define NBL_SHAPING_GRP_TIMMING_ADD_WIDTH (32) 
+#define NBL_SHAPING_GRP_TIMMING_ADD_DWLEN (1) +union shaping_grp_timming_add_u { + struct shaping_grp_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x8 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 depth:12; /* [27:16] Default:0x258 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_GRP_TIMMING_ADD_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_INIT_DONE_ADDR (0x504404) +#define NBL_SHAPING_GRP_INIT_DONE_DEPTH (1) +#define NBL_SHAPING_GRP_INIT_DONE_WIDTH (32) +#define NBL_SHAPING_GRP_INIT_DONE_DWLEN (1) +union shaping_grp_init_done_u { + struct shaping_grp_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_GRP_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_PKT_VLD_ADDR (0x504408) +#define NBL_SHAPING_GRP_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_GRP_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_GRP_PKT_VLD_DWLEN (1) +union shaping_grp_pkt_vld_u { + struct shaping_grp_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_STATE_VLD_ADDR (0x50440c) +#define NBL_SHAPING_GRP_STATE_VLD_DEPTH (1) +#define NBL_SHAPING_GRP_STATE_VLD_WIDTH (32) +#define NBL_SHAPING_GRP_STATE_VLD_DWLEN (1) +union shaping_grp_state_vld_u { + struct shaping_grp_state_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_STATE_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_NOSHA_PKT_VLD_ADDR (0x504410) +#define NBL_SHAPING_GRP_NOSHA_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_GRP_NOSHA_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_GRP_NOSHA_PKT_VLD_DWLEN (1) +union shaping_grp_nosha_pkt_vld_u { + struct shaping_grp_nosha_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_NOSHA_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_GREEN_PKT_VLD_ADDR (0x504414) +#define NBL_SHAPING_GRP_GREEN_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_GRP_GREEN_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_GRP_GREEN_PKT_VLD_DWLEN (1) +union shaping_grp_green_pkt_vld_u { + struct shaping_grp_green_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_GREEN_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_YELLOW_PKT_VLD_ADDR (0x504418) +#define NBL_SHAPING_GRP_YELLOW_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_GRP_YELLOW_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_GRP_YELLOW_PKT_VLD_DWLEN (1) +union shaping_grp_yellow_pkt_vld_u { + struct shaping_grp_yellow_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_YELLOW_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_RED_PKT_VLD_ADDR (0x50441c) +#define NBL_SHAPING_GRP_RED_PKT_VLD_DEPTH (1) +#define NBL_SHAPING_GRP_RED_PKT_VLD_WIDTH (32) +#define NBL_SHAPING_GRP_RED_PKT_VLD_DWLEN (1) +union shaping_grp_red_pkt_vld_u { + struct shaping_grp_red_pkt_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_RED_PKT_VLD_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_NOSHA_PKT_LEN_ADDR (0x504420) +#define NBL_SHAPING_GRP_NOSHA_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_GRP_NOSHA_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_GRP_NOSHA_PKT_LEN_DWLEN (2) +union shaping_grp_nosha_pkt_len_u { + struct shaping_grp_nosha_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 
data[NBL_SHAPING_GRP_NOSHA_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_GREEN_PKT_LEN_ADDR (0x504428) +#define NBL_SHAPING_GRP_GREEN_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_GRP_GREEN_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_GRP_GREEN_PKT_LEN_DWLEN (2) +union shaping_grp_green_pkt_len_u { + struct shaping_grp_green_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_GREEN_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_YELLOW_PKT_LEN_ADDR (0x504430) +#define NBL_SHAPING_GRP_YELLOW_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_GRP_YELLOW_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_GRP_YELLOW_PKT_LEN_DWLEN (2) +union shaping_grp_yellow_pkt_len_u { + struct shaping_grp_yellow_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_YELLOW_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_GRP_RED_PKT_LEN_ADDR (0x504438) +#define NBL_SHAPING_GRP_RED_PKT_LEN_DEPTH (1) +#define NBL_SHAPING_GRP_RED_PKT_LEN_WIDTH (48) +#define NBL_SHAPING_GRP_RED_PKT_LEN_DWLEN (2) +union shaping_grp_red_pkt_len_u { + struct shaping_grp_red_pkt_len { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_SHAPING_GRP_RED_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_SHAPING_TIMMING_ADDR (0x504500) +#define NBL_SHAPING_TIMMING_DEPTH (1) +#define NBL_SHAPING_TIMMING_WIDTH (32) +#define NBL_SHAPING_TIMMING_DWLEN (1) +union shaping_timming_u { + struct shaping_timming { + u32 off:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_TIMMING_DWLEN]; +} __packed; + +#define NBL_SHAPING_DPORT_TIMMING_ADD_ADDR (0x504504) +#define NBL_SHAPING_DPORT_TIMMING_ADD_DEPTH (1) +#define NBL_SHAPING_DPORT_TIMMING_ADD_WIDTH (32) +#define NBL_SHAPING_DPORT_TIMMING_ADD_DWLEN (1) +union shaping_dport_timming_add_u { + struct shaping_dport_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x4B0 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 depth:4; /* [19:16] Default:0x4 RW */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_DPORT_TIMMING_ADD_DWLEN]; +} __packed; + +#define NBL_SHAPING_DPORT_ADDR (0x504700) +#define NBL_SHAPING_DPORT_DEPTH (4) +#define NBL_SHAPING_DPORT_WIDTH (128) +#define NBL_SHAPING_DPORT_DWLEN (4) +union shaping_dport_u { + struct shaping_dport { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_DPORT_DWLEN]; +} __packed; +#define NBL_SHAPING_DPORT_REG(r) (NBL_SHAPING_DPORT_ADDR + \ + (NBL_SHAPING_DPORT_DWLEN * 4) * (r)) + +#define NBL_SHAPING_DVN_DPORT_ADDR (0x504750) +#define NBL_SHAPING_DVN_DPORT_DEPTH (4) +#define NBL_SHAPING_DVN_DPORT_WIDTH (128) +#define NBL_SHAPING_DVN_DPORT_DWLEN (4) +union shaping_dvn_dport_u { + struct shaping_dvn_dport { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_SHAPING_DVN_DPORT_DWLEN]; +} __packed; +#define NBL_SHAPING_DVN_DPORT_REG(r) (NBL_SHAPING_DVN_DPORT_ADDR + \ + (NBL_SHAPING_DVN_DPORT_DWLEN * 4) * (r)) + +#define NBL_SHAPING_RDMA_DPORT_ADDR (0x5047a0) +#define NBL_SHAPING_RDMA_DPORT_DEPTH (4) +#define NBL_SHAPING_RDMA_DPORT_WIDTH (128) +#define NBL_SHAPING_RDMA_DPORT_DWLEN (4) +union shaping_rdma_dport_u { + struct shaping_rdma_dport { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_RDMA_DPORT_DWLEN]; +} __packed; +#define NBL_SHAPING_RDMA_DPORT_REG(r) (NBL_SHAPING_RDMA_DPORT_ADDR + \ + (NBL_SHAPING_RDMA_DPORT_DWLEN * 4) * (r)) + +#define NBL_SHAPING_GRP_ADDR (0x504800) +#define NBL_SHAPING_GRP_DEPTH (256) +#define NBL_SHAPING_GRP_WIDTH (128) +#define NBL_SHAPING_GRP_DWLEN (4) +union shaping_grp_u { + struct shaping_grp { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_GRP_DWLEN]; +} __packed; +#define NBL_SHAPING_GRP_REG(r) (NBL_SHAPING_GRP_ADDR + \ + (NBL_SHAPING_GRP_DWLEN * 4) * (r)) + +#define NBL_SHAPING_NET_ADDR (0x505800) +#define NBL_SHAPING_NET_DEPTH (520) +#define NBL_SHAPING_NET_WIDTH (128) +#define NBL_SHAPING_NET_DWLEN (4) +union shaping_net_u { + struct shaping_net { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_SHAPING_NET_DWLEN]; +} __packed; +#define NBL_SHAPING_NET_REG(r) (NBL_SHAPING_NET_ADDR + \ + (NBL_SHAPING_NET_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ubm.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ubm.h new file mode 100644 index 0000000000000000000000000000000000000000..309f20a08c53bb405367ccdd8a8472614eab1743 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ubm.h @@ -0,0 +1,354 @@ +// Code generated by interstellar. DO NOT EDIT. 
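The 48-bit statistics counters in the shaping block (the `*_PKT_LEN` registers, and the matching `cnt_l`/`cnt_h` pairs in later blocks) span two dwords, with the upper 16 bits carried in the second word. A sketch of reassembling one of them, under the same assumptions as the earlier example (`hw` is the mapped register BAR; the helper is illustrative only, not part of this patch):

  #include <linux/io.h>
  #include <linux/types.h>

  /* Read the split 48-bit "green bytes" counter as a single u64. */
  static u64 nbl_shaping_net_green_bytes(void __iomem *hw)
  {
  	union shaping_net_green_pkt_len_u len;

  	len.data[0] = readl(hw + NBL_SHAPING_NET_GREEN_PKT_LEN_ADDR);
  	len.data[1] = readl(hw + NBL_SHAPING_NET_GREEN_PKT_LEN_ADDR + 4);

  	/* cnt_l carries bits [31:0], cnt_h bits [47:32] of the counter. */
  	return ((u64)len.info.cnt_h << 32) | len.info.cnt_l;
  }

The `RCTR` attribute in the generated comments suggests clear-on-read semantics, so a caller would typically accumulate the values returned by successive reads rather than treat each one as a running total.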
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_UBM_H +#define NBL_UBM_H 1 + +#include <linux/types.h> + +#define NBL_UBM_BASE (0x0010C000) + +#define NBL_UBM_INT_STATUS_ADDR (0x10c000) +#define NBL_UBM_INT_STATUS_DEPTH (1) +#define NBL_UBM_INT_STATUS_WIDTH (32) +#define NBL_UBM_INT_STATUS_DWLEN (1) +union ubm_int_status_u { + struct ubm_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_w_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_r_err:1; /* [2] Default:0x0 RWC */ + u32 mif_wr_err:1; /* [3] Default:0x0 RWC */ + u32 mif_rd_err:1; /* [4] Default:0x0 RWC */ + u32 bitmap_pntr_err:1; /* [5] Default:0x0 RWC */ + u32 tail_pntr_err:1; /* [6] Default:0x0 RWC */ + u32 weight_pntr_err:1; /* [7] Default:0x0 RWC */ + u32 cor_err:1; /* [8] Default:0x0 RWC */ + u32 cif_err:1; /* [9] Default:0x0 RWC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_UBM_INT_MASK_ADDR (0x10c004) +#define NBL_UBM_INT_MASK_DEPTH (1) +#define NBL_UBM_INT_MASK_WIDTH (32) +#define NBL_UBM_INT_MASK_DWLEN (1) +union ubm_int_mask_u { + struct ubm_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 fifo_w_err:1; /* [1] Default:0x0 RW */ + u32 fifo_r_err:1; /* [2] Default:0x0 RW */ + u32 mif_wr_err:1; /* [3] Default:0x0 RW */ + u32 mif_rd_err:1; /* [4] Default:0x0 RW */ + u32 bitmap_pntr_err:1; /* [5] Default:0x0 RW */ + u32 tail_pntr_err:1; /* [6] Default:0x0 RW */ + u32 weight_pntr_err:1; /* [7] Default:0x0 RW */ + u32 cor_err:1; /* [8] Default:0x0 RW */ + u32 cif_err:1; /* [9] Default:0x0 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UBM_INT_SET_ADDR (0x10c008) +#define NBL_UBM_INT_SET_DEPTH (1) +#define NBL_UBM_INT_SET_WIDTH (32) +#define NBL_UBM_INT_SET_DWLEN (1) +union ubm_int_set_u { + struct ubm_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 fifo_w_err:1; /* [1] Default:0x0 WO */ + u32 fifo_r_err:1; /* [2] Default:0x0 WO */ + u32 mif_wr_err:1; /* [3] Default:0x0 WO */ + u32 mif_rd_err:1; /* [4] Default:0x0 WO */ + u32 bitmap_pntr_err:1; /* [5] Default:0x0 WO */ + u32 tail_pntr_err:1; /* [6] Default:0x0 WO */ + u32 weight_pntr_err:1; /* [7] Default:0x0 WO */ + u32 cor_err:1; /* [8] Default:0x0 WO */ + u32 cif_err:1; /* [9] Default:0x0 WO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_INT_SET_DWLEN]; +} __packed; + +#define NBL_UBM_UCOR_ERR_INFO_ADDR (0x10c00c) +#define NBL_UBM_UCOR_ERR_INFO_DEPTH (1) +#define NBL_UBM_UCOR_ERR_INFO_WIDTH (32) +#define NBL_UBM_UCOR_ERR_INFO_DWLEN (1) +union ubm_ucor_err_info_u { + struct ubm_ucor_err_info { + u32 ram_addr:12; /* [11:0] Default:0x0 RO */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_UCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_MIF_WR_ERR_INFO_ADDR (0x10c02c) +#define NBL_UBM_MIF_WR_ERR_INFO_DEPTH (1) +#define NBL_UBM_MIF_WR_ERR_INFO_WIDTH (32) +#define NBL_UBM_MIF_WR_ERR_INFO_DWLEN (1) +union ubm_mif_wr_err_info_u { + struct ubm_mif_wr_err_info { + u32 sel:1; /* [0] Default:0x0 RO */ + u32 err_type_id:2; /* [2:1] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_MIF_WR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_MIF_RD_ERR_INFO_ADDR (0x10c034) +#define NBL_UBM_MIF_RD_ERR_INFO_DEPTH (1) +#define NBL_UBM_MIF_RD_ERR_INFO_WIDTH (32) +#define NBL_UBM_MIF_RD_ERR_INFO_DWLEN
(1) +union ubm_mif_rd_err_info_u { + struct ubm_mif_rd_err_info { + u32 sel:1; /* [0] Default:0x0 RO */ + u32 err_type_id:1; /* [1] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_MIF_RD_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_WEIGHT_PNTR_ERR_INFO_ADDR (0x10c03c) +#define NBL_UBM_WEIGHT_PNTR_ERR_INFO_DEPTH (1) +#define NBL_UBM_WEIGHT_PNTR_ERR_INFO_WIDTH (32) +#define NBL_UBM_WEIGHT_PNTR_ERR_INFO_DWLEN (1) +union ubm_weight_pntr_err_info_u { + struct ubm_weight_pntr_err_info { + u32 ram_addr:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_WEIGHT_PNTR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_TAIL_PNTR_ERR_INFO_ADDR (0x10c044) +#define NBL_UBM_TAIL_PNTR_ERR_INFO_DEPTH (1) +#define NBL_UBM_TAIL_PNTR_ERR_INFO_WIDTH (32) +#define NBL_UBM_TAIL_PNTR_ERR_INFO_DWLEN (1) +union ubm_tail_pntr_err_info_u { + struct ubm_tail_pntr_err_info { + u32 head_pntr:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_TAIL_PNTR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_BITMAP_PNTR_ERR_INFO_ADDR (0x10c04c) +#define NBL_UBM_BITMAP_PNTR_ERR_INFO_DEPTH (1) +#define NBL_UBM_BITMAP_PNTR_ERR_INFO_WIDTH (32) +#define NBL_UBM_BITMAP_PNTR_ERR_INFO_DWLEN (1) +union ubm_bitmap_pntr_err_info_u { + struct ubm_bitmap_pntr_err_info { + u32 ram_addr:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_BITMAP_PNTR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_PNTR_AGING_INFO_ADDR (0x10c054) +#define NBL_UBM_PNTR_AGING_INFO_DEPTH (1) +#define NBL_UBM_PNTR_AGING_INFO_WIDTH (32) +#define NBL_UBM_PNTR_AGING_INFO_DWLEN (1) +union ubm_pntr_aging_info_u { + struct ubm_pntr_aging_info { + u32 addr:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_PNTR_AGING_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_COR_ERR_INFO_ADDR (0x10c05c) +#define NBL_UBM_COR_ERR_INFO_DEPTH (1) +#define NBL_UBM_COR_ERR_INFO_WIDTH (32) +#define NBL_UBM_COR_ERR_INFO_DWLEN (1) +union ubm_cor_err_info_u { + struct ubm_cor_err_info { + u32 ram_addr:12; /* [11:0] Default:0x0 RO */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_CIF_ERR_INFO_ADDR (0x10c064) +#define NBL_UBM_CIF_ERR_INFO_DEPTH (1) +#define NBL_UBM_CIF_ERR_INFO_WIDTH (32) +#define NBL_UBM_CIF_ERR_INFO_DWLEN (1) +union ubm_cif_err_info_u { + struct ubm_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UBM_CAR_CTRL_ADDR (0x10c100) +#define NBL_UBM_CAR_CTRL_DEPTH (1) +#define NBL_UBM_CAR_CTRL_WIDTH (32) +#define NBL_UBM_CAR_CTRL_DWLEN (1) +union ubm_car_ctrl_u { + struct ubm_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UBM_INIT_START_ADDR (0x10c104) +#define NBL_UBM_INIT_START_DEPTH (1) +#define NBL_UBM_INIT_START_WIDTH (32) +#define NBL_UBM_INIT_START_DWLEN (1) +union 
ubm_init_start_u { + struct ubm_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_INIT_START_DWLEN]; +} __packed; + +#define NBL_UBM_AGING_EN_ADDR (0x10c120) +#define NBL_UBM_AGING_EN_DEPTH (1) +#define NBL_UBM_AGING_EN_WIDTH (32) +#define NBL_UBM_AGING_EN_DWLEN (1) +union ubm_aging_en_u { + struct ubm_aging_en { + u32 vld:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_AGING_EN_DWLEN]; +} __packed; + +#define NBL_UBM_AGING_TIME_UNIT_ADDR (0x10c124) +#define NBL_UBM_AGING_TIME_UNIT_DEPTH (1) +#define NBL_UBM_AGING_TIME_UNIT_WIDTH (32) +#define NBL_UBM_AGING_TIME_UNIT_DWLEN (1) +union ubm_aging_time_unit_u { + struct ubm_aging_time_unit { + u32 value:32; /* [31:0] Default:0xffff RW */ + } __packed info; + u32 data[NBL_UBM_AGING_TIME_UNIT_DWLEN]; +} __packed; + +#define NBL_UBM_LIST_RAM_RD_ADDR (0x10c128) +#define NBL_UBM_LIST_RAM_RD_DEPTH (1) +#define NBL_UBM_LIST_RAM_RD_WIDTH (32) +#define NBL_UBM_LIST_RAM_RD_DWLEN (1) +union ubm_list_ram_rd_u { + struct ubm_list_ram_rd { + u32 sel:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_LIST_RAM_RD_DWLEN]; +} __packed; + +#define NBL_UBM_INIT_DONE_ADDR (0x10c200) +#define NBL_UBM_INIT_DONE_DEPTH (1) +#define NBL_UBM_INIT_DONE_WIDTH (32) +#define NBL_UBM_INIT_DONE_DWLEN (1) +union ubm_init_done_u { + struct ubm_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UBM_NFULL_HISTORY_ADDR (0x10c218) +#define NBL_UBM_NFULL_HISTORY_DEPTH (1) +#define NBL_UBM_NFULL_HISTORY_WIDTH (32) +#define NBL_UBM_NFULL_HISTORY_DWLEN (1) +union ubm_nfull_history_u { + struct ubm_nfull_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_NFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_UBM_NAFULL_HISTORY_ADDR (0x10c21c) +#define NBL_UBM_NAFULL_HISTORY_DEPTH (1) +#define NBL_UBM_NAFULL_HISTORY_WIDTH (32) +#define NBL_UBM_NAFULL_HISTORY_DWLEN (1) +union ubm_nafull_history_u { + struct ubm_nafull_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_NAFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_UBM_WERR_HISTORY_ADDR (0x10c220) +#define NBL_UBM_WERR_HISTORY_DEPTH (1) +#define NBL_UBM_WERR_HISTORY_WIDTH (32) +#define NBL_UBM_WERR_HISTORY_DWLEN (1) +union ubm_werr_history_u { + struct ubm_werr_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_WERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_UBM_RERR_HISTORY_ADDR (0x10c224) +#define NBL_UBM_RERR_HISTORY_DEPTH (1) +#define NBL_UBM_RERR_HISTORY_WIDTH (32) +#define NBL_UBM_RERR_HISTORY_DWLEN (1) +union ubm_rerr_history_u { + struct ubm_rerr_history { + u32 ped:1; /* [0] Default:0x0 RC */ + u32 uqm:1; /* [1] Default:0x0 
RC */ + u32 split:1; /* [2] Default:0x0 RC */ + u32 chk:1; /* [3] Default:0x0 RC */ + u32 pntr:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_RERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_UBM_BITMAP_RAM_STATUS_CURR_ADDR (0x10c400) +#define NBL_UBM_BITMAP_RAM_STATUS_CURR_DEPTH (128) +#define NBL_UBM_BITMAP_RAM_STATUS_CURR_WIDTH (32) +#define NBL_UBM_BITMAP_RAM_STATUS_CURR_DWLEN (1) +union ubm_bitmap_ram_status_curr_u { + struct ubm_bitmap_ram_status_curr { + u32 bitmap_pntr:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_BITMAP_RAM_STATUS_CURR_DWLEN]; +} __packed; +#define NBL_UBM_BITMAP_RAM_STATUS_CURR_REG(r) (NBL_UBM_BITMAP_RAM_STATUS_CURR_ADDR + \ + (NBL_UBM_BITMAP_RAM_STATUS_CURR_DWLEN * 4) * (r)) + +#define NBL_UBM_LIST_RAM_ADDR (0x110000) +#define NBL_UBM_LIST_RAM_DEPTH (4096) +#define NBL_UBM_LIST_RAM_WIDTH (32) +#define NBL_UBM_LIST_RAM_DWLEN (1) +union ubm_list_ram_u { + struct ubm_list_ram { + u32 pntr:13; /* [12:0] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UBM_LIST_RAM_DWLEN]; +} __packed; +#define NBL_UBM_LIST_RAM_REG(r) (NBL_UBM_LIST_RAM_ADDR + \ + (NBL_UBM_LIST_RAM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h new file mode 100644 index 0000000000000000000000000000000000000000..0f6add4268383d9ccf23ddd09f0139dacaaf5127 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ucar.h @@ -0,0 +1,409 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_UCAR_H +#define NBL_UCAR_H 1 + +#include <linux/types.h> + +#define NBL_UCAR_BASE (0x00E84000) + +#define NBL_UCAR_INT_STATUS_ADDR (0xe84000) +#define NBL_UCAR_INT_STATUS_DEPTH (1) +#define NBL_UCAR_INT_STATUS_WIDTH (32) +#define NBL_UCAR_INT_STATUS_DWLEN (1) +union ucar_int_status_u { + struct ucar_int_status { + u32 color_err:1; /* [0] Default:0x0 RWC */ + u32 parity_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 cif_err:1; /* [3] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [4] Default:0x0 RWC */ + u32 atid_nomat_err:1; /* [5] Default:0x0 RWC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_UCAR_INT_MASK_ADDR (0xe84004) +#define NBL_UCAR_INT_MASK_DEPTH (1) +#define NBL_UCAR_INT_MASK_WIDTH (32) +#define NBL_UCAR_INT_MASK_DWLEN (1) +union ucar_int_mask_u { + struct ucar_int_mask { + u32 color_err:1; /* [0] Default:0x1 RW */ + u32 parity_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 cif_err:1; /* [3] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [4] Default:0x0 RW */ + u32 atid_nomat_err:1; /* [5] Default:0x1 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UCAR_INT_SET_ADDR (0xe84008) +#define NBL_UCAR_INT_SET_DEPTH (1) +#define NBL_UCAR_INT_SET_WIDTH (32) +#define NBL_UCAR_INT_SET_DWLEN (1) +union ucar_int_set_u { + struct ucar_int_set { + u32 color_err:1; /* [0] Default:0x0 WO */ + u32 parity_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 cif_err:1; /* [3] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [4] Default:0x0 WO */ + u32 atid_nomat_err:1; /* [5] Default:0x0 WO */ + u32
rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INT_SET_DWLEN]; +} __packed; + +#define NBL_UCAR_PARITY_ERR_INFO_ADDR (0xe84104) +#define NBL_UCAR_PARITY_ERR_INFO_DEPTH (1) +#define NBL_UCAR_PARITY_ERR_INFO_WIDTH (32) +#define NBL_UCAR_PARITY_ERR_INFO_DWLEN (1) +union ucar_parity_err_info_u { + struct ucar_parity_err_info { + u32 ram_addr:12; /* [11:0] Default:0x0 RO */ + u32 ram_id:3; /* [14:12] Default:0x0 RO */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UCAR_CIF_ERR_INFO_ADDR (0xe8411c) +#define NBL_UCAR_CIF_ERR_INFO_DEPTH (1) +#define NBL_UCAR_CIF_ERR_INFO_WIDTH (32) +#define NBL_UCAR_CIF_ERR_INFO_DWLEN (1) +union ucar_cif_err_info_u { + struct ucar_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_ADDR (0xe84134) +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_DEPTH (1) +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_WIDTH (32) +#define NBL_UCAR_ATID_NOMAT_ERR_INFO_DWLEN (1) +union ucar_atid_nomat_err_info_u { + struct ucar_atid_nomat_err_info { + u32 id:2; /* [1:0] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_ATID_NOMAT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_CTRL_ADDR (0xe84200) +#define NBL_UCAR_CAR_CTRL_DEPTH (1) +#define NBL_UCAR_CAR_CTRL_WIDTH (32) +#define NBL_UCAR_CAR_CTRL_DWLEN (1) +union ucar_car_ctrl_u { + struct ucar_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UCAR_INIT_START_ADDR (0xe84204) +#define NBL_UCAR_INIT_START_DEPTH (1) +#define NBL_UCAR_INIT_START_WIDTH (32) +#define NBL_UCAR_INIT_START_DWLEN (1) +union ucar_init_start_u { + struct ucar_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INIT_START_DWLEN]; +} __packed; + +#define NBL_UCAR_FWD_CARID_ADDR (0xe84210) +#define NBL_UCAR_FWD_CARID_DEPTH (1) +#define NBL_UCAR_FWD_CARID_WIDTH (32) +#define NBL_UCAR_FWD_CARID_DWLEN (1) +union ucar_fwd_carid_u { + struct ucar_fwd_carid { + u32 act_id:6; /* [5:0] Default:0x5 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FWD_CARID_DWLEN]; +} __packed; + +#define NBL_UCAR_FWD_FLOW_CAR_ADDR (0xe84214) +#define NBL_UCAR_FWD_FLOW_CAR_DEPTH (1) +#define NBL_UCAR_FWD_FLOW_CAR_WIDTH (32) +#define NBL_UCAR_FWD_FLOW_CAR_DWLEN (1) +union ucar_fwd_flow_car_u { + struct ucar_fwd_flow_car { + u32 act_id:6; /* [5:0] Default:0x6 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FWD_FLOW_CAR_DWLEN]; +} __packed; + +#define NBL_UCAR_PBS_SUB_ADDR (0xe84224) +#define NBL_UCAR_PBS_SUB_DEPTH (1) +#define NBL_UCAR_PBS_SUB_WIDTH (32) +#define NBL_UCAR_PBS_SUB_DWLEN (1) +union ucar_pbs_sub_u { + struct ucar_pbs_sub { + u32 sel:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_PBS_SUB_DWLEN]; +} __packed; + +#define NBL_UCAR_FLOW_TIMMING_ADD_ADDR (0xe84400) +#define NBL_UCAR_FLOW_TIMMING_ADD_DEPTH (1) +#define NBL_UCAR_FLOW_TIMMING_ADD_WIDTH (32) +#define 
NBL_UCAR_FLOW_TIMMING_ADD_DWLEN (1) +union ucar_flow_timming_add_u { + struct ucar_flow_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x4 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 depth:14; /* [29:16] Default:0x4B0 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_TIMMING_ADD_DWLEN]; +} __packed; + +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_ADDR (0xe84404) +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_DEPTH (1) +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_WIDTH (32) +#define NBL_UCAR_FLOW_4K_TIMMING_ADD_DWLEN (1) +union ucar_flow_4k_timming_add_u { + struct ucar_flow_4k_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x4 RW */ + u32 depth:18; /* [29:12] Default:0x12C0 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_4K_TIMMING_ADD_DWLEN]; +} __packed; + +#define NBL_UCAR_INIT_DONE_ADDR (0xe84408) +#define NBL_UCAR_INIT_DONE_DEPTH (1) +#define NBL_UCAR_INIT_DONE_WIDTH (32) +#define NBL_UCAR_INIT_DONE_DWLEN (1) +union ucar_init_done_u { + struct ucar_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UCAR_INPUT_CELL_ADDR (0xe8441c) +#define NBL_UCAR_INPUT_CELL_DEPTH (1) +#define NBL_UCAR_INPUT_CELL_WIDTH (32) +#define NBL_UCAR_INPUT_CELL_DWLEN (1) +union ucar_input_cell_u { + struct ucar_input_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_INPUT_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_RD_CELL_ADDR (0xe84420) +#define NBL_UCAR_RD_CELL_DEPTH (1) +#define NBL_UCAR_RD_CELL_WIDTH (32) +#define NBL_UCAR_RD_CELL_DWLEN (1) +union ucar_rd_cell_u { + struct ucar_rd_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_RD_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_CELL_ADDR (0xe84424) +#define NBL_UCAR_CAR_CELL_DEPTH (1) +#define NBL_UCAR_CAR_CELL_WIDTH (32) +#define NBL_UCAR_CAR_CELL_DWLEN (1) +union ucar_car_cell_u { + struct ucar_car_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_CAR_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_FLOW_CELL_ADDR (0xe84428) +#define NBL_UCAR_CAR_FLOW_CELL_DEPTH (1) +#define NBL_UCAR_CAR_FLOW_CELL_WIDTH (32) +#define NBL_UCAR_CAR_FLOW_CELL_DWLEN (1) +union ucar_car_flow_cell_u { + struct ucar_car_flow_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_CAR_FLOW_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_CAR_FLOW_4K_CELL_ADDR (0xe8442c) +#define NBL_UCAR_CAR_FLOW_4K_CELL_DEPTH (1) +#define NBL_UCAR_CAR_FLOW_4K_CELL_WIDTH (32) +#define NBL_UCAR_CAR_FLOW_4K_CELL_DWLEN (1) +union ucar_car_flow_4k_cell_u { + struct ucar_car_flow_4k_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_CAR_FLOW_4K_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_NOCAR_CELL_ADDR (0xe84430) +#define NBL_UCAR_NOCAR_CELL_DEPTH (1) +#define NBL_UCAR_NOCAR_CELL_WIDTH (32) +#define NBL_UCAR_NOCAR_CELL_DWLEN (1) +union ucar_nocar_cell_u { + struct ucar_nocar_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_NOCAR_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_NOCAR_ERR_ADDR (0xe84434) +#define NBL_UCAR_NOCAR_ERR_DEPTH (1) +#define NBL_UCAR_NOCAR_ERR_WIDTH (32) +#define NBL_UCAR_NOCAR_ERR_DWLEN (1) +union ucar_nocar_err_u { + struct ucar_nocar_err { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 
data[NBL_UCAR_NOCAR_ERR_DWLEN]; +} __packed; + +#define NBL_UCAR_GREEN_CELL_ADDR (0xe84438) +#define NBL_UCAR_GREEN_CELL_DEPTH (1) +#define NBL_UCAR_GREEN_CELL_WIDTH (32) +#define NBL_UCAR_GREEN_CELL_DWLEN (1) +union ucar_green_cell_u { + struct ucar_green_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_GREEN_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_YELLOW_CELL_ADDR (0xe8443c) +#define NBL_UCAR_YELLOW_CELL_DEPTH (1) +#define NBL_UCAR_YELLOW_CELL_WIDTH (32) +#define NBL_UCAR_YELLOW_CELL_DWLEN (1) +union ucar_yellow_cell_u { + struct ucar_yellow_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_YELLOW_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_RED_CELL_ADDR (0xe84440) +#define NBL_UCAR_RED_CELL_DEPTH (1) +#define NBL_UCAR_RED_CELL_WIDTH (32) +#define NBL_UCAR_RED_CELL_DWLEN (1) +union ucar_red_cell_u { + struct ucar_red_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_RED_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_NOCAR_PKT_ADDR (0xe84444) +#define NBL_UCAR_NOCAR_PKT_DEPTH (1) +#define NBL_UCAR_NOCAR_PKT_WIDTH (48) +#define NBL_UCAR_NOCAR_PKT_DWLEN (2) +union ucar_nocar_pkt_u { + struct ucar_nocar_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_NOCAR_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_GREEN_PKT_ADDR (0xe8444c) +#define NBL_UCAR_GREEN_PKT_DEPTH (1) +#define NBL_UCAR_GREEN_PKT_WIDTH (48) +#define NBL_UCAR_GREEN_PKT_DWLEN (2) +union ucar_green_pkt_u { + struct ucar_green_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_GREEN_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_YELLOW_PKT_ADDR (0xe84454) +#define NBL_UCAR_YELLOW_PKT_DEPTH (1) +#define NBL_UCAR_YELLOW_PKT_WIDTH (48) +#define NBL_UCAR_YELLOW_PKT_DWLEN (2) +union ucar_yellow_pkt_u { + struct ucar_yellow_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_YELLOW_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_RED_PKT_ADDR (0xe8445c) +#define NBL_UCAR_RED_PKT_DEPTH (1) +#define NBL_UCAR_RED_PKT_WIDTH (48) +#define NBL_UCAR_RED_PKT_DWLEN (2) +union ucar_red_pkt_u { + struct ucar_red_pkt { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_RED_PKT_DWLEN]; +} __packed; + +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_ADDR (0xe84464) +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_DEPTH (1) +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_WIDTH (32) +#define NBL_UCAR_FWD_TYPE_WRONG_CELL_DWLEN (1) +union ucar_fwd_type_wrong_cell_u { + struct ucar_fwd_type_wrong_cell { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UCAR_FWD_TYPE_WRONG_CELL_DWLEN]; +} __packed; + +#define NBL_UCAR_FLOW_ADDR (0xe88000) +#define NBL_UCAR_FLOW_DEPTH (1024) +#define NBL_UCAR_FLOW_WIDTH (128) +#define NBL_UCAR_FLOW_DWLEN (4) +union ucar_flow_u { + struct ucar_flow { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_DWLEN]; +} __packed; +#define NBL_UCAR_FLOW_REG(r) (NBL_UCAR_FLOW_ADDR + \ + 
(NBL_UCAR_FLOW_DWLEN * 4) * (r)) + +#define NBL_UCAR_FLOW_4K_ADDR (0xe94000) +#define NBL_UCAR_FLOW_4K_DEPTH (4096) +#define NBL_UCAR_FLOW_4K_WIDTH (128) +#define NBL_UCAR_FLOW_4K_DWLEN (4) +union ucar_flow_4k_u { + struct ucar_flow_4k { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:21; /* [21:1] Default:0x0 RW */ + u32 cir:21; /* [42:22] Default:0x0 RW */ + u32 pir:21; /* [63:43] Default:0x0 RW */ + u32 cbs:23; /* [86:64] Default:0x0 RW */ + u32 pbs:23; /* [109:87] Default:0x0 RW */ + u32 rsv:18; /* [127:110] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UCAR_FLOW_4K_DWLEN]; +} __packed; +#define NBL_UCAR_FLOW_4K_REG(r) (NBL_UCAR_FLOW_4K_ADDR + \ + (NBL_UCAR_FLOW_4K_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ul4s.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ul4s.h new file mode 100644 index 0000000000000000000000000000000000000000..e2aa51ac023203cb2f8a8a27204eb39c73b2460d --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ul4s.h @@ -0,0 +1,893 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_UL4S_H +#define NBL_UL4S_H 1 + +#include <linux/types.h> + +#define NBL_UL4S_BASE (0x00204000) + +#define NBL_UL4S_INT_STATUS_ADDR (0x204000) +#define NBL_UL4S_INT_STATUS_DEPTH (1) +#define NBL_UL4S_INT_STATUS_WIDTH (32) +#define NBL_UL4S_INT_STATUS_DWLEN (1) +union ul4s_int_status_u { + struct ul4s_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 parity_err:1; /* [5] Default:0x0 RWC */ + u32 ce_mod_err:1; /* [6] Default:0x0 RWC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_UL4S_INT_MASK_ADDR (0x204004) +#define NBL_UL4S_INT_MASK_DEPTH (1) +#define NBL_UL4S_INT_MASK_WIDTH (32) +#define NBL_UL4S_INT_MASK_DWLEN (1) +union ul4s_int_mask_u { + struct ul4s_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 parity_err:1; /* [5] Default:0x0 RW */ + u32 ce_mod_err:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UL4S_INT_SET_ADDR (0x204008) +#define NBL_UL4S_INT_SET_DEPTH (1) +#define NBL_UL4S_INT_SET_WIDTH (32) +#define NBL_UL4S_INT_SET_DWLEN (1) +union ul4s_int_set_u { + struct ul4s_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 parity_err:1; /* [5] Default:0x0 WO */ + u32 ce_mod_err:1; /* [6] Default:0x0 WO */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INT_SET_DWLEN]; +} __packed; + +#define NBL_UL4S_COR_ERR_INFO_ADDR (0x204010) +#define NBL_UL4S_COR_ERR_INFO_DEPTH (1) +#define NBL_UL4S_COR_ERR_INFO_WIDTH (32) +#define NBL_UL4S_COR_ERR_INFO_DWLEN (1) +union ul4s_cor_err_info_u { + struct ul4s_cor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32
ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_UNCOR_ERR_INFO_ADDR (0x204018) +#define NBL_UL4S_UNCOR_ERR_INFO_DEPTH (1) +#define NBL_UL4S_UNCOR_ERR_INFO_WIDTH (32) +#define NBL_UL4S_UNCOR_ERR_INFO_DWLEN (1) +union ul4s_uncor_err_info_u { + struct ul4s_uncor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_UNCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_PARITY_ERR_INFO_ADDR (0x204030) +#define NBL_UL4S_PARITY_ERR_INFO_DEPTH (1) +#define NBL_UL4S_PARITY_ERR_INFO_WIDTH (32) +#define NBL_UL4S_PARITY_ERR_INFO_DWLEN (1) +union ul4s_parity_err_info_u { + struct ul4s_parity_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_CIF_ERR_INFO_ADDR (0x204038) +#define NBL_UL4S_CIF_ERR_INFO_DEPTH (1) +#define NBL_UL4S_CIF_ERR_INFO_WIDTH (32) +#define NBL_UL4S_CIF_ERR_INFO_DWLEN (1) +union ul4s_cif_err_info_u { + struct ul4s_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_CAR_CTRL_ADDR (0x204100) +#define NBL_UL4S_CAR_CTRL_DEPTH (1) +#define NBL_UL4S_CAR_CTRL_WIDTH (32) +#define NBL_UL4S_CAR_CTRL_DWLEN (1) +union ul4s_car_ctrl_u { + struct ul4s_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UL4S_INIT_START_ADDR (0x204104) +#define NBL_UL4S_INIT_START_DEPTH (1) +#define NBL_UL4S_INIT_START_WIDTH (32) +#define NBL_UL4S_INIT_START_DWLEN (1) +union ul4s_init_start_u { + struct ul4s_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INIT_START_DWLEN]; +} __packed; + +#define NBL_UL4S_INTF_DATA_BUF_TH_ADDR (0x204124) +#define NBL_UL4S_INTF_DATA_BUF_TH_DEPTH (1) +#define NBL_UL4S_INTF_DATA_BUF_TH_WIDTH (32) +#define NBL_UL4S_INTF_DATA_BUF_TH_DWLEN (1) +union ul4s_intf_data_buf_th_u { + struct ul4s_intf_data_buf_th { + u32 aful_high_th:8; /* [7:0] Default:224 RW */ + u32 aful_low_th:8; /* [15:8] Default:200 RW */ + u32 drop_th:8; /* [23:16] Default:168 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INTF_DATA_BUF_TH_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UDP_BUF_TH_ADDR (0x204180) +#define NBL_UL4S_REC_UDP_BUF_TH_DEPTH (1) +#define NBL_UL4S_REC_UDP_BUF_TH_WIDTH (32) +#define NBL_UL4S_REC_UDP_BUF_TH_DWLEN (1) +union ul4s_rec_udp_buf_th_u { + struct ul4s_rec_udp_buf_th { + u32 aful_high_th:8; /* [7:0] Default:32 RW */ + u32 rsv1:8; /* [15:8] Default:0x0 RO */ + u32 aful_low_th:8; /* [23:16] Default:24 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UDP_BUF_TH_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UDP_DONE_DATA_BUF_TH_ADDR (0x20418c) +#define 
NBL_UL4S_REC_UDP_DONE_DATA_BUF_TH_DEPTH (1) +#define NBL_UL4S_REC_UDP_DONE_DATA_BUF_TH_WIDTH (32) +#define NBL_UL4S_REC_UDP_DONE_DATA_BUF_TH_DWLEN (1) +union ul4s_rec_udp_done_data_buf_th_u { + struct ul4s_rec_udp_done_data_buf_th { + u32 aful_high_th:9; /* [8:0] Default:320 RW */ + u32 rsv1:7; /* [15:9] Default:0x0 RO */ + u32 aful_low_th:9; /* [24:16] Default:256 RW */ + u32 rsv:7; /* [31:25] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UDP_DONE_DATA_BUF_TH_DWLEN]; +} __packed; + +#define NBL_UL4S_CFG_TCP_REC_NUM_STATUS_CURR_ADDR (0x204204) +#define NBL_UL4S_CFG_TCP_REC_NUM_STATUS_CURR_DEPTH (1) +#define NBL_UL4S_CFG_TCP_REC_NUM_STATUS_CURR_WIDTH (32) +#define NBL_UL4S_CFG_TCP_REC_NUM_STATUS_CURR_DWLEN (1) +union ul4s_cfg_tcp_rec_num_status_curr_u { + struct ul4s_cfg_tcp_rec_num_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:6; /* [9:4] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_CFG_TCP_REC_NUM_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_UL4S_INTF_DATA_BUF_STATUS_CURR_ADDR (0x204224) +#define NBL_UL4S_INTF_DATA_BUF_STATUS_CURR_DEPTH (1) +#define NBL_UL4S_INTF_DATA_BUF_STATUS_CURR_WIDTH (32) +#define NBL_UL4S_INTF_DATA_BUF_STATUS_CURR_DWLEN (1) +union ul4s_intf_data_buf_status_curr_u { + struct ul4s_intf_data_buf_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; /* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INTF_DATA_BUF_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UDP_BUF_STATUS_CURR_ADDR (0x204280) +#define NBL_UL4S_REC_UDP_BUF_STATUS_CURR_DEPTH (1) +#define NBL_UL4S_REC_UDP_BUF_STATUS_CURR_WIDTH (32) +#define NBL_UL4S_REC_UDP_BUF_STATUS_CURR_DWLEN (1) +union ul4s_rec_udp_buf_status_curr_u { + struct ul4s_rec_udp_buf_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; /* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UDP_BUF_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UDP_INFO_STATUS_CURR_ADDR (0x204284) +#define NBL_UL4S_REC_UDP_INFO_STATUS_CURR_DEPTH (1) +#define NBL_UL4S_REC_UDP_INFO_STATUS_CURR_WIDTH (32) +#define NBL_UL4S_REC_UDP_INFO_STATUS_CURR_DWLEN (1) +union ul4s_rec_udp_info_status_curr_u { + struct ul4s_rec_udp_info_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:8; /* [11:4] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UDP_INFO_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UDP_DATA_DONE_STATUS_CURR_ADDR (0x204290) +#define NBL_UL4S_REC_UDP_DATA_DONE_STATUS_CURR_DEPTH (1) +#define NBL_UL4S_REC_UDP_DATA_DONE_STATUS_CURR_WIDTH (32) +#define NBL_UL4S_REC_UDP_DATA_DONE_STATUS_CURR_DWLEN (1) +union ul4s_rec_udp_data_done_status_curr_u { + struct ul4s_rec_udp_data_done_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] 
Default:0x1 RO */ + u32 cnt:9; /* [12:4] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UDP_DATA_DONE_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UDP_INFO_DONE_STATUS_CURR_ADDR (0x204294) +#define NBL_UL4S_REC_UDP_INFO_DONE_STATUS_CURR_DEPTH (1) +#define NBL_UL4S_REC_UDP_INFO_DONE_STATUS_CURR_WIDTH (32) +#define NBL_UL4S_REC_UDP_INFO_DONE_STATUS_CURR_DWLEN (1) +union ul4s_rec_udp_info_done_status_curr_u { + struct ul4s_rec_udp_info_done_status_curr { + u32 nempty:1; /* [0] Default:0x0 RO */ + u32 nfull:1; /* [1] Default:0x1 RO */ + u32 naempty:1; /* [2] Default:0x0 RO */ + u32 nafull:1; /* [3] Default:0x1 RO */ + u32 cnt:10; /* [13:4] Default:0x0 RO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UDP_INFO_DONE_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_UL4S_INTF_RX_INFO_ADDR (0x204400) +#define NBL_UL4S_INTF_RX_INFO_DEPTH (1) +#define NBL_UL4S_INTF_RX_INFO_WIDTH (32) +#define NBL_UL4S_INTF_RX_INFO_DWLEN (1) +union ul4s_intf_rx_info_u { + struct ul4s_intf_rx_info { + u32 cell_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 un_pl_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_INTF_RX_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_INTF_RX_DATA_ADDR (0x204404) +#define NBL_UL4S_INTF_RX_DATA_DEPTH (1) +#define NBL_UL4S_INTF_RX_DATA_WIDTH (32) +#define NBL_UL4S_INTF_RX_DATA_DWLEN (1) +union ul4s_intf_rx_data_u { + struct ul4s_intf_rx_data { + u32 cell_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INTF_RX_DATA_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_RX_CELL_ADDR (0x204408) +#define NBL_UL4S_L4S_RX_CELL_DEPTH (1) +#define NBL_UL4S_L4S_RX_CELL_WIDTH (32) +#define NBL_UL4S_L4S_RX_CELL_DWLEN (1) +union ul4s_l4s_rx_cell_u { + struct ul4s_l4s_rx_cell { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_L4S_RX_CELL_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_RX_INFO_ADDR (0x20440c) +#define NBL_UL4S_L4S_RX_INFO_DEPTH (1) +#define NBL_UL4S_L4S_RX_INFO_WIDTH (32) +#define NBL_UL4S_L4S_RX_INFO_DWLEN (1) +union ul4s_l4s_rx_info_u { + struct ul4s_l4s_rx_info { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 drcnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_L4S_RX_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_RX_DATA_ADDR (0x204410) +#define NBL_UL4S_L4S_RX_DATA_DEPTH (1) +#define NBL_UL4S_L4S_RX_DATA_WIDTH (32) +#define NBL_UL4S_L4S_RX_DATA_DWLEN (1) +union ul4s_l4s_rx_data_u { + struct ul4s_l4s_rx_data { + u32 len_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_L4S_RX_DATA_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_TX_INFO_ADDR (0x204414) +#define NBL_UL4S_L4S_TX_INFO_DEPTH (1) +#define NBL_UL4S_L4S_TX_INFO_WIDTH (32) +#define NBL_UL4S_L4S_TX_INFO_DWLEN (1) +union ul4s_l4s_tx_info_u { + struct ul4s_l4s_tx_info { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_L4S_TX_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_TX_LEN_ADDR (0x204418) +#define NBL_UL4S_L4S_TX_LEN_DEPTH (1) +#define NBL_UL4S_L4S_TX_LEN_WIDTH (32) +#define NBL_UL4S_L4S_TX_LEN_DWLEN (1) +union ul4s_l4s_tx_len_u { + struct ul4s_l4s_tx_len { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_L4S_TX_LEN_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_TCP_SN_INI_ADDR (0x204420) 
+#define NBL_UL4S_L4S_TCP_SN_INI_DEPTH (1) +#define NBL_UL4S_L4S_TCP_SN_INI_WIDTH (32) +#define NBL_UL4S_L4S_TCP_SN_INI_DWLEN (1) +union ul4s_l4s_tcp_sn_ini_u { + struct ul4s_l4s_tcp_sn_ini { + u32 cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_L4S_TCP_SN_INI_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_INFO_PRE_RD_ADDR (0x204424) +#define NBL_UL4S_L4S_INFO_PRE_RD_DEPTH (1) +#define NBL_UL4S_L4S_INFO_PRE_RD_WIDTH (32) +#define NBL_UL4S_L4S_INFO_PRE_RD_DWLEN (1) +union ul4s_l4s_info_pre_rd_u { + struct ul4s_l4s_info_pre_rd { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_L4S_INFO_PRE_RD_DWLEN]; +} __packed; + +#define NBL_UL4S_L4S_INFO_CELL_ADDR (0x204428) +#define NBL_UL4S_L4S_INFO_CELL_DEPTH (1) +#define NBL_UL4S_L4S_INFO_CELL_WIDTH (32) +#define NBL_UL4S_L4S_INFO_CELL_DWLEN (1) +union ul4s_l4s_info_cell_u { + struct ul4s_l4s_info_cell { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_L4S_INFO_CELL_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_PARSE_RX_ADDR (0x204440) +#define NBL_UL4S_REC_PARSE_RX_DEPTH (1) +#define NBL_UL4S_REC_PARSE_RX_WIDTH (32) +#define NBL_UL4S_REC_PARSE_RX_DWLEN (1) +union ul4s_rec_parse_rx_u { + struct ul4s_rec_parse_rx { + u32 ind_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 init_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 resync_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 loss_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_PARSE_RX_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_PARSE_INFO_ADDR (0x204444) +#define NBL_UL4S_REC_PARSE_INFO_DEPTH (1) +#define NBL_UL4S_REC_PARSE_INFO_WIDTH (32) +#define NBL_UL4S_REC_PARSE_INFO_DWLEN (1) +union ul4s_rec_parse_info_u { + struct ul4s_rec_parse_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 info_un_match:8; /* [15:8] Default:0x0 RCTR */ + u32 rcnt:8; /* [23:16] Default:0x0 RCTR */ + u32 wcnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_PARSE_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_PARSE_RES_ADDR (0x204448) +#define NBL_UL4S_REC_PARSE_RES_DEPTH (1) +#define NBL_UL4S_REC_PARSE_RES_WIDTH (32) +#define NBL_UL4S_REC_PARSE_RES_DWLEN (1) +union ul4s_rec_parse_res_u { + struct ul4s_rec_parse_res { + u32 eor_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 sor_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_PARSE_RES_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_PARSE_BLK_ADDR (0x20444c) +#define NBL_UL4S_REC_PARSE_BLK_DEPTH (1) +#define NBL_UL4S_REC_PARSE_BLK_WIDTH (32) +#define NBL_UL4S_REC_PARSE_BLK_DWLEN (1) +union ul4s_rec_parse_blk_u { + struct ul4s_rec_parse_blk { + u32 eob_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 info_retrans:8; /* [23:16] Default:0x0 RCTR */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_PARSE_BLK_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_DEPART_ADDR (0x204450) +#define NBL_UL4S_REC_DEPART_DEPTH (1) +#define NBL_UL4S_REC_DEPART_WIDTH (32) +#define NBL_UL4S_REC_DEPART_DWLEN (1) +union ul4s_rec_depart_u { + struct ul4s_rec_depart { + u32 eob_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 eor_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 sor_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_DEPART_DWLEN]; +} __packed; + 
+#define NBL_UL4S_REC_DEPART_INFO_ADDR (0x204454) +#define NBL_UL4S_REC_DEPART_INFO_DEPTH (1) +#define NBL_UL4S_REC_DEPART_INFO_WIDTH (32) +#define NBL_UL4S_REC_DEPART_INFO_DWLEN (1) +union ul4s_rec_depart_info_u { + struct ul4s_rec_depart_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 eicv_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 head_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_DEPART_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_DEP_ADA_ADDR (0x204460) +#define NBL_UL4S_REC_DEP_ADA_DEPTH (1) +#define NBL_UL4S_REC_DEP_ADA_WIDTH (32) +#define NBL_UL4S_REC_DEP_ADA_DWLEN (1) +union ul4s_rec_dep_ada_u { + struct ul4s_rec_dep_ada { + u32 eob_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 eor_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 sor_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_DEP_ADA_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_DEP_ADA_INFO_ADDR (0x204464) +#define NBL_UL4S_REC_DEP_ADA_INFO_DEPTH (1) +#define NBL_UL4S_REC_DEP_ADA_INFO_WIDTH (32) +#define NBL_UL4S_REC_DEP_ADA_INFO_DWLEN (1) +union ul4s_rec_dep_ada_info_u { + struct ul4s_rec_dep_ada_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_DEP_ADA_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UPD_ADA_ADDR (0x204470) +#define NBL_UL4S_REC_UPD_ADA_DEPTH (1) +#define NBL_UL4S_REC_UPD_ADA_WIDTH (32) +#define NBL_UL4S_REC_UPD_ADA_DWLEN (1) +union ul4s_rec_upd_ada_u { + struct ul4s_rec_upd_ada { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 icv_cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 sob_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UPD_ADA_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UPDATE_ADDR (0x204480) +#define NBL_UL4S_REC_UPDATE_DEPTH (1) +#define NBL_UL4S_REC_UPDATE_WIDTH (32) +#define NBL_UL4S_REC_UPDATE_DWLEN (1) +union ul4s_rec_update_u { + struct ul4s_rec_update { + u32 bp_rcnt:8; /* [7:0] Default:0x0 RCTR */ + u32 bp_wcnt:8; /* [15:8] Default:0x0 RCTR */ + u32 cip_rcnt:8; /* [23:16] Default:0x0 RCTR */ + u32 cip_wcnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_UPDATE_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UPDATE_INFO_ADDR (0x204484) +#define NBL_UL4S_REC_UPDATE_INFO_DEPTH (1) +#define NBL_UL4S_REC_UPDATE_INFO_WIDTH (32) +#define NBL_UL4S_REC_UPDATE_INFO_DWLEN (1) +union ul4s_rec_update_info_u { + struct ul4s_rec_update_info { + u32 eop_cnt:8; /* [7:0] Default:0x0 RCTR */ + u32 cnt:8; /* [15:8] Default:0x0 RCTR */ + u32 icv_cnt:8; /* [23:16] Default:0x0 RCTR */ + u32 cell_cnt:8; /* [31:24] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_REC_UPDATE_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_UPD_ICV_CHECK_ADDR (0x204488) +#define NBL_UL4S_REC_UPD_ICV_CHECK_DEPTH (1) +#define NBL_UL4S_REC_UPD_ICV_CHECK_WIDTH (32) +#define NBL_UL4S_REC_UPD_ICV_CHECK_DWLEN (1) +union ul4s_rec_upd_icv_check_u { + struct ul4s_rec_upd_icv_check { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_UPD_ICV_CHECK_DWLEN]; +} __packed; + +#define NBL_UL4S_UNL4S_RX_INFO_ADDR (0x204490) +#define NBL_UL4S_UNL4S_RX_INFO_DEPTH (1) +#define NBL_UL4S_UNL4S_RX_INFO_WIDTH (32) +#define NBL_UL4S_UNL4S_RX_INFO_DWLEN (1) +union ul4s_unl4s_rx_info_u { + struct ul4s_unl4s_rx_info { + 
u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_UNL4S_RX_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_UNL4S_RX_LEN_ADDR (0x204494) +#define NBL_UL4S_UNL4S_RX_LEN_DEPTH (1) +#define NBL_UL4S_UNL4S_RX_LEN_WIDTH (32) +#define NBL_UL4S_UNL4S_RX_LEN_DWLEN (1) +union ul4s_unl4s_rx_len_u { + struct ul4s_unl4s_rx_len { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_UNL4S_RX_LEN_DWLEN]; +} __packed; + +#define NBL_UL4S_UNL4S_TX_INFO_ADDR (0x2044a0) +#define NBL_UL4S_UNL4S_TX_INFO_DEPTH (1) +#define NBL_UL4S_UNL4S_TX_INFO_WIDTH (32) +#define NBL_UL4S_UNL4S_TX_INFO_DWLEN (1) +union ul4s_unl4s_tx_info_u { + struct ul4s_unl4s_tx_info { + u32 cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_UNL4S_TX_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_UNL4S_TX_LEN_ADDR (0x2044a4) +#define NBL_UL4S_UNL4S_TX_LEN_DEPTH (1) +#define NBL_UL4S_UNL4S_TX_LEN_WIDTH (32) +#define NBL_UL4S_UNL4S_TX_LEN_DWLEN (1) +union ul4s_unl4s_tx_len_u { + struct ul4s_unl4s_tx_len { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_UNL4S_TX_LEN_DWLEN]; +} __packed; + +#define NBL_UL4S_RD_INFO_ADDR (0x2044b0) +#define NBL_UL4S_RD_INFO_DEPTH (1) +#define NBL_UL4S_RD_INFO_WIDTH (32) +#define NBL_UL4S_RD_INFO_DWLEN (1) +union ul4s_rd_info_u { + struct ul4s_rd_info { + u32 unl4s_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 l4s_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_RD_INFO_DWLEN]; +} __packed; + +#define NBL_UL4S_INT_TX_ADDR (0x2044b4) +#define NBL_UL4S_INT_TX_DEPTH (1) +#define NBL_UL4S_INT_TX_WIDTH (32) +#define NBL_UL4S_INT_TX_DWLEN (1) +union ul4s_int_tx_u { + struct ul4s_int_tx { + u32 info_cnt:16; /* [15:0] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INT_TX_DWLEN]; +} __packed; + +#define NBL_UL4S_RX_CELL_LEN_ADDR (0x2044c0) +#define NBL_UL4S_RX_CELL_LEN_DEPTH (1) +#define NBL_UL4S_RX_CELL_LEN_WIDTH (64) +#define NBL_UL4S_RX_CELL_LEN_DWLEN (2) +union ul4s_rx_cell_len_u { + struct ul4s_rx_cell_len { + u32 cnt_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_RX_CELL_LEN_DWLEN]; +} __packed; + +#define NBL_UL4S_RX_TOTAL_LEN_ADDR (0x2044c8) +#define NBL_UL4S_RX_TOTAL_LEN_DEPTH (1) +#define NBL_UL4S_RX_TOTAL_LEN_WIDTH (64) +#define NBL_UL4S_RX_TOTAL_LEN_DWLEN (2) +union ul4s_rx_total_len_u { + struct ul4s_rx_total_len { + u32 cnt_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_RX_TOTAL_LEN_DWLEN]; +} __packed; + +#define NBL_UL4S_TX_PAD_LEN_ADDR (0x2044d0) +#define NBL_UL4S_TX_PAD_LEN_DEPTH (1) +#define NBL_UL4S_TX_PAD_LEN_WIDTH (64) +#define NBL_UL4S_TX_PAD_LEN_DWLEN (2) +union ul4s_tx_pad_len_u { + struct ul4s_tx_pad_len { + u32 cnt_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_TX_PAD_LEN_DWLEN]; +} __packed; + +#define NBL_UL4S_TX_UNPAD_LEN_ADDR (0x2044d8) +#define NBL_UL4S_TX_UNPAD_LEN_DEPTH (1) +#define NBL_UL4S_TX_UNPAD_LEN_WIDTH (64) +#define NBL_UL4S_TX_UNPAD_LEN_DWLEN (2) +union ul4s_tx_unpad_len_u { + struct ul4s_tx_unpad_len { + u32 cnt_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UL4S_TX_UNPAD_LEN_DWLEN]; +} __packed; + +#define NBL_UL4S_INFO_MAN_ADDR (0x204510) +#define NBL_UL4S_INFO_MAN_DEPTH (1) +#define NBL_UL4S_INFO_MAN_WIDTH (32) +#define NBL_UL4S_INFO_MAN_DWLEN (1) +union ul4s_info_man_u { + struct 
ul4s_info_man { + u32 read_state:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INFO_MAN_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_DEP_ADA_IV_ADDR (0x204564) +#define NBL_UL4S_REC_DEP_ADA_IV_DEPTH (1) +#define NBL_UL4S_REC_DEP_ADA_IV_WIDTH (32) +#define NBL_UL4S_REC_DEP_ADA_IV_DWLEN (1) +union ul4s_rec_dep_ada_iv_u { + struct ul4s_rec_dep_ada_iv { + u32 val:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_DEP_ADA_IV_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_DEP_ADA_SN_ADDR (0x204568) +#define NBL_UL4S_REC_DEP_ADA_SN_DEPTH (1) +#define NBL_UL4S_REC_DEP_ADA_SN_WIDTH (32) +#define NBL_UL4S_REC_DEP_ADA_SN_DWLEN (1) +union ul4s_rec_dep_ada_sn_u { + struct ul4s_rec_dep_ada_sn { + u32 val:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_DEP_ADA_SN_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_DEP_ADA_CE_ADDR (0x20456c) +#define NBL_UL4S_REC_DEP_ADA_CE_DEPTH (1) +#define NBL_UL4S_REC_DEP_ADA_CE_WIDTH (32) +#define NBL_UL4S_REC_DEP_ADA_CE_DWLEN (1) +union ul4s_rec_dep_ada_ce_u { + struct ul4s_rec_dep_ada_ce { + u32 len:11; /* [10:0] Default:0x0 RO */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 sid:10; /* [25:16] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_DEP_ADA_CE_DWLEN]; +} __packed; + +#define NBL_UL4S_INIT_DONE_ADDR (0x204600) +#define NBL_UL4S_INIT_DONE_DEPTH (1) +#define NBL_UL4S_INIT_DONE_WIDTH (32) +#define NBL_UL4S_INIT_DONE_DWLEN (1) +union ul4s_init_done_u { + struct ul4s_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UL4S_VER_YEAR_MONTH_ADDR (0x204604) +#define NBL_UL4S_VER_YEAR_MONTH_DEPTH (1) +#define NBL_UL4S_VER_YEAR_MONTH_WIDTH (32) +#define NBL_UL4S_VER_YEAR_MONTH_DWLEN (1) +union ul4s_ver_year_month_u { + struct ul4s_ver_year_month { + u32 month:8; /* [7:0] Default:0x06 RO */ + u32 year:8; /* [15:8] Default:0x22 RO */ + u32 version:8; /* [23:16] Default:0x21 RO */ + u32 reg_type:8; /* [31:24] Default:0x5b RO */ + } __packed info; + u32 data[NBL_UL4S_VER_YEAR_MONTH_DWLEN]; +} __packed; + +#define NBL_UL4S_REG_TEST_ADDR (0x204608) +#define NBL_UL4S_REG_TEST_DEPTH (1) +#define NBL_UL4S_REG_TEST_WIDTH (32) +#define NBL_UL4S_REG_TEST_DWLEN (1) +union ul4s_reg_test_u { + struct ul4s_reg_test { + u32 reg_test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UL4S_REG_TEST_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_FULL_CTRL_ADDR (0x204610) +#define NBL_UL4S_REC_FULL_CTRL_DEPTH (1) +#define NBL_UL4S_REC_FULL_CTRL_WIDTH (32) +#define NBL_UL4S_REC_FULL_CTRL_DWLEN (1) +union ul4s_rec_full_ctrl_u { + struct ul4s_rec_full_ctrl { + u32 en:1; /* [0] Default:0x0 RW */ + u32 drop:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x00 RO */ + } __packed info; + u32 data[NBL_UL4S_REC_FULL_CTRL_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_HEAD_VERSION_ADDR (0x204630) +#define NBL_UL4S_REC_HEAD_VERSION_DEPTH (1) +#define NBL_UL4S_REC_HEAD_VERSION_WIDTH (32) +#define NBL_UL4S_REC_HEAD_VERSION_DWLEN (1) +union ul4s_rec_head_version_u { + struct ul4s_rec_head_version { + u32 v0:16; /* [15:0] Default:0x0302 RW */ + u32 v1:16; /* [31:16] Default:0x0303 RW */ + } __packed info; + u32 data[NBL_UL4S_REC_HEAD_VERSION_DWLEN]; +} __packed; + +#define NBL_UL4S_REC_HEAD_TYPE_ADDR (0x204634) +#define NBL_UL4S_REC_HEAD_TYPE_DEPTH (1) +#define NBL_UL4S_REC_HEAD_TYPE_WIDTH (32) 
+#define NBL_UL4S_REC_HEAD_TYPE_DWLEN (1) +union ul4s_rec_head_type_u { + struct ul4s_rec_head_type { + u32 check_en:1; /* [0] Default:0x1 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 length:16; /* [31:16] Default:0x4400 RW */ + } __packed info; + u32 data[NBL_UL4S_REC_HEAD_TYPE_DWLEN]; +} __packed; + +#define NBL_UL4S_SCH_ADDR (0x2046c0) +#define NBL_UL4S_SCH_DEPTH (1) +#define NBL_UL4S_SCH_WIDTH (32) +#define NBL_UL4S_SCH_DWLEN (1) +union ul4s_sch_u { + struct ul4s_sch { + u32 pri:2; /* [1:0] Default:0x2 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_SCH_DWLEN]; +} __packed; + +#define NBL_UL4S_SCH_PAD_ADDR (0x2046c4) +#define NBL_UL4S_SCH_PAD_DEPTH (1) +#define NBL_UL4S_SCH_PAD_WIDTH (32) +#define NBL_UL4S_SCH_PAD_DWLEN (1) +union ul4s_sch_pad_u { + struct ul4s_sch_pad { + u32 en:1; /* [0] Default:0x1 RW */ + u32 clr:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x00 RO */ + } __packed info; + u32 data[NBL_UL4S_SCH_PAD_DWLEN]; +} __packed; + +#define NBL_UL4S_SYNC_TRIG_ADDR (0x204700) +#define NBL_UL4S_SYNC_TRIG_DEPTH (1) +#define NBL_UL4S_SYNC_TRIG_WIDTH (32) +#define NBL_UL4S_SYNC_TRIG_DWLEN (1) +union ul4s_sync_trig_u { + struct ul4s_sync_trig { + u32 rsv1:1; /* [0] Default:0x0 RW */ + u32 trig:1; /* [1] Default:0x0 RW */ + u32 init_sync:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_SYNC_TRIG_DWLEN]; +} __packed; + +#define NBL_UL4S_SYNC_SID_ADDR (0x204704) +#define NBL_UL4S_SYNC_SID_DEPTH (1) +#define NBL_UL4S_SYNC_SID_WIDTH (32) +#define NBL_UL4S_SYNC_SID_DWLEN (1) +union ul4s_sync_sid_u { + struct ul4s_sync_sid { + u32 sync_sid:10; /* [9:0] Default:0x0 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_SYNC_SID_DWLEN]; +} __packed; + +#define NBL_UL4S_SYNC_TCP_SN_ADDR (0x204710) +#define NBL_UL4S_SYNC_TCP_SN_DEPTH (1) +#define NBL_UL4S_SYNC_TCP_SN_WIDTH (32) +#define NBL_UL4S_SYNC_TCP_SN_DWLEN (1) +union ul4s_sync_tcp_sn_u { + struct ul4s_sync_tcp_sn { + u32 sync_tcp_sn:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UL4S_SYNC_TCP_SN_DWLEN]; +} __packed; + +#define NBL_UL4S_SYNC_REC_NUM_ADDR (0x204714) +#define NBL_UL4S_SYNC_REC_NUM_DEPTH (1) +#define NBL_UL4S_SYNC_REC_NUM_WIDTH (64) +#define NBL_UL4S_SYNC_REC_NUM_DWLEN (2) +union ul4s_sync_rec_num_u { + struct ul4s_sync_rec_num { + u32 sync_rec_num_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UL4S_SYNC_REC_NUM_DWLEN]; +} __packed; + +#define NBL_UL4S_TAB_KEY_SALT_ADDR (0x214000) +#define NBL_UL4S_TAB_KEY_SALT_DEPTH (1024) +#define NBL_UL4S_TAB_KEY_SALT_WIDTH (512) +#define NBL_UL4S_TAB_KEY_SALT_DWLEN (16) +union ul4s_tab_key_salt_u { + struct ul4s_tab_key_salt { + u32 key_arr[8]; /* [255:0] Default:0x0 RW */ + u32 salt:32; /* [287:256] Default:0x0 RW */ + u32 mode:2; /* [289:288] Default:0x0 RW */ + u32 ena:1; /* [290] Default:0x0 RW */ + u32 rsv:29; /* [511:291] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:291] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UL4S_TAB_KEY_SALT_DWLEN]; +} __packed; +#define NBL_UL4S_TAB_KEY_SALT_REG(r) (NBL_UL4S_TAB_KEY_SALT_ADDR + \ + (NBL_UL4S_TAB_KEY_SALT_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h new file mode 100644 index 0000000000000000000000000000000000000000..16061974d449522a016295b465f774e84ffea7ca --- /dev/null +++ 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upa.h
@@ -0,0 +1,817 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_UPA_H
+#define NBL_UPA_H 1
+
+#include <linux/types.h>
+
+#define NBL_UPA_BASE (0x0008C000)
+
+#define NBL_UPA_INT_STATUS_ADDR (0x8c000)
+#define NBL_UPA_INT_STATUS_DEPTH (1)
+#define NBL_UPA_INT_STATUS_WIDTH (32)
+#define NBL_UPA_INT_STATUS_DWLEN (1)
+union upa_int_status_u {
+	struct upa_int_status {
+		u32 fatal_err:1; /* [0] Default:0x0 RWC */
+		u32 fifo_underflow:1; /* [1] Default:0x0 RWC */
+		u32 fifo_overflow:1; /* [2] Default:0x0 RWC */
+		u32 fsm_err:1; /* [3] Default:0x0 RWC */
+		u32 cif_err:1; /* [4] Default:0x0 RWC */
+		u32 rsv1:1; /* [5] Default:0x0 RO */
+		u32 cfg_err:1; /* [6] Default:0x0 RWC */
+		u32 ucor_err:1; /* [7] Default:0x0 RWC */
+		u32 cor_err:1; /* [8] Default:0x0 RWC */
+		u32 rsv:23; /* [31:9] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_UPA_INT_MASK_ADDR (0x8c004)
+#define NBL_UPA_INT_MASK_DEPTH (1)
+#define NBL_UPA_INT_MASK_WIDTH (32)
+#define NBL_UPA_INT_MASK_DWLEN (1)
+union upa_int_mask_u {
+	struct upa_int_mask {
+		u32 fatal_err:1; /* [0] Default:0x0 RW */
+		u32 fifo_underflow:1; /* [1] Default:0x0 RW */
+		u32 fifo_overflow:1; /* [2] Default:0x0 RW */
+		u32 fsm_err:1; /* [3] Default:0x0 RW */
+		u32 cif_err:1; /* [4] Default:0x0 RW */
+		u32 rsv1:1; /* [5] Default:0x0 RO */
+		u32 cfg_err:1; /* [6] Default:0x0 RW */
+		u32 ucor_err:1; /* [7] Default:0x0 RW */
+		u32 cor_err:1; /* [8] Default:0x0 RW */
+		u32 rsv:23; /* [31:9] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_UPA_INT_SET_ADDR (0x8c008)
+#define NBL_UPA_INT_SET_DEPTH (1)
+#define NBL_UPA_INT_SET_WIDTH (32)
+#define NBL_UPA_INT_SET_DWLEN (1)
+union upa_int_set_u {
+	struct upa_int_set {
+		u32 fatal_err:1; /* [0] Default:0x0 WO */
+		u32 fifo_underflow:1; /* [1] Default:0x0 WO */
+		u32 fifo_overflow:1; /* [2] Default:0x0 WO */
+		u32 fsm_err:1; /* [3] Default:0x0 WO */
+		u32 cif_err:1; /* [4] Default:0x0 WO */
+		u32 rsv1:1; /* [5] Default:0x0 RO */
+		u32 cfg_err:1; /* [6] Default:0x0 WO */
+		u32 ucor_err:1; /* [7] Default:0x0 WO */
+		u32 cor_err:1; /* [8] Default:0x0 WO */
+		u32 rsv:23; /* [31:9] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_UPA_INIT_DONE_ADDR (0x8c00c)
+#define NBL_UPA_INIT_DONE_DEPTH (1)
+#define NBL_UPA_INIT_DONE_WIDTH (32)
+#define NBL_UPA_INIT_DONE_DWLEN (1)
+union upa_init_done_u {
+	struct upa_init_done {
+		u32 done:1; /* [0] Default:0x0 RO */
+		u32 rsv:31; /* [31:1] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_UPA_CIF_ERR_INFO_ADDR (0x8c040)
+#define NBL_UPA_CIF_ERR_INFO_DEPTH (1)
+#define NBL_UPA_CIF_ERR_INFO_WIDTH (32)
+#define NBL_UPA_CIF_ERR_INFO_DWLEN (1)
+union upa_cif_err_info_u {
+	struct upa_cif_err_info {
+		u32 addr:30; /* [29:0] Default:0x0 RO */
+		u32 wr_err:1; /* [30] Default:0x0 RO */
+		u32 ucor_err:1; /* [31] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_CIF_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_UPA_CFG_ERR_INFO_ADDR (0x8c050)
+#define NBL_UPA_CFG_ERR_INFO_DEPTH (1)
+#define NBL_UPA_CFG_ERR_INFO_WIDTH (32)
+#define NBL_UPA_CFG_ERR_INFO_DWLEN (1)
+union upa_cfg_err_info_u {
+	struct upa_cfg_err_info {
+		u32 id0:2; /* [1:0] Default:0x0 RO */
+		u32 rsv:30; /* [31:2] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_CFG_ERR_INFO_DWLEN];
+}
__packed; + +#define NBL_UPA_CAR_CTRL_ADDR (0x8c100) +#define NBL_UPA_CAR_CTRL_DEPTH (1) +#define NBL_UPA_CAR_CTRL_WIDTH (32) +#define NBL_UPA_CAR_CTRL_DWLEN (1) +union upa_car_ctrl_u { + struct upa_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UPA_INIT_START_ADDR (0x8c180) +#define NBL_UPA_INIT_START_DEPTH (1) +#define NBL_UPA_INIT_START_WIDTH (32) +#define NBL_UPA_INIT_START_DWLEN (1) +union upa_init_start_u { + struct upa_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_INIT_START_DWLEN]; +} __packed; + +#define NBL_UPA_LAYO_CKSUM0_CTRL_ADDR (0x8c1b0) +#define NBL_UPA_LAYO_CKSUM0_CTRL_DEPTH (4) +#define NBL_UPA_LAYO_CKSUM0_CTRL_WIDTH (32) +#define NBL_UPA_LAYO_CKSUM0_CTRL_DWLEN (1) +union upa_layo_cksum0_ctrl_u { + struct upa_layo_cksum0_ctrl { + u32 data:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_LAYO_CKSUM0_CTRL_DWLEN]; +} __packed; +#define NBL_UPA_LAYO_CKSUM0_CTRL_REG(r) (NBL_UPA_LAYO_CKSUM0_CTRL_ADDR + \ + (NBL_UPA_LAYO_CKSUM0_CTRL_DWLEN * 4) * (r)) + +#define NBL_UPA_LAYI_CKSUM0_CTRL_ADDR (0x8c1c0) +#define NBL_UPA_LAYI_CKSUM0_CTRL_DEPTH (4) +#define NBL_UPA_LAYI_CKSUM0_CTRL_WIDTH (32) +#define NBL_UPA_LAYI_CKSUM0_CTRL_DWLEN (1) +union upa_layi_cksum0_ctrl_u { + struct upa_layi_cksum0_ctrl { + u32 data:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_LAYI_CKSUM0_CTRL_DWLEN]; +} __packed; +#define NBL_UPA_LAYI_CKSUM0_CTRL_REG(r) (NBL_UPA_LAYI_CKSUM0_CTRL_ADDR + \ + (NBL_UPA_LAYI_CKSUM0_CTRL_DWLEN * 4) * (r)) + +#define NBL_UPA_FWD_TYPE_STAGE_0_ADDR (0x8c1d0) +#define NBL_UPA_FWD_TYPE_STAGE_0_DEPTH (1) +#define NBL_UPA_FWD_TYPE_STAGE_0_WIDTH (32) +#define NBL_UPA_FWD_TYPE_STAGE_0_DWLEN (1) +union upa_fwd_type_stage_0_u { + struct upa_fwd_type_stage_0 { + u32 tbl:32; /* [31:0] Default:0xF3FFFFF2 RW */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_STAGE_0_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_STAGE_1_ADDR (0x8c1d4) +#define NBL_UPA_FWD_TYPE_STAGE_1_DEPTH (1) +#define NBL_UPA_FWD_TYPE_STAGE_1_WIDTH (32) +#define NBL_UPA_FWD_TYPE_STAGE_1_DWLEN (1) +union upa_fwd_type_stage_1_u { + struct upa_fwd_type_stage_1 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_STAGE_1_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_STAGE_2_ADDR (0x8c1d8) +#define NBL_UPA_FWD_TYPE_STAGE_2_DEPTH (1) +#define NBL_UPA_FWD_TYPE_STAGE_2_WIDTH (32) +#define NBL_UPA_FWD_TYPE_STAGE_2_DWLEN (1) +union upa_fwd_type_stage_2_u { + struct upa_fwd_type_stage_2 { + u32 tbl:32; /* [31:0] Default:0xFFFFFFFF RW */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_STAGE_2_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_BYPASS_0_ADDR (0x8c1e0) +#define NBL_UPA_FWD_TYPE_BYPASS_0_DEPTH (1) +#define NBL_UPA_FWD_TYPE_BYPASS_0_WIDTH (32) +#define NBL_UPA_FWD_TYPE_BYPASS_0_DWLEN (1) +union upa_fwd_type_bypass_0_u { + struct upa_fwd_type_bypass_0 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_BYPASS_0_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_BYPASS_1_ADDR (0x8c1e4) +#define NBL_UPA_FWD_TYPE_BYPASS_1_DEPTH (1) +#define NBL_UPA_FWD_TYPE_BYPASS_1_WIDTH (32) +#define 
NBL_UPA_FWD_TYPE_BYPASS_1_DWLEN (1) +union upa_fwd_type_bypass_1_u { + struct upa_fwd_type_bypass_1 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_BYPASS_1_DWLEN]; +} __packed; + +#define NBL_UPA_FWD_TYPE_BYPASS_2_ADDR (0x8c1e8) +#define NBL_UPA_FWD_TYPE_BYPASS_2_DEPTH (1) +#define NBL_UPA_FWD_TYPE_BYPASS_2_WIDTH (32) +#define NBL_UPA_FWD_TYPE_BYPASS_2_DWLEN (1) +union upa_fwd_type_bypass_2_u { + struct upa_fwd_type_bypass_2 { + u32 tbl:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_FWD_TYPE_BYPASS_2_DWLEN]; +} __packed; + +#define NBL_UPA_DPORT_EXTRACT_ADDR (0x8c1ec) +#define NBL_UPA_DPORT_EXTRACT_DEPTH (1) +#define NBL_UPA_DPORT_EXTRACT_WIDTH (32) +#define NBL_UPA_DPORT_EXTRACT_DWLEN (1) +union upa_dport_extract_u { + struct upa_dport_extract { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_DPORT_EXTRACT_DWLEN]; +} __packed; + +#define NBL_UPA_LAYO_PHV_ADDR (0x8c1f0) +#define NBL_UPA_LAYO_PHV_DEPTH (1) +#define NBL_UPA_LAYO_PHV_WIDTH (32) +#define NBL_UPA_LAYO_PHV_DWLEN (1) +union upa_layo_phv_u { + struct upa_layo_phv { + u32 len:7; /* [6:0] Default:0x46 RW */ + u32 change_en:1; /* [7] Default:0x1 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_LAYO_PHV_DWLEN]; +} __packed; + +#define NBL_UPA_L4S_PAD_ADDR (0x8c1f4) +#define NBL_UPA_L4S_PAD_DEPTH (1) +#define NBL_UPA_L4S_PAD_WIDTH (32) +#define NBL_UPA_L4S_PAD_DWLEN (1) +union upa_l4s_pad_u { + struct upa_l4s_pad { + u32 p_length:7; /* [6:0] Default:0x3C RW */ + u32 en:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_L4S_PAD_DWLEN]; +} __packed; + +#define NBL_UPA_LAYO_FLAG_ADDR (0x8c1f8) +#define NBL_UPA_LAYO_FLAG_DEPTH (1) +#define NBL_UPA_LAYO_FLAG_WIDTH (32) +#define NBL_UPA_LAYO_FLAG_DWLEN (1) +union upa_layo_flag_u { + struct upa_layo_flag { + u32 mask:32; /* [31:0] Default:0x00 RW */ + } __packed info; + u32 data[NBL_UPA_LAYO_FLAG_DWLEN]; +} __packed; + +#define NBL_UPA_IP_EXT_PROTOCOL_ADDR (0x8c1fc) +#define NBL_UPA_IP_EXT_PROTOCOL_DEPTH (1) +#define NBL_UPA_IP_EXT_PROTOCOL_WIDTH (32) +#define NBL_UPA_IP_EXT_PROTOCOL_DWLEN (1) +union upa_ip_ext_protocol_u { + struct upa_ip_ext_protocol { + u32 tcp:8; /* [7:0] Default:0x6 RW */ + u32 udp:8; /* [15:8] Default:0x11 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_IP_EXT_PROTOCOL_DWLEN]; +} __packed; + +#define NBL_UPA_L3V6_ML_DA_ADDR (0x8c204) +#define NBL_UPA_L3V6_ML_DA_DEPTH (1) +#define NBL_UPA_L3V6_ML_DA_WIDTH (32) +#define NBL_UPA_L3V6_ML_DA_DWLEN (1) +union upa_l3v6_ml_da_u { + struct upa_l3v6_ml_da { + u32 ml_da:16; /* [15:0] Default:0x3333 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_L3V6_ML_DA_DWLEN]; +} __packed; + +#define NBL_UPA_NEXT_KEY_ADDR (0x8c208) +#define NBL_UPA_NEXT_KEY_DEPTH (1) +#define NBL_UPA_NEXT_KEY_WIDTH (32) +#define NBL_UPA_NEXT_KEY_DWLEN (1) +union upa_next_key_u { + struct upa_next_key { + u32 key_b:8; /* [7:0] Default:0x10 RW */ + u32 key_a:8; /* [15:8] Default:0x0C RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_NEXT_KEY_DWLEN]; +} __packed; + +#define NBL_UPA_L3_ML_DA_ADDR (0x8c20c) +#define NBL_UPA_L3_ML_DA_DEPTH (1) +#define NBL_UPA_L3_ML_DA_WIDTH (32) +#define NBL_UPA_L3_ML_DA_DWLEN (1) +union upa_l3_ml_da_u { + 
struct upa_l3_ml_da { + u32 ml_da_0:16; /* [15:0] Default:0x5e00 RW */ + u32 ml_da_1:16; /* [31:16] Default:0x0100 RW */ + } __packed info; + u32 data[NBL_UPA_L3_ML_DA_DWLEN]; +} __packed; + +#define NBL_UPA_CK_CTRL_ADDR (0x8c210) +#define NBL_UPA_CK_CTRL_DEPTH (1) +#define NBL_UPA_CK_CTRL_WIDTH (32) +#define NBL_UPA_CK_CTRL_DWLEN (1) +union upa_ck_ctrl_u { + struct upa_ck_ctrl { + u32 tcp_csum_en:1; /* [0] Default:0x1 RW */ + u32 udp_csum_en:1; /* [1] Default:0x1 RW */ + u32 sctp_crc32c_en:1; /* [2] Default:0x1 RW */ + u32 ipv4_ck_en:1; /* [3] Default:0x1 RW */ + u32 ipv6_ck_en:1; /* [4] Default:0x1 RW */ + u32 DA_ck_en:1; /* [5] Default:0x1 RW */ + u32 ipv6_ext_en:1; /* [6] Default:0x0 RW */ + u32 vlan_error_en:1; /* [7] Default:0x1 RW */ + u32 ctrl_p_en:1; /* [8] Default:0x0 RW */ + u32 ip_tlen_ck_en:1; /* [9] Default:0x0 RW */ + u32 not_uc_p_plck_aux_en:1; /* [10] Default:0x0 RW */ + u32 sctp_crc_plck_aux_en:1; /* [11] Default:0x1 RW */ + u32 tcp_csum_offset_id:2; /* [13:12] Default:0x2 RW */ + u32 udp_csum_offset_id:2; /* [15:14] Default:0x2 RW */ + u32 sctp_crc32c_offset_id:2; /* [17:16] Default:0x2 RW */ + u32 ipv4_ck_offset_id:2; /* [19:18] Default:0x1 RW */ + u32 ipv6_ck_offset_id:2; /* [21:20] Default:0x1 RW */ + u32 DA_ck_offset_id:2; /* [23:22] Default:0x0 RW */ + u32 plck_offset_id:2; /* [25:24] Default:0x3 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_CK_CTRL_DWLEN]; +} __packed; + +#define NBL_UPA_MC_INDEX_ADDR (0x8c214) +#define NBL_UPA_MC_INDEX_DEPTH (1) +#define NBL_UPA_MC_INDEX_WIDTH (32) +#define NBL_UPA_MC_INDEX_DWLEN (1) +union upa_mc_index_u { + struct upa_mc_index { + u32 l2_mc_index:5; /* [4:0] Default:0x8 RW */ + u32 rsv2:3; /* [7:5] Default:0x00 RO */ + u32 l3_mc_index:5; /* [12:8] Default:0x9 RW */ + u32 rsv1:3; /* [15:13] Default:0x00 RO */ + u32 ctrl_p_index:5; /* [20:16] Default:0xF RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_MC_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_CTRL_P_DA_ADDR (0x8c218) +#define NBL_UPA_CTRL_P_DA_DEPTH (1) +#define NBL_UPA_CTRL_P_DA_WIDTH (32) +#define NBL_UPA_CTRL_P_DA_DWLEN (1) +union upa_ctrl_p_da_u { + struct upa_ctrl_p_da { + u32 ctrl_da_0:16; /* [15:0] Default:0xC200 RW */ + u32 ctrl_da_1:16; /* [31:16] Default:0x0180 RW */ + } __packed info; + u32 data[NBL_UPA_CTRL_P_DA_DWLEN]; +} __packed; + +#define NBL_UPA_VLAN_INDEX_ADDR (0x8c220) +#define NBL_UPA_VLAN_INDEX_DEPTH (1) +#define NBL_UPA_VLAN_INDEX_WIDTH (32) +#define NBL_UPA_VLAN_INDEX_DWLEN (1) +union upa_vlan_index_u { + struct upa_vlan_index { + u32 i_vlan2_index:5; /* [4:0] Default:0x7 RW */ + u32 rsv3:3; /* [7:5] Default:0x00 RO */ + u32 i_vlan1_index:5; /* [12:8] Default:0x6 RW */ + u32 rsv2:3; /* [15:13] Default:0x00 RO */ + u32 o_vlan2_index:5; /* [20:16] Default:0x11 RW */ + u32 rsv1:3; /* [23:21] Default:0x0 RO */ + u32 o_vlan1_index:5; /* [28:24] Default:0x10 RW */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_VLAN_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_PRI_VLAN_INDEX_ADDR (0x8c224) +#define NBL_UPA_PRI_VLAN_INDEX_DEPTH (1) +#define NBL_UPA_PRI_VLAN_INDEX_WIDTH (32) +#define NBL_UPA_PRI_VLAN_INDEX_DWLEN (1) +union upa_pri_vlan_index_u { + struct upa_pri_vlan_index { + u32 int_vlan2:7; /* [6:0] Default:0x30 RW */ + u32 rsv3:1; /* [7] Default:0x0 RO */ + u32 int_vlan1:7; /* [14:8] Default:0x2E RW */ + u32 rsv2:1; /* [15] Default:0x0 RO */ + u32 ext_vlan2:7; /* [22:16] Default:0x10 RW */ + u32 rsv1:1; /* [23] Default:0x0 RO */ + u32 
ext_vlan1:7; /* [30:24] Default:0xE RW */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PRI_VLAN_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_PRI_DSCP_INDEX_ADDR (0x8c228) +#define NBL_UPA_PRI_DSCP_INDEX_DEPTH (1) +#define NBL_UPA_PRI_DSCP_INDEX_WIDTH (32) +#define NBL_UPA_PRI_DSCP_INDEX_DWLEN (1) +union upa_pri_dscp_index_u { + struct upa_pri_dscp_index { + u32 int_dscp:7; /* [6:0] Default:0x32 RW */ + u32 rsv3:1; /* [7] Default:0x0 RO */ + u32 ext_dscp:7; /* [14:8] Default:0x12 RW */ + u32 rsv2:1; /* [15] Default:0x0 RO */ + u32 ipv4_flag:5; /* [20:16] Default:0x1 RW */ + u32 rsv1:3; /* [23:21] Default:0x0 RO */ + u32 ipv6_flag:5; /* [28:24] Default:0x2 RW */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PRI_DSCP_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_RDMA_INDEX_ADDR (0x8c22c) +#define NBL_UPA_RDMA_INDEX_DEPTH (1) +#define NBL_UPA_RDMA_INDEX_WIDTH (32) +#define NBL_UPA_RDMA_INDEX_DWLEN (1) +union upa_rdma_index_u { + struct upa_rdma_index { + u32 ext_qpn:7; /* [6:0] Default:0x42 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 rdma_index:5; /* [12:8] Default:0xA RW */ + u32 rsv:19; /* [31:13] Default:0x00 RO */ + } __packed info; + u32 data[NBL_UPA_RDMA_INDEX_DWLEN]; +} __packed; + +#define NBL_UPA_PRI_SEL_CONF_ADDR (0x8c230) +#define NBL_UPA_PRI_SEL_CONF_DEPTH (5) +#define NBL_UPA_PRI_SEL_CONF_WIDTH (32) +#define NBL_UPA_PRI_SEL_CONF_DWLEN (1) +union upa_pri_sel_conf_u { + struct upa_pri_sel_conf { + u32 pri_sel:5; /* [4:0] Default:0x0 RW */ + u32 pri_default:3; /* [7:5] Default:0x0 RW */ + u32 pri_disen:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PRI_SEL_CONF_DWLEN]; +} __packed; +#define NBL_UPA_PRI_SEL_CONF_REG(r) (NBL_UPA_PRI_SEL_CONF_ADDR + \ + (NBL_UPA_PRI_SEL_CONF_DWLEN * 4) * (r)) + +#define NBL_UPA_ERROR_DROP_ADDR (0x8c248) +#define NBL_UPA_ERROR_DROP_DEPTH (1) +#define NBL_UPA_ERROR_DROP_WIDTH (32) +#define NBL_UPA_ERROR_DROP_DWLEN (1) +union upa_error_drop_u { + struct upa_error_drop { + u32 en:7; /* [6:0] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_ERROR_DROP_DWLEN]; +} __packed; + +#define NBL_UPA_ERROR_CODE_ADDR (0x8c24c) +#define NBL_UPA_ERROR_CODE_DEPTH (1) +#define NBL_UPA_ERROR_CODE_WIDTH (32) +#define NBL_UPA_ERROR_CODE_DWLEN (1) +union upa_error_code_u { + struct upa_error_code { + u32 no:32; /* [31:0] Default:0x09123456 RW */ + } __packed info; + u32 data[NBL_UPA_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_SCAN_ADDR (0x8c250) +#define NBL_UPA_PTYPE_SCAN_DEPTH (1) +#define NBL_UPA_PTYPE_SCAN_WIDTH (32) +#define NBL_UPA_PTYPE_SCAN_DWLEN (1) +union upa_ptype_scan_u { + struct upa_ptype_scan { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_SCAN_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_SCAN_TH_ADDR (0x8c254) +#define NBL_UPA_PTYPE_SCAN_TH_DEPTH (1) +#define NBL_UPA_PTYPE_SCAN_TH_WIDTH (32) +#define NBL_UPA_PTYPE_SCAN_TH_DWLEN (1) +union upa_ptype_scan_th_u { + struct upa_ptype_scan_th { + u32 th:32; /* [31:00] Default:0x40 RW */ + } __packed info; + u32 data[NBL_UPA_PTYPE_SCAN_TH_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_SCAN_MASK_ADDR (0x8c258) +#define NBL_UPA_PTYPE_SCAN_MASK_DEPTH (1) +#define NBL_UPA_PTYPE_SCAN_MASK_WIDTH (32) +#define NBL_UPA_PTYPE_SCAN_MASK_DWLEN (1) +union upa_ptype_scan_mask_u { + struct upa_ptype_scan_mask { + u32 addr:8; /* [7:0] Default:0x0 RW */ + u32 en:1; 
/* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_SCAN_MASK_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_ADDR (0x8c25c) +#define NBL_UPA_PTYPE_INSERT_SEARCH_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_DWLEN (1) +union upa_ptype_insert_search_u { + struct upa_ptype_insert_search { + u32 ctrl:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_ADDR (0x8c260) +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_0_DWLEN (1) +union upa_ptype_insert_search_0_u { + struct upa_ptype_insert_search_0 { + u32 key0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_0_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_ADDR (0x8c264) +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_1_DWLEN (1) +union upa_ptype_insert_search_1_u { + struct upa_ptype_insert_search_1 { + u32 key1:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_1_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ADDR (0x8c268) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN (1) +union upa_ptype_insert_search_result_u { + struct upa_ptype_insert_search_result { + u32 result:8; /* [7:0] Default:0x0 RO */ + u32 hit:1; /* [8] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_DWLEN]; +} __packed; + +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_ADDR (0x8c270) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DEPTH (1) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_WIDTH (32) +#define NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN (1) +union upa_ptype_insert_search_result_ack_u { + struct upa_ptype_insert_search_result_ack { + u32 vld:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_PTYPE_INSERT_SEARCH_RESULT_ACK_DWLEN]; +} __packed; + +#define NBL_UPA_CFG_TEST_ADDR (0x8c80c) +#define NBL_UPA_CFG_TEST_DEPTH (1) +#define NBL_UPA_CFG_TEST_WIDTH (32) +#define NBL_UPA_CFG_TEST_DWLEN (1) +union upa_cfg_test_u { + struct upa_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_UPA_BP_STATE_ADDR (0x8cb00) +#define NBL_UPA_BP_STATE_DEPTH (1) +#define NBL_UPA_BP_STATE_WIDTH (32) +#define NBL_UPA_BP_STATE_DWLEN (1) +union upa_bp_state_u { + struct upa_bp_state { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RO */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RO */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RO */ + u32 store_pa_info_bp:1; /* [3] Default:0x0 RO */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RO */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RO */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RO */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RO */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RO */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RO */ + u32 cinf2_fifo_afull:1; /* [10] Default:0x0 RO */ + u32 ctrl_cinf2_fifo_afull:1; /* [11] Default:0x0 RO */ + u32 
layi_info_fifo_afull:1; /* [12] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_BP_STATE_DWLEN]; +} __packed; + +#define NBL_UPA_BP_HISTORY_ADDR (0x8cb04) +#define NBL_UPA_BP_HISTORY_DEPTH (1) +#define NBL_UPA_BP_HISTORY_WIDTH (32) +#define NBL_UPA_BP_HISTORY_DWLEN (1) +union upa_bp_history_u { + struct upa_bp_history { + u32 pa_rmux_data_bp:1; /* [0] Default:0x0 RC */ + u32 pa_rmux_info_bp:1; /* [1] Default:0x0 RC */ + u32 store_pa_data_bp:1; /* [2] Default:0x0 RC */ + u32 store_pa_info_bp:1; /* [3] Default:0x0 RC */ + u32 rx_data_fifo_afull:1; /* [4] Default:0x0 RC */ + u32 rx_info_fifo_afull:1; /* [5] Default:0x0 RC */ + u32 rx_ctrl_fifo_afull:1; /* [6] Default:0x0 RC */ + u32 cinf1_fifo_afull:1; /* [7] Default:0x0 RC */ + u32 ctrl_cinf1_fifo_afull:1; /* [8] Default:0x0 RC */ + u32 layo_info_fifo_afull:1; /* [9] Default:0x0 RC */ + u32 cinf2_fifo_afull:1; /* [10] Default:0x0 RC */ + u32 ctrl_cinf2_fifo_afull:1; /* [11] Default:0x0 RC */ + u32 layi_info_fifo_afull:1; /* [12] Default:0x0 RC */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_UPA_PRI_CONF_TABLE_ADDR (0x8e000) +#define NBL_UPA_PRI_CONF_TABLE_DEPTH (40) +#define NBL_UPA_PRI_CONF_TABLE_WIDTH (32) +#define NBL_UPA_PRI_CONF_TABLE_DWLEN (1) +union upa_pri_conf_table_u { + struct upa_pri_conf_table { + u32 pri0:4; /* [3:0] Default:0x0 RW */ + u32 pri1:4; /* [7:4] Default:0x0 RW */ + u32 pri2:4; /* [11:8] Default:0x0 RW */ + u32 pri3:4; /* [15:12] Default:0x0 RW */ + u32 pri4:4; /* [19:16] Default:0x0 RW */ + u32 pri5:4; /* [23:20] Default:0x0 RW */ + u32 pri6:4; /* [27:24] Default:0x0 RW */ + u32 pri7:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_PRI_CONF_TABLE_DWLEN]; +} __packed; +#define NBL_UPA_PRI_CONF_TABLE_REG(r) (NBL_UPA_PRI_CONF_TABLE_ADDR + \ + (NBL_UPA_PRI_CONF_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPA_KEY_TCAM_ADDR (0x8f000) +#define NBL_UPA_KEY_TCAM_DEPTH (256) +#define NBL_UPA_KEY_TCAM_WIDTH (64) +#define NBL_UPA_KEY_TCAM_DWLEN (2) +union upa_key_tcam_u { + struct upa_key_tcam { + u32 key_b:16; /* [15:0] Default:0x0 RW */ + u32 key_a:16; /* [31:16] Default:0x0 RW */ + u32 key_valid:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPA_KEY_TCAM_DWLEN]; +} __packed; +#define NBL_UPA_KEY_TCAM_REG(r) (NBL_UPA_KEY_TCAM_ADDR + \ + (NBL_UPA_KEY_TCAM_DWLEN * 4) * (r)) + +#define NBL_UPA_MASK_TCAM_ADDR (0x8f800) +#define NBL_UPA_MASK_TCAM_DEPTH (256) +#define NBL_UPA_MASK_TCAM_WIDTH (32) +#define NBL_UPA_MASK_TCAM_DWLEN (1) +union upa_mask_tcam_u { + struct upa_mask_tcam { + u32 mask_b:16; /* [15:0] Default:0x0 RW */ + u32 mask_a:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPA_MASK_TCAM_DWLEN]; +} __packed; +#define NBL_UPA_MASK_TCAM_REG(r) (NBL_UPA_MASK_TCAM_ADDR + \ + (NBL_UPA_MASK_TCAM_DWLEN * 4) * (r)) + +#define NBL_UPA_ACT_TABLE_ADDR (0x90000) +#define NBL_UPA_ACT_TABLE_DEPTH (256) +#define NBL_UPA_ACT_TABLE_WIDTH (128) +#define NBL_UPA_ACT_TABLE_DWLEN (4) +union upa_act_table_u { + struct upa_act_table { + u32 flag_control_0:8; /* [7:0] Default:0x0 RW */ + u32 flag_control_1:8; /* [15:8] Default:0x0 RW */ + u32 flag_control_2:8; /* [23:16] Default:0x0 RW */ + u32 legality_check:8; /* [31:24] Default:0x0 RW */ + u32 nxt_off_B:8; /* [39:32] Default:0x0 RW */ + u32 nxt_off_A:8; /* [47:40] Default:0x0 RW */ + u32 protocol_header_off:8; /* [55:48] Default:0x0 RW */ + u32 payload_length:8; /* 
[63:56] Default:0x0 RW */
+		u32 mask:8; /* [71:64] Default:0x0 RW */
+		u32 nxt_stg:4; /* [75:72] Default:0x0 RW */
+		u32 rsv_l:32; /* [127:76] Default:0x0 RO */
+		u32 rsv_h:20; /* [127:76] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_ACT_TABLE_DWLEN];
+} __packed;
+#define NBL_UPA_ACT_TABLE_REG(r) (NBL_UPA_ACT_TABLE_ADDR + \
+	(NBL_UPA_ACT_TABLE_DWLEN * 4) * (r))
+
+#define NBL_UPA_EXT_CONF_TABLE_ADDR (0x91000)
+#define NBL_UPA_EXT_CONF_TABLE_DEPTH (1024)
+#define NBL_UPA_EXT_CONF_TABLE_WIDTH (32)
+#define NBL_UPA_EXT_CONF_TABLE_DWLEN (1)
+union upa_ext_conf_table_u {
+	struct upa_ext_conf_table {
+		u32 dst_offset:8; /* [7:0] Default:0x0 RW */
+		u32 source_offset:6; /* [13:8] Default:0x0 RW */
+		u32 mode_start_off:2; /* [15:14] Default:0x0 RW */
+		u32 lx_sel:2; /* [17:16] Default:0x0 RW */
+		u32 mode_sel:1; /* [18] Default:0x0 RW */
+		u32 op_en:1; /* [19] Default:0x0 RW */
+		u32 rsv:12; /* [31:20] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_EXT_CONF_TABLE_DWLEN];
+} __packed;
+#define NBL_UPA_EXT_CONF_TABLE_REG(r) (NBL_UPA_EXT_CONF_TABLE_ADDR + \
+	(NBL_UPA_EXT_CONF_TABLE_DWLEN * 4) * (r))
+
+#define NBL_UPA_EXT_INDEX_TCAM_ADDR (0x92000)
+#define NBL_UPA_EXT_INDEX_TCAM_DEPTH (64)
+#define NBL_UPA_EXT_INDEX_TCAM_WIDTH (64)
+#define NBL_UPA_EXT_INDEX_TCAM_DWLEN (2)
+union upa_ext_index_tcam_u {
+	struct upa_ext_index_tcam {
+		u32 type_index:32; /* [31:0] Default:0x0 RW */
+		u32 type_valid:1; /* [32] Default:0x0 RW */
+		u32 rsv:31; /* [63:33] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_EXT_INDEX_TCAM_DWLEN];
+} __packed;
+#define NBL_UPA_EXT_INDEX_TCAM_REG(r) (NBL_UPA_EXT_INDEX_TCAM_ADDR + \
+	(NBL_UPA_EXT_INDEX_TCAM_DWLEN * 4) * (r))
+
+#define NBL_UPA_EXT_INDEX_TCAM_MASK_ADDR (0x92200)
+#define NBL_UPA_EXT_INDEX_TCAM_MASK_DEPTH (64)
+#define NBL_UPA_EXT_INDEX_TCAM_MASK_WIDTH (32)
+#define NBL_UPA_EXT_INDEX_TCAM_MASK_DWLEN (1)
+union upa_ext_index_tcam_mask_u {
+	struct upa_ext_index_tcam_mask {
+		u32 mask:32; /* [31:0] Default:0x0 RW */
+	} __packed info;
+	u32 data[NBL_UPA_EXT_INDEX_TCAM_MASK_DWLEN];
+} __packed;
+#define NBL_UPA_EXT_INDEX_TCAM_MASK_REG(r) (NBL_UPA_EXT_INDEX_TCAM_MASK_ADDR + \
+	(NBL_UPA_EXT_INDEX_TCAM_MASK_DWLEN * 4) * (r))
+
+#define NBL_UPA_EXT_INDEX_TABLE_ADDR (0x92300)
+#define NBL_UPA_EXT_INDEX_TABLE_DEPTH (64)
+#define NBL_UPA_EXT_INDEX_TABLE_WIDTH (32)
+#define NBL_UPA_EXT_INDEX_TABLE_DWLEN (1)
+union upa_ext_index_table_u {
+	struct upa_ext_index_table {
+		u32 p_index:3; /* [2:0] Default:0x0 RW */
+		u32 rsv:29; /* [31:3] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_EXT_INDEX_TABLE_DWLEN];
+} __packed;
+#define NBL_UPA_EXT_INDEX_TABLE_REG(r) (NBL_UPA_EXT_INDEX_TABLE_ADDR + \
+	(NBL_UPA_EXT_INDEX_TABLE_DWLEN * 4) * (r))
+
+#define NBL_UPA_TYPE_INDEX_TCAM_ADDR (0x94000)
+#define NBL_UPA_TYPE_INDEX_TCAM_DEPTH (256)
+#define NBL_UPA_TYPE_INDEX_TCAM_WIDTH (256)
+#define NBL_UPA_TYPE_INDEX_TCAM_DWLEN (8)
+union upa_type_index_tcam_u {
+	struct upa_type_index_tcam {
+		u32 layi_x:32; /* [31:0] Default:0xFFFFFFFF RW */
+		u32 layo_x:32; /* [63:32] Default:0xFFFFFFFF RW */
+		u32 layi_y:32; /* [95:64] Default:0xFFFFFFFF RW */
+		u32 layo_y:32; /* [127:96] Default:0xFFFFFFFF RW */
+		u32 type_valid:1; /* [128] Default:0x0 RW */
+		u32 rsv_l:32; /* [255:129] Default:0x0 RO */
+		u32 rsv_h:31; /* [255:129] Default:0x0 RO */
+		u32 rsv_arr[2]; /* [255:129] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_TYPE_INDEX_TCAM_DWLEN];
+} __packed;
+#define NBL_UPA_TYPE_INDEX_TCAM_REG(r) (NBL_UPA_TYPE_INDEX_TCAM_ADDR + \
+	(NBL_UPA_TYPE_INDEX_TCAM_DWLEN * 4) * (r))
+
+#define NBL_UPA_PACKET_TYPE_TABLE_ADDR (0x96000)
+#define NBL_UPA_PACKET_TYPE_TABLE_DEPTH (256)
+#define NBL_UPA_PACKET_TYPE_TABLE_WIDTH (32)
+#define NBL_UPA_PACKET_TYPE_TABLE_DWLEN (1)
+union upa_packet_type_table_u {
+	struct upa_packet_type_table {
+		u32 p_type:8; /* [7:0] Default:0x0 RW */
+		u32 rsv:24; /* [31:8] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPA_PACKET_TYPE_TABLE_DWLEN];
+} __packed;
+#define NBL_UPA_PACKET_TYPE_TABLE_REG(r) (NBL_UPA_PACKET_TYPE_TABLE_ADDR + \
+	(NBL_UPA_PACKET_TYPE_TABLE_DWLEN * 4) * (r))
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a88c44380efd1caca314fbe69a07cb8b1e0449e
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uped.h
@@ -0,0 +1,1494 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_UPED_H
+#define NBL_UPED_H 1
+
+#include <linux/types.h>
+
+#define NBL_UPED_BASE (0x0015C000)
+
+#define NBL_UPED_INT_STATUS_ADDR (0x15c000)
+#define NBL_UPED_INT_STATUS_DEPTH (1)
+#define NBL_UPED_INT_STATUS_WIDTH (32)
+#define NBL_UPED_INT_STATUS_DWLEN (1)
+union uped_int_status_u {
+	struct uped_int_status {
+		u32 pkt_length_err:1; /* [0] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */
+		u32 fsm_err:1; /* [3] Default:0x0 RWC */
+		u32 cif_err:1; /* [4] Default:0x0 RWC */
+		u32 input_err:1; /* [5] Default:0x0 RWC */
+		u32 cfg_err:1; /* [6] Default:0x0 RWC */
+		u32 data_ucor_err:1; /* [7] Default:0x0 RWC */
+		u32 inmeta_ucor_err:1; /* [8] Default:0x0 RWC */
+		u32 meta_ucor_err:1; /* [9] Default:0x0 RWC */
+		u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RWC */
+		u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 RWC */
+		u32 meta_value_err:1; /* [12] Default:0x0 RWC */
+		u32 edit_atnum_err:1; /* [13] Default:0x0 RWC */
+		u32 header_oft_ovf:1; /* [14] Default:0x0 RWC */
+		u32 edit_pos_err:1; /* [15] Default:0x0 RWC */
+		u32 da_oft_len_ovf:1; /* [16] Default:0x0 RWC */
+		u32 lxoffset_ovf:1; /* [17] Default:0x0 RWC */
+		u32 rsv:14; /* [31:18] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPED_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_UPED_INT_MASK_ADDR (0x15c004)
+#define NBL_UPED_INT_MASK_DEPTH (1)
+#define NBL_UPED_INT_MASK_WIDTH (32)
+#define NBL_UPED_INT_MASK_DWLEN (1)
+union uped_int_mask_u {
+	struct uped_int_mask {
+		u32 pkt_length_err:1; /* [0] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */
+		u32 fsm_err:1; /* [3] Default:0x0 RW */
+		u32 cif_err:1; /* [4] Default:0x0 RW */
+		u32 input_err:1; /* [5] Default:0x0 RW */
+		u32 cfg_err:1; /* [6] Default:0x0 RW */
+		u32 data_ucor_err:1; /* [7] Default:0x0 RW */
+		u32 inmeta_ucor_err:1; /* [8] Default:0x0 RW */
+		u32 meta_ucor_err:1; /* [9] Default:0x0 RW */
+		u32 meta_cor_ecc_err:1; /* [10] Default:0x0 RW */
+		u32 fwd_atid_nomat_err:1; /* [11] Default:0x1 RW */
+		u32 meta_value_err:1; /* [12] Default:0x0 RW */
+		u32 edit_atnum_err:1; /* [13] Default:0x0 RW */
+		u32 header_oft_ovf:1; /* [14] Default:0x0 RW */
+		u32 edit_pos_err:1; /* [15] Default:0x0 RW */
+		u32 da_oft_len_ovf:1; /* [16] Default:0x0 RW */
+		u32 lxoffset_ovf:1; /* [17] Default:0x0 RW */
+		u32 rsv:14; /* [31:18] Default:0x0 RO */
+	} __packed info;
+	u32
data[NBL_UPED_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UPED_INT_SET_ADDR (0x15c008) +#define NBL_UPED_INT_SET_DEPTH (1) +#define NBL_UPED_INT_SET_WIDTH (32) +#define NBL_UPED_INT_SET_DWLEN (1) +union uped_int_set_u { + struct uped_int_set { + u32 pkt_length_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 input_err:1; /* [5] Default:0x0 WO */ + u32 cfg_err:1; /* [6] Default:0x0 WO */ + u32 data_ucor_err:1; /* [7] Default:0x0 WO */ + u32 inmeta_ucor_err:1; /* [8] Default:0x0 WO */ + u32 meta_ucor_err:1; /* [9] Default:0x0 WO */ + u32 meta_cor_ecc_err:1; /* [10] Default:0x0 WO */ + u32 fwd_atid_nomat_err:1; /* [11] Default:0x0 WO */ + u32 meta_value_err:1; /* [12] Default:0x0 WO */ + u32 edit_atnum_err:1; /* [13] Default:0x0 WO */ + u32 header_oft_ovf:1; /* [14] Default:0x0 WO */ + u32 edit_pos_err:1; /* [15] Default:0x0 WO */ + u32 da_oft_len_ovf:1; /* [16] Default:0x0 WO */ + u32 lxoffset_ovf:1; /* [17] Default:0x0 WO */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INT_SET_DWLEN]; +} __packed; + +#define NBL_UPED_INIT_DONE_ADDR (0x15c00c) +#define NBL_UPED_INIT_DONE_DEPTH (1) +#define NBL_UPED_INIT_DONE_WIDTH (32) +#define NBL_UPED_INIT_DONE_DWLEN (1) +union uped_init_done_u { + struct uped_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_LENGTH_ERR_INFO_ADDR (0x15c020) +#define NBL_UPED_PKT_LENGTH_ERR_INFO_DEPTH (1) +#define NBL_UPED_PKT_LENGTH_ERR_INFO_WIDTH (32) +#define NBL_UPED_PKT_LENGTH_ERR_INFO_DWLEN (1) +union uped_pkt_length_err_info_u { + struct uped_pkt_length_err_info { + u32 ptr_eop:1; /* [0] Default:0x0 RC */ + u32 pkt_eop:1; /* [1] Default:0x0 RC */ + u32 pkt_mod:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PKT_LENGTH_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_CIF_ERR_INFO_ADDR (0x15c040) +#define NBL_UPED_CIF_ERR_INFO_DEPTH (1) +#define NBL_UPED_CIF_ERR_INFO_WIDTH (32) +#define NBL_UPED_CIF_ERR_INFO_DWLEN (1) +union uped_cif_err_info_u { + struct uped_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_INPUT_ERR_INFO_ADDR (0x15c048) +#define NBL_UPED_INPUT_ERR_INFO_DEPTH (1) +#define NBL_UPED_INPUT_ERR_INFO_WIDTH (32) +#define NBL_UPED_INPUT_ERR_INFO_DWLEN (1) +union uped_input_err_info_u { + struct uped_input_err_info { + u32 eoc_miss:1; /* [0] Default:0x0 RC */ + u32 soc_miss:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INPUT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_CFG_ERR_INFO_ADDR (0x15c050) +#define NBL_UPED_CFG_ERR_INFO_DEPTH (1) +#define NBL_UPED_CFG_ERR_INFO_WIDTH (32) +#define NBL_UPED_CFG_ERR_INFO_DWLEN (1) +union uped_cfg_err_info_u { + struct uped_cfg_err_info { + u32 length:1; /* [0] Default:0x0 RC */ + u32 rd_conflict:1; /* [1] Default:0x0 RC */ + u32 rd_addr:8; /* [9:2] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_ADDR (0x15c06c) +#define 
NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_DEPTH (1) +#define NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_WIDTH (32) +#define NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN (1) +union uped_fwd_atid_nomat_err_info_u { + struct uped_fwd_atid_nomat_err_info { + u32 dport:1; /* [0] Default:0x0 RC */ + u32 dqueue:1; /* [1] Default:0x0 RC */ + u32 hash0:1; /* [2] Default:0x0 RC */ + u32 hash1:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_ATID_NOMAT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_META_VALUE_ERR_INFO_ADDR (0x15c070) +#define NBL_UPED_META_VALUE_ERR_INFO_DEPTH (1) +#define NBL_UPED_META_VALUE_ERR_INFO_WIDTH (32) +#define NBL_UPED_META_VALUE_ERR_INFO_DWLEN (1) +union uped_meta_value_err_info_u { + struct uped_meta_value_err_info { + u32 sport:1; /* [0] Default:0x0 RC */ + u32 dport:1; /* [1] Default:0x0 RC */ + u32 dscp_ecn:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_META_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_ADDR (0x15c078) +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_DEPTH (1) +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_WIDTH (32) +#define NBL_UPED_EDIT_ATNUM_ERR_INFO_DWLEN (1) +union uped_edit_atnum_err_info_u { + struct uped_edit_atnum_err_info { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 del_add:1; /* [1] Default:0x0 RC */ + u32 ttl:1; /* [2] Default:0x0 RC */ + u32 dscp:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_EDIT_ATNUM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_HEADER_OFT_OVF_ADDR (0x15c080) +#define NBL_UPED_HEADER_OFT_OVF_DEPTH (1) +#define NBL_UPED_HEADER_OFT_OVF_WIDTH (32) +#define NBL_UPED_HEADER_OFT_OVF_DWLEN (1) +union uped_header_oft_ovf_u { + struct uped_header_oft_ovf { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 rsv2:7; /* [7:1] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 ck_len0:1; /* [18] Default:0x0 RC */ + u32 ck_len1:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 head:1; /* [28] Default:0x0 RC */ + u32 head_out:1; /* [29] Default:0x0 RC */ + u32 l4_head:1; /* [30] Default:0x0 RC */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HEADER_OFT_OVF_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_POS_ERR_ADDR (0x15c088) +#define NBL_UPED_EDIT_POS_ERR_DEPTH (1) +#define NBL_UPED_EDIT_POS_ERR_WIDTH (32) +#define NBL_UPED_EDIT_POS_ERR_DWLEN (1) +union uped_edit_pos_err_u { + struct uped_edit_pos_err { + u32 replace:1; /* [0] Default:0x0 RC */ + u32 cross_level:6; /* [6:1] Default:0x0 RC */ + u32 rsv2:1; /* [7] Default:0x0 RO */ + u32 add_del:6; /* [13:8] Default:0x0 RC */ + u32 dscp_ecn:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 ttl:1; /* [16] Default:0x0 RC */ + u32 sctp:1; /* [17] Default:0x0 RC */ + u32 ck_len0:1; /* [18] Default:0x0 RC */ + u32 ck_len1:1; /* [19] Default:0x0 RC */ + u32 len0:1; /* [20] Default:0x0 RC */ + u32 len1:1; /* [21] Default:0x0 RC */ + u32 ck0:1; /* [22] 
Default:0x0 RC */ + u32 ck1:1; /* [23] Default:0x0 RC */ + u32 ck_start0_0:1; /* [24] Default:0x0 RC */ + u32 ck_start0_1:1; /* [25] Default:0x0 RC */ + u32 ck_start1_0:1; /* [26] Default:0x0 RC */ + u32 ck_start1_1:1; /* [27] Default:0x0 RC */ + u32 bth_header:1; /* [28] Default:0x0 RC */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_EDIT_POS_ERR_DWLEN]; +} __packed; + +#define NBL_UPED_DA_OFT_LEN_OVF_ADDR (0x15c090) +#define NBL_UPED_DA_OFT_LEN_OVF_DEPTH (1) +#define NBL_UPED_DA_OFT_LEN_OVF_WIDTH (32) +#define NBL_UPED_DA_OFT_LEN_OVF_DWLEN (1) +union uped_da_oft_len_ovf_u { + struct uped_da_oft_len_ovf { + u32 at0:5; /* [4:0] Default:0x0 RC */ + u32 at1:5; /* [9:5] Default:0x0 RC */ + u32 at2:5; /* [14:10] Default:0x0 RC */ + u32 at3:5; /* [19:15] Default:0x0 RC */ + u32 at4:5; /* [24:20] Default:0x0 RC */ + u32 at5:5; /* [29:25] Default:0x0 RC */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_DA_OFT_LEN_OVF_DWLEN]; +} __packed; + +#define NBL_UPED_LXOFFSET_OVF_ADDR (0x15c098) +#define NBL_UPED_LXOFFSET_OVF_DEPTH (1) +#define NBL_UPED_LXOFFSET_OVF_WIDTH (32) +#define NBL_UPED_LXOFFSET_OVF_DWLEN (1) +union uped_lxoffset_ovf_u { + struct uped_lxoffset_ovf { + u32 l2:1; /* [0] Default:0x0 RC */ + u32 l3:1; /* [1] Default:0x0 RC */ + u32 l4:1; /* [2] Default:0x0 RC */ + u32 pld:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LXOFFSET_OVF_DWLEN]; +} __packed; + +#define NBL_UPED_CAR_CTRL_ADDR (0x15c100) +#define NBL_UPED_CAR_CTRL_DEPTH (1) +#define NBL_UPED_CAR_CTRL_WIDTH (32) +#define NBL_UPED_CAR_CTRL_DWLEN (1) +union uped_car_ctrl_u { + struct uped_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UPED_INIT_START_ADDR (0x15c10c) +#define NBL_UPED_INIT_START_DEPTH (1) +#define NBL_UPED_INIT_START_WIDTH (32) +#define NBL_UPED_INIT_START_DWLEN (1) +union uped_init_start_u { + struct uped_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INIT_START_DWLEN]; +} __packed; + +#define NBL_UPED_TIMEOUT_CFG_ADDR (0x15c110) +#define NBL_UPED_TIMEOUT_CFG_DEPTH (1) +#define NBL_UPED_TIMEOUT_CFG_WIDTH (32) +#define NBL_UPED_TIMEOUT_CFG_DWLEN (1) +union uped_timeout_cfg_u { + struct uped_timeout_cfg { + u32 fsm_max_num:16; /* [15:00] Default:0xfff RW */ + u32 tab:8; /* [23:16] Default:0x40 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TIMEOUT_CFG_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_DROP_EN_ADDR (0x15c170) +#define NBL_UPED_PKT_DROP_EN_DEPTH (1) +#define NBL_UPED_PKT_DROP_EN_WIDTH (32) +#define NBL_UPED_PKT_DROP_EN_DWLEN (1) +union uped_pkt_drop_en_u { + struct uped_pkt_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PKT_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_HERR_DROP_EN_ADDR (0x15c174) +#define NBL_UPED_PKT_HERR_DROP_EN_DEPTH (1) +#define NBL_UPED_PKT_HERR_DROP_EN_WIDTH (32) +#define NBL_UPED_PKT_HERR_DROP_EN_DWLEN (1) +union uped_pkt_herr_drop_en_u { + struct uped_pkt_herr_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed 
info; + u32 data[NBL_UPED_PKT_HERR_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_PKT_PARITY_DROP_EN_ADDR (0x15c178) +#define NBL_UPED_PKT_PARITY_DROP_EN_DEPTH (1) +#define NBL_UPED_PKT_PARITY_DROP_EN_WIDTH (32) +#define NBL_UPED_PKT_PARITY_DROP_EN_DWLEN (1) +union uped_pkt_parity_drop_en_u { + struct uped_pkt_parity_drop_en { + u32 en0:1; /* [0] Default:0x1 RW */ + u32 en1:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PKT_PARITY_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_TTL_DROP_EN_ADDR (0x15c17c) +#define NBL_UPED_TTL_DROP_EN_DEPTH (1) +#define NBL_UPED_TTL_DROP_EN_WIDTH (32) +#define NBL_UPED_TTL_DROP_EN_DWLEN (1) +union uped_ttl_drop_en_u { + struct uped_ttl_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TTL_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_DQUEUE_DROP_EN_ADDR (0x15c180) +#define NBL_UPED_DQUEUE_DROP_EN_DEPTH (1) +#define NBL_UPED_DQUEUE_DROP_EN_WIDTH (32) +#define NBL_UPED_DQUEUE_DROP_EN_DWLEN (1) +union uped_dqueue_drop_en_u { + struct uped_dqueue_drop_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_DQUEUE_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UPED_INTF_ECC_ERR_EN_ADDR (0x15c184) +#define NBL_UPED_INTF_ECC_ERR_EN_DEPTH (1) +#define NBL_UPED_INTF_ECC_ERR_EN_WIDTH (32) +#define NBL_UPED_INTF_ECC_ERR_EN_DWLEN (1) +union uped_intf_ecc_err_en_u { + struct uped_intf_ecc_err_en { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INTF_ECC_ERR_EN_DWLEN]; +} __packed; + +#define NBL_UPED_TTL_ERROR_CODE_ADDR (0x15c188) +#define NBL_UPED_TTL_ERROR_CODE_DEPTH (1) +#define NBL_UPED_TTL_ERROR_CODE_WIDTH (32) +#define NBL_UPED_TTL_ERROR_CODE_DWLEN (1) +union uped_ttl_error_code_u { + struct uped_ttl_error_code { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv1:7; /* [7:1] Default:0x0 RO */ + u32 id:4; /* [11:8] Default:0x6 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TTL_ERROR_CODE_DWLEN]; +} __packed; + +#define NBL_UPED_HIGH_PRI_PKT_EN_ADDR (0x15c190) +#define NBL_UPED_HIGH_PRI_PKT_EN_DEPTH (1) +#define NBL_UPED_HIGH_PRI_PKT_EN_WIDTH (32) +#define NBL_UPED_HIGH_PRI_PKT_EN_DWLEN (1) +union uped_high_pri_pkt_en_u { + struct uped_high_pri_pkt_en { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HIGH_PRI_PKT_EN_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL0_ADDR (0x15c204) +#define NBL_UPED_HW_EDIT_FLAG_SEL0_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL0_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL0_DWLEN (1) +union uped_hw_edit_flag_sel0_u { + struct uped_hw_edit_flag_sel0 { + u32 oft:5; /* [4:0] Default:0x1 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL0_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL1_ADDR (0x15c208) +#define NBL_UPED_HW_EDIT_FLAG_SEL1_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL1_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL1_DWLEN (1) +union uped_hw_edit_flag_sel1_u { + struct uped_hw_edit_flag_sel1 { + u32 oft:5; /* [4:0] Default:0x2 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL1_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL2_ADDR (0x15c20c) +#define NBL_UPED_HW_EDIT_FLAG_SEL2_DEPTH (1) +#define 
NBL_UPED_HW_EDIT_FLAG_SEL2_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL2_DWLEN (1) +union uped_hw_edit_flag_sel2_u { + struct uped_hw_edit_flag_sel2 { + u32 oft:5; /* [4:0] Default:0x3 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL2_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL3_ADDR (0x15c210) +#define NBL_UPED_HW_EDIT_FLAG_SEL3_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL3_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL3_DWLEN (1) +union uped_hw_edit_flag_sel3_u { + struct uped_hw_edit_flag_sel3 { + u32 oft:5; /* [4:0] Default:0x4 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL3_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDIT_FLAG_SEL4_ADDR (0x15c214) +#define NBL_UPED_HW_EDIT_FLAG_SEL4_DEPTH (1) +#define NBL_UPED_HW_EDIT_FLAG_SEL4_WIDTH (32) +#define NBL_UPED_HW_EDIT_FLAG_SEL4_DWLEN (1) +union uped_hw_edit_flag_sel4_u { + struct uped_hw_edit_flag_sel4 { + u32 oft:5; /* [4:0] Default:0xe RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDIT_FLAG_SEL4_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_DPORT_ADDR (0x15c230) +#define NBL_UPED_FWD_DPORT_DEPTH (1) +#define NBL_UPED_FWD_DPORT_WIDTH (32) +#define NBL_UPED_FWD_DPORT_DWLEN (1) +union uped_fwd_dport_u { + struct uped_fwd_dport { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_DQUEUE_ADDR (0x15c234) +#define NBL_UPED_FWD_DQUEUE_DEPTH (1) +#define NBL_UPED_FWD_DQUEUE_WIDTH (32) +#define NBL_UPED_FWD_DQUEUE_DWLEN (1) +union uped_fwd_dqueue_u { + struct uped_fwd_dqueue { + u32 id:6; /* [5:0] Default:0xa RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_DQUEUE_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_MIRID_ADDR (0x15c238) +#define NBL_UPED_FWD_MIRID_DEPTH (1) +#define NBL_UPED_FWD_MIRID_WIDTH (32) +#define NBL_UPED_FWD_MIRID_DWLEN (1) +union uped_fwd_mirid_u { + struct uped_fwd_mirid { + u32 id:6; /* [5:0] Default:0x8 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_MIRID_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_L4IDX_ADDR (0x15c23c) +#define NBL_UPED_FWD_L4IDX_DEPTH (1) +#define NBL_UPED_FWD_L4IDX_WIDTH (32) +#define NBL_UPED_FWD_L4IDX_DWLEN (1) +union uped_fwd_l4idx_u { + struct uped_fwd_l4idx { + u32 id:6; /* [5:0] Default:0x11 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_L4IDX_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_HASH_0_ADDR (0x15c244) +#define NBL_UPED_FWD_HASH_0_DEPTH (1) +#define NBL_UPED_FWD_HASH_0_WIDTH (32) +#define NBL_UPED_FWD_HASH_0_DWLEN (1) +union uped_fwd_hash_0_u { + struct uped_fwd_hash_0 { + u32 id:6; /* [5:0] Default:0x13 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_HASH_0_DWLEN]; +} __packed; + +#define NBL_UPED_FWD_HASH_1_ADDR (0x15c248) +#define NBL_UPED_FWD_HASH_1_DEPTH (1) +#define NBL_UPED_FWD_HASH_1_WIDTH (32) +#define NBL_UPED_FWD_HASH_1_DWLEN (1) +union uped_fwd_hash_1_u { + struct uped_fwd_hash_1 { + u32 id:6; /* [5:0] Default:0x14 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_FWD_HASH_1_DWLEN]; +} __packed; + +#define NBL_UPED_L4_OFT_ADJUST_ADDR (0x15c250) +#define NBL_UPED_L4_OFT_ADJUST_DEPTH (1) +#define NBL_UPED_L4_OFT_ADJUST_WIDTH (32) +#define NBL_UPED_L4_OFT_ADJUST_DWLEN (1) +union 
uped_l4_oft_adjust_u { + struct uped_l4_oft_adjust { + u32 vau:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_L4_OFT_ADJUST_DWLEN]; +} __packed; + +#define NBL_UPED_PLD_OFT_ADJUST_ADDR (0x15c254) +#define NBL_UPED_PLD_OFT_ADJUST_DEPTH (1) +#define NBL_UPED_PLD_OFT_ADJUST_WIDTH (32) +#define NBL_UPED_PLD_OFT_ADJUST_DWLEN (1) +union uped_pld_oft_adjust_u { + struct uped_pld_oft_adjust { + u32 vau:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_PLD_OFT_ADJUST_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE0_ADDR (0x15c260) +#define NBL_UPED_VLAN_TYPE0_DEPTH (1) +#define NBL_UPED_VLAN_TYPE0_WIDTH (32) +#define NBL_UPED_VLAN_TYPE0_DWLEN (1) +union uped_vlan_type0_u { + struct uped_vlan_type0 { + u32 vau:16; /* [15:0] Default:0x8100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE0_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE1_ADDR (0x15c264) +#define NBL_UPED_VLAN_TYPE1_DEPTH (1) +#define NBL_UPED_VLAN_TYPE1_WIDTH (32) +#define NBL_UPED_VLAN_TYPE1_DWLEN (1) +union uped_vlan_type1_u { + struct uped_vlan_type1 { + u32 vau:16; /* [15:0] Default:0x88A8 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE1_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE2_ADDR (0x15c268) +#define NBL_UPED_VLAN_TYPE2_DEPTH (1) +#define NBL_UPED_VLAN_TYPE2_WIDTH (32) +#define NBL_UPED_VLAN_TYPE2_DWLEN (1) +union uped_vlan_type2_u { + struct uped_vlan_type2 { + u32 vau:16; /* [15:0] Default:0x9100 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE2_DWLEN]; +} __packed; + +#define NBL_UPED_VLAN_TYPE3_ADDR (0x15c26c) +#define NBL_UPED_VLAN_TYPE3_DEPTH (1) +#define NBL_UPED_VLAN_TYPE3_WIDTH (32) +#define NBL_UPED_VLAN_TYPE3_DWLEN (1) +union uped_vlan_type3_u { + struct uped_vlan_type3 { + u32 vau:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_VLAN_TYPE3_DWLEN]; +} __packed; + +#define NBL_UPED_L3_LEN_MDY_CMD_0_ADDR (0x15c300) +#define NBL_UPED_L3_LEN_MDY_CMD_0_DEPTH (1) +#define NBL_UPED_L3_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_UPED_L3_LEN_MDY_CMD_0_DWLEN (1) +union uped_l3_len_mdy_cmd_0_u { + struct uped_l3_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x2 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x2 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L3_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_UPED_L3_LEN_MDY_CMD_1_ADDR (0x15c304) +#define NBL_UPED_L3_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_UPED_L3_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_UPED_L3_LEN_MDY_CMD_1_DWLEN (1) +union uped_l3_len_mdy_cmd_1_u { + struct uped_l3_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x28 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x2 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x1 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x0 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed 
info; + u32 data[NBL_UPED_L3_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_UPED_L4_LEN_MDY_CMD_0_ADDR (0x15c308) +#define NBL_UPED_L4_LEN_MDY_CMD_0_DEPTH (1) +#define NBL_UPED_L4_LEN_MDY_CMD_0_WIDTH (32) +#define NBL_UPED_L4_LEN_MDY_CMD_0_DWLEN (1) +union uped_l4_len_mdy_cmd_0_u { + struct uped_l4_len_mdy_cmd_0 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0xc RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_L4_LEN_MDY_CMD_0_DWLEN]; +} __packed; + +#define NBL_UPED_L4_LEN_MDY_CMD_1_ADDR (0x15c30c) +#define NBL_UPED_L4_LEN_MDY_CMD_1_DEPTH (1) +#define NBL_UPED_L4_LEN_MDY_CMD_1_WIDTH (32) +#define NBL_UPED_L4_LEN_MDY_CMD_1_DWLEN (1) +union uped_l4_len_mdy_cmd_1_u { + struct uped_l4_len_mdy_cmd_1 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 in_oft:7; /* [14:8] Default:0x4 RW */ + u32 rsv3:1; /* [15] Default:0x0 RO */ + u32 phid:2; /* [17:16] Default:0x3 RW */ + u32 rsv2:2; /* [19:18] Default:0x0 RO */ + u32 mode:2; /* [21:20] Default:0x0 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 unit:1; /* [24] Default:0x1 RW */ + u32 rsv:6; /* [30:25] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_LEN_MDY_CMD_1_DWLEN]; +} __packed; + +#define NBL_UPED_L3_CK_CMD_00_ADDR (0x15c310) +#define NBL_UPED_L3_CK_CMD_00_DEPTH (1) +#define NBL_UPED_L3_CK_CMD_00_WIDTH (32) +#define NBL_UPED_L3_CK_CMD_00_DWLEN (1) +union uped_l3_ck_cmd_00_u { + struct uped_l3_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0xa RW */ + u32 phid:2; /* [27:26] Default:0x2 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L3_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_UPED_L3_CK_CMD_01_ADDR (0x15c314) +#define NBL_UPED_L3_CK_CMD_01_DEPTH (1) +#define NBL_UPED_L3_CK_CMD_01_WIDTH (32) +#define NBL_UPED_L3_CK_CMD_01_DWLEN (1) +union uped_l3_ck_cmd_01_u { + struct uped_l3_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_L3_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_00_ADDR (0x15c318) +#define NBL_UPED_L4_CK_CMD_00_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_00_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_00_DWLEN (1) +union uped_l4_ck_cmd_00_u { + struct uped_l4_ck_cmd_00 { + u32 value:8; /* [7:0] Default:0x6 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 
flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_00_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_01_ADDR (0x15c31c) +#define NBL_UPED_L4_CK_CMD_01_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_01_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_01_DWLEN (1) +union uped_l4_ck_cmd_01_u { + struct uped_l4_ck_cmd_01 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_01_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_10_ADDR (0x15c320) +#define NBL_UPED_L4_CK_CMD_10_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_10_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_10_DWLEN (1) +union uped_l4_ck_cmd_10_u { + struct uped_l4_ck_cmd_10 { + u32 value:8; /* [7:0] Default:0x11 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_10_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_11_ADDR (0x15c324) +#define NBL_UPED_L4_CK_CMD_11_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_11_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_11_DWLEN (1) +union uped_l4_ck_cmd_11_u { + struct uped_l4_ck_cmd_11 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_11_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_20_ADDR (0x15c328) +#define NBL_UPED_L4_CK_CMD_20_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_20_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_20_DWLEN (1) +union uped_l4_ck_cmd_20_u { + struct uped_l4_ck_cmd_20 { + u32 value:8; /* [7:0] Default:0x2e RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x10 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_20_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_21_ADDR (0x15c32c) +#define NBL_UPED_L4_CK_CMD_21_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_21_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_21_DWLEN (1) +union uped_l4_ck_cmd_21_u { + struct uped_l4_ck_cmd_21 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] 
Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_21_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_30_ADDR (0x15c330) +#define NBL_UPED_L4_CK_CMD_30_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_30_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_30_DWLEN (1) +union uped_l4_ck_cmd_30_u { + struct uped_l4_ck_cmd_30 { + u32 value:8; /* [7:0] Default:0x39 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x6 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x1 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_30_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_31_ADDR (0x15c334) +#define NBL_UPED_L4_CK_CMD_31_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_31_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_31_DWLEN (1) +union uped_l4_ck_cmd_31_u { + struct uped_l4_ck_cmd_31 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_31_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_40_ADDR (0x15c338) +#define NBL_UPED_L4_CK_CMD_40_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_40_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_40_DWLEN (1) +union uped_l4_ck_cmd_40_u { + struct uped_l4_ck_cmd_40 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x0 RW */ + u32 len_phid:2; /* [16:15] Default:0x0 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] Default:0x0 RW */ + u32 in_oft:7; /* [25:19] Default:0x8 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x1 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_40_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_41_ADDR (0x15c33c) +#define NBL_UPED_L4_CK_CMD_41_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_41_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_41_DWLEN (1) +union uped_l4_ck_cmd_41_u { + struct uped_l4_ck_cmd_41 { + u32 ck_start0:6; /* [5:0] Default:0x0 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x0 RW */ + u32 ck_len0:7; /* [14:8] Default:0x0 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x0 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_41_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_50_ADDR (0x15c340) +#define NBL_UPED_L4_CK_CMD_50_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_50_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_50_DWLEN (1) +union uped_l4_ck_cmd_50_u { + struct uped_l4_ck_cmd_50 { + u32 value:8; /* [7:0] Default:0x0 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x2 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x0 RW */ + u32 data_vld:1; /* [18] 
Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_50_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_51_ADDR (0x15c344) +#define NBL_UPED_L4_CK_CMD_51_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_51_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_51_DWLEN (1) +union uped_l4_ck_cmd_51_u { + struct uped_l4_ck_cmd_51 { + u32 ck_start0:6; /* [5:0] Default:0xc RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x8 RW */ + u32 ck_vld0:1; /* [15] Default:0x0 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_51_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_60_ADDR (0x15c348) +#define NBL_UPED_L4_CK_CMD_60_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_60_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_60_DWLEN (1) +union uped_l4_ck_cmd_60_u { + struct uped_l4_ck_cmd_60 { + u32 value:8; /* [7:0] Default:0x62 RW */ + u32 len_in_oft:7; /* [14:8] Default:0x4 RW */ + u32 len_phid:2; /* [16:15] Default:0x2 RW */ + u32 len_vld:1; /* [17] Default:0x1 RW */ + u32 data_vld:1; /* [18] Default:0x1 RW */ + u32 in_oft:7; /* [25:19] Default:0x2 RW */ + u32 phid:2; /* [27:26] Default:0x3 RW */ + u32 flag:1; /* [28] Default:0x0 RW */ + u32 mode:1; /* [29] Default:0x0 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_60_DWLEN]; +} __packed; + +#define NBL_UPED_L4_CK_CMD_61_ADDR (0x15c34c) +#define NBL_UPED_L4_CK_CMD_61_DEPTH (1) +#define NBL_UPED_L4_CK_CMD_61_WIDTH (32) +#define NBL_UPED_L4_CK_CMD_61_DWLEN (1) +union uped_l4_ck_cmd_61_u { + struct uped_l4_ck_cmd_61 { + u32 ck_start0:6; /* [5:0] Default:0x8 RW */ + u32 ck_phid0:2; /* [7:6] Default:0x2 RW */ + u32 ck_len0:7; /* [14:8] Default:0x20 RW */ + u32 ck_vld0:1; /* [15] Default:0x1 RW */ + u32 ck_start1:6; /* [21:16] Default:0x0 RW */ + u32 ck_phid1:2; /* [23:22] Default:0x3 RW */ + u32 ck_len1:7; /* [30:24] Default:0x0 RW */ + u32 ck_vld1:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_UPED_L4_CK_CMD_61_DWLEN]; +} __packed; + +#define NBL_UPED_CFG_TEST_ADDR (0x15c600) +#define NBL_UPED_CFG_TEST_DEPTH (1) +#define NBL_UPED_CFG_TEST_WIDTH (32) +#define NBL_UPED_CFG_TEST_DWLEN (1) +union uped_cfg_test_u { + struct uped_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_UPED_BP_STATE_ADDR (0x15c608) +#define NBL_UPED_BP_STATE_DEPTH (1) +#define NBL_UPED_BP_STATE_WIDTH (32) +#define NBL_UPED_BP_STATE_DWLEN (1) +union uped_bp_state_u { + struct uped_bp_state { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RO */ + u32 bm_not_rdy:1; /* [1] Default:0x0 RO */ + u32 rsv1:1; /* [2] Default:0x0 RO */ + u32 qm_fc:1; /* [3] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_BP_STATE_DWLEN]; +} __packed; + +#define NBL_UPED_BP_HISTORY_ADDR (0x15c60c) +#define NBL_UPED_BP_HISTORY_DEPTH (1) +#define NBL_UPED_BP_HISTORY_WIDTH (32) +#define NBL_UPED_BP_HISTORY_DWLEN (1) +union uped_bp_history_u { + struct uped_bp_history { + u32 bm_rtn_tout:1; /* [0] Default:0x0 RC */ + u32 bm_not_rdy:1; /* [1] 
Default:0x0 RC */ + u32 rsv1:1; /* [2] Default:0x0 RC */ + u32 qm_fc:1; /* [3] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_UPED_MIRID_IND_ADDR (0x15c900) +#define NBL_UPED_MIRID_IND_DEPTH (1) +#define NBL_UPED_MIRID_IND_WIDTH (32) +#define NBL_UPED_MIRID_IND_DWLEN (1) +union uped_mirid_ind_u { + struct uped_mirid_ind { + u32 nomat:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MIRID_IND_DWLEN]; +} __packed; + +#define NBL_UPED_MD_AUX_OFT_ADDR (0x15c904) +#define NBL_UPED_MD_AUX_OFT_DEPTH (1) +#define NBL_UPED_MD_AUX_OFT_WIDTH (32) +#define NBL_UPED_MD_AUX_OFT_DWLEN (1) +union uped_md_aux_oft_u { + struct uped_md_aux_oft { + u32 l2_oft:8; /* [7:0] Default:0x0 RO */ + u32 l3_oft:8; /* [15:8] Default:0x0 RO */ + u32 l4_oft:8; /* [23:16] Default:0x0 RO */ + u32 pld_oft:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_AUX_OFT_DWLEN]; +} __packed; + +#define NBL_UPED_MD_AUX_PKT_LEN_ADDR (0x15c908) +#define NBL_UPED_MD_AUX_PKT_LEN_DEPTH (1) +#define NBL_UPED_MD_AUX_PKT_LEN_WIDTH (32) +#define NBL_UPED_MD_AUX_PKT_LEN_DWLEN (1) +union uped_md_aux_pkt_len_u { + struct uped_md_aux_pkt_len { + u32 len:14; /* [13:0] Default:0x0 RO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_AUX_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_UPED_MD_FWD_DPORT_ADDR (0x15c910) +#define NBL_UPED_MD_FWD_DPORT_DEPTH (1) +#define NBL_UPED_MD_FWD_DPORT_WIDTH (32) +#define NBL_UPED_MD_FWD_DPORT_DWLEN (1) +union uped_md_fwd_dport_u { + struct uped_md_fwd_dport { + u32 id:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_FWD_DPORT_DWLEN]; +} __packed; + +#define NBL_UPED_MD_AUX_PLD_CKSUM_ADDR (0x15c914) +#define NBL_UPED_MD_AUX_PLD_CKSUM_DEPTH (1) +#define NBL_UPED_MD_AUX_PLD_CKSUM_WIDTH (32) +#define NBL_UPED_MD_AUX_PLD_CKSUM_DWLEN (1) +union uped_md_aux_pld_cksum_u { + struct uped_md_aux_pld_cksum { + u32 ck:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_AUX_PLD_CKSUM_DWLEN]; +} __packed; + +#define NBL_UPED_INNER_PKT_CKSUM_ADDR (0x15c918) +#define NBL_UPED_INNER_PKT_CKSUM_DEPTH (1) +#define NBL_UPED_INNER_PKT_CKSUM_WIDTH (32) +#define NBL_UPED_INNER_PKT_CKSUM_DWLEN (1) +union uped_inner_pkt_cksum_u { + struct uped_inner_pkt_cksum { + u32 ck:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_INNER_PKT_CKSUM_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_0_ADDR (0x15c920) +#define NBL_UPED_MD_EDIT_0_DEPTH (1) +#define NBL_UPED_MD_EDIT_0_WIDTH (32) +#define NBL_UPED_MD_EDIT_0_DWLEN (1) +union uped_md_edit_0_u { + struct uped_md_edit_0 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_0_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_1_ADDR (0x15c924) +#define NBL_UPED_MD_EDIT_1_DEPTH (1) +#define NBL_UPED_MD_EDIT_1_WIDTH (32) +#define NBL_UPED_MD_EDIT_1_DWLEN (1) +union uped_md_edit_1_u { + struct uped_md_edit_1 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_1_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_2_ADDR (0x15c928) +#define NBL_UPED_MD_EDIT_2_DEPTH (1) +#define NBL_UPED_MD_EDIT_2_WIDTH 
(32) +#define NBL_UPED_MD_EDIT_2_DWLEN (1) +union uped_md_edit_2_u { + struct uped_md_edit_2 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_2_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_3_ADDR (0x15c92c) +#define NBL_UPED_MD_EDIT_3_DEPTH (1) +#define NBL_UPED_MD_EDIT_3_WIDTH (32) +#define NBL_UPED_MD_EDIT_3_DWLEN (1) +union uped_md_edit_3_u { + struct uped_md_edit_3 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_3_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_4_ADDR (0x15c930) +#define NBL_UPED_MD_EDIT_4_DEPTH (1) +#define NBL_UPED_MD_EDIT_4_WIDTH (32) +#define NBL_UPED_MD_EDIT_4_DWLEN (1) +union uped_md_edit_4_u { + struct uped_md_edit_4 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_4_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_5_ADDR (0x15c934) +#define NBL_UPED_MD_EDIT_5_DEPTH (1) +#define NBL_UPED_MD_EDIT_5_WIDTH (32) +#define NBL_UPED_MD_EDIT_5_DWLEN (1) +union uped_md_edit_5_u { + struct uped_md_edit_5 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_5_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_6_ADDR (0x15c938) +#define NBL_UPED_MD_EDIT_6_DEPTH (1) +#define NBL_UPED_MD_EDIT_6_WIDTH (32) +#define NBL_UPED_MD_EDIT_6_DWLEN (1) +union uped_md_edit_6_u { + struct uped_md_edit_6 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_6_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_7_ADDR (0x15c93c) +#define NBL_UPED_MD_EDIT_7_DEPTH (1) +#define NBL_UPED_MD_EDIT_7_WIDTH (32) +#define NBL_UPED_MD_EDIT_7_DWLEN (1) +union uped_md_edit_7_u { + struct uped_md_edit_7 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_7_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_8_ADDR (0x15c940) +#define NBL_UPED_MD_EDIT_8_DEPTH (1) +#define NBL_UPED_MD_EDIT_8_WIDTH (32) +#define NBL_UPED_MD_EDIT_8_DWLEN (1) +union uped_md_edit_8_u { + struct uped_md_edit_8 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_8_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_9_ADDR (0x15c944) +#define NBL_UPED_MD_EDIT_9_DEPTH (1) +#define NBL_UPED_MD_EDIT_9_WIDTH (32) +#define NBL_UPED_MD_EDIT_9_DWLEN (1) +union uped_md_edit_9_u { + struct uped_md_edit_9 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_9_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_10_ADDR (0x15c948) +#define NBL_UPED_MD_EDIT_10_DEPTH (1) +#define NBL_UPED_MD_EDIT_10_WIDTH (32) +#define NBL_UPED_MD_EDIT_10_DWLEN (1) +union uped_md_edit_10_u { + struct uped_md_edit_10 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_UPED_MD_EDIT_10_DWLEN]; +} __packed; + +#define NBL_UPED_MD_EDIT_11_ADDR (0x15c94c) +#define NBL_UPED_MD_EDIT_11_DEPTH (1) +#define NBL_UPED_MD_EDIT_11_WIDTH (32) +#define NBL_UPED_MD_EDIT_11_DWLEN (1) +union uped_md_edit_11_u { + struct uped_md_edit_11 { + u32 vau:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_MD_EDIT_11_DWLEN]; +} __packed; + +#define NBL_UPED_ADD_DEL_LEN_ADDR (0x15c950) +#define NBL_UPED_ADD_DEL_LEN_DEPTH (1) +#define NBL_UPED_ADD_DEL_LEN_WIDTH (32) +#define NBL_UPED_ADD_DEL_LEN_DWLEN (1) +union uped_add_del_len_u { + struct uped_add_del_len { + u32 len:9; /* [8:0] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_ADD_DEL_LEN_DWLEN]; +} __packed; + +#define NBL_UPED_TTL_INFO_ADDR (0x15c970) +#define NBL_UPED_TTL_INFO_DEPTH (1) +#define NBL_UPED_TTL_INFO_WIDTH (32) +#define NBL_UPED_TTL_INFO_DWLEN (1) +union uped_ttl_info_u { + struct uped_ttl_info { + u32 old_ttl:8; /* [7:0] Default:0x0 RO */ + u32 new_ttl:8; /* [15:8] Default:0x0 RO */ + u32 ttl_val:1; /* [16] Default:0x0 RC */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TTL_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_LEN_INFO_VLD_ADDR (0x15c974) +#define NBL_UPED_LEN_INFO_VLD_DEPTH (1) +#define NBL_UPED_LEN_INFO_VLD_WIDTH (32) +#define NBL_UPED_LEN_INFO_VLD_DWLEN (1) +union uped_len_info_vld_u { + struct uped_len_info_vld { + u32 length0:1; /* [0] Default:0x0 RC */ + u32 length1:1; /* [1] Default:0x0 RC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LEN_INFO_VLD_DWLEN]; +} __packed; + +#define NBL_UPED_LEN0_INFO_ADDR (0x15c978) +#define NBL_UPED_LEN0_INFO_DEPTH (1) +#define NBL_UPED_LEN0_INFO_WIDTH (32) +#define NBL_UPED_LEN0_INFO_DWLEN (1) +union uped_len0_info_u { + struct uped_len0_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LEN0_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_LEN1_INFO_ADDR (0x15c97c) +#define NBL_UPED_LEN1_INFO_DEPTH (1) +#define NBL_UPED_LEN1_INFO_WIDTH (32) +#define NBL_UPED_LEN1_INFO_DWLEN (1) +union uped_len1_info_u { + struct uped_len1_info { + u32 old_len:16; /* [15:0] Default:0x0 RO */ + u32 new_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_LEN1_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_ATNUM_INFO_ADDR (0x15c980) +#define NBL_UPED_EDIT_ATNUM_INFO_DEPTH (1) +#define NBL_UPED_EDIT_ATNUM_INFO_WIDTH (32) +#define NBL_UPED_EDIT_ATNUM_INFO_DWLEN (1) +union uped_edit_atnum_info_u { + struct uped_edit_atnum_info { + u32 replace:4; /* [3:0] Default:0x0 RO */ + u32 del:4; /* [7:4] Default:0x0 RO */ + u32 add:4; /* [11:8] Default:0x0 RO */ + u32 ttl:4; /* [15:12] Default:0x0 RO */ + u32 dscp:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_EDIT_ATNUM_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_EDIT_NO_AT_INFO_ADDR (0x15c984) +#define NBL_UPED_EDIT_NO_AT_INFO_DEPTH (1) +#define NBL_UPED_EDIT_NO_AT_INFO_WIDTH (32) +#define NBL_UPED_EDIT_NO_AT_INFO_DWLEN (1) +union uped_edit_no_at_info_u { + struct uped_edit_no_at_info { + u32 l3_len:1; /* [0] Default:0x0 RC */ + u32 l4_len:1; /* [1] Default:0x0 RC */ + u32 l3_ck:1; /* [2] Default:0x0 RC */ + u32 l4_ck:1; /* [3] Default:0x0 RC */ + u32 sctp_ck:1; /* [4] Default:0x0 RC */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } 
__packed info; + u32 data[NBL_UPED_EDIT_NO_AT_INFO_DWLEN]; +} __packed; + +#define NBL_UPED_UL4S_TOTAL_LENGTH_ADDR (0x15c988) +#define NBL_UPED_UL4S_TOTAL_LENGTH_DEPTH (1) +#define NBL_UPED_UL4S_TOTAL_LENGTH_WIDTH (32) +#define NBL_UPED_UL4S_TOTAL_LENGTH_DWLEN (1) +union uped_ul4s_total_length_u { + struct uped_ul4s_total_length { + u32 vau:14; /* [13:0] Default:0x0 RO */ + u32 rsv:16; /* [29:14] Default:0x0 RO */ + u32 tls_ind:1; /* [30] Default:0x0 RO */ + u32 vld:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_UPED_UL4S_TOTAL_LENGTH_DWLEN]; +} __packed; + +#define NBL_UPED_HW_EDT_PROF_ADDR (0x15d000) +#define NBL_UPED_HW_EDT_PROF_DEPTH (32) +#define NBL_UPED_HW_EDT_PROF_WIDTH (32) +#define NBL_UPED_HW_EDT_PROF_DWLEN (1) +union uped_hw_edt_prof_u { + struct uped_hw_edt_prof { + u32 l4_len:2; /* [1:0] Default:0x2 RW */ + u32 l3_len:2; /* [3:2] Default:0x2 RW */ + u32 l4_ck:3; /* [6:4] Default:0x7 RW */ + u32 l3_ck:1; /* [7:7] Default:0x0 RW */ + u32 l4_ck_zero_free:1; /* [8:8] Default:0x1 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_HW_EDT_PROF_DWLEN]; +} __packed; +#define NBL_UPED_HW_EDT_PROF_REG(r) (NBL_UPED_HW_EDT_PROF_ADDR + \ + (NBL_UPED_HW_EDT_PROF_DWLEN * 4) * (r)) + +#define NBL_UPED_OUT_MASK_ADDR (0x15e000) +#define NBL_UPED_OUT_MASK_DEPTH (24) +#define NBL_UPED_OUT_MASK_WIDTH (64) +#define NBL_UPED_OUT_MASK_DWLEN (2) +union uped_out_mask_u { + struct uped_out_mask { + u32 flag:32; /* [31:0] Default:0x0 RW */ + u32 fwd:30; /* [61:32] Default:0x0 RW */ + u32 rsv:2; /* [63:62] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_OUT_MASK_DWLEN]; +} __packed; +#define NBL_UPED_OUT_MASK_REG(r) (NBL_UPED_OUT_MASK_ADDR + \ + (NBL_UPED_OUT_MASK_DWLEN * 4) * (r)) + +#define NBL_UPED_TAB_EDIT_CMD_ADDR (0x15f000) +#define NBL_UPED_TAB_EDIT_CMD_DEPTH (32) +#define NBL_UPED_TAB_EDIT_CMD_WIDTH (32) +#define NBL_UPED_TAB_EDIT_CMD_DWLEN (1) +union uped_tab_edit_cmd_u { + struct uped_tab_edit_cmd { + u32 in_offset:8; /* [7:0] Default:0x0 RW */ + u32 phid:2; /* [9:8] Default:0x0 RW */ + u32 len:7; /* [16:10] Default:0x0 RW */ + u32 mode:4; /* [20:17] Default:0xf RW */ + u32 l4_ck_ofld_upt:1; /* [21] Default:0x1 RW */ + u32 l3_ck_ofld_upt:1; /* [22] Default:0x1 RW */ + u32 rsv:9; /* [31:23] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TAB_EDIT_CMD_DWLEN]; +} __packed; +#define NBL_UPED_TAB_EDIT_CMD_REG(r) (NBL_UPED_TAB_EDIT_CMD_ADDR + \ + (NBL_UPED_TAB_EDIT_CMD_DWLEN * 4) * (r)) + +#define NBL_UPED_TAB_VSI_TYPE_ADDR (0x161000) +#define NBL_UPED_TAB_VSI_TYPE_DEPTH (1031) +#define NBL_UPED_TAB_VSI_TYPE_WIDTH (32) +#define NBL_UPED_TAB_VSI_TYPE_DWLEN (1) +union uped_tab_vsi_type_u { + struct uped_tab_vsi_type { + u32 sel:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPED_TAB_VSI_TYPE_DWLEN]; +} __packed; +#define NBL_UPED_TAB_VSI_TYPE_REG(r) (NBL_UPED_TAB_VSI_TYPE_ADDR + \ + (NBL_UPED_TAB_VSI_TYPE_DWLEN * 4) * (r)) + +#define NBL_UPED_TAB_REPLACE_ADDR (0x164000) +#define NBL_UPED_TAB_REPLACE_DEPTH (2048) +#define NBL_UPED_TAB_REPLACE_WIDTH (64) +#define NBL_UPED_TAB_REPLACE_DWLEN (2) +union uped_tab_replace_u { + struct uped_tab_replace { + u32 vau_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPED_TAB_REPLACE_DWLEN]; +} __packed; +#define NBL_UPED_TAB_REPLACE_REG(r) (NBL_UPED_TAB_REPLACE_ADDR + \ + (NBL_UPED_TAB_REPLACE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upmem.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upmem.h
new file mode 100644
index 0000000000000000000000000000000000000000..701e30483e719b7938bcea266c52385527db6864
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_upmem.h
@@ -0,0 +1,195 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_UPMEM_H
+#define NBL_UPMEM_H 1
+
+#include <linux/types.h>
+
+#define NBL_UPMEM_BASE (0x00108000)
+
+#define NBL_UPMEM_INT_STATUS_ADDR (0x108000)
+#define NBL_UPMEM_INT_STATUS_DEPTH (1)
+#define NBL_UPMEM_INT_STATUS_WIDTH (32)
+#define NBL_UPMEM_INT_STATUS_DWLEN (1)
+union upmem_int_status_u {
+	struct upmem_int_status {
+		u32 ucor_err:1; /* [0] Default:0x0 RWC */
+		u32 cor_err:1; /* [1] Default:0x0 RWC */
+		u32 cpu_lgc_hzd:1; /* [2] Default:0x0 RWC */
+		u32 parity_err:1; /* [3] Default:0x0 RWC */
+		u32 cif_err:1; /* [4] Default:0x0 RWC */
+		u32 rsv:27; /* [31:5] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_INT_MASK_ADDR (0x108004)
+#define NBL_UPMEM_INT_MASK_DEPTH (1)
+#define NBL_UPMEM_INT_MASK_WIDTH (32)
+#define NBL_UPMEM_INT_MASK_DWLEN (1)
+union upmem_int_mask_u {
+	struct upmem_int_mask {
+		u32 ucor_err:1; /* [0] Default:0x0 RW */
+		u32 cor_err:1; /* [1] Default:0x0 RW */
+		u32 cpu_lgc_hzd:1; /* [2] Default:0x0 RW */
+		u32 parity_err:1; /* [3] Default:0x0 RW */
+		u32 cif_err:1; /* [4] Default:0x0 RW */
+		u32 rsv:27; /* [31:5] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_INT_SET_ADDR (0x108008)
+#define NBL_UPMEM_INT_SET_DEPTH (1)
+#define NBL_UPMEM_INT_SET_WIDTH (32)
+#define NBL_UPMEM_INT_SET_DWLEN (1)
+union upmem_int_set_u {
+	struct upmem_int_set {
+		u32 ucor_err:1; /* [0] Default:0x0 WO */
+		u32 cor_err:1; /* [1] Default:0x0 WO */
+		u32 cpu_lgc_hzd:1; /* [2] Default:0x0 WO */
+		u32 parity_err:1; /* [3] Default:0x0 WO */
+		u32 cif_err:1; /* [4] Default:0x0 WO */
+		u32 rsv:27; /* [31:5] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_COR_ERR_INFO_ADDR (0x10800c)
+#define NBL_UPMEM_COR_ERR_INFO_DEPTH (1)
+#define NBL_UPMEM_COR_ERR_INFO_WIDTH (32)
+#define NBL_UPMEM_COR_ERR_INFO_DWLEN (1)
+union upmem_cor_err_info_u {
+	struct upmem_cor_err_info {
+		u32 ram_addr:12; /* [11:0] Default:0x0 RO */
+		u32 rsv:20; /* [31:12] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_COR_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_PARITY_ERR_INFO_ADDR (0x108014)
+#define NBL_UPMEM_PARITY_ERR_INFO_DEPTH (1)
+#define NBL_UPMEM_PARITY_ERR_INFO_WIDTH (32)
+#define NBL_UPMEM_PARITY_ERR_INFO_DWLEN (1)
+union upmem_parity_err_info_u {
+	struct upmem_parity_err_info {
+		u32 ram_addr:12; /* [11:0] Default:0x0 RO */
+		u32 rsv:20; /* [31:12] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_PARITY_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_CIF_ERR_INFO_ADDR (0x10801c)
+#define NBL_UPMEM_CIF_ERR_INFO_DEPTH (1)
+#define NBL_UPMEM_CIF_ERR_INFO_WIDTH (32)
+#define NBL_UPMEM_CIF_ERR_INFO_DWLEN (1)
+union upmem_cif_err_info_u {
+	struct upmem_cif_err_info {
+		u32 addr:30; /* [29:0] Default:0x0 RO */
+		u32 wr_err:1; /* [30] Default:0x0 RO */
+		u32 ucor_err:1; /* [31] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_CIF_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_CAR_CTRL_ADDR (0x108100)
+#define NBL_UPMEM_CAR_CTRL_DEPTH (1)
+#define NBL_UPMEM_CAR_CTRL_WIDTH (32)
+#define NBL_UPMEM_CAR_CTRL_DWLEN (1)
+union upmem_car_ctrl_u {
+	struct upmem_car_ctrl {
+		u32 sctr_car:1; /* [0] Default:0x1 RW */
+		u32 rctr_car:1; /* [1] Default:0x1 RW */
+		u32 rc_car:1; /* [2] Default:0x1 RW */
+		u32 tbl_rc_car:1; /* [3] Default:0x1 RW */
+		u32 rsv:28; /* [31:4] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_CAR_CTRL_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_INIT_START_ADDR (0x108104)
+#define NBL_UPMEM_INIT_START_DEPTH (1)
+#define NBL_UPMEM_INIT_START_WIDTH (32)
+#define NBL_UPMEM_INIT_START_DWLEN (1)
+union upmem_init_start_u {
+	struct upmem_init_start {
+		u32 init_start:1; /* [0] Default:0x0 WO */
+		u32 rsv:31; /* [31:1] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_INIT_START_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_MEM_ACCESS_MODE_ADDR (0x108108)
+#define NBL_UPMEM_MEM_ACCESS_MODE_DEPTH (1)
+#define NBL_UPMEM_MEM_ACCESS_MODE_WIDTH (32)
+#define NBL_UPMEM_MEM_ACCESS_MODE_DWLEN (1)
+union upmem_mem_access_mode_u {
+	struct upmem_mem_access_mode {
+		u32 mode:1; /* [0] Default:0x0 RW */
+		u32 rsv:31; /* [31:1] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_MEM_ACCESS_MODE_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_MEM_ACCESS_EN_ADDR (0x10810c)
+#define NBL_UPMEM_MEM_ACCESS_EN_DEPTH (1)
+#define NBL_UPMEM_MEM_ACCESS_EN_WIDTH (32)
+#define NBL_UPMEM_MEM_ACCESS_EN_DWLEN (1)
+union upmem_mem_access_en_u {
+	struct upmem_mem_access_en {
+		u32 en:1; /* [0] Default:0x0 RW */
+		u32 rsv:31; /* [31:1] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_MEM_ACCESS_EN_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_MEM_ACCESS_ADDR_ADDR (0x108110)
+#define NBL_UPMEM_MEM_ACCESS_ADDR_DEPTH (1)
+#define NBL_UPMEM_MEM_ACCESS_ADDR_WIDTH (32)
+#define NBL_UPMEM_MEM_ACCESS_ADDR_DWLEN (1)
+union upmem_mem_access_addr_u {
+	struct upmem_mem_access_addr {
+		u32 row_raddr:12; /* [11:0] Default:0x0 RW */
+		u32 rsv1:4; /* [15:12] Default:0x0 RO */
+		u32 col_raddr:7; /* [22:16] Default:0x0 RW */
+		u32 rsv:9; /* [31:23] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_MEM_ACCESS_ADDR_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_CFG_TEST_ADDR (0x108114)
+#define NBL_UPMEM_CFG_TEST_DEPTH (1)
+#define NBL_UPMEM_CFG_TEST_WIDTH (32)
+#define NBL_UPMEM_CFG_TEST_DWLEN (1)
+union upmem_cfg_test_u {
+	struct upmem_cfg_test {
+		u32 test:32; /* [31:0] Default:0x0 RW */
+	} __packed info;
+	u32 data[NBL_UPMEM_CFG_TEST_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_INIT_DONE_ADDR (0x108400)
+#define NBL_UPMEM_INIT_DONE_DEPTH (1)
+#define NBL_UPMEM_INIT_DONE_WIDTH (32)
+#define NBL_UPMEM_INIT_DONE_DWLEN (1)
+union upmem_init_done_u {
+	struct upmem_init_done {
+		u32 init_done:1; /* [0] Default:0x0 RO */
+		u32 rsv:31; /* [31:1] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_UPMEM_MEM_ACCESS_RDATA_ADDR (0x108404)
+#define NBL_UPMEM_MEM_ACCESS_RDATA_DEPTH (1)
+#define NBL_UPMEM_MEM_ACCESS_RDATA_WIDTH (32)
+#define NBL_UPMEM_MEM_ACCESS_RDATA_DWLEN (1)
+union upmem_mem_access_rdata_u {
+	struct upmem_mem_access_rdata {
+		u32 rdata:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPMEM_MEM_ACCESS_RDATA_DWLEN];
+} __packed;
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uqm.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uqm.h
new file mode 100644
index 0000000000000000000000000000000000000000..3fc1e5a045b7bf181e7926a6b271e42405ed55a4
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uqm.h
@@ -0,0 +1,617 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_UQM_H
+#define NBL_UQM_H 1
+
+#include <linux/types.h>
+
+#define NBL_UQM_BASE (0x00114000)
+
+#define NBL_UQM_INT_STATUS_ADDR (0x114000)
+#define NBL_UQM_INT_STATUS_DEPTH (1)
+#define NBL_UQM_INT_STATUS_WIDTH (32)
+#define NBL_UQM_INT_STATUS_DWLEN (1)
+union uqm_int_status_u {
+	struct uqm_int_status {
+		u32 ucor_err:1; /* [0] Default:0x0 RWC */
+		u32 fifo_w_err:1; /* [1] Default:0x0 RWC */
+		u32 fifo_r_err:1; /* [2] Default:0x0 RWC */
+		u32 dport_err:1; /* [3] Default:0x0 RWC */
+		u32 weight_err:1; /* [4] Default:0x0 RWC */
+		u32 dport_value_err:1; /* [5] Default:0x0 RWC */
+		u32 sport_value_err:1; /* [6] Default:0x0 RWC */
+		u32 slice_del_overflow:1; /* [7] Default:0x0 RWC */
+		u32 color_err:1; /* [8] Default:0x0 RWC */
+		u32 cor_err:1; /* [9] Default:0x0 RWC */
+		u32 cif_err:1; /* [10] Default:0x0 RWC */
+		u32 rsv:21; /* [31:11] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UQM_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_UQM_INT_MASK_ADDR (0x114004)
+#define NBL_UQM_INT_MASK_DEPTH (1)
+#define NBL_UQM_INT_MASK_WIDTH (32)
+#define NBL_UQM_INT_MASK_DWLEN (1)
+union uqm_int_mask_u {
+	struct uqm_int_mask {
+		u32 ucor_err:1; /* [0] Default:0x0 RW */
+		u32 fifo_w_err:1; /* [1] Default:0x0 RW */
+		u32 fifo_r_err:1; /* [2] Default:0x0 RW */
+		u32 dport_err:1; /* [3] Default:0x0 RW */
+		u32 weight_err:1; /* [4] Default:0x0 RW */
+		u32 dport_value_err:1; /* [5] Default:0x0 RW */
+		u32 sport_value_err:1; /* [6] Default:0x0 RW */
+		u32 slice_del_overflow:1; /* [7] Default:0x0 RW */
+		u32 color_err:1; /* [8] Default:0x0 RW */
+		u32 cor_err:1; /* [9] Default:0x0 RW */
+		u32 cif_err:1; /* [10] Default:0x0 RW */
+		u32 rsv:21; /* [31:11] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UQM_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_UQM_INT_SET_ADDR (0x114008)
+#define NBL_UQM_INT_SET_DEPTH (1)
+#define NBL_UQM_INT_SET_WIDTH (32)
+#define NBL_UQM_INT_SET_DWLEN (1)
+union uqm_int_set_u {
+	struct uqm_int_set {
+		u32 ucor_err:1; /* [0] Default:0x0 WO */
+		u32 fifo_w_err:1; /* [1] Default:0x0 WO */
+		u32 fifo_r_err:1; /* [2] Default:0x0 WO */
+		u32 dport_err:1; /* [3] Default:0x0 WO */
+		u32 weight_err:1; /* [4] Default:0x0 WO */
+		u32 dport_value_err:1; /* [5] Default:0x0 WO */
+		u32 sport_value_err:1; /* [6] Default:0x0 WO */
+		u32 slice_del_overflow:1; /* [7] Default:0x0 WO */
+		u32 color_err:1; /* [8] Default:0x0 WO */
+		u32 cor_err:1; /* [9] Default:0x0 WO */
+		u32 cif_err:1; /* [10] Default:0x0 WO */
+		u32 rsv:21; /* [31:11] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UQM_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_UQM_UCOR_ERR_INFO_ADDR (0x11400c)
+#define NBL_UQM_UCOR_ERR_INFO_DEPTH (1)
+#define NBL_UQM_UCOR_ERR_INFO_WIDTH (32)
+#define NBL_UQM_UCOR_ERR_INFO_DWLEN (1)
+union uqm_ucor_err_info_u {
+	struct uqm_ucor_err_info {
+		u32 ram_addr:13; /* [12:0] Default:0x0 RO */
+		u32 ram_id:4; /* [16:13] Default:0x0 RO */
+		u32 rsv:15; /* [31:17] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UQM_UCOR_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_UQM_DPORT_VALUE_ERR_INFO_ADDR (0x11402c)
+#define NBL_UQM_DPORT_VALUE_ERR_INFO_DEPTH (1)
+#define NBL_UQM_DPORT_VALUE_ERR_INFO_WIDTH (32)
+#define NBL_UQM_DPORT_VALUE_ERR_INFO_DWLEN (1)
+union uqm_dport_value_err_info_u {
+	struct uqm_dport_value_err_info {
+		u32 id:4; /* [3:0] Default:0x0 RO */
+		u32 rsv:28; /* [31:4] Default:0x0 RO */
+	}
__packed info; + u32 data[NBL_UQM_DPORT_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UQM_SPORT_VALUE_ERR_INFO_ADDR (0x114034) +#define NBL_UQM_SPORT_VALUE_ERR_INFO_DEPTH (1) +#define NBL_UQM_SPORT_VALUE_ERR_INFO_WIDTH (32) +#define NBL_UQM_SPORT_VALUE_ERR_INFO_DWLEN (1) +union uqm_sport_value_err_info_u { + struct uqm_sport_value_err_info { + u32 id:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_SPORT_VALUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UQM_COR_ERR_INFO_ADDR (0x11404c) +#define NBL_UQM_COR_ERR_INFO_DEPTH (1) +#define NBL_UQM_COR_ERR_INFO_WIDTH (32) +#define NBL_UQM_COR_ERR_INFO_DWLEN (1) +union uqm_cor_err_info_u { + struct uqm_cor_err_info { + u32 ram_addr:13; /* [12:0] Default:0x0 RO */ + u32 ram_id:4; /* [16:13] Default:0x0 RO */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UQM_CIF_ERR_INFO_ADDR (0x114054) +#define NBL_UQM_CIF_ERR_INFO_DEPTH (1) +#define NBL_UQM_CIF_ERR_INFO_WIDTH (32) +#define NBL_UQM_CIF_ERR_INFO_DWLEN (1) +union uqm_cif_err_info_u { + struct uqm_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UQM_CAR_CTRL_ADDR (0x114100) +#define NBL_UQM_CAR_CTRL_DEPTH (1) +#define NBL_UQM_CAR_CTRL_WIDTH (32) +#define NBL_UQM_CAR_CTRL_DWLEN (1) +union uqm_car_ctrl_u { + struct uqm_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UQM_INIT_START_ADDR (0x114104) +#define NBL_UQM_INIT_START_DEPTH (1) +#define NBL_UQM_INIT_START_WIDTH (32) +#define NBL_UQM_INIT_START_DWLEN (1) +union uqm_init_start_u { + struct uqm_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_INIT_START_DWLEN]; +} __packed; + +#define NBL_UQM_ACTION_ID_ADDR (0x114130) +#define NBL_UQM_ACTION_ID_DEPTH (1) +#define NBL_UQM_ACTION_ID_WIDTH (32) +#define NBL_UQM_ACTION_ID_DWLEN (1) +union uqm_action_id_u { + struct uqm_action_id { + u32 dport:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_ACTION_ID_DWLEN]; +} __packed; + +#define NBL_UQM_FLAG_OFFSET_ADDR (0x114134) +#define NBL_UQM_FLAG_OFFSET_DEPTH (1) +#define NBL_UQM_FLAG_OFFSET_WIDTH (32) +#define NBL_UQM_FLAG_OFFSET_DWLEN (1) +union uqm_flag_offset_u { + struct uqm_flag_offset { + u32 rdma:5; /* [4:0] Default:0xA RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_FLAG_OFFSET_DWLEN]; +} __packed; + +#define NBL_UQM_INQUE_SCH_ADDR (0x114138) +#define NBL_UQM_INQUE_SCH_DEPTH (1) +#define NBL_UQM_INQUE_SCH_WIDTH (32) +#define NBL_UQM_INQUE_SCH_DWLEN (1) +union uqm_inque_sch_u { + struct uqm_inque_sch { + u32 ucar_ppe:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_INQUE_SCH_DWLEN]; +} __packed; + +#define NBL_UQM_QUE_TYPE_ADDR (0x11413c) +#define NBL_UQM_QUE_TYPE_DEPTH (1) +#define NBL_UQM_QUE_TYPE_WIDTH (32) +#define NBL_UQM_QUE_TYPE_DWLEN (1) +union uqm_que_type_u { + struct uqm_que_type { + u32 bp_drop:1; /* [0] Default:0x1 RW 
*/ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_QUE_TYPE_DWLEN]; +} __packed; + +#define NBL_UQM_STAT_TYPE_ADDR (0x114140) +#define NBL_UQM_STAT_TYPE_DEPTH (1) +#define NBL_UQM_STAT_TYPE_WIDTH (32) +#define NBL_UQM_STAT_TYPE_DWLEN (1) +union uqm_stat_type_u { + struct uqm_stat_type { + u32 bp_drop:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_STAT_TYPE_DWLEN]; +} __packed; + +#define NBL_UQM_RDMA_EN_ADDR (0x114144) +#define NBL_UQM_RDMA_EN_DEPTH (1) +#define NBL_UQM_RDMA_EN_WIDTH (32) +#define NBL_UQM_RDMA_EN_DWLEN (1) +union uqm_rdma_en_u { + struct uqm_rdma_en { + u32 vld:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_RDMA_EN_DWLEN]; +} __packed; + +#define NBL_UQM_LB_TTL_DROP_ADDR (0x114148) +#define NBL_UQM_LB_TTL_DROP_DEPTH (1) +#define NBL_UQM_LB_TTL_DROP_WIDTH (32) +#define NBL_UQM_LB_TTL_DROP_DWLEN (1) +union uqm_lb_ttl_drop_u { + struct uqm_lb_ttl_drop { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_LB_TTL_DROP_DWLEN]; +} __packed; + +#define NBL_UQM_HIGH_PRI_EN_ADDR (0x114150) +#define NBL_UQM_HIGH_PRI_EN_DEPTH (1) +#define NBL_UQM_HIGH_PRI_EN_WIDTH (32) +#define NBL_UQM_HIGH_PRI_EN_DWLEN (1) +union uqm_high_pri_en_u { + struct uqm_high_pri_en { + u32 inque:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_HIGH_PRI_EN_DWLEN]; +} __packed; + +#define NBL_UQM_ERR_DROP_EN_ADDR (0x114160) +#define NBL_UQM_ERR_DROP_EN_DEPTH (1) +#define NBL_UQM_ERR_DROP_EN_WIDTH (32) +#define NBL_UQM_ERR_DROP_EN_DWLEN (1) +union uqm_err_drop_en_u { + struct uqm_err_drop_en { + u32 dport:1; /* [0] Default:0x1 RW */ + u32 sport:1; /* [1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_ERR_DROP_EN_DWLEN]; +} __packed; + +#define NBL_UQM_QUE_LEN_ADDR (0x114200) +#define NBL_UQM_QUE_LEN_DEPTH (48) +#define NBL_UQM_QUE_LEN_WIDTH (32) +#define NBL_UQM_QUE_LEN_DWLEN (1) +union uqm_que_len_u { + struct uqm_que_len { + u32 unshare:12; /* [11:0] Default:0x2A RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 share:12; /* [27:16] Default:0x823 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_QUE_LEN_DWLEN]; +} __packed; +#define NBL_UQM_QUE_LEN_REG(r) (NBL_UQM_QUE_LEN_ADDR + \ + (NBL_UQM_QUE_LEN_DWLEN * 4) * (r)) + +#define NBL_UQM_PORT_LEN_ADDR (0x114300) +#define NBL_UQM_PORT_LEN_DEPTH (6) +#define NBL_UQM_PORT_LEN_WIDTH (32) +#define NBL_UQM_PORT_LEN_DWLEN (1) +union uqm_port_len_u { + struct uqm_port_len { + u32 port:12; /* [11:0] Default:0x7F9 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_PORT_LEN_DWLEN]; +} __packed; +#define NBL_UQM_PORT_LEN_REG(r) (NBL_UQM_PORT_LEN_ADDR + \ + (NBL_UQM_PORT_LEN_DWLEN * 4) * (r)) + +#define NBL_UQM_COS_LEN_ADDR (0x114360) +#define NBL_UQM_COS_LEN_DEPTH (1) +#define NBL_UQM_COS_LEN_WIDTH (32) +#define NBL_UQM_COS_LEN_DWLEN (1) +union uqm_cos_len_u { + struct uqm_cos_len { + u32 low:12; /* [11:0] Default:0x7F9 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 high:12; /* [27:16] Default:0x7F9 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_COS_LEN_DWLEN]; +} __packed; + +#define NBL_UQM_COS_TSH_ADDR (0x114400) +#define NBL_UQM_COS_TSH_DEPTH (40) +#define NBL_UQM_COS_TSH_WIDTH (32) +#define NBL_UQM_COS_TSH_DWLEN (1) +union
 uqm_cos_tsh_u { + struct uqm_cos_tsh { + u32 low:12; /* [11:0] Default:0x54 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 high:12; /* [27:16] Default:0x7E RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_COS_TSH_DWLEN]; +} __packed; +#define NBL_UQM_COS_TSH_REG(r) (NBL_UQM_COS_TSH_ADDR + \ + (NBL_UQM_COS_TSH_DWLEN * 4) * (r)) + +#define NBL_UQM_PORT_TSH_ADDR (0x114500) +#define NBL_UQM_PORT_TSH_DEPTH (5) +#define NBL_UQM_PORT_TSH_WIDTH (32) +#define NBL_UQM_PORT_TSH_DWLEN (1) +union uqm_port_tsh_u { + struct uqm_port_tsh { + u32 low:12; /* [11:0] Default:0xFC RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 high:12; /* [27:16] Default:0x155 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_PORT_TSH_DWLEN]; +} __packed; +#define NBL_UQM_PORT_TSH_REG(r) (NBL_UQM_PORT_TSH_ADDR + \ + (NBL_UQM_PORT_TSH_DWLEN * 4) * (r)) + +#define NBL_UQM_COS_WEIGHT_ADDR (0x114624) +#define NBL_UQM_COS_WEIGHT_DEPTH (1) +#define NBL_UQM_COS_WEIGHT_WIDTH (48) +#define NBL_UQM_COS_WEIGHT_DWLEN (2) +union uqm_cos_weight_u { + struct uqm_cos_weight { + u32 cos_l:32; /* [31:0] Default:0xffff_ffff RW */ + u32 cos_h:16; /* [47:32] Default:0xffff RW */ + } __packed info; + u32 data[NBL_UQM_COS_WEIGHT_DWLEN]; +} __packed; + +#define NBL_UQM_BP_TSH_ADDR (0x114630) +#define NBL_UQM_BP_TSH_DEPTH (1) +#define NBL_UQM_BP_TSH_WIDTH (32) +#define NBL_UQM_BP_TSH_DWLEN (1) +union uqm_bp_tsh_u { + struct uqm_bp_tsh { + u32 timing:32; /* [31:0] Default:0x00ff_ffff RW */ + } __packed info; + u32 data[NBL_UQM_BP_TSH_DWLEN]; +} __packed; + +#define NBL_UQM_PORT_AGING_ADDR (0x114634) +#define NBL_UQM_PORT_AGING_DEPTH (1) +#define NBL_UQM_PORT_AGING_WIDTH (32) +#define NBL_UQM_PORT_AGING_DWLEN (1) +union uqm_port_aging_u { + struct uqm_port_aging { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_PORT_AGING_DWLEN]; +} __packed; + +#define NBL_UQM_SHAPING_TIMING_ADD_PERIOD_ADDR (0x114640) +#define NBL_UQM_SHAPING_TIMING_ADD_PERIOD_DEPTH (1) +#define NBL_UQM_SHAPING_TIMING_ADD_PERIOD_WIDTH (32) +#define NBL_UQM_SHAPING_TIMING_ADD_PERIOD_DWLEN (1) +union uqm_shaping_timing_add_period_u { + struct uqm_shaping_timing_add_period { + u32 sch:12; /* [11:0] Default:0x320 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_SHAPING_TIMING_ADD_PERIOD_DWLEN]; +} __packed; + +#define NBL_UQM_SHAPING_DEPTH_ADDR (0x114644) +#define NBL_UQM_SHAPING_DEPTH_DEPTH (1) +#define NBL_UQM_SHAPING_DEPTH_WIDTH (32) +#define NBL_UQM_SHAPING_DEPTH_DWLEN (1) +union uqm_shaping_depth_u { + struct uqm_shaping_depth { + u32 sch:4; /* [3:0] Default:0x6 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_SHAPING_DEPTH_DWLEN]; +} __packed; + +#define NBL_UQM_SHAPING_COLOR_Y_DROP_ADDR (0x114648) +#define NBL_UQM_SHAPING_COLOR_Y_DROP_DEPTH (1) +#define NBL_UQM_SHAPING_COLOR_Y_DROP_WIDTH (32) +#define NBL_UQM_SHAPING_COLOR_Y_DROP_DWLEN (1) +union uqm_shaping_color_y_drop_u { + struct uqm_shaping_color_y_drop { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_SHAPING_COLOR_Y_DROP_DWLEN]; +} __packed; + +#define NBL_UQM_SHAPING_TYPE_ADDR (0x11464c) +#define NBL_UQM_SHAPING_TYPE_DEPTH (1) +#define NBL_UQM_SHAPING_TYPE_WIDTH (32) +#define NBL_UQM_SHAPING_TYPE_DWLEN (1) +union uqm_shaping_type_u { + struct uqm_shaping_type { + u32 value:2; /* [1:0] Default:0x0 RW */ + u32
rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_SHAPING_TYPE_DWLEN]; +} __packed; + +#define NBL_UQM_INIT_DONE_ADDR (0x114800) +#define NBL_UQM_INIT_DONE_DEPTH (1) +#define NBL_UQM_INIT_DONE_WIDTH (32) +#define NBL_UQM_INIT_DONE_DWLEN (1) +union uqm_init_done_u { + struct uqm_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UQM_NFULL_HISTORY_ADDR (0x114830) +#define NBL_UQM_NFULL_HISTORY_DEPTH (1) +#define NBL_UQM_NFULL_HISTORY_WIDTH (32) +#define NBL_UQM_NFULL_HISTORY_DWLEN (1) +union uqm_nfull_history_u { + struct uqm_nfull_history { + u32 ucar:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 info_buf_0:1; /* [2] Default:0x0 RC */ + u32 info_buf_1:1; /* [3] Default:0x0 RC */ + u32 info_buf_2:1; /* [4] Default:0x0 RC */ + u32 info_buf_3:1; /* [5] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [6] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_3:1; /* [9] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_NFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_UQM_NAFULL_HISTORY_ADDR (0x114834) +#define NBL_UQM_NAFULL_HISTORY_DEPTH (1) +#define NBL_UQM_NAFULL_HISTORY_WIDTH (32) +#define NBL_UQM_NAFULL_HISTORY_DWLEN (1) +union uqm_nafull_history_u { + struct uqm_nafull_history { + u32 ucar:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 info_buf_0:1; /* [2] Default:0x0 RC */ + u32 info_buf_1:1; /* [3] Default:0x0 RC */ + u32 info_buf_2:1; /* [4] Default:0x0 RC */ + u32 info_buf_3:1; /* [5] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [6] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_3:1; /* [9] Default:0x0 RC */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_NAFULL_HISTORY_DWLEN]; +} __packed; + +#define NBL_UQM_WERR_HISTORY_ADDR (0x114838) +#define NBL_UQM_WERR_HISTORY_DEPTH (1) +#define NBL_UQM_WERR_HISTORY_WIDTH (32) +#define NBL_UQM_WERR_HISTORY_DWLEN (1) +union uqm_werr_history_u { + struct uqm_werr_history { + u32 ucar:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 mfifo:1; /* [2] Default:0x0 RC */ + u32 info_buf_0:1; /* [3] Default:0x0 RC */ + u32 info_buf_1:1; /* [4] Default:0x0 RC */ + u32 info_buf_2:1; /* [5] Default:0x0 RC */ + u32 info_buf_3:1; /* [6] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [9] Default:0x0 RC */ + u32 pkt_len_buf_3:1; /* [10] Default:0x0 RC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_WERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_UQM_RERR_HISTORY_ADDR (0x11483c) +#define NBL_UQM_RERR_HISTORY_DEPTH (1) +#define NBL_UQM_RERR_HISTORY_WIDTH (32) +#define NBL_UQM_RERR_HISTORY_DWLEN (1) +union uqm_rerr_history_u { + struct uqm_rerr_history { + u32 ucar:1; /* [0] Default:0x0 RC */ + u32 ppe:1; /* [1] Default:0x0 RC */ + u32 mfifo:1; /* [2] Default:0x0 RC */ + u32 info_buf_0:1; /* [3] Default:0x0 RC */ + u32 info_buf_1:1; /* [4] Default:0x0 RC */ + u32 info_buf_2:1; /* [5] Default:0x0 RC */ + u32 info_buf_3:1; /* [6] Default:0x0 RC */ + u32 pkt_len_buf_0:1; /* [7] Default:0x0 RC */ + u32 pkt_len_buf_1:1; /* [8] Default:0x0 RC */ + u32 pkt_len_buf_2:1; /* [9] Default:0x0 
RC */ + u32 pkt_len_buf_3:1; /* [10] Default:0x0 RC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_RERR_HISTORY_DWLEN]; +} __packed; + +#define NBL_UQM_QUE_RDY_LOW_ADDR (0x114be0) +#define NBL_UQM_QUE_RDY_LOW_DEPTH (1) +#define NBL_UQM_QUE_RDY_LOW_WIDTH (32) +#define NBL_UQM_QUE_RDY_LOW_DWLEN (1) +union uqm_que_rdy_low_u { + struct uqm_que_rdy_low { + u32 pntr:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_QUE_RDY_LOW_DWLEN]; +} __packed; + +#define NBL_UQM_QUE_RDY_HIGH_ADDR (0x114be4) +#define NBL_UQM_QUE_RDY_HIGH_DEPTH (1) +#define NBL_UQM_QUE_RDY_HIGH_WIDTH (32) +#define NBL_UQM_QUE_RDY_HIGH_DWLEN (1) +union uqm_que_rdy_high_u { + struct uqm_que_rdy_high { + u32 pntr:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_QUE_RDY_HIGH_DWLEN]; +} __packed; + +#define NBL_UQM_LEN_UNDERFLOW_ADDR (0x114be8) +#define NBL_UQM_LEN_UNDERFLOW_DEPTH (1) +#define NBL_UQM_LEN_UNDERFLOW_WIDTH (32) +#define NBL_UQM_LEN_UNDERFLOW_DWLEN (1) +union uqm_len_underflow_u { + struct uqm_len_underflow { + u32 share:1; /* [0] Default:0x0 RO */ + u32 cos_l:1; /* [1] Default:0x0 RO */ + u32 cos_h:1; /* [2] Default:0x0 RO */ + u32 port:6; /* [8:3] Default:0x0 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_LEN_UNDERFLOW_DWLEN]; +} __packed; + +#define NBL_UQM_TOTAL_LEN_ADDR (0x114bec) +#define NBL_UQM_TOTAL_LEN_DEPTH (1) +#define NBL_UQM_TOTAL_LEN_WIDTH (32) +#define NBL_UQM_TOTAL_LEN_DWLEN (1) +union uqm_total_len_u { + struct uqm_total_len { + u32 pntr:12; /* [11:0] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_TOTAL_LEN_DWLEN]; +} __packed; + +#define NBL_UQM_SHAPING_TBL_ADDR (0x118000) +#define NBL_UQM_SHAPING_TBL_DEPTH (6) +#define NBL_UQM_SHAPING_TBL_WIDTH (128) +#define NBL_UQM_SHAPING_TBL_DWLEN (4) +union uqm_shaping_tbl_u { + struct uqm_shaping_tbl { + u32 valid:1; /* [0] Default:0x0 RW */ + u32 depth:19; /* [19:1] Default:0x0 RW */ + u32 cir:19; /* [38:20] Default:0x0 RW */ + u32 pir:19; /* [57:39] Default:0x0 RW */ + u32 cbs:21; /* [78:58] Default:0x0 RW */ + u32 pbs:21; /* [99:79] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_SHAPING_TBL_DWLEN]; +} __packed; +#define NBL_UQM_SHAPING_TBL_REG(r) (NBL_UQM_SHAPING_TBL_ADDR + \ + (NBL_UQM_SHAPING_TBL_DWLEN * 4) * (r)) + +#define NBL_UQM_VSI_MAPPING_TBL_ADDR (0x119000) +#define NBL_UQM_VSI_MAPPING_TBL_DEPTH (1024) +#define NBL_UQM_VSI_MAPPING_TBL_WIDTH (32) +#define NBL_UQM_VSI_MAPPING_TBL_DWLEN (1) +union uqm_vsi_mapping_tbl_u { + struct uqm_vsi_mapping_tbl { + u32 vf_id:6; /* [5:0] Default:0x0 RW */ + u32 valid:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UQM_VSI_MAPPING_TBL_DWLEN]; +} __packed; +#define NBL_UQM_VSI_MAPPING_TBL_REG(r) (NBL_UQM_VSI_MAPPING_TBL_ADDR + \ + (NBL_UQM_VSI_MAPPING_TBL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_urmux.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_urmux.h new file mode 100644 index 0000000000000000000000000000000000000000..a30f8f9c5ab874ca52e4e1de78e65ffd5ddc5ea3 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_urmux.h @@ -0,0 +1,1151 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_URMUX_H +#define NBL_URMUX_H 1 + +#include <linux/types.h> + +#define NBL_URMUX_BASE (0x00008000) + +#define NBL_URMUX_INT_STATUS_ADDR (0x8000) +#define NBL_URMUX_INT_STATUS_DEPTH (1) +#define NBL_URMUX_INT_STATUS_WIDTH (32) +#define NBL_URMUX_INT_STATUS_DWLEN (1) +union urmux_int_status_u { + struct urmux_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 parity_err:1; /* [4] Default:0x0 RWC */ + u32 lowp:1; /* [5] Default:0x0 RWC */ + u32 cif_err:1; /* [6] Default:0x0 RWC */ + u32 eth0_drop:1; /* [7] Default:0x0 RWC */ + u32 eth1_drop:1; /* [8] Default:0x0 RWC */ + u32 eth2_drop:1; /* [9] Default:0x0 RWC */ + u32 eth3_drop:1; /* [10] Default:0x0 RWC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_URMUX_INT_MASK_ADDR (0x8004) +#define NBL_URMUX_INT_MASK_DEPTH (1) +#define NBL_URMUX_INT_MASK_WIDTH (32) +#define NBL_URMUX_INT_MASK_DWLEN (1) +union urmux_int_mask_u { + struct urmux_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 parity_err:1; /* [4] Default:0x0 RW */ + u32 lowp:1; /* [5] Default:0x0 RW */ + u32 cif_err:1; /* [6] Default:0x0 RW */ + u32 eth0_drop:1; /* [7] Default:0x0 RW */ + u32 eth1_drop:1; /* [8] Default:0x0 RW */ + u32 eth2_drop:1; /* [9] Default:0x0 RW */ + u32 eth3_drop:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_INT_MASK_DWLEN]; +} __packed; + +#define NBL_URMUX_INT_SET_ADDR (0x8008) +#define NBL_URMUX_INT_SET_DEPTH (1) +#define NBL_URMUX_INT_SET_WIDTH (32) +#define NBL_URMUX_INT_SET_DWLEN (1) +union urmux_int_set_u { + struct urmux_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 parity_err:1; /* [4] Default:0x0 WO */ + u32 lowp:1; /* [5] Default:0x0 WO */ + u32 cif_err:1; /* [6] Default:0x0 WO */ + u32 eth0_drop:1; /* [7] Default:0x0 WO */ + u32 eth1_drop:1; /* [8] Default:0x0 WO */ + u32 eth2_drop:1; /* [9] Default:0x0 WO */ + u32 eth3_drop:1; /* [10] Default:0x0 WO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_INT_SET_DWLEN]; +} __packed; + +#define NBL_URMUX_UCOR_ERR_INFO_ADDR (0x800c) +#define NBL_URMUX_UCOR_ERR_INFO_DEPTH (1) +#define NBL_URMUX_UCOR_ERR_INFO_WIDTH (32) +#define NBL_URMUX_UCOR_ERR_INFO_DWLEN (1) +union urmux_ucor_err_info_u { + struct urmux_ucor_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_UCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_URMUX_COR_ERR_INFO_ADDR (0x8014) +#define NBL_URMUX_COR_ERR_INFO_DEPTH (1) +#define NBL_URMUX_COR_ERR_INFO_WIDTH (32) +#define NBL_URMUX_COR_ERR_INFO_DWLEN (1) +union urmux_cor_err_info_u { + struct urmux_cor_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_URMUX_PARITY_ERR_INFO_ADDR (0x802c) +#define NBL_URMUX_PARITY_ERR_INFO_DEPTH (1) +#define NBL_URMUX_PARITY_ERR_INFO_WIDTH (32) +#define
NBL_URMUX_PARITY_ERR_INFO_DWLEN (1) +union urmux_parity_err_info_u { + struct urmux_parity_err_info { + u32 ram_addr:28; /* [27:0] Default:0x0 RO */ + u32 ram_id:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_URMUX_CIF_ERR_INFO_ADDR (0x8038) +#define NBL_URMUX_CIF_ERR_INFO_DEPTH (1) +#define NBL_URMUX_CIF_ERR_INFO_WIDTH (32) +#define NBL_URMUX_CIF_ERR_INFO_DWLEN (1) +union urmux_cif_err_info_u { + struct urmux_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_URMUX_CAR_CTRL_ADDR (0x8100) +#define NBL_URMUX_CAR_CTRL_DEPTH (1) +#define NBL_URMUX_CAR_CTRL_WIDTH (32) +#define NBL_URMUX_CAR_CTRL_DWLEN (1) +union urmux_car_ctrl_u { + struct urmux_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_URMUX_PKT_LEN_ADDR (0x8104) +#define NBL_URMUX_PKT_LEN_DEPTH (1) +#define NBL_URMUX_PKT_LEN_WIDTH (32) +#define NBL_URMUX_PKT_LEN_DWLEN (1) +union urmux_pkt_len_u { + struct urmux_pkt_len { + u32 min:7; /* [6:0] Default:60 RW */ + u32 rsv1:8; /* [14:7] Default:0x0 RO */ + u32 min_chk_en:1; /* [15] Default:0x1 RW */ + u32 max:14; /* [29:16] Default:9600 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 max_chk_en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_URMUX_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_DROP_TH_ADDR (0x8130) +#define NBL_URMUX_ETH0_DROP_TH_DEPTH (1) +#define NBL_URMUX_ETH0_DROP_TH_WIDTH (32) +#define NBL_URMUX_ETH0_DROP_TH_DWLEN (1) +union urmux_eth0_drop_th_u { + struct urmux_eth0_drop_th { + u32 info_drop_th:9; /* [8:0] Default:240 RW */ + u32 rsv1:7; /* [15:9] Default:0x0 RO */ + u32 data_drop_th:9; /* [24:16] Default:240 RW */ + u32 rsv:7; /* [31:25] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_ETH0_DROP_TH_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_DROP_TH_ADDR (0x8134) +#define NBL_URMUX_ETH1_DROP_TH_DEPTH (1) +#define NBL_URMUX_ETH1_DROP_TH_WIDTH (32) +#define NBL_URMUX_ETH1_DROP_TH_DWLEN (1) +union urmux_eth1_drop_th_u { + struct urmux_eth1_drop_th { + u32 info_drop_th:9; /* [8:0] Default:240 RW */ + u32 rsv1:7; /* [15:9] Default:0x0 RO */ + u32 data_drop_th:9; /* [24:16] Default:240 RW */ + u32 rsv:7; /* [31:25] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_ETH1_DROP_TH_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_DROP_TH_ADDR (0x8138) +#define NBL_URMUX_ETH2_DROP_TH_DEPTH (1) +#define NBL_URMUX_ETH2_DROP_TH_WIDTH (32) +#define NBL_URMUX_ETH2_DROP_TH_DWLEN (1) +union urmux_eth2_drop_th_u { + struct urmux_eth2_drop_th { + u32 info_drop_th:9; /* [8:0] Default:240 RW */ + u32 rsv1:7; /* [15:9] Default:0x0 RO */ + u32 data_drop_th:9; /* [24:16] Default:240 RW */ + u32 rsv:7; /* [31:25] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_ETH2_DROP_TH_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_DROP_TH_ADDR (0x813c) +#define NBL_URMUX_ETH3_DROP_TH_DEPTH (1) +#define NBL_URMUX_ETH3_DROP_TH_WIDTH (32) +#define NBL_URMUX_ETH3_DROP_TH_DWLEN (1) +union urmux_eth3_drop_th_u { + struct urmux_eth3_drop_th { + u32 info_drop_th:9; /* [8:0] Default:240 RW */ + u32 rsv1:7; /* [15:9] Default:0x0 RO */ + u32 
data_drop_th:9; /* [24:16] Default:240 RW */ + u32 rsv:7; /* [31:25] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_ETH3_DROP_TH_DWLEN]; +} __packed; + +#define NBL_URMUX_DDMUX_FC_TH_ADDR (0x8150) +#define NBL_URMUX_DDMUX_FC_TH_DEPTH (1) +#define NBL_URMUX_DDMUX_FC_TH_WIDTH (32) +#define NBL_URMUX_DDMUX_FC_TH_DWLEN (1) +union urmux_ddmux_fc_th_u { + struct urmux_ddmux_fc_th { + u32 info_fc_th:10; /* [9:0] Default:320 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 data_fc_th:10; /* [25:16] Default:320 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_DDMUX_FC_TH_DWLEN]; +} __packed; + +#define NBL_URMUX_DDMUX_LINK_FC_EN_ADDR (0x8158) +#define NBL_URMUX_DDMUX_LINK_FC_EN_DEPTH (1) +#define NBL_URMUX_DDMUX_LINK_FC_EN_WIDTH (32) +#define NBL_URMUX_DDMUX_LINK_FC_EN_DWLEN (1) +union urmux_ddmux_link_fc_en_u { + struct urmux_ddmux_link_fc_en { + u32 loop_link_fc_en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_DDMUX_LINK_FC_EN_DWLEN]; +} __packed; + +#define NBL_URMUX_SEOP_CHK_EN_ADDR (0x815c) +#define NBL_URMUX_SEOP_CHK_EN_DEPTH (1) +#define NBL_URMUX_SEOP_CHK_EN_WIDTH (32) +#define NBL_URMUX_SEOP_CHK_EN_DWLEN (1) +union urmux_seop_chk_en_u { + struct urmux_seop_chk_en { + u32 eth0_seop_chk_en:1; /* [0] Default:0x1 RW */ + u32 eth1_seop_chk_en:1; /* [1] Default:0x1 RW */ + u32 eth2_seop_chk_en:1; /* [2] Default:0x1 RW */ + u32 eth3_seop_chk_en:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_SEOP_CHK_EN_DWLEN]; +} __packed; + +#define NBL_URMUX_LOWP_TIME_ADDR (0x8160) +#define NBL_URMUX_LOWP_TIME_DEPTH (1) +#define NBL_URMUX_LOWP_TIME_WIDTH (64) +#define NBL_URMUX_LOWP_TIME_DWLEN (2) +union urmux_lowp_time_u { + struct urmux_lowp_time { + u32 lowp_time_arr[2]; /* [63:0] Default:0x3938700 RW */ + } __packed info; + u32 data[NBL_URMUX_LOWP_TIME_DWLEN]; +} __packed; + +#define NBL_URMUX_LOWP_EN_ADDR (0x8168) +#define NBL_URMUX_LOWP_EN_DEPTH (1) +#define NBL_URMUX_LOWP_EN_WIDTH (32) +#define NBL_URMUX_LOWP_EN_DWLEN (1) +union urmux_lowp_en_u { + struct urmux_lowp_en { + u32 lowp_en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_LOWP_EN_DWLEN]; +} __packed; + +#define NBL_URMUX_TDM_SLOT_LEN_ADDR (0x816c) +#define NBL_URMUX_TDM_SLOT_LEN_DEPTH (1) +#define NBL_URMUX_TDM_SLOT_LEN_WIDTH (32) +#define NBL_URMUX_TDM_SLOT_LEN_DWLEN (1) +union urmux_tdm_slot_len_u { + struct urmux_tdm_slot_len { + u32 len:4; /* [3:0] Default:0xf RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_TDM_SLOT_LEN_DWLEN]; +} __packed; + +#define NBL_URMUX_TDM_SLOT_ID_0_ADDR (0x8170) +#define NBL_URMUX_TDM_SLOT_ID_0_DEPTH (1) +#define NBL_URMUX_TDM_SLOT_ID_0_WIDTH (32) +#define NBL_URMUX_TDM_SLOT_ID_0_DWLEN (1) +union urmux_tdm_slot_id_0_u { + struct urmux_tdm_slot_id_0 { + u32 slot0_id:3; /* [2:0] Default:0x0 RW */ + u32 rsv3:5; /* [7:3] Default:0x0 RO */ + u32 slot1_id:3; /* [10:8] Default:0x1 RW */ + u32 rsv2:5; /* [15:11] Default:0x0 RO */ + u32 slot2_id:3; /* [18:16] Default:0x2 RW */ + u32 rsv1:5; /* [23:19] Default:0x0 RO */ + u32 slot3_id:3; /* [26:24] Default:0x3 RW */ + u32 rsv:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_TDM_SLOT_ID_0_DWLEN]; +} __packed; + +#define NBL_URMUX_TDM_SLOT_ID_1_ADDR (0x8174) +#define NBL_URMUX_TDM_SLOT_ID_1_DEPTH (1) +#define NBL_URMUX_TDM_SLOT_ID_1_WIDTH (32) +#define 
NBL_URMUX_TDM_SLOT_ID_1_DWLEN (1) +union urmux_tdm_slot_id_1_u { + struct urmux_tdm_slot_id_1 { + u32 slot4_id:3; /* [2:0] Default:0x4 RW */ + u32 rsv3:5; /* [7:3] Default:0x0 RO */ + u32 slot5_id:3; /* [10:8] Default:0x0 RW */ + u32 rsv2:5; /* [15:11] Default:0x0 RO */ + u32 slot6_id:3; /* [18:16] Default:0x1 RW */ + u32 rsv1:5; /* [23:19] Default:0x0 RO */ + u32 slot7_id:3; /* [26:24] Default:0x2 RW */ + u32 rsv:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_TDM_SLOT_ID_1_DWLEN]; +} __packed; + +#define NBL_URMUX_TDM_SLOT_ID_2_ADDR (0x8178) +#define NBL_URMUX_TDM_SLOT_ID_2_DEPTH (1) +#define NBL_URMUX_TDM_SLOT_ID_2_WIDTH (32) +#define NBL_URMUX_TDM_SLOT_ID_2_DWLEN (1) +union urmux_tdm_slot_id_2_u { + struct urmux_tdm_slot_id_2 { + u32 slot8_id:3; /* [2:0] Default:0x3 RW */ + u32 rsv3:5; /* [7:3] Default:0x0 RO */ + u32 slot9_id:3; /* [10:8] Default:0x4 RW */ + u32 rsv2:5; /* [15:11] Default:0x0 RO */ + u32 slot10_id:3; /* [18:16] Default:0x0 RW */ + u32 rsv1:5; /* [23:19] Default:0x0 RO */ + u32 slot11_id:3; /* [26:24] Default:0x1 RW */ + u32 rsv:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_TDM_SLOT_ID_2_DWLEN]; +} __packed; + +#define NBL_URMUX_TDM_SLOT_ID_3_ADDR (0x817c) +#define NBL_URMUX_TDM_SLOT_ID_3_DEPTH (1) +#define NBL_URMUX_TDM_SLOT_ID_3_WIDTH (32) +#define NBL_URMUX_TDM_SLOT_ID_3_DWLEN (1) +union urmux_tdm_slot_id_3_u { + struct urmux_tdm_slot_id_3 { + u32 slot12_id:3; /* [2:0] Default:0x2 RW */ + u32 rsv2:5; /* [7:3] Default:0x0 RO */ + u32 slot13_id:3; /* [10:8] Default:0x3 RW */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 slot14_id:3; /* [18:16] Default:0x4 RW */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_URMUX_TDM_SLOT_ID_3_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_SOP_ADDR (0x8800) +#define NBL_URMUX_ETH0_RX_SOP_DEPTH (1) +#define NBL_URMUX_ETH0_RX_SOP_WIDTH (32) +#define NBL_URMUX_ETH0_RX_SOP_DWLEN (1) +union urmux_eth0_rx_sop_u { + struct urmux_eth0_rx_sop { + u32 sop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_SOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_EOP_ADDR (0x8804) +#define NBL_URMUX_ETH0_RX_EOP_DEPTH (1) +#define NBL_URMUX_ETH0_RX_EOP_WIDTH (32) +#define NBL_URMUX_ETH0_RX_EOP_DWLEN (1) +union urmux_eth0_rx_eop_u { + struct urmux_eth0_rx_eop { + u32 eop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_EOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_PKT_ADDR (0x8808) +#define NBL_URMUX_ETH0_RX_PKT_DEPTH (1) +#define NBL_URMUX_ETH0_RX_PKT_WIDTH (32) +#define NBL_URMUX_ETH0_RX_PKT_DWLEN (1) +union urmux_eth0_rx_pkt_u { + struct urmux_eth0_rx_pkt { + u32 eth0_rx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_PKT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_BYTE_ADDR (0x880c) +#define NBL_URMUX_ETH0_RX_BYTE_DEPTH (1) +#define NBL_URMUX_ETH0_RX_BYTE_WIDTH (64) +#define NBL_URMUX_ETH0_RX_BYTE_DWLEN (2) +union urmux_eth0_rx_byte_u { + struct urmux_eth0_rx_byte { + u32 eth0_rx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_BYTE_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_UC_ADDR (0x8814) +#define NBL_URMUX_ETH0_RX_UC_DEPTH (1) +#define NBL_URMUX_ETH0_RX_UC_WIDTH (32) +#define NBL_URMUX_ETH0_RX_UC_DWLEN (1) +union urmux_eth0_rx_uc_u { + struct urmux_eth0_rx_uc { + u32 eth0_rx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_UC_DWLEN]; +} __packed; + 
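The counter registers above are plain MMIO dwords that a driver decodes through the generated unions; registers with WIDTH > 32 (DWLEN > 1) span consecutive u32 slots. A minimal sketch of that access pattern, not part of the generated header: the BAR pointer and helper names are invented for illustration, ioread32() is the only kernel API assumed, and low-dword-first ordering of the _arr[] fields is also an assumption.

    /* Illustrative sketch only; needs <linux/io.h> in addition to this header. */
    static u32 nbl_urmux_eth0_rx_uc_read(void __iomem *nbl_reg_base)
    {
    	union urmux_eth0_rx_uc_u reg;

    	/* single-dword counter: one MMIO read, decoded via the union */
    	reg.data[0] = ioread32(nbl_reg_base + NBL_URMUX_ETH0_RX_UC_ADDR);
    	return reg.info.eth0_rx_uc;
    }

    static u64 nbl_urmux_eth0_rx_byte_read(void __iomem *nbl_reg_base)
    {
    	union urmux_eth0_rx_byte_u reg;
    	int i;

    	/* 64-bit counter: read DWLEN consecutive dwords, assumed low dword first */
    	for (i = 0; i < NBL_URMUX_ETH0_RX_BYTE_DWLEN; i++)
    		reg.data[i] = ioread32(nbl_reg_base +
    				       NBL_URMUX_ETH0_RX_BYTE_ADDR + 4 * i);
    	return ((u64)reg.info.eth0_rx_byte_arr[1] << 32) |
    	       reg.info.eth0_rx_byte_arr[0];
    }
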
+#define NBL_URMUX_ETH0_RX_MC_ADDR (0x8818) +#define NBL_URMUX_ETH0_RX_MC_DEPTH (1) +#define NBL_URMUX_ETH0_RX_MC_WIDTH (32) +#define NBL_URMUX_ETH0_RX_MC_DWLEN (1) +union urmux_eth0_rx_mc_u { + struct urmux_eth0_rx_mc { + u32 eth0_rx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_MC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_BC_ADDR (0x881c) +#define NBL_URMUX_ETH0_RX_BC_DEPTH (1) +#define NBL_URMUX_ETH0_RX_BC_WIDTH (32) +#define NBL_URMUX_ETH0_RX_BC_DWLEN (1) +union urmux_eth0_rx_bc_u { + struct urmux_eth0_rx_bc { + u32 eth0_rx_bc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_BC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_LESS_60_ADDR (0x8820) +#define NBL_URMUX_ETH0_RX_LESS_60_DEPTH (1) +#define NBL_URMUX_ETH0_RX_LESS_60_WIDTH (32) +#define NBL_URMUX_ETH0_RX_LESS_60_DWLEN (1) +union urmux_eth0_rx_less_60_u { + struct urmux_eth0_rx_less_60 { + u32 eth0_rx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_LESS_60_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_61_123_ADDR (0x8828) +#define NBL_URMUX_ETH0_RX_61_123_DEPTH (1) +#define NBL_URMUX_ETH0_RX_61_123_WIDTH (32) +#define NBL_URMUX_ETH0_RX_61_123_DWLEN (1) +union urmux_eth0_rx_61_123_u { + struct urmux_eth0_rx_61_123 { + u32 eth0_rx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_61_123_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_124_251_ADDR (0x882c) +#define NBL_URMUX_ETH0_RX_124_251_DEPTH (1) +#define NBL_URMUX_ETH0_RX_124_251_WIDTH (32) +#define NBL_URMUX_ETH0_RX_124_251_DWLEN (1) +union urmux_eth0_rx_124_251_u { + struct urmux_eth0_rx_124_251 { + u32 eth0_rx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_124_251_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_252_507_ADDR (0x8830) +#define NBL_URMUX_ETH0_RX_252_507_DEPTH (1) +#define NBL_URMUX_ETH0_RX_252_507_WIDTH (32) +#define NBL_URMUX_ETH0_RX_252_507_DWLEN (1) +union urmux_eth0_rx_252_507_u { + struct urmux_eth0_rx_252_507 { + u32 eth0_rx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_252_507_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_508_1019_ADDR (0x8834) +#define NBL_URMUX_ETH0_RX_508_1019_DEPTH (1) +#define NBL_URMUX_ETH0_RX_508_1019_WIDTH (32) +#define NBL_URMUX_ETH0_RX_508_1019_DWLEN (1) +union urmux_eth0_rx_508_1019_u { + struct urmux_eth0_rx_508_1019 { + u32 eth0_rx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_508_1019_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_1020_1514_ADDR (0x8838) +#define NBL_URMUX_ETH0_RX_1020_1514_DEPTH (1) +#define NBL_URMUX_ETH0_RX_1020_1514_WIDTH (32) +#define NBL_URMUX_ETH0_RX_1020_1514_DWLEN (1) +union urmux_eth0_rx_1020_1514_u { + struct urmux_eth0_rx_1020_1514 { + u32 eth0_rx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_1020_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_LARGE_1514_ADDR (0x883c) +#define NBL_URMUX_ETH0_RX_LARGE_1514_DEPTH (1) +#define NBL_URMUX_ETH0_RX_LARGE_1514_WIDTH (32) +#define NBL_URMUX_ETH0_RX_LARGE_1514_DWLEN (1) +union urmux_eth0_rx_large_1514_u { + struct urmux_eth0_rx_large_1514 { + u32 eth0_rx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_MAC_ERR_ADDR (0x8840) +#define NBL_URMUX_ETH0_RX_MAC_ERR_DEPTH (1) +#define 
NBL_URMUX_ETH0_RX_MAC_ERR_WIDTH (32) +#define NBL_URMUX_ETH0_RX_MAC_ERR_DWLEN (1) +union urmux_eth0_rx_mac_err_u { + struct urmux_eth0_rx_mac_err { + u32 eth0_rx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_MAC_ERR_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_OVERLONG_ADDR (0x8844) +#define NBL_URMUX_ETH0_RX_OVERLONG_DEPTH (1) +#define NBL_URMUX_ETH0_RX_OVERLONG_WIDTH (32) +#define NBL_URMUX_ETH0_RX_OVERLONG_DWLEN (1) +union urmux_eth0_rx_overlong_u { + struct urmux_eth0_rx_overlong { + u32 eth0_rx_overlong:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_OVERLONG_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_OVERSHORT_ADDR (0x8848) +#define NBL_URMUX_ETH0_RX_OVERSHORT_DEPTH (1) +#define NBL_URMUX_ETH0_RX_OVERSHORT_WIDTH (32) +#define NBL_URMUX_ETH0_RX_OVERSHORT_DWLEN (1) +union urmux_eth0_rx_overshort_u { + struct urmux_eth0_rx_overshort { + u32 eth0_rx_overshort:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_OVERSHORT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH0_RX_SEOP_ERR_ADDR (0x884c) +#define NBL_URMUX_ETH0_RX_SEOP_ERR_DEPTH (1) +#define NBL_URMUX_ETH0_RX_SEOP_ERR_WIDTH (32) +#define NBL_URMUX_ETH0_RX_SEOP_ERR_DWLEN (1) +union urmux_eth0_rx_seop_err_u { + struct urmux_eth0_rx_seop_err { + u32 eth0_rx_seop_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH0_RX_SEOP_ERR_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_SOP_ADDR (0x8900) +#define NBL_URMUX_ETH1_RX_SOP_DEPTH (1) +#define NBL_URMUX_ETH1_RX_SOP_WIDTH (32) +#define NBL_URMUX_ETH1_RX_SOP_DWLEN (1) +union urmux_eth1_rx_sop_u { + struct urmux_eth1_rx_sop { + u32 sop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_SOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_EOP_ADDR (0x8904) +#define NBL_URMUX_ETH1_RX_EOP_DEPTH (1) +#define NBL_URMUX_ETH1_RX_EOP_WIDTH (32) +#define NBL_URMUX_ETH1_RX_EOP_DWLEN (1) +union urmux_eth1_rx_eop_u { + struct urmux_eth1_rx_eop { + u32 eop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_EOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_PKT_ADDR (0x8908) +#define NBL_URMUX_ETH1_RX_PKT_DEPTH (1) +#define NBL_URMUX_ETH1_RX_PKT_WIDTH (32) +#define NBL_URMUX_ETH1_RX_PKT_DWLEN (1) +union urmux_eth1_rx_pkt_u { + struct urmux_eth1_rx_pkt { + u32 eth1_rx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_PKT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_BYTE_ADDR (0x890c) +#define NBL_URMUX_ETH1_RX_BYTE_DEPTH (1) +#define NBL_URMUX_ETH1_RX_BYTE_WIDTH (64) +#define NBL_URMUX_ETH1_RX_BYTE_DWLEN (2) +union urmux_eth1_rx_byte_u { + struct urmux_eth1_rx_byte { + u32 eth1_rx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_BYTE_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_UC_ADDR (0x8914) +#define NBL_URMUX_ETH1_RX_UC_DEPTH (1) +#define NBL_URMUX_ETH1_RX_UC_WIDTH (32) +#define NBL_URMUX_ETH1_RX_UC_DWLEN (1) +union urmux_eth1_rx_uc_u { + struct urmux_eth1_rx_uc { + u32 eth1_rx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_UC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_MC_ADDR (0x8918) +#define NBL_URMUX_ETH1_RX_MC_DEPTH (1) +#define NBL_URMUX_ETH1_RX_MC_WIDTH (32) +#define NBL_URMUX_ETH1_RX_MC_DWLEN (1) +union urmux_eth1_rx_mc_u { + struct urmux_eth1_rx_mc { + u32 eth1_rx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 
data[NBL_URMUX_ETH1_RX_MC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_BC_ADDR (0x891c) +#define NBL_URMUX_ETH1_RX_BC_DEPTH (1) +#define NBL_URMUX_ETH1_RX_BC_WIDTH (32) +#define NBL_URMUX_ETH1_RX_BC_DWLEN (1) +union urmux_eth1_rx_bc_u { + struct urmux_eth1_rx_bc { + u32 eth1_rx_bc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_BC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_LESS_60_ADDR (0x8920) +#define NBL_URMUX_ETH1_RX_LESS_60_DEPTH (1) +#define NBL_URMUX_ETH1_RX_LESS_60_WIDTH (32) +#define NBL_URMUX_ETH1_RX_LESS_60_DWLEN (1) +union urmux_eth1_rx_less_60_u { + struct urmux_eth1_rx_less_60 { + u32 eth1_rx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_LESS_60_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_61_123_ADDR (0x8928) +#define NBL_URMUX_ETH1_RX_61_123_DEPTH (1) +#define NBL_URMUX_ETH1_RX_61_123_WIDTH (32) +#define NBL_URMUX_ETH1_RX_61_123_DWLEN (1) +union urmux_eth1_rx_61_123_u { + struct urmux_eth1_rx_61_123 { + u32 eth1_rx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_61_123_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_124_251_ADDR (0x892c) +#define NBL_URMUX_ETH1_RX_124_251_DEPTH (1) +#define NBL_URMUX_ETH1_RX_124_251_WIDTH (32) +#define NBL_URMUX_ETH1_RX_124_251_DWLEN (1) +union urmux_eth1_rx_124_251_u { + struct urmux_eth1_rx_124_251 { + u32 eth1_rx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_124_251_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_252_507_ADDR (0x8930) +#define NBL_URMUX_ETH1_RX_252_507_DEPTH (1) +#define NBL_URMUX_ETH1_RX_252_507_WIDTH (32) +#define NBL_URMUX_ETH1_RX_252_507_DWLEN (1) +union urmux_eth1_rx_252_507_u { + struct urmux_eth1_rx_252_507 { + u32 eth1_rx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_252_507_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_508_1019_ADDR (0x8934) +#define NBL_URMUX_ETH1_RX_508_1019_DEPTH (1) +#define NBL_URMUX_ETH1_RX_508_1019_WIDTH (32) +#define NBL_URMUX_ETH1_RX_508_1019_DWLEN (1) +union urmux_eth1_rx_508_1019_u { + struct urmux_eth1_rx_508_1019 { + u32 eth1_rx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_508_1019_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_1020_1514_ADDR (0x8938) +#define NBL_URMUX_ETH1_RX_1020_1514_DEPTH (1) +#define NBL_URMUX_ETH1_RX_1020_1514_WIDTH (32) +#define NBL_URMUX_ETH1_RX_1020_1514_DWLEN (1) +union urmux_eth1_rx_1020_1514_u { + struct urmux_eth1_rx_1020_1514 { + u32 eth1_rx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_1020_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_LARGE_1514_ADDR (0x893c) +#define NBL_URMUX_ETH1_RX_LARGE_1514_DEPTH (1) +#define NBL_URMUX_ETH1_RX_LARGE_1514_WIDTH (32) +#define NBL_URMUX_ETH1_RX_LARGE_1514_DWLEN (1) +union urmux_eth1_rx_large_1514_u { + struct urmux_eth1_rx_large_1514 { + u32 eth1_rx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_MAC_ERR_ADDR (0x8940) +#define NBL_URMUX_ETH1_RX_MAC_ERR_DEPTH (1) +#define NBL_URMUX_ETH1_RX_MAC_ERR_WIDTH (32) +#define NBL_URMUX_ETH1_RX_MAC_ERR_DWLEN (1) +union urmux_eth1_rx_mac_err_u { + struct urmux_eth1_rx_mac_err { + u32 eth1_rx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_MAC_ERR_DWLEN]; +} __packed; + +#define 
NBL_URMUX_ETH1_RX_OVERLONG_ADDR (0x8944) +#define NBL_URMUX_ETH1_RX_OVERLONG_DEPTH (1) +#define NBL_URMUX_ETH1_RX_OVERLONG_WIDTH (32) +#define NBL_URMUX_ETH1_RX_OVERLONG_DWLEN (1) +union urmux_eth1_rx_overlong_u { + struct urmux_eth1_rx_overlong { + u32 eth1_rx_overlong:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_OVERLONG_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_OVERSHORT_ADDR (0x8948) +#define NBL_URMUX_ETH1_RX_OVERSHORT_DEPTH (1) +#define NBL_URMUX_ETH1_RX_OVERSHORT_WIDTH (32) +#define NBL_URMUX_ETH1_RX_OVERSHORT_DWLEN (1) +union urmux_eth1_rx_overshort_u { + struct urmux_eth1_rx_overshort { + u32 eth1_rx_overshort:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_OVERSHORT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH1_RX_SEOP_ERR_ADDR (0x894c) +#define NBL_URMUX_ETH1_RX_SEOP_ERR_DEPTH (1) +#define NBL_URMUX_ETH1_RX_SEOP_ERR_WIDTH (32) +#define NBL_URMUX_ETH1_RX_SEOP_ERR_DWLEN (1) +union urmux_eth1_rx_seop_err_u { + struct urmux_eth1_rx_seop_err { + u32 eth1_rx_seop_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH1_RX_SEOP_ERR_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_SOP_ADDR (0x8a00) +#define NBL_URMUX_ETH2_RX_SOP_DEPTH (1) +#define NBL_URMUX_ETH2_RX_SOP_WIDTH (32) +#define NBL_URMUX_ETH2_RX_SOP_DWLEN (1) +union urmux_eth2_rx_sop_u { + struct urmux_eth2_rx_sop { + u32 sop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_SOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_EOP_ADDR (0x8a04) +#define NBL_URMUX_ETH2_RX_EOP_DEPTH (1) +#define NBL_URMUX_ETH2_RX_EOP_WIDTH (32) +#define NBL_URMUX_ETH2_RX_EOP_DWLEN (1) +union urmux_eth2_rx_eop_u { + struct urmux_eth2_rx_eop { + u32 eop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_EOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_PKT_ADDR (0x8a08) +#define NBL_URMUX_ETH2_RX_PKT_DEPTH (1) +#define NBL_URMUX_ETH2_RX_PKT_WIDTH (32) +#define NBL_URMUX_ETH2_RX_PKT_DWLEN (1) +union urmux_eth2_rx_pkt_u { + struct urmux_eth2_rx_pkt { + u32 eth2_rx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_PKT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_BYTE_ADDR (0x8a0c) +#define NBL_URMUX_ETH2_RX_BYTE_DEPTH (1) +#define NBL_URMUX_ETH2_RX_BYTE_WIDTH (64) +#define NBL_URMUX_ETH2_RX_BYTE_DWLEN (2) +union urmux_eth2_rx_byte_u { + struct urmux_eth2_rx_byte { + u32 eth2_rx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_BYTE_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_UC_ADDR (0x8a14) +#define NBL_URMUX_ETH2_RX_UC_DEPTH (1) +#define NBL_URMUX_ETH2_RX_UC_WIDTH (32) +#define NBL_URMUX_ETH2_RX_UC_DWLEN (1) +union urmux_eth2_rx_uc_u { + struct urmux_eth2_rx_uc { + u32 eth2_rx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_UC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_MC_ADDR (0x8a18) +#define NBL_URMUX_ETH2_RX_MC_DEPTH (1) +#define NBL_URMUX_ETH2_RX_MC_WIDTH (32) +#define NBL_URMUX_ETH2_RX_MC_DWLEN (1) +union urmux_eth2_rx_mc_u { + struct urmux_eth2_rx_mc { + u32 eth2_rx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_MC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_BC_ADDR (0x8a1c) +#define NBL_URMUX_ETH2_RX_BC_DEPTH (1) +#define NBL_URMUX_ETH2_RX_BC_WIDTH (32) +#define NBL_URMUX_ETH2_RX_BC_DWLEN (1) +union urmux_eth2_rx_bc_u { + struct urmux_eth2_rx_bc { + u32 eth2_rx_bc:32; /* [31:0] 
Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_BC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_LESS_60_ADDR (0x8a20) +#define NBL_URMUX_ETH2_RX_LESS_60_DEPTH (1) +#define NBL_URMUX_ETH2_RX_LESS_60_WIDTH (32) +#define NBL_URMUX_ETH2_RX_LESS_60_DWLEN (1) +union urmux_eth2_rx_less_60_u { + struct urmux_eth2_rx_less_60 { + u32 eth2_rx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_LESS_60_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_61_123_ADDR (0x8a28) +#define NBL_URMUX_ETH2_RX_61_123_DEPTH (1) +#define NBL_URMUX_ETH2_RX_61_123_WIDTH (32) +#define NBL_URMUX_ETH2_RX_61_123_DWLEN (1) +union urmux_eth2_rx_61_123_u { + struct urmux_eth2_rx_61_123 { + u32 eth2_rx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_61_123_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_124_251_ADDR (0x8a2c) +#define NBL_URMUX_ETH2_RX_124_251_DEPTH (1) +#define NBL_URMUX_ETH2_RX_124_251_WIDTH (32) +#define NBL_URMUX_ETH2_RX_124_251_DWLEN (1) +union urmux_eth2_rx_124_251_u { + struct urmux_eth2_rx_124_251 { + u32 eth2_rx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_124_251_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_252_507_ADDR (0x8a30) +#define NBL_URMUX_ETH2_RX_252_507_DEPTH (1) +#define NBL_URMUX_ETH2_RX_252_507_WIDTH (32) +#define NBL_URMUX_ETH2_RX_252_507_DWLEN (1) +union urmux_eth2_rx_252_507_u { + struct urmux_eth2_rx_252_507 { + u32 eth2_rx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_252_507_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_508_1019_ADDR (0x8a34) +#define NBL_URMUX_ETH2_RX_508_1019_DEPTH (1) +#define NBL_URMUX_ETH2_RX_508_1019_WIDTH (32) +#define NBL_URMUX_ETH2_RX_508_1019_DWLEN (1) +union urmux_eth2_rx_508_1019_u { + struct urmux_eth2_rx_508_1019 { + u32 eth2_rx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_508_1019_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_1020_1514_ADDR (0x8a38) +#define NBL_URMUX_ETH2_RX_1020_1514_DEPTH (1) +#define NBL_URMUX_ETH2_RX_1020_1514_WIDTH (32) +#define NBL_URMUX_ETH2_RX_1020_1514_DWLEN (1) +union urmux_eth2_rx_1020_1514_u { + struct urmux_eth2_rx_1020_1514 { + u32 eth2_rx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_1020_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_LARGE_1514_ADDR (0x8a3c) +#define NBL_URMUX_ETH2_RX_LARGE_1514_DEPTH (1) +#define NBL_URMUX_ETH2_RX_LARGE_1514_WIDTH (32) +#define NBL_URMUX_ETH2_RX_LARGE_1514_DWLEN (1) +union urmux_eth2_rx_large_1514_u { + struct urmux_eth2_rx_large_1514 { + u32 eth2_rx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_MAC_ERR_ADDR (0x8a40) +#define NBL_URMUX_ETH2_RX_MAC_ERR_DEPTH (1) +#define NBL_URMUX_ETH2_RX_MAC_ERR_WIDTH (32) +#define NBL_URMUX_ETH2_RX_MAC_ERR_DWLEN (1) +union urmux_eth2_rx_mac_err_u { + struct urmux_eth2_rx_mac_err { + u32 eth2_rx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_MAC_ERR_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_OVERLONG_ADDR (0x8a44) +#define NBL_URMUX_ETH2_RX_OVERLONG_DEPTH (1) +#define NBL_URMUX_ETH2_RX_OVERLONG_WIDTH (32) +#define NBL_URMUX_ETH2_RX_OVERLONG_DWLEN (1) +union urmux_eth2_rx_overlong_u { + struct urmux_eth2_rx_overlong { + u32 eth2_rx_overlong:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; 
+ u32 data[NBL_URMUX_ETH2_RX_OVERLONG_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_OVERSHORT_ADDR (0x8a48) +#define NBL_URMUX_ETH2_RX_OVERSHORT_DEPTH (1) +#define NBL_URMUX_ETH2_RX_OVERSHORT_WIDTH (32) +#define NBL_URMUX_ETH2_RX_OVERSHORT_DWLEN (1) +union urmux_eth2_rx_overshort_u { + struct urmux_eth2_rx_overshort { + u32 eth2_rx_overshort:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_OVERSHORT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH2_RX_SEOP_ERR_ADDR (0x8a4c) +#define NBL_URMUX_ETH2_RX_SEOP_ERR_DEPTH (1) +#define NBL_URMUX_ETH2_RX_SEOP_ERR_WIDTH (32) +#define NBL_URMUX_ETH2_RX_SEOP_ERR_DWLEN (1) +union urmux_eth2_rx_seop_err_u { + struct urmux_eth2_rx_seop_err { + u32 eth2_rx_seop_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH2_RX_SEOP_ERR_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_SOP_ADDR (0x8b00) +#define NBL_URMUX_ETH3_RX_SOP_DEPTH (1) +#define NBL_URMUX_ETH3_RX_SOP_WIDTH (32) +#define NBL_URMUX_ETH3_RX_SOP_DWLEN (1) +union urmux_eth3_rx_sop_u { + struct urmux_eth3_rx_sop { + u32 sop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_SOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_EOP_ADDR (0x8b04) +#define NBL_URMUX_ETH3_RX_EOP_DEPTH (1) +#define NBL_URMUX_ETH3_RX_EOP_WIDTH (32) +#define NBL_URMUX_ETH3_RX_EOP_DWLEN (1) +union urmux_eth3_rx_eop_u { + struct urmux_eth3_rx_eop { + u32 eop_cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_EOP_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_PKT_ADDR (0x8b08) +#define NBL_URMUX_ETH3_RX_PKT_DEPTH (1) +#define NBL_URMUX_ETH3_RX_PKT_WIDTH (32) +#define NBL_URMUX_ETH3_RX_PKT_DWLEN (1) +union urmux_eth3_rx_pkt_u { + struct urmux_eth3_rx_pkt { + u32 eth3_rx_pkt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_PKT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_BYTE_ADDR (0x8b0c) +#define NBL_URMUX_ETH3_RX_BYTE_DEPTH (1) +#define NBL_URMUX_ETH3_RX_BYTE_WIDTH (64) +#define NBL_URMUX_ETH3_RX_BYTE_DWLEN (2) +union urmux_eth3_rx_byte_u { + struct urmux_eth3_rx_byte { + u32 eth3_rx_byte_arr[2]; /* [63:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_BYTE_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_UC_ADDR (0x8b14) +#define NBL_URMUX_ETH3_RX_UC_DEPTH (1) +#define NBL_URMUX_ETH3_RX_UC_WIDTH (32) +#define NBL_URMUX_ETH3_RX_UC_DWLEN (1) +union urmux_eth3_rx_uc_u { + struct urmux_eth3_rx_uc { + u32 eth3_rx_uc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_UC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_MC_ADDR (0x8b18) +#define NBL_URMUX_ETH3_RX_MC_DEPTH (1) +#define NBL_URMUX_ETH3_RX_MC_WIDTH (32) +#define NBL_URMUX_ETH3_RX_MC_DWLEN (1) +union urmux_eth3_rx_mc_u { + struct urmux_eth3_rx_mc { + u32 eth3_rx_mc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_MC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_BC_ADDR (0x8b1c) +#define NBL_URMUX_ETH3_RX_BC_DEPTH (1) +#define NBL_URMUX_ETH3_RX_BC_WIDTH (32) +#define NBL_URMUX_ETH3_RX_BC_DWLEN (1) +union urmux_eth3_rx_bc_u { + struct urmux_eth3_rx_bc { + u32 eth3_rx_bc:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_BC_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_LESS_60_ADDR (0x8b20) +#define NBL_URMUX_ETH3_RX_LESS_60_DEPTH (1) +#define NBL_URMUX_ETH3_RX_LESS_60_WIDTH (32) +#define NBL_URMUX_ETH3_RX_LESS_60_DWLEN (1) +union urmux_eth3_rx_less_60_u { + struct 
urmux_eth3_rx_less_60 { + u32 eth3_rx_less_60:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_LESS_60_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_61_123_ADDR (0x8b28) +#define NBL_URMUX_ETH3_RX_61_123_DEPTH (1) +#define NBL_URMUX_ETH3_RX_61_123_WIDTH (32) +#define NBL_URMUX_ETH3_RX_61_123_DWLEN (1) +union urmux_eth3_rx_61_123_u { + struct urmux_eth3_rx_61_123 { + u32 eth3_rx_61_123:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_61_123_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_124_251_ADDR (0x8b2c) +#define NBL_URMUX_ETH3_RX_124_251_DEPTH (1) +#define NBL_URMUX_ETH3_RX_124_251_WIDTH (32) +#define NBL_URMUX_ETH3_RX_124_251_DWLEN (1) +union urmux_eth3_rx_124_251_u { + struct urmux_eth3_rx_124_251 { + u32 eth3_rx_124_251:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_124_251_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_252_507_ADDR (0x8b30) +#define NBL_URMUX_ETH3_RX_252_507_DEPTH (1) +#define NBL_URMUX_ETH3_RX_252_507_WIDTH (32) +#define NBL_URMUX_ETH3_RX_252_507_DWLEN (1) +union urmux_eth3_rx_252_507_u { + struct urmux_eth3_rx_252_507 { + u32 eth3_rx_252_507:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_252_507_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_508_1019_ADDR (0x8b34) +#define NBL_URMUX_ETH3_RX_508_1019_DEPTH (1) +#define NBL_URMUX_ETH3_RX_508_1019_WIDTH (32) +#define NBL_URMUX_ETH3_RX_508_1019_DWLEN (1) +union urmux_eth3_rx_508_1019_u { + struct urmux_eth3_rx_508_1019 { + u32 eth3_rx_508_1019:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_508_1019_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_1020_1514_ADDR (0x8b38) +#define NBL_URMUX_ETH3_RX_1020_1514_DEPTH (1) +#define NBL_URMUX_ETH3_RX_1020_1514_WIDTH (32) +#define NBL_URMUX_ETH3_RX_1020_1514_DWLEN (1) +union urmux_eth3_rx_1020_1514_u { + struct urmux_eth3_rx_1020_1514 { + u32 eth3_rx_1020_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_1020_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_LARGE_1514_ADDR (0x8b3c) +#define NBL_URMUX_ETH3_RX_LARGE_1514_DEPTH (1) +#define NBL_URMUX_ETH3_RX_LARGE_1514_WIDTH (32) +#define NBL_URMUX_ETH3_RX_LARGE_1514_DWLEN (1) +union urmux_eth3_rx_large_1514_u { + struct urmux_eth3_rx_large_1514 { + u32 eth3_rx_large_1514:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_LARGE_1514_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_MAC_ERR_ADDR (0x8b40) +#define NBL_URMUX_ETH3_RX_MAC_ERR_DEPTH (1) +#define NBL_URMUX_ETH3_RX_MAC_ERR_WIDTH (32) +#define NBL_URMUX_ETH3_RX_MAC_ERR_DWLEN (1) +union urmux_eth3_rx_mac_err_u { + struct urmux_eth3_rx_mac_err { + u32 eth3_rx_mac_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_MAC_ERR_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_OVERLONG_ADDR (0x8b44) +#define NBL_URMUX_ETH3_RX_OVERLONG_DEPTH (1) +#define NBL_URMUX_ETH3_RX_OVERLONG_WIDTH (32) +#define NBL_URMUX_ETH3_RX_OVERLONG_DWLEN (1) +union urmux_eth3_rx_overlong_u { + struct urmux_eth3_rx_overlong { + u32 eth3_rx_overlong:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_OVERLONG_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_OVERSHORT_ADDR (0x8b48) +#define NBL_URMUX_ETH3_RX_OVERSHORT_DEPTH (1) +#define NBL_URMUX_ETH3_RX_OVERSHORT_WIDTH (32) +#define NBL_URMUX_ETH3_RX_OVERSHORT_DWLEN (1) +union urmux_eth3_rx_overshort_u { + struct 
 urmux_eth3_rx_overshort { + u32 eth3_rx_overshort:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_OVERSHORT_DWLEN]; +} __packed; + +#define NBL_URMUX_ETH3_RX_SEOP_ERR_ADDR (0x8b4c) +#define NBL_URMUX_ETH3_RX_SEOP_ERR_DEPTH (1) +#define NBL_URMUX_ETH3_RX_SEOP_ERR_WIDTH (32) +#define NBL_URMUX_ETH3_RX_SEOP_ERR_DWLEN (1) +union urmux_eth3_rx_seop_err_u { + struct urmux_eth3_rx_seop_err { + u32 eth3_rx_seop_err:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_URMUX_ETH3_RX_SEOP_ERR_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ustat.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ustat.h new file mode 100644 index 0000000000000000000000000000000000000000..4289e830d203f7454c1836c83818e07a597c709c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ustat.h @@ -0,0 +1,378 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_USTAT_H +#define NBL_USTAT_H 1 + +#include <linux/types.h> + +#define NBL_USTAT_BASE (0x0011C000) + +#define NBL_USTAT_INT_STATUS_ADDR (0x11c000) +#define NBL_USTAT_INT_STATUS_DEPTH (1) +#define NBL_USTAT_INT_STATUS_WIDTH (32) +#define NBL_USTAT_INT_STATUS_DWLEN (1) +union ustat_int_status_u { + struct ustat_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 parity_err:1; /* [5] Default:0x0 RWC */ + u32 stat_type_err:1; /* [6] Default:0x0 RWC */ + u32 tbl_conflict_err:1; /* [7] Default:0x0 RWC */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_USTAT_INT_MASK_ADDR (0x11c004) +#define NBL_USTAT_INT_MASK_DEPTH (1) +#define NBL_USTAT_INT_MASK_WIDTH (32) +#define NBL_USTAT_INT_MASK_DWLEN (1) +union ustat_int_mask_u { + struct ustat_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 parity_err:1; /* [5] Default:0x0 RW */ + u32 stat_type_err:1; /* [6] Default:0x0 RW */ + u32 tbl_conflict_err:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_INT_MASK_DWLEN]; +} __packed; + +#define NBL_USTAT_INT_SET_ADDR (0x11c008) +#define NBL_USTAT_INT_SET_DEPTH (1) +#define NBL_USTAT_INT_SET_WIDTH (32) +#define NBL_USTAT_INT_SET_DWLEN (1) +union ustat_int_set_u { + struct ustat_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 parity_err:1; /* [5] Default:0x0 WO */ + u32 stat_type_err:1; /* [6] Default:0x0 WO */ + u32 tbl_conflict_err:1; /* [7] Default:0x0 WO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_INT_SET_DWLEN]; +} __packed; + +#define NBL_USTAT_COR_ERR_INFO_ADDR (0x11c00c) +#define NBL_USTAT_COR_ERR_INFO_DEPTH (1) +#define NBL_USTAT_COR_ERR_INFO_WIDTH (32) +#define NBL_USTAT_COR_ERR_INFO_DWLEN (1) +union ustat_cor_err_info_u { + struct ustat_cor_err_info { + u32 addr:10; /* [9:0] Default:0x0 RO */ + u32
rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_USTAT_PARITY_ERR_INFO_ADDR (0x11c01c) +#define NBL_USTAT_PARITY_ERR_INFO_DEPTH (1) +#define NBL_USTAT_PARITY_ERR_INFO_WIDTH (32) +#define NBL_USTAT_PARITY_ERR_INFO_DWLEN (1) +union ustat_parity_err_info_u { + struct ustat_parity_err_info { + u32 ram_id:4; /* [3:0] Default:0x0 RO */ + u32 addr:10; /* [13:4] Default:0x0 RO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_USTAT_CIF_ERR_INFO_ADDR (0x11c024) +#define NBL_USTAT_CIF_ERR_INFO_DEPTH (1) +#define NBL_USTAT_CIF_ERR_INFO_WIDTH (32) +#define NBL_USTAT_CIF_ERR_INFO_DWLEN (1) +union ustat_cif_err_info_u { + struct ustat_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_USTAT_TBL_CONFLICT_ERR_INFO_ADDR (0x11c030) +#define NBL_USTAT_TBL_CONFLICT_ERR_INFO_DEPTH (1) +#define NBL_USTAT_TBL_CONFLICT_ERR_INFO_WIDTH (32) +#define NBL_USTAT_TBL_CONFLICT_ERR_INFO_DWLEN (1) +union ustat_tbl_conflict_err_info_u { + struct ustat_tbl_conflict_err_info { + u32 tbl_id:4; /* [3:0] Default:0x0 RO */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_TBL_CONFLICT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_USTAT_CAR_CTRL_ADDR (0x11c100) +#define NBL_USTAT_CAR_CTRL_DEPTH (1) +#define NBL_USTAT_CAR_CTRL_WIDTH (32) +#define NBL_USTAT_CAR_CTRL_DWLEN (1) +union ustat_car_ctrl_u { + struct ustat_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x0 RW */ + u32 rctr_car:1; /* [1] Default:0x0 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_USTAT_INIT_START_ADDR (0x11c104) +#define NBL_USTAT_INIT_START_DEPTH (1) +#define NBL_USTAT_INIT_START_WIDTH (32) +#define NBL_USTAT_INIT_START_DWLEN (1) +union ustat_init_start_u { + struct ustat_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_INIT_START_DWLEN]; +} __packed; + +#define NBL_USTAT_CPU_READ_CLR_ADDR (0x11c108) +#define NBL_USTAT_CPU_READ_CLR_DEPTH (1) +#define NBL_USTAT_CPU_READ_CLR_WIDTH (32) +#define NBL_USTAT_CPU_READ_CLR_DWLEN (1) +union ustat_cpu_read_clr_u { + struct ustat_cpu_read_clr { + u32 cpu_clr:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_CPU_READ_CLR_DWLEN]; +} __packed; + +#define NBL_USTAT_GLB_CLR_ADDR (0x11c10c) +#define NBL_USTAT_GLB_CLR_DEPTH (1) +#define NBL_USTAT_GLB_CLR_WIDTH (32) +#define NBL_USTAT_GLB_CLR_DWLEN (1) +union ustat_glb_clr_u { + struct ustat_glb_clr { + u32 glb_clr:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_GLB_CLR_DWLEN]; +} __packed; + +#define NBL_USTAT_SEP_CLR_ADDR (0x11c110) +#define NBL_USTAT_SEP_CLR_DEPTH (1) +#define NBL_USTAT_SEP_CLR_WIDTH (32) +#define NBL_USTAT_SEP_CLR_DWLEN (1) +union ustat_sep_clr_u { + struct ustat_sep_clr { + u32 vsi_tbl_clr:1; /* [0] Default:0x0 RW */ + u32 ptype_tbl_clr:1; /* [1] Default:0x0 RW */ + u32 err_code_tbl_clr:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_USTAT_SEP_CLR_DWLEN]; +} __packed; + +#define NBL_USTAT_CFG_TEST_ADDR (0x11c118) +#define NBL_USTAT_CFG_TEST_DEPTH (1) +#define NBL_USTAT_CFG_TEST_WIDTH (32) +#define NBL_USTAT_CFG_TEST_DWLEN (1) +union ustat_cfg_test_u { + struct ustat_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_USTAT_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_USTAT_INIT_DONE_ADDR (0x11c200) +#define NBL_USTAT_INIT_DONE_DEPTH (1) +#define NBL_USTAT_INIT_DONE_WIDTH (32) +#define NBL_USTAT_INIT_DONE_DWLEN (1) +union ustat_init_done_u { + struct ustat_init_done { + u32 init_done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_USTAT_GLB_CLR_DONE_ADDR (0x11c204) +#define NBL_USTAT_GLB_CLR_DONE_DEPTH (1) +#define NBL_USTAT_GLB_CLR_DONE_WIDTH (32) +#define NBL_USTAT_GLB_CLR_DONE_DWLEN (1) +union ustat_glb_clr_done_u { + struct ustat_glb_clr_done { + u32 glb_clr_done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_GLB_CLR_DONE_DWLEN]; +} __packed; + +#define NBL_USTAT_SEP_CLR_DONE_ADDR (0x11c208) +#define NBL_USTAT_SEP_CLR_DONE_DEPTH (1) +#define NBL_USTAT_SEP_CLR_DONE_WIDTH (32) +#define NBL_USTAT_SEP_CLR_DONE_DWLEN (1) +union ustat_sep_clr_done_u { + struct ustat_sep_clr_done { + u32 vsi_tbl_done:1; /* [0] Default:0x0 RO */ + u32 ptype_tbl_done:1; /* [1] Default:0x0 RO */ + u32 err_code_tbl_done:1; /* [2] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_SEP_CLR_DONE_DWLEN]; +} __packed; + +#define NBL_USTAT_ERR_CODE_STAT_ETH0_ADDR (0x11c400) +#define NBL_USTAT_ERR_CODE_STAT_ETH0_DEPTH (16) +#define NBL_USTAT_ERR_CODE_STAT_ETH0_WIDTH (32) +#define NBL_USTAT_ERR_CODE_STAT_ETH0_DWLEN (1) +union ustat_err_code_stat_eth0_u { + struct ustat_err_code_stat_eth0 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_ERR_CODE_STAT_ETH0_DWLEN]; +} __packed; +#define NBL_USTAT_ERR_CODE_STAT_ETH0_REG(r) (NBL_USTAT_ERR_CODE_STAT_ETH0_ADDR + \ + (NBL_USTAT_ERR_CODE_STAT_ETH0_DWLEN * 4) * (r)) + +#define NBL_USTAT_ERR_CODE_STAT_ETH1_ADDR (0x11c500) +#define NBL_USTAT_ERR_CODE_STAT_ETH1_DEPTH (16) +#define NBL_USTAT_ERR_CODE_STAT_ETH1_WIDTH (32) +#define NBL_USTAT_ERR_CODE_STAT_ETH1_DWLEN (1) +union ustat_err_code_stat_eth1_u { + struct ustat_err_code_stat_eth1 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_ERR_CODE_STAT_ETH1_DWLEN]; +} __packed; +#define NBL_USTAT_ERR_CODE_STAT_ETH1_REG(r) (NBL_USTAT_ERR_CODE_STAT_ETH1_ADDR + \ + (NBL_USTAT_ERR_CODE_STAT_ETH1_DWLEN * 4) * (r)) + +#define NBL_USTAT_ERR_CODE_STAT_ETH2_ADDR (0x11c600) +#define NBL_USTAT_ERR_CODE_STAT_ETH2_DEPTH (16) +#define NBL_USTAT_ERR_CODE_STAT_ETH2_WIDTH (32) +#define NBL_USTAT_ERR_CODE_STAT_ETH2_DWLEN (1) +union ustat_err_code_stat_eth2_u { + struct ustat_err_code_stat_eth2 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_ERR_CODE_STAT_ETH2_DWLEN]; +} __packed; +#define NBL_USTAT_ERR_CODE_STAT_ETH2_REG(r) (NBL_USTAT_ERR_CODE_STAT_ETH2_ADDR + \ + (NBL_USTAT_ERR_CODE_STAT_ETH2_DWLEN * 4) * (r)) + +#define NBL_USTAT_ERR_CODE_STAT_ETH3_ADDR (0x11c700) +#define NBL_USTAT_ERR_CODE_STAT_ETH3_DEPTH (16) +#define NBL_USTAT_ERR_CODE_STAT_ETH3_WIDTH (32) +#define NBL_USTAT_ERR_CODE_STAT_ETH3_DWLEN (1) +union ustat_err_code_stat_eth3_u { + struct ustat_err_code_stat_eth3 { + u32 
fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_ERR_CODE_STAT_ETH3_DWLEN]; +} __packed; +#define NBL_USTAT_ERR_CODE_STAT_ETH3_REG(r) (NBL_USTAT_ERR_CODE_STAT_ETH3_ADDR + \ + (NBL_USTAT_ERR_CODE_STAT_ETH3_DWLEN * 4) * (r)) + +#define NBL_USTAT_ERR_CODE_STAT_LOOPBACK_ADDR (0x11c800) +#define NBL_USTAT_ERR_CODE_STAT_LOOPBACK_DEPTH (16) +#define NBL_USTAT_ERR_CODE_STAT_LOOPBACK_WIDTH (32) +#define NBL_USTAT_ERR_CODE_STAT_LOOPBACK_DWLEN (1) +union ustat_err_code_stat_loopback_u { + struct ustat_err_code_stat_loopback { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_ERR_CODE_STAT_LOOPBACK_DWLEN]; +} __packed; +#define NBL_USTAT_ERR_CODE_STAT_LOOPBACK_REG(r) (NBL_USTAT_ERR_CODE_STAT_LOOPBACK_ADDR + \ + (NBL_USTAT_ERR_CODE_STAT_LOOPBACK_DWLEN * 4) * (r)) + +#define NBL_USTAT_PTYPE_STAT_ETH0_ADDR (0x11d000) +#define NBL_USTAT_PTYPE_STAT_ETH0_DEPTH (256) +#define NBL_USTAT_PTYPE_STAT_ETH0_WIDTH (32) +#define NBL_USTAT_PTYPE_STAT_ETH0_DWLEN (1) +union ustat_ptype_stat_eth0_u { + struct ustat_ptype_stat_eth0 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_PTYPE_STAT_ETH0_DWLEN]; +} __packed; +#define NBL_USTAT_PTYPE_STAT_ETH0_REG(r) (NBL_USTAT_PTYPE_STAT_ETH0_ADDR + \ + (NBL_USTAT_PTYPE_STAT_ETH0_DWLEN * 4) * (r)) + +#define NBL_USTAT_PTYPE_STAT_ETH1_ADDR (0x11d400) +#define NBL_USTAT_PTYPE_STAT_ETH1_DEPTH (256) +#define NBL_USTAT_PTYPE_STAT_ETH1_WIDTH (32) +#define NBL_USTAT_PTYPE_STAT_ETH1_DWLEN (1) +union ustat_ptype_stat_eth1_u { + struct ustat_ptype_stat_eth1 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_PTYPE_STAT_ETH1_DWLEN]; +} __packed; +#define NBL_USTAT_PTYPE_STAT_ETH1_REG(r) (NBL_USTAT_PTYPE_STAT_ETH1_ADDR + \ + (NBL_USTAT_PTYPE_STAT_ETH1_DWLEN * 4) * (r)) + +#define NBL_USTAT_PTYPE_STAT_ETH2_ADDR (0x11d800) +#define NBL_USTAT_PTYPE_STAT_ETH2_DEPTH (256) +#define NBL_USTAT_PTYPE_STAT_ETH2_WIDTH (32) +#define NBL_USTAT_PTYPE_STAT_ETH2_DWLEN (1) +union ustat_ptype_stat_eth2_u { + struct ustat_ptype_stat_eth2 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_PTYPE_STAT_ETH2_DWLEN]; +} __packed; +#define NBL_USTAT_PTYPE_STAT_ETH2_REG(r) (NBL_USTAT_PTYPE_STAT_ETH2_ADDR + \ + (NBL_USTAT_PTYPE_STAT_ETH2_DWLEN * 4) * (r)) + +#define NBL_USTAT_PTYPE_STAT_ETH3_ADDR (0x11dc00) +#define NBL_USTAT_PTYPE_STAT_ETH3_DEPTH (256) +#define NBL_USTAT_PTYPE_STAT_ETH3_WIDTH (32) +#define NBL_USTAT_PTYPE_STAT_ETH3_DWLEN (1) +union ustat_ptype_stat_eth3_u { + struct ustat_ptype_stat_eth3 { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_PTYPE_STAT_ETH3_DWLEN]; +} __packed; +#define NBL_USTAT_PTYPE_STAT_ETH3_REG(r) (NBL_USTAT_PTYPE_STAT_ETH3_ADDR + \ + (NBL_USTAT_PTYPE_STAT_ETH3_DWLEN * 4) * (r)) + +#define NBL_USTAT_PTYPE_STAT_LOOPBACK_ADDR (0x11e000) +#define NBL_USTAT_PTYPE_STAT_LOOPBACK_DEPTH (256) +#define NBL_USTAT_PTYPE_STAT_LOOPBACK_WIDTH (32) +#define NBL_USTAT_PTYPE_STAT_LOOPBACK_DWLEN (1) +union ustat_ptype_stat_loopback_u { + struct ustat_ptype_stat_loopback { + u32 fwd_pkt_cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_PTYPE_STAT_LOOPBACK_DWLEN]; +} __packed; +#define NBL_USTAT_PTYPE_STAT_LOOPBACK_REG(r) (NBL_USTAT_PTYPE_STAT_LOOPBACK_ADDR + \ + (NBL_USTAT_PTYPE_STAT_LOOPBACK_DWLEN * 4) * (r)) + +#define NBL_USTAT_VSI_STAT_ADDR (0x124000) +#define NBL_USTAT_VSI_STAT_DEPTH (1024) +#define NBL_USTAT_VSI_STAT_WIDTH (128) 
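+/*
+ * Each of the 1024 entries in this table is a 128-bit record: a 64-bit
+ * forwarded-byte counter followed by a 64-bit forwarded-packet counter,
+ * each split into low/high dwords (see union ustat_vsi_stat_u below).
+ * Illustrative read sequence, assuming a hypothetical rd32() MMIO helper
+ * and hw handle that are not part of this header:
+ *
+ *	union ustat_vsi_stat_u s;
+ *	u64 bytes, pkts;
+ *	int i;
+ *
+ *	for (i = 0; i < NBL_USTAT_VSI_STAT_DWLEN; i++)
+ *		s.data[i] = rd32(hw, NBL_USTAT_VSI_STAT_REG(vsi_id) + i * 4);
+ *	bytes = ((u64)s.info.fwd_byte_cnt_high << 32) | s.info.fwd_byte_cnt_low;
+ *	pkts = ((u64)s.info.fwd_pkt_cnt_high << 32) | s.info.fwd_pkt_cnt_low;
+ */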
+#define NBL_USTAT_VSI_STAT_DWLEN (4) +union ustat_vsi_stat_u { + struct ustat_vsi_stat { + u32 fwd_byte_cnt_low:32; /* [31:0] Default:0x0 RO */ + u32 fwd_byte_cnt_high:32; /* [63:32] Default:0x0 RO */ + u32 fwd_pkt_cnt_low:32; /* [95:64] Default:0x0 RO */ + u32 fwd_pkt_cnt_high:32; /* [127:96] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTAT_VSI_STAT_DWLEN]; +} __packed; +#define NBL_USTAT_VSI_STAT_REG(r) (NBL_USTAT_VSI_STAT_ADDR + \ + (NBL_USTAT_VSI_STAT_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ustore.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ustore.h new file mode 100644 index 0000000000000000000000000000000000000000..ecabddea8f17b9f47004c41c6af7f93c0793a15c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_ustore.h @@ -0,0 +1,950 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_USTORE_H +#define NBL_USTORE_H 1 + +#include <linux/types.h> + +#define NBL_USTORE_BASE (0x00104000) + +#define NBL_USTORE_INT_STATUS_ADDR (0x104000) +#define NBL_USTORE_INT_STATUS_DEPTH (1) +#define NBL_USTORE_INT_STATUS_WIDTH (32) +#define NBL_USTORE_INT_STATUS_DWLEN (1) +union ustore_int_status_u { + struct ustore_int_status { + u32 ucor_err:1; /* [0] Default:0x0 RWC */ + u32 cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 parity_err:1; /* [5] Default:0x0 RWC */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_USTORE_INT_MASK_ADDR (0x104004) +#define NBL_USTORE_INT_MASK_DEPTH (1) +#define NBL_USTORE_INT_MASK_WIDTH (32) +#define NBL_USTORE_INT_MASK_DWLEN (1) +union ustore_int_mask_u { + struct ustore_int_mask { + u32 ucor_err:1; /* [0] Default:0x0 RW */ + u32 cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 parity_err:1; /* [5] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_INT_MASK_DWLEN]; +} __packed; + +#define NBL_USTORE_INT_SET_ADDR (0x104008) +#define NBL_USTORE_INT_SET_DEPTH (1) +#define NBL_USTORE_INT_SET_WIDTH (32) +#define NBL_USTORE_INT_SET_DWLEN (1) +union ustore_int_set_u { + struct ustore_int_set { + u32 ucor_err:1; /* [0] Default:0x0 WO */ + u32 cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 parity_err:1; /* [5] Default:0x0 WO */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_INT_SET_DWLEN]; +} __packed; + +#define NBL_USTORE_COR_ERR_INFO_ADDR (0x10400c) +#define NBL_USTORE_COR_ERR_INFO_DEPTH (1) +#define NBL_USTORE_COR_ERR_INFO_WIDTH (32) +#define NBL_USTORE_COR_ERR_INFO_DWLEN (1) +union ustore_cor_err_info_u { + struct ustore_cor_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_USTORE_PARITY_ERR_INFO_ADDR (0x104014) +#define NBL_USTORE_PARITY_ERR_INFO_DEPTH (1) +#define
NBL_USTORE_PARITY_ERR_INFO_WIDTH (32) +#define NBL_USTORE_PARITY_ERR_INFO_DWLEN (1) +union ustore_parity_err_info_u { + struct ustore_parity_err_info { + u32 ram_addr:10; /* [9:0] Default:0x0 RO */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_USTORE_CIF_ERR_INFO_ADDR (0x10401c) +#define NBL_USTORE_CIF_ERR_INFO_DEPTH (1) +#define NBL_USTORE_CIF_ERR_INFO_WIDTH (32) +#define NBL_USTORE_CIF_ERR_INFO_DWLEN (1) +union ustore_cif_err_info_u { + struct ustore_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_USTORE_CAR_CTRL_ADDR (0x104100) +#define NBL_USTORE_CAR_CTRL_DEPTH (1) +#define NBL_USTORE_CAR_CTRL_WIDTH (32) +#define NBL_USTORE_CAR_CTRL_DWLEN (1) +union ustore_car_ctrl_u { + struct ustore_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_USTORE_INIT_START_ADDR (0x104104) +#define NBL_USTORE_INIT_START_DEPTH (1) +#define NBL_USTORE_INIT_START_WIDTH (32) +#define NBL_USTORE_INIT_START_DWLEN (1) +union ustore_init_start_u { + struct ustore_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_INIT_START_DWLEN]; +} __packed; + +#define NBL_USTORE_PKT_LEN_ADDR (0x104108) +#define NBL_USTORE_PKT_LEN_DEPTH (1) +#define NBL_USTORE_PKT_LEN_WIDTH (32) +#define NBL_USTORE_PKT_LEN_DWLEN (1) +union ustore_pkt_len_u { + struct ustore_pkt_len { + u32 min:7; /* [6:0] Default:60 RW */ + u32 rsv1:8; /* [14:7] Default:0x0 RO */ + u32 min_chk_en:1; /* [15] Default:0x1 RW */ + u32 max:14; /* [29:16] Default:9600 RW */ + u32 rsv:1; /* [30] Default:0x0 RO */ + u32 max_chk_en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_USTORE_PKT_LEN_DWLEN]; +} __packed; + +#define NBL_USTORE_SCH_PD_BUFFER_TH_ADDR (0x104128) +#define NBL_USTORE_SCH_PD_BUFFER_TH_DEPTH (1) +#define NBL_USTORE_SCH_PD_BUFFER_TH_WIDTH (32) +#define NBL_USTORE_SCH_PD_BUFFER_TH_DWLEN (1) +union ustore_sch_pd_buffer_th_u { + struct ustore_sch_pd_buffer_th { + u32 aful_th:10; /* [9:0] Default:1000 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_SCH_PD_BUFFER_TH_DWLEN]; +} __packed; + +#define NBL_USTORE_GLB_FC_TH_ADDR (0x10412c) +#define NBL_USTORE_GLB_FC_TH_DEPTH (1) +#define NBL_USTORE_GLB_FC_TH_WIDTH (32) +#define NBL_USTORE_GLB_FC_TH_DWLEN (1) +union ustore_glb_fc_th_u { + struct ustore_glb_fc_th { + u32 xoff_th:12; /* [11:0] Default:3000 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 xon_th:12; /* [27:16] Default:3000 RW */ + u32 rsv:3; /* [30:28] Default:0x0 RO */ + u32 fc_en:1; /* [31:31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_USTORE_GLB_FC_TH_DWLEN]; +} __packed; + +#define NBL_USTORE_GLB_DROP_TH_ADDR (0x104130) +#define NBL_USTORE_GLB_DROP_TH_DEPTH (1) +#define NBL_USTORE_GLB_DROP_TH_WIDTH (32) +#define NBL_USTORE_GLB_DROP_TH_DWLEN (1) +union ustore_glb_drop_th_u { + struct ustore_glb_drop_th { + u32 disc_th:12; /* [11:0] Default:3950 RW */ + u32 rsv:19; /* [30:12] 
Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_USTORE_GLB_DROP_TH_DWLEN]; +} __packed; + +#define NBL_USTORE_PORT_FC_TH_ADDR (0x104134) +#define NBL_USTORE_PORT_FC_TH_DEPTH (5) +#define NBL_USTORE_PORT_FC_TH_WIDTH (32) +#define NBL_USTORE_PORT_FC_TH_DWLEN (1) +union ustore_port_fc_th_u { + struct ustore_port_fc_th { + u32 xoff_th:12; /* [11:0] Default:400 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 xon_th:12; /* [27:16] Default:400 RW */ + u32 rsv:2; /* [29:28] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_USTORE_PORT_FC_TH_DWLEN]; +} __packed; +#define NBL_USTORE_PORT_FC_TH_REG(r) (NBL_USTORE_PORT_FC_TH_ADDR + \ + (NBL_USTORE_PORT_FC_TH_DWLEN * 4) * (r)) + +#define NBL_USTORE_PORT_DROP_TH_ADDR (0x104150) +#define NBL_USTORE_PORT_DROP_TH_DEPTH (5) +#define NBL_USTORE_PORT_DROP_TH_WIDTH (32) +#define NBL_USTORE_PORT_DROP_TH_DWLEN (1) +union ustore_port_drop_th_u { + struct ustore_port_drop_th { + u32 disc_th:12; /* [11:0] Default:800 RW */ + u32 rsv:19; /* [30:12] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_USTORE_PORT_DROP_TH_DWLEN]; +} __packed; +#define NBL_USTORE_PORT_DROP_TH_REG(r) (NBL_USTORE_PORT_DROP_TH_ADDR + \ + (NBL_USTORE_PORT_DROP_TH_DWLEN * 4) * (r)) + +#define NBL_USTORE_CFG_TEST_ADDR (0x104170) +#define NBL_USTORE_CFG_TEST_DEPTH (1) +#define NBL_USTORE_CFG_TEST_WIDTH (32) +#define NBL_USTORE_CFG_TEST_DWLEN (1) +union ustore_cfg_test_u { + struct ustore_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_USTORE_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_USTORE_HIGH_PRI_PKT_ADDR (0x10417c) +#define NBL_USTORE_HIGH_PRI_PKT_DEPTH (1) +#define NBL_USTORE_HIGH_PRI_PKT_WIDTH (32) +#define NBL_USTORE_HIGH_PRI_PKT_DWLEN (1) +union ustore_high_pri_pkt_u { + struct ustore_high_pri_pkt { + u32 en:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_HIGH_PRI_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_COS_FC_TH_ADDR (0x104200) +#define NBL_USTORE_COS_FC_TH_DEPTH (40) +#define NBL_USTORE_COS_FC_TH_WIDTH (32) +#define NBL_USTORE_COS_FC_TH_DWLEN (1) +union ustore_cos_fc_th_u { + struct ustore_cos_fc_th { + u32 xoff_th:12; /* [11:0] Default:100 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 xon_th:12; /* [27:16] Default:100 RW */ + u32 rsv:2; /* [29:28] Default:0x0 RO */ + u32 fc_set:1; /* [30:30] Default:0x0 RW */ + u32 fc_en:1; /* [31:31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_USTORE_COS_FC_TH_DWLEN]; +} __packed; +#define NBL_USTORE_COS_FC_TH_REG(r) (NBL_USTORE_COS_FC_TH_ADDR + \ + (NBL_USTORE_COS_FC_TH_DWLEN * 4) * (r)) + +#define NBL_USTORE_COS_DROP_TH_ADDR (0x104300) +#define NBL_USTORE_COS_DROP_TH_DEPTH (40) +#define NBL_USTORE_COS_DROP_TH_WIDTH (32) +#define NBL_USTORE_COS_DROP_TH_DWLEN (1) +union ustore_cos_drop_th_u { + struct ustore_cos_drop_th { + u32 disc_th:12; /* [11:0] Default:120 RW */ + u32 rsv:19; /* [30:12] Default:0x0 RO */ + u32 en:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_USTORE_COS_DROP_TH_DWLEN]; +} __packed; +#define NBL_USTORE_COS_DROP_TH_REG(r) (NBL_USTORE_COS_DROP_TH_ADDR + \ + (NBL_USTORE_COS_DROP_TH_DWLEN * 4) * (r)) + +#define NBL_USTORE_SCH_PD_WRR_WGT_ADDR (0x104400) +#define NBL_USTORE_SCH_PD_WRR_WGT_DEPTH (30) +#define NBL_USTORE_SCH_PD_WRR_WGT_WIDTH (32) +#define NBL_USTORE_SCH_PD_WRR_WGT_DWLEN (1) +union 
ustore_sch_pd_wrr_wgt_u { + struct ustore_sch_pd_wrr_wgt { + u32 wgt_cos:4; /* [3:0] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_SCH_PD_WRR_WGT_DWLEN]; +} __packed; +#define NBL_USTORE_SCH_PD_WRR_WGT_REG(r) (NBL_USTORE_SCH_PD_WRR_WGT_ADDR + \ + (NBL_USTORE_SCH_PD_WRR_WGT_DWLEN * 4) * (r)) + +#define NBL_USTORE_LINK_FC_MERGE_ADDR (0x104504) +#define NBL_USTORE_LINK_FC_MERGE_DEPTH (1) +#define NBL_USTORE_LINK_FC_MERGE_WIDTH (32) +#define NBL_USTORE_LINK_FC_MERGE_DWLEN (1) +union ustore_link_fc_merge_u { + struct ustore_link_fc_merge { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_LINK_FC_MERGE_DWLEN]; +} __packed; + +#define NBL_USTORE_PFC_MERGE_ADDR (0x104508) +#define NBL_USTORE_PFC_MERGE_DEPTH (1) +#define NBL_USTORE_PFC_MERGE_WIDTH (32) +#define NBL_USTORE_PFC_MERGE_DWLEN (1) +union ustore_pfc_merge_u { + struct ustore_pfc_merge { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_PFC_MERGE_DWLEN]; +} __packed; + +#define NBL_USTORE_COS7_FORCE_ADDR (0x10450c) +#define NBL_USTORE_COS7_FORCE_DEPTH (1) +#define NBL_USTORE_COS7_FORCE_WIDTH (32) +#define NBL_USTORE_COS7_FORCE_DWLEN (1) +union ustore_cos7_force_u { + struct ustore_cos7_force { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_COS7_FORCE_DWLEN]; +} __packed; + +#define NBL_USTORE_PFC_BITMAP_HPRI_ADDR (0x104600) +#define NBL_USTORE_PFC_BITMAP_HPRI_DEPTH (5) +#define NBL_USTORE_PFC_BITMAP_HPRI_WIDTH (32) +#define NBL_USTORE_PFC_BITMAP_HPRI_DWLEN (1) +union ustore_pfc_bitmap_hpri_u { + struct ustore_pfc_bitmap_hpri { + u32 cos4:8; /* [7:0] Default:16 RW */ + u32 cos5:8; /* [15:8] Default:32 RW */ + u32 cos6:8; /* [23:16] Default:64 RW */ + u32 cos7:8; /* [31:24] Default:128 RW */ + } __packed info; + u32 data[NBL_USTORE_PFC_BITMAP_HPRI_DWLEN]; +} __packed; +#define NBL_USTORE_PFC_BITMAP_HPRI_REG(r) (NBL_USTORE_PFC_BITMAP_HPRI_ADDR + \ + (NBL_USTORE_PFC_BITMAP_HPRI_DWLEN * 4) * (r)) + +#define NBL_USTORE_PFC_BITMAP_LPRI_ADDR (0x104700) +#define NBL_USTORE_PFC_BITMAP_LPRI_DEPTH (5) +#define NBL_USTORE_PFC_BITMAP_LPRI_WIDTH (32) +#define NBL_USTORE_PFC_BITMAP_LPRI_DWLEN (1) +union ustore_pfc_bitmap_lpri_u { + struct ustore_pfc_bitmap_lpri { + u32 cos0:8; /* [7:0] Default:1 RW */ + u32 cos1:8; /* [15:8] Default:2 RW */ + u32 cos2:8; /* [23:16] Default:4 RW */ + u32 cos3:8; /* [31:24] Default:8 RW */ + } __packed info; + u32 data[NBL_USTORE_PFC_BITMAP_LPRI_DWLEN]; +} __packed; +#define NBL_USTORE_PFC_BITMAP_LPRI_REG(r) (NBL_USTORE_PFC_BITMAP_LPRI_ADDR + \ + (NBL_USTORE_PFC_BITMAP_LPRI_DWLEN * 4) * (r)) + +#define NBL_USTORE_INIT_DONE_ADDR (0x104800) +#define NBL_USTORE_INIT_DONE_DEPTH (1) +#define NBL_USTORE_INIT_DONE_WIDTH (32) +#define NBL_USTORE_INIT_DONE_DWLEN (1) +union ustore_init_done_u { + struct ustore_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_USTORE_SCH_IDLE_LIST_STATUS_CURR_ADDR (0x10481c) +#define NBL_USTORE_SCH_IDLE_LIST_STATUS_CURR_DEPTH (1) +#define NBL_USTORE_SCH_IDLE_LIST_STATUS_CURR_WIDTH (32) +#define NBL_USTORE_SCH_IDLE_LIST_STATUS_CURR_DWLEN (1) +union ustore_sch_idle_list_status_curr_u { + struct ustore_sch_idle_list_status_curr { + u32 empt:1; /* [0] Default:0x0 RO */ + u32 full:1; /* [1] Default:0x1 RO */ + u32 
cnt:11; /* [12:2] Default:0x400 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_SCH_IDLE_LIST_STATUS_CURR_DWLEN]; +} __packed; + +#define NBL_USTORE_SCH_QUE_LIST_STATUS_ADDR (0x104820) +#define NBL_USTORE_SCH_QUE_LIST_STATUS_DEPTH (40) +#define NBL_USTORE_SCH_QUE_LIST_STATUS_WIDTH (32) +#define NBL_USTORE_SCH_QUE_LIST_STATUS_DWLEN (1) +union ustore_sch_que_list_status_u { + struct ustore_sch_que_list_status { + u32 curr_empt:1; /* [0] Default:0x1 RO */ + u32 curr_cnt:11; /* [11:1] Default:0x0 RO */ + u32 history_udf:1; /* [12] Default:0x0 RC */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_SCH_QUE_LIST_STATUS_DWLEN]; +} __packed; +#define NBL_USTORE_SCH_QUE_LIST_STATUS_REG(r) (NBL_USTORE_SCH_QUE_LIST_STATUS_ADDR + \ + (NBL_USTORE_SCH_QUE_LIST_STATUS_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_TOTAL_PKT_ADDR (0x105050) +#define NBL_USTORE_RCV_TOTAL_PKT_DEPTH (1) +#define NBL_USTORE_RCV_TOTAL_PKT_WIDTH (32) +#define NBL_USTORE_RCV_TOTAL_PKT_DWLEN (1) +union ustore_rcv_total_pkt_u { + struct ustore_rcv_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_TOTAL_BYTE_ADDR (0x105054) +#define NBL_USTORE_RCV_TOTAL_BYTE_DEPTH (1) +#define NBL_USTORE_RCV_TOTAL_BYTE_WIDTH (48) +#define NBL_USTORE_RCV_TOTAL_BYTE_DWLEN (2) +union ustore_rcv_total_byte_u { + struct ustore_rcv_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_TOTAL_RIGHT_PKT_ADDR (0x10505c) +#define NBL_USTORE_RCV_TOTAL_RIGHT_PKT_DEPTH (1) +#define NBL_USTORE_RCV_TOTAL_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_TOTAL_RIGHT_PKT_DWLEN (1) +union ustore_rcv_total_right_pkt_u { + struct ustore_rcv_total_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_TOTAL_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_TOTAL_WRONG_PKT_ADDR (0x105060) +#define NBL_USTORE_RCV_TOTAL_WRONG_PKT_DEPTH (1) +#define NBL_USTORE_RCV_TOTAL_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_TOTAL_WRONG_PKT_DWLEN (1) +union ustore_rcv_total_wrong_pkt_u { + struct ustore_rcv_total_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_TOTAL_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_FWD_RIGHT_PKT_ADDR (0x105064) +#define NBL_USTORE_RCV_FWD_RIGHT_PKT_DEPTH (1) +#define NBL_USTORE_RCV_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_FWD_RIGHT_PKT_DWLEN (1) +union ustore_rcv_fwd_right_pkt_u { + struct ustore_rcv_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_FWD_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_FWD_WRONG_PKT_ADDR (0x105068) +#define NBL_USTORE_RCV_FWD_WRONG_PKT_DEPTH (1) +#define NBL_USTORE_RCV_FWD_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_FWD_WRONG_PKT_DWLEN (1) +union ustore_rcv_fwd_wrong_pkt_u { + struct ustore_rcv_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_FWD_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_HERR_RIGHT_PKT_ADDR (0x10506c) +#define NBL_USTORE_RCV_HERR_RIGHT_PKT_DEPTH (1) +#define NBL_USTORE_RCV_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_HERR_RIGHT_PKT_DWLEN (1) +union ustore_rcv_herr_right_pkt_u { + struct ustore_rcv_herr_right_pkt { + u32 
cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_HERR_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_HERR_WRONG_PKT_ADDR (0x105070) +#define NBL_USTORE_RCV_HERR_WRONG_PKT_DEPTH (1) +#define NBL_USTORE_RCV_HERR_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_HERR_WRONG_PKT_DWLEN (1) +union ustore_rcv_herr_wrong_pkt_u { + struct ustore_rcv_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_HERR_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_IPRO_TOTAL_PKT_ADDR (0x105074) +#define NBL_USTORE_IPRO_TOTAL_PKT_DEPTH (1) +#define NBL_USTORE_IPRO_TOTAL_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_TOTAL_PKT_DWLEN (1) +union ustore_ipro_total_pkt_u { + struct ustore_ipro_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_IPRO_TOTAL_BYTE_ADDR (0x105078) +#define NBL_USTORE_IPRO_TOTAL_BYTE_DEPTH (1) +#define NBL_USTORE_IPRO_TOTAL_BYTE_WIDTH (48) +#define NBL_USTORE_IPRO_TOTAL_BYTE_DWLEN (2) +union ustore_ipro_total_byte_u { + struct ustore_ipro_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_USTORE_IPRO_FWD_RIGHT_PKT_ADDR (0x105080) +#define NBL_USTORE_IPRO_FWD_RIGHT_PKT_DEPTH (1) +#define NBL_USTORE_IPRO_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_FWD_RIGHT_PKT_DWLEN (1) +union ustore_ipro_fwd_right_pkt_u { + struct ustore_ipro_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_FWD_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_IPRO_FWD_WRONG_PKT_ADDR (0x105084) +#define NBL_USTORE_IPRO_FWD_WRONG_PKT_DEPTH (1) +#define NBL_USTORE_IPRO_FWD_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_FWD_WRONG_PKT_DWLEN (1) +union ustore_ipro_fwd_wrong_pkt_u { + struct ustore_ipro_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_FWD_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_IPRO_HERR_RIGHT_PKT_ADDR (0x105088) +#define NBL_USTORE_IPRO_HERR_RIGHT_PKT_DEPTH (1) +#define NBL_USTORE_IPRO_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_HERR_RIGHT_PKT_DWLEN (1) +union ustore_ipro_herr_right_pkt_u { + struct ustore_ipro_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_HERR_RIGHT_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_IPRO_HERR_WRONG_PKT_ADDR (0x10508c) +#define NBL_USTORE_IPRO_HERR_WRONG_PKT_DEPTH (1) +#define NBL_USTORE_IPRO_HERR_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_HERR_WRONG_PKT_DWLEN (1) +union ustore_ipro_herr_wrong_pkt_u { + struct ustore_ipro_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_HERR_WRONG_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_PMEM_TOTAL_PKT_ADDR (0x105090) +#define NBL_USTORE_PMEM_TOTAL_PKT_DEPTH (1) +#define NBL_USTORE_PMEM_TOTAL_PKT_WIDTH (32) +#define NBL_USTORE_PMEM_TOTAL_PKT_DWLEN (1) +union ustore_pmem_total_pkt_u { + struct ustore_pmem_total_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_PMEM_TOTAL_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_PMEM_TOTAL_BYTE_ADDR (0x105094) +#define NBL_USTORE_PMEM_TOTAL_BYTE_DEPTH (1) +#define NBL_USTORE_PMEM_TOTAL_BYTE_WIDTH (48) +#define NBL_USTORE_PMEM_TOTAL_BYTE_DWLEN (2) +union 
ustore_pmem_total_byte_u { + struct ustore_pmem_total_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_PMEM_TOTAL_BYTE_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_TOTAL_ERR_DROP_PKT_ADDR (0x10509c) +#define NBL_USTORE_RCV_TOTAL_ERR_DROP_PKT_DEPTH (1) +#define NBL_USTORE_RCV_TOTAL_ERR_DROP_PKT_WIDTH (32) +#define NBL_USTORE_RCV_TOTAL_ERR_DROP_PKT_DWLEN (1) +union ustore_rcv_total_err_drop_pkt_u { + struct ustore_rcv_total_err_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_TOTAL_ERR_DROP_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_TOTAL_SHORT_PKT_ADDR (0x1050a0) +#define NBL_USTORE_RCV_TOTAL_SHORT_PKT_DEPTH (1) +#define NBL_USTORE_RCV_TOTAL_SHORT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_TOTAL_SHORT_PKT_DWLEN (1) +union ustore_rcv_total_short_pkt_u { + struct ustore_rcv_total_short_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_TOTAL_SHORT_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_TOTAL_LONG_PKT_ADDR (0x1050a4) +#define NBL_USTORE_RCV_TOTAL_LONG_PKT_DEPTH (1) +#define NBL_USTORE_RCV_TOTAL_LONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_TOTAL_LONG_PKT_DWLEN (1) +union ustore_rcv_total_long_pkt_u { + struct ustore_rcv_total_long_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_TOTAL_LONG_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_BUF_TOTAL_DROP_PKT_ADDR (0x1050a8) +#define NBL_USTORE_BUF_TOTAL_DROP_PKT_DEPTH (1) +#define NBL_USTORE_BUF_TOTAL_DROP_PKT_WIDTH (32) +#define NBL_USTORE_BUF_TOTAL_DROP_PKT_DWLEN (1) +union ustore_buf_total_drop_pkt_u { + struct ustore_buf_total_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_BUF_TOTAL_DROP_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_BUF_TOTAL_TRUN_PKT_ADDR (0x1050ac) +#define NBL_USTORE_BUF_TOTAL_TRUN_PKT_DEPTH (1) +#define NBL_USTORE_BUF_TOTAL_TRUN_PKT_WIDTH (32) +#define NBL_USTORE_BUF_TOTAL_TRUN_PKT_DWLEN (1) +union ustore_buf_total_trun_pkt_u { + struct ustore_buf_total_trun_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_BUF_TOTAL_TRUN_PKT_DWLEN]; +} __packed; + +#define NBL_USTORE_RCV_PORT_PKT_ADDR (0x106000) +#define NBL_USTORE_RCV_PORT_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_PKT_DWLEN (1) +union ustore_rcv_port_pkt_u { + struct ustore_rcv_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_PKT_REG(r) (NBL_USTORE_RCV_PORT_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_BYTE_ADDR (0x106040) +#define NBL_USTORE_RCV_PORT_BYTE_DEPTH (10) +#define NBL_USTORE_RCV_PORT_BYTE_WIDTH (48) +#define NBL_USTORE_RCV_PORT_BYTE_DWLEN (2) +union ustore_rcv_port_byte_u { + struct ustore_rcv_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_BYTE_REG(r) (NBL_USTORE_RCV_PORT_BYTE_ADDR + \ + (NBL_USTORE_RCV_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_ADDR (0x1060c0) +#define NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN (1) +union 
ustore_rcv_port_total_right_pkt_u { + struct ustore_rcv_port_total_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_REG(r) (NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_TOTAL_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_ADDR (0x106100) +#define NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN (1) +union ustore_rcv_port_total_wrong_pkt_u { + struct ustore_rcv_port_total_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_REG(r) (NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_TOTAL_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_ADDR (0x106140) +#define NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN (1) +union ustore_rcv_port_fwd_right_pkt_u { + struct ustore_rcv_port_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_REG(r) (NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_FWD_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_ADDR (0x106180) +#define NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN (1) +union ustore_rcv_port_fwd_wrong_pkt_u { + struct ustore_rcv_port_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_REG(r) (NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_FWD_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_ADDR (0x1061c0) +#define NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN (1) +union ustore_rcv_port_herr_right_pkt_u { + struct ustore_rcv_port_herr_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_REG(r) (NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_HERR_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_ADDR (0x106200) +#define NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN (1) +union ustore_rcv_port_herr_wrong_pkt_u { + struct ustore_rcv_port_herr_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_REG(r) (NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_HERR_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_IPRO_PORT_PKT_ADDR (0x106240) +#define NBL_USTORE_IPRO_PORT_PKT_DEPTH (10) +#define NBL_USTORE_IPRO_PORT_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_PORT_PKT_DWLEN (1) +union ustore_ipro_port_pkt_u { + struct 
ustore_ipro_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_PORT_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_IPRO_PORT_PKT_REG(r) (NBL_USTORE_IPRO_PORT_PKT_ADDR + \ + (NBL_USTORE_IPRO_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_IPRO_PORT_BYTE_ADDR (0x106280) +#define NBL_USTORE_IPRO_PORT_BYTE_DEPTH (10) +#define NBL_USTORE_IPRO_PORT_BYTE_WIDTH (48) +#define NBL_USTORE_IPRO_PORT_BYTE_DWLEN (2) +union ustore_ipro_port_byte_u { + struct ustore_ipro_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_USTORE_IPRO_PORT_BYTE_REG(r) (NBL_USTORE_IPRO_PORT_BYTE_ADDR + \ + (NBL_USTORE_IPRO_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_ADDR (0x106300) +#define NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_DEPTH (10) +#define NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN (1) +union ustore_ipro_port_fwd_right_pkt_u { + struct ustore_ipro_port_fwd_right_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_REG(r) (NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_ADDR + \ + (NBL_USTORE_IPRO_PORT_FWD_RIGHT_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_ADDR (0x106340) +#define NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_DEPTH (10) +#define NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_WIDTH (32) +#define NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN (1) +union ustore_ipro_port_fwd_wrong_pkt_u { + struct ustore_ipro_port_fwd_wrong_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_REG(r) (NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_ADDR + \ + (NBL_USTORE_IPRO_PORT_FWD_WRONG_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_PMEM_PORT_PKT_ADDR (0x106380) +#define NBL_USTORE_PMEM_PORT_PKT_DEPTH (10) +#define NBL_USTORE_PMEM_PORT_PKT_WIDTH (32) +#define NBL_USTORE_PMEM_PORT_PKT_DWLEN (1) +union ustore_pmem_port_pkt_u { + struct ustore_pmem_port_pkt { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_PMEM_PORT_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_PMEM_PORT_PKT_REG(r) (NBL_USTORE_PMEM_PORT_PKT_ADDR + \ + (NBL_USTORE_PMEM_PORT_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_PMEM_PORT_BYTE_ADDR (0x1063c0) +#define NBL_USTORE_PMEM_PORT_BYTE_DEPTH (10) +#define NBL_USTORE_PMEM_PORT_BYTE_WIDTH (48) +#define NBL_USTORE_PMEM_PORT_BYTE_DWLEN (2) +union ustore_pmem_port_byte_u { + struct ustore_pmem_port_byte { + u32 cnt_l:32; /* [47:0] Default:0x0 RCTR */ + u32 cnt_h:16; /* [47:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_USTORE_PMEM_PORT_BYTE_DWLEN]; +} __packed; +#define NBL_USTORE_PMEM_PORT_BYTE_REG(r) (NBL_USTORE_PMEM_PORT_BYTE_ADDR + \ + (NBL_USTORE_PMEM_PORT_BYTE_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_ERR_PORT_DROP_PKT_ADDR (0x106440) +#define NBL_USTORE_RCV_ERR_PORT_DROP_PKT_DEPTH (10) +#define NBL_USTORE_RCV_ERR_PORT_DROP_PKT_WIDTH (32) +#define NBL_USTORE_RCV_ERR_PORT_DROP_PKT_DWLEN (1) +union ustore_rcv_err_port_drop_pkt_u { + struct ustore_rcv_err_port_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_ERR_PORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_ERR_PORT_DROP_PKT_REG(r) (NBL_USTORE_RCV_ERR_PORT_DROP_PKT_ADDR 
+ \ + (NBL_USTORE_RCV_ERR_PORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_ADDR (0x106480) +#define NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN (1) +union ustore_rcv_port_short_drop_pkt_u { + struct ustore_rcv_port_short_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_REG(r) (NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_SHORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_RCV_PORT_LONG_PKT_ADDR (0x1064c0) +#define NBL_USTORE_RCV_PORT_LONG_PKT_DEPTH (10) +#define NBL_USTORE_RCV_PORT_LONG_PKT_WIDTH (32) +#define NBL_USTORE_RCV_PORT_LONG_PKT_DWLEN (1) +union ustore_rcv_port_long_pkt_u { + struct ustore_rcv_port_long_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_RCV_PORT_LONG_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_RCV_PORT_LONG_PKT_REG(r) (NBL_USTORE_RCV_PORT_LONG_PKT_ADDR + \ + (NBL_USTORE_RCV_PORT_LONG_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_BUF_PORT_DROP_PKT_ADDR (0x106500) +#define NBL_USTORE_BUF_PORT_DROP_PKT_DEPTH (10) +#define NBL_USTORE_BUF_PORT_DROP_PKT_WIDTH (32) +#define NBL_USTORE_BUF_PORT_DROP_PKT_DWLEN (1) +union ustore_buf_port_drop_pkt_u { + struct ustore_buf_port_drop_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_BUF_PORT_DROP_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_BUF_PORT_DROP_PKT_REG(r) (NBL_USTORE_BUF_PORT_DROP_PKT_ADDR + \ + (NBL_USTORE_BUF_PORT_DROP_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_BUF_PORT_TRUN_PKT_ADDR (0x106540) +#define NBL_USTORE_BUF_PORT_TRUN_PKT_DEPTH (10) +#define NBL_USTORE_BUF_PORT_TRUN_PKT_WIDTH (32) +#define NBL_USTORE_BUF_PORT_TRUN_PKT_DWLEN (1) +union ustore_buf_port_trun_pkt_u { + struct ustore_buf_port_trun_pkt { + u32 cnt:32; /* [31:0] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_USTORE_BUF_PORT_TRUN_PKT_DWLEN]; +} __packed; +#define NBL_USTORE_BUF_PORT_TRUN_PKT_REG(r) (NBL_USTORE_BUF_PORT_TRUN_PKT_ADDR + \ + (NBL_USTORE_BUF_PORT_TRUN_PKT_DWLEN * 4) * (r)) + +#define NBL_USTORE_BP_CUR_1ST_ADDR (0x106580) +#define NBL_USTORE_BP_CUR_1ST_DEPTH (1) +#define NBL_USTORE_BP_CUR_1ST_WIDTH (32) +#define NBL_USTORE_BP_CUR_1ST_DWLEN (1) +union ustore_bp_cur_1st_u { + struct ustore_bp_cur_1st { + u32 link_fc:5; /* [4:0] Default:0x0 RO */ + u32 rsv:3; /* [7:5] Default:0x0 RO */ + u32 pfc:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_BP_CUR_1ST_DWLEN]; +} __packed; + +#define NBL_USTORE_BP_CUR_2ND_ADDR (0x106584) +#define NBL_USTORE_BP_CUR_2ND_DEPTH (1) +#define NBL_USTORE_BP_CUR_2ND_WIDTH (32) +#define NBL_USTORE_BP_CUR_2ND_DWLEN (1) +union ustore_bp_cur_2nd_u { + struct ustore_bp_cur_2nd { + u32 pfc:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_BP_CUR_2ND_DWLEN]; +} __packed; + +#define NBL_USTORE_BP_HISTORY_LINK_ADDR (0x106590) +#define NBL_USTORE_BP_HISTORY_LINK_DEPTH (5) +#define NBL_USTORE_BP_HISTORY_LINK_WIDTH (32) +#define NBL_USTORE_BP_HISTORY_LINK_DWLEN (1) +union ustore_bp_history_link_u { + struct ustore_bp_history_link { + u32 fc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_BP_HISTORY_LINK_DWLEN]; +} __packed; +#define NBL_USTORE_BP_HISTORY_LINK_REG(r) 
(NBL_USTORE_BP_HISTORY_LINK_ADDR + \ + (NBL_USTORE_BP_HISTORY_LINK_DWLEN * 4) * (r)) + +#define NBL_USTORE_BP_HISTORY_ADDR (0x1065b0) +#define NBL_USTORE_BP_HISTORY_DEPTH (40) +#define NBL_USTORE_BP_HISTORY_WIDTH (32) +#define NBL_USTORE_BP_HISTORY_DWLEN (1) +union ustore_bp_history_u { + struct ustore_bp_history { + u32 pfc:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_BP_HISTORY_DWLEN]; +} __packed; +#define NBL_USTORE_BP_HISTORY_REG(r) (NBL_USTORE_BP_HISTORY_ADDR + \ + (NBL_USTORE_BP_HISTORY_DWLEN * 4) * (r)) + +#define NBL_USTORE_WRR_CUR_ADDR (0x106800) +#define NBL_USTORE_WRR_CUR_DEPTH (30) +#define NBL_USTORE_WRR_CUR_WIDTH (32) +#define NBL_USTORE_WRR_CUR_DWLEN (1) +union ustore_wrr_cur_u { + struct ustore_wrr_cur { + u32 wgt_cos:5; /* [4:0] Default:0x0 RO */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_USTORE_WRR_CUR_DWLEN]; +} __packed; +#define NBL_USTORE_WRR_CUR_REG(r) (NBL_USTORE_WRR_CUR_ADDR + \ + (NBL_USTORE_WRR_CUR_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uvn.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uvn.h new file mode 100644 index 0000000000000000000000000000000000000000..6d590b63aecbcc59d67ef30c7203a478fbafa6a2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_datapath_uvn.h @@ -0,0 +1,1613 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_UVN_H +#define NBL_UVN_H 1 + +#include <linux/types.h> + +#define NBL_UVN_BASE (0x00244000) + +#define NBL_UVN_INT_STATUS_ADDR (0x244000) +#define NBL_UVN_INT_STATUS_DEPTH (1) +#define NBL_UVN_INT_STATUS_WIDTH (32) +#define NBL_UVN_INT_STATUS_DWLEN (1) +union uvn_int_status_u { + struct uvn_int_status { + u32 qtbl_ucor_err:1; /* [0] Default:0x0 RWC */ + u32 qtbl_cor_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RWC */ + u32 ram_parity_err:1; /* [4] Default:0x0 RWC */ + u32 queue_err:1; /* [5] Default:0x0 RWC */ + u32 cram_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 cram_cor_err:1; /* [7] Default:0x0 RWC */ + u32 dram_ucor_err:1; /* [8] Default:0x0 RWC */ + u32 dram_cor_err:1; /* [9] Default:0x0 RWC */ + u32 cif_err:1; /* [10] Default:0x0 RWC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_UVN_INT_MASK_ADDR (0x244004) +#define NBL_UVN_INT_MASK_DEPTH (1) +#define NBL_UVN_INT_MASK_WIDTH (32) +#define NBL_UVN_INT_MASK_DWLEN (1) +union uvn_int_mask_u { + struct uvn_int_mask { + u32 qtbl_ucor_err:1; /* [0] Default:0x0 RW */ + u32 qtbl_cor_err:1; /* [1] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 RW */ + u32 ram_parity_err:1; /* [4] Default:0x0 RW */ + u32 queue_err:1; /* [5] Default:0x0 RW */ + u32 cram_ucor_err:1; /* [6] Default:0x0 RW */ + u32 cram_cor_err:1; /* [7] Default:0x0 RW */ + u32 dram_ucor_err:1; /* [8] Default:0x0 RW */ + u32 dram_cor_err:1; /* [9] Default:0x0 RW */ + u32 cif_err:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_INT_MASK_DWLEN]; +} __packed; + +#define NBL_UVN_INT_SET_ADDR (0x244008) +#define NBL_UVN_INT_SET_DEPTH (1) +#define NBL_UVN_INT_SET_WIDTH (32) +#define NBL_UVN_INT_SET_DWLEN (1) +union uvn_int_set_u { + struct uvn_int_set { + u32
qtbl_ucor_err:1; /* [0] Default:0x0 WO */ + u32 qtbl_cor_err:1; /* [1] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [2] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [3] Default:0x0 WO */ + u32 ram_parity_err:1; /* [4] Default:0x0 WO */ + u32 queue_err:1; /* [5] Default:0x0 WO */ + u32 cram_ucor_err:1; /* [6] Default:0x0 WO */ + u32 cram_cor_err:1; /* [7] Default:0x0 WO */ + u32 dram_ucor_err:1; /* [8] Default:0x0 WO */ + u32 dram_cor_err:1; /* [9] Default:0x0 WO */ + u32 cif_err:1; /* [10] Default:0x0 WO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_INT_SET_DWLEN]; +} __packed; + +#define NBL_UVN_RAM_PARITY_ERR_INFO_ADDR (0x24402c) +#define NBL_UVN_RAM_PARITY_ERR_INFO_DEPTH (1) +#define NBL_UVN_RAM_PARITY_ERR_INFO_WIDTH (32) +#define NBL_UVN_RAM_PARITY_ERR_INFO_DWLEN (1) +union uvn_ram_parity_err_info_u { + struct uvn_ram_parity_err_info { + u32 ram_addr:16; /* [15:0] Default:0x0 RO */ + u32 ram_id:4; /* [19:16] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_RAM_PARITY_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UVN_QUEUE_ERR_INFO_ADDR (0x244034) +#define NBL_UVN_QUEUE_ERR_INFO_DEPTH (1) +#define NBL_UVN_QUEUE_ERR_INFO_WIDTH (32) +#define NBL_UVN_QUEUE_ERR_INFO_DWLEN (1) +union uvn_queue_err_info_u { + struct uvn_queue_err_info { + u32 index:11; /* [10:0] Default:0x0 RO */ + u32 typ:5; /* [15:11] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_QUEUE_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UVN_CIF_ERR_INFO_ADDR (0x24405c) +#define NBL_UVN_CIF_ERR_INFO_DEPTH (1) +#define NBL_UVN_CIF_ERR_INFO_WIDTH (32) +#define NBL_UVN_CIF_ERR_INFO_DWLEN (1) +union uvn_cif_err_info_u { + struct uvn_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UVN_CAR_CTRL_ADDR (0x244100) +#define NBL_UVN_CAR_CTRL_DEPTH (1) +#define NBL_UVN_CAR_CTRL_WIDTH (32) +#define NBL_UVN_CAR_CTRL_DWLEN (1) +union uvn_car_ctrl_u { + struct uvn_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UVN_INIT_START_ADDR (0x244104) +#define NBL_UVN_INIT_START_DEPTH (1) +#define NBL_UVN_INIT_START_WIDTH (32) +#define NBL_UVN_INIT_START_DWLEN (1) +union uvn_init_start_u { + struct uvn_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_INIT_START_DWLEN]; +} __packed; + +#define NBL_UVN_GLB_CLR_ADDR (0x24410c) +#define NBL_UVN_GLB_CLR_DEPTH (1) +#define NBL_UVN_GLB_CLR_WIDTH (32) +#define NBL_UVN_GLB_CLR_DWLEN (1) +union uvn_glb_clr_u { + struct uvn_glb_clr { + u32 glb_clr:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_GLB_CLR_DWLEN]; +} __packed; + +#define NBL_UVN_QUEUE_RESET_ADDR (0x244200) +#define NBL_UVN_QUEUE_RESET_DEPTH (1) +#define NBL_UVN_QUEUE_RESET_WIDTH (32) +#define NBL_UVN_QUEUE_RESET_DWLEN (1) +union uvn_queue_reset_u { + struct uvn_queue_reset { + u32 index:11; /* [10:0] Default:0x0 RW */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 vld:1; /* [16] Default:0x0 WO */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ 
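+		/*
+		 * Queue reset handshake (illustrative): software writes the
+		 * queue index together with vld=1 in a single dword, then
+		 * polls the flag bit of NBL_UVN_QUEUE_RESET_DONE (defined
+		 * below; RC, clears on read) until hardware signals
+		 * completion.
+		 */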
+ } __packed info; + u32 data[NBL_UVN_QUEUE_RESET_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PREFETCH_INIT_ADDR (0x244204) +#define NBL_UVN_DESC_PREFETCH_INIT_DEPTH (1) +#define NBL_UVN_DESC_PREFETCH_INIT_WIDTH (32) +#define NBL_UVN_DESC_PREFETCH_INIT_DWLEN (1) +union uvn_desc_prefetch_init_u { + struct uvn_desc_prefetch_init { + u32 num:8; /* [7:0] Default:0x10 RW */ + u32 rsv1:8; /* [15:8] Default:0x0 RO */ + u32 sel:1; /* [16] Default:0x1 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_PREFETCH_INIT_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_TH_ADDR (0x244208) +#define NBL_UVN_DESC_RD_TH_DEPTH (1) +#define NBL_UVN_DESC_RD_TH_WIDTH (32) +#define NBL_UVN_DESC_RD_TH_DWLEN (1) +union uvn_desc_rd_th_u { + struct uvn_desc_rd_th { + u32 num:8; /* [7:0] Default:0x10 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_TH_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_WAIT_ADDR (0x24420c) +#define NBL_UVN_DESC_RD_WAIT_DEPTH (1) +#define NBL_UVN_DESC_RD_WAIT_WIDTH (32) +#define NBL_UVN_DESC_RD_WAIT_DWLEN (1) +union uvn_desc_rd_wait_u { + struct uvn_desc_rd_wait { + u32 timeout:32; /* [31:0] Default:0x12c RW */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_WAIT_DWLEN]; +} __packed; + +#define NBL_UVN_MIRROR_DESC_ADDR (0x244210) +#define NBL_UVN_MIRROR_DESC_DEPTH (1) +#define NBL_UVN_MIRROR_DESC_WIDTH (32) +#define NBL_UVN_MIRROR_DESC_DWLEN (1) +union uvn_mirror_desc_u { + struct uvn_mirror_desc { + u32 queue_id:11; /* [10:0] Default:0x0 RW */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 vld:1; /* [16] Default:0x0 WO */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_MIRROR_DESC_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_WR_TIMEOUT_ADDR (0x244214) +#define NBL_UVN_DESC_WR_TIMEOUT_DEPTH (1) +#define NBL_UVN_DESC_WR_TIMEOUT_WIDTH (32) +#define NBL_UVN_DESC_WR_TIMEOUT_DWLEN (1) +union uvn_desc_wr_timeout_u { + struct uvn_desc_wr_timeout { + u32 num:15; /* [14:0] Default:0x12c RW */ + u32 mask:1; /* [15] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_WR_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_WR_PACKED_TH_ADDR (0x24421c) +#define NBL_UVN_DESC_WR_PACKED_TH_DEPTH (1) +#define NBL_UVN_DESC_WR_PACKED_TH_WIDTH (32) +#define NBL_UVN_DESC_WR_PACKED_TH_DWLEN (1) +union uvn_desc_wr_packed_th_u { + struct uvn_desc_wr_packed_th { + u32 num:8; /* [7:0] Default:0x4 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_WR_PACKED_TH_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_WR_SPLIT_TH_ADDR (0x244220) +#define NBL_UVN_DESC_WR_SPLIT_TH_DEPTH (1) +#define NBL_UVN_DESC_WR_SPLIT_TH_WIDTH (32) +#define NBL_UVN_DESC_WR_SPLIT_TH_DWLEN (1) +union uvn_desc_wr_split_th_u { + struct uvn_desc_wr_split_th { + u32 num:8; /* [7:0] Default:0x8 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_WR_SPLIT_TH_DWLEN]; +} __packed; + +#define NBL_UVN_QUEUE_ERR_MASK_ADDR (0x244224) +#define NBL_UVN_QUEUE_ERR_MASK_DEPTH (1) +#define NBL_UVN_QUEUE_ERR_MASK_WIDTH (32) +#define NBL_UVN_QUEUE_ERR_MASK_DWLEN (1) +union uvn_queue_err_mask_u { + struct uvn_queue_err_mask { + u32 typ:6; /* [5:0] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_QUEUE_ERR_MASK_DWLEN]; +} __packed; + +#define NBL_UVN_PREFETCH_ENHANCE_ADDR (0x244228) +#define NBL_UVN_PREFETCH_ENHANCE_DEPTH (1) +#define NBL_UVN_PREFETCH_ENHANCE_WIDTH (32) 
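+/*
+ * The descriptor-path tuning registers above (prefetch init, read
+ * threshold/timeout, mirror, write-back timeout and packing/split
+ * thresholds) are plain RW dwords. A minimal programming sketch,
+ * assuming a hypothetical wr32() MMIO helper and hw handle that are
+ * not part of this header:
+ *
+ *	union uvn_desc_wr_packed_th_u th = { .info.num = 0x4 };
+ *
+ *	wr32(hw, NBL_UVN_DESC_WR_PACKED_TH_ADDR, th.data[0]);
+ */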
+#define NBL_UVN_PREFETCH_ENHANCE_DWLEN (1) +union uvn_prefetch_enhance_u { + struct uvn_prefetch_enhance { + u32 en:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_PREFETCH_ENHANCE_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_IDX_DIFF_ADDR (0x24422c) +#define NBL_UVN_AVAIL_IDX_DIFF_DEPTH (1) +#define NBL_UVN_AVAIL_IDX_DIFF_WIDTH (32) +#define NBL_UVN_AVAIL_IDX_DIFF_DWLEN (1) +union uvn_avail_idx_diff_u { + struct uvn_avail_idx_diff { + u32 num:3; /* [2:0] Default:0x3 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_AVAIL_IDX_DIFF_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_CACHE_MODE_ADDR (0x244230) +#define NBL_UVN_DESC_CACHE_MODE_DEPTH (1) +#define NBL_UVN_DESC_CACHE_MODE_WIDTH (32) +#define NBL_UVN_DESC_CACHE_MODE_DWLEN (1) +union uvn_desc_cache_mode_u { + struct uvn_desc_cache_mode { + u32 sel:2; /* [1:0] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_CACHE_MODE_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PF_SUB_THOLD_ADDR (0x244234) +#define NBL_UVN_DESC_PF_SUB_THOLD_DEPTH (1) +#define NBL_UVN_DESC_PF_SUB_THOLD_WIDTH (32) +#define NBL_UVN_DESC_PF_SUB_THOLD_DWLEN (1) +union uvn_desc_pf_sub_thold_u { + struct uvn_desc_pf_sub_thold { + u32 desc_th:16; /* [15:0] Default:0x1ec RW */ + u32 rerr_th:16; /* [31:16] Default:0xfc RW */ + } __packed info; + u32 data[NBL_UVN_DESC_PF_SUB_THOLD_DWLEN]; +} __packed; + +#define NBL_UVN_TRIGGER_NOTIFY_ADDR (0x244238) +#define NBL_UVN_TRIGGER_NOTIFY_DEPTH (1) +#define NBL_UVN_TRIGGER_NOTIFY_WIDTH (32) +#define NBL_UVN_TRIGGER_NOTIFY_DWLEN (1) +union uvn_trigger_notify_u { + struct uvn_trigger_notify { + u32 enable:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_TRIGGER_NOTIFY_DWLEN]; +} __packed; + +#define NBL_UVN_QUEUE_ALLOCATE_ADDR (0x24423c) +#define NBL_UVN_QUEUE_ALLOCATE_DEPTH (1) +#define NBL_UVN_QUEUE_ALLOCATE_WIDTH (32) +#define NBL_UVN_QUEUE_ALLOCATE_DWLEN (1) +union uvn_queue_allocate_u { + struct uvn_queue_allocate { + u32 ecpu_num:9; /* [8:0] Default:0x20 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_QUEUE_ALLOCATE_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_REQ_AVAIL_RD_ADDR (0x244240) +#define NBL_UVN_DIF_REQ_AVAIL_RD_DEPTH (1) +#define NBL_UVN_DIF_REQ_AVAIL_RD_WIDTH (32) +#define NBL_UVN_DIF_REQ_AVAIL_RD_DWLEN (1) +union uvn_dif_req_avail_rd_u { + struct uvn_dif_req_avail_rd { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DIF_REQ_AVAIL_RD_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_REQ_DESC_RD_ADDR (0x244244) +#define NBL_UVN_DIF_REQ_DESC_RD_DEPTH (1) +#define NBL_UVN_DIF_REQ_DESC_RD_WIDTH (32) +#define NBL_UVN_DIF_REQ_DESC_RD_DWLEN (1) +union uvn_dif_req_desc_rd_u { + struct uvn_dif_req_desc_rd { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DIF_REQ_DESC_RD_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_REQ_DESC_WR_ADDR (0x244248) +#define NBL_UVN_DIF_REQ_DESC_WR_DEPTH (1) +#define NBL_UVN_DIF_REQ_DESC_WR_WIDTH (32) +#define NBL_UVN_DIF_REQ_DESC_WR_DWLEN (1) +union uvn_dif_req_desc_wr_u { + struct uvn_dif_req_desc_wr { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; 
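+	/*
+	 * NBL_UVN_DIF_REQ_AVAIL_RD, _DESC_RD, _DESC_WR and _PKT_WR share
+	 * this layout: a 2-bit vn_ph field plus a th_en enable bit, one
+	 * register per DMA request type (avail-ring read, descriptor
+	 * read/write, packet write).
+	 */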
+ u32 data[NBL_UVN_DIF_REQ_DESC_WR_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_REQ_PKT_WR_ADDR (0x24424c) +#define NBL_UVN_DIF_REQ_PKT_WR_DEPTH (1) +#define NBL_UVN_DIF_REQ_PKT_WR_WIDTH (32) +#define NBL_UVN_DIF_REQ_PKT_WR_DWLEN (1) +union uvn_dif_req_pkt_wr_u { + struct uvn_dif_req_pkt_wr { + u32 vn_ph:2; /* [1:0] Default:0x0 RW */ + u32 th_en:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DIF_REQ_PKT_WR_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_REQ_RO_FLAG_ADDR (0x244250) +#define NBL_UVN_DIF_REQ_RO_FLAG_DEPTH (1) +#define NBL_UVN_DIF_REQ_RO_FLAG_WIDTH (32) +#define NBL_UVN_DIF_REQ_RO_FLAG_DWLEN (1) +union uvn_dif_req_ro_flag_u { + struct uvn_dif_req_ro_flag { + u32 avail_rd:1; /* [0] Default:0x0 RW */ + u32 desc_rd:1; /* [1] Default:0x0 RW */ + u32 pkt_wr:1; /* [2] Default:0x0 RW */ + u32 desc_wr:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DIF_REQ_RO_FLAG_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_DELAY_PERIOD_ADDR (0x244254) +#define NBL_UVN_DIF_DELAY_PERIOD_DEPTH (1) +#define NBL_UVN_DIF_DELAY_PERIOD_WIDTH (32) +#define NBL_UVN_DIF_DELAY_PERIOD_DWLEN (1) +union uvn_dif_delay_period_u { + struct uvn_dif_delay_period { + u32 num:32; /* [31:0] Default:0x23c34600 RW */ + } __packed info; + u32 data[NBL_UVN_DIF_DELAY_PERIOD_DWLEN]; +} __packed; + +#define NBL_UVN_SELF_NOTIFY_ADDR (0x244258) +#define NBL_UVN_SELF_NOTIFY_DEPTH (1) +#define NBL_UVN_SELF_NOTIFY_WIDTH (32) +#define NBL_UVN_SELF_NOTIFY_DWLEN (1) +union uvn_self_notify_u { + struct uvn_self_notify { + u32 index:11; /* [10:0] Default:0x0 RW */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 vld:1; /* [16] Default:0x0 WO */ + u32 sel:2; /* [18:17] Default:0x0 RW */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_SELF_NOTIFY_DWLEN]; +} __packed; + +#define NBL_UVN_SELF_NOTIFY_STATE_ADDR (0x24425c) +#define NBL_UVN_SELF_NOTIFY_STATE_DEPTH (1) +#define NBL_UVN_SELF_NOTIFY_STATE_WIDTH (32) +#define NBL_UVN_SELF_NOTIFY_STATE_DWLEN (1) +union uvn_self_notify_state_u { + struct uvn_self_notify_state { + u32 run:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_SELF_NOTIFY_STATE_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_BUF_THOLD_ADDR (0x244260) +#define NBL_UVN_NOTIFY_BUF_THOLD_DEPTH (1) +#define NBL_UVN_NOTIFY_BUF_THOLD_WIDTH (32) +#define NBL_UVN_NOTIFY_BUF_THOLD_DWLEN (1) +union uvn_notify_buf_thold_u { + struct uvn_notify_buf_thold { + u32 naf_th:8; /* [7:0] Default:0x40 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_BUF_THOLD_DWLEN]; +} __packed; + +#define NBL_UVN_INT_DMA_MASK_ADDR (0x244270) +#define NBL_UVN_INT_DMA_MASK_DEPTH (1) +#define NBL_UVN_INT_DMA_MASK_WIDTH (32) +#define NBL_UVN_INT_DMA_MASK_DWLEN (1) +union uvn_int_dma_mask_u { + struct uvn_int_dma_mask { + u32 typ:4; /* [3:0] Default:0x0 RW */ + u32 qtbl:8; /* [11:4] Default:0x0 RW */ + u32 cram:16; /* [27:12] Default:0x0 RW */ + u32 rsv:2; /* [29:28] Default:0x0 RO */ + u32 port:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UVN_INT_DMA_MASK_DWLEN]; +} __packed; + +#define NBL_UVN_INIT_DONE_ADDR (0x244400) +#define NBL_UVN_INIT_DONE_DEPTH (1) +#define NBL_UVN_INIT_DONE_WIDTH (32) +#define NBL_UVN_INIT_DONE_DWLEN (1) +union uvn_init_done_u { + struct uvn_init_done { + u32 init_done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_UVN_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UVN_GLB_CLR_DONE_ADDR (0x244404) +#define NBL_UVN_GLB_CLR_DONE_DEPTH (1) +#define NBL_UVN_GLB_CLR_DONE_WIDTH (32) +#define NBL_UVN_GLB_CLR_DONE_DWLEN (1) +union uvn_glb_clr_done_u { + struct uvn_glb_clr_done { + u32 glb_clr_done:1; /* [0] Default:0x1 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_GLB_CLR_DONE_DWLEN]; +} __packed; + +#define NBL_UVN_QUEUE_RESET_DONE_ADDR (0x244408) +#define NBL_UVN_QUEUE_RESET_DONE_DEPTH (1) +#define NBL_UVN_QUEUE_RESET_DONE_WIDTH (32) +#define NBL_UVN_QUEUE_RESET_DONE_DWLEN (1) +union uvn_queue_reset_done_u { + struct uvn_queue_reset_done { + u32 flag:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_QUEUE_RESET_DONE_DWLEN]; +} __packed; + +#define NBL_UVN_TRANSIT_DEBUG0_ADDR (0x245004) +#define NBL_UVN_TRANSIT_DEBUG0_DEPTH (1) +#define NBL_UVN_TRANSIT_DEBUG0_WIDTH (32) +#define NBL_UVN_TRANSIT_DEBUG0_DWLEN (1) +union uvn_transit_debug0_u { + struct uvn_transit_debug0 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_TRANSIT_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_TRANSIT_DEBUG1_ADDR (0x245008) +#define NBL_UVN_TRANSIT_DEBUG1_DEPTH (1) +#define NBL_UVN_TRANSIT_DEBUG1_WIDTH (32) +#define NBL_UVN_TRANSIT_DEBUG1_DWLEN (1) +union uvn_transit_debug1_u { + struct uvn_transit_debug1 { + u32 status:32; /* [31:0] Default:0xf000f000 RO */ + } __packed info; + u32 data[NBL_UVN_TRANSIT_DEBUG1_DWLEN]; +} __packed; + +#define NBL_UVN_CFG_DEBUG0_ADDR (0x24500c) +#define NBL_UVN_CFG_DEBUG0_DEPTH (1) +#define NBL_UVN_CFG_DEBUG0_WIDTH (32) +#define NBL_UVN_CFG_DEBUG0_DWLEN (1) +union uvn_cfg_debug0_u { + struct uvn_cfg_debug0 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_CFG_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_QUEUE_CXT_DEBUG0_ADDR (0x245010) +#define NBL_UVN_QUEUE_CXT_DEBUG0_DEPTH (1) +#define NBL_UVN_QUEUE_CXT_DEBUG0_WIDTH (32) +#define NBL_UVN_QUEUE_CXT_DEBUG0_DWLEN (1) +union uvn_queue_cxt_debug0_u { + struct uvn_queue_cxt_debug0 { + u32 status:32; /* [31:0] Default:0xf RO */ + } __packed info; + u32 data[NBL_UVN_QUEUE_CXT_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_CACHE_CXT_DEBUG0_ADDR (0x245014) +#define NBL_UVN_DESC_CACHE_CXT_DEBUG0_DEPTH (1) +#define NBL_UVN_DESC_CACHE_CXT_DEBUG0_WIDTH (32) +#define NBL_UVN_DESC_CACHE_CXT_DEBUG0_DWLEN (1) +union uvn_desc_cache_cxt_debug0_u { + struct uvn_desc_cache_cxt_debug0 { + u32 status:32; /* [31:0] Default:0xf RO */ + } __packed info; + u32 data[NBL_UVN_DESC_CACHE_CXT_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG00_ADDR (0x245020) +#define NBL_UVN_NOTIFY_DEBUG00_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG00_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG00_DWLEN (1) +union uvn_notify_debug00_u { + struct uvn_notify_debug00 { + u32 status:32; /* [31:0] Default:0x10 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG00_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG01_ADDR (0x245024) +#define NBL_UVN_NOTIFY_DEBUG01_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG01_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG01_DWLEN (1) +union uvn_notify_debug01_u { + struct uvn_notify_debug01 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG01_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG02_ADDR (0x245028) +#define NBL_UVN_NOTIFY_DEBUG02_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG02_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG02_DWLEN (1) +union 
uvn_notify_debug02_u { + struct uvn_notify_debug02 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG02_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG03_ADDR (0x24502c) +#define NBL_UVN_NOTIFY_DEBUG03_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG03_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG03_DWLEN (1) +union uvn_notify_debug03_u { + struct uvn_notify_debug03 { + u32 status:32; /* [31:0] Default:0xf000 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG03_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG04_ADDR (0x245030) +#define NBL_UVN_NOTIFY_DEBUG04_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG04_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG04_DWLEN (1) +union uvn_notify_debug04_u { + struct uvn_notify_debug04 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG04_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG05_ADDR (0x245034) +#define NBL_UVN_NOTIFY_DEBUG05_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG05_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG05_DWLEN (1) +union uvn_notify_debug05_u { + struct uvn_notify_debug05 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG05_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG10_ADDR (0x245040) +#define NBL_UVN_NOTIFY_DEBUG10_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG10_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG10_DWLEN (1) +union uvn_notify_debug10_u { + struct uvn_notify_debug10 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG10_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG11_ADDR (0x245044) +#define NBL_UVN_NOTIFY_DEBUG11_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG11_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG11_DWLEN (1) +union uvn_notify_debug11_u { + struct uvn_notify_debug11 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG11_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG12_ADDR (0x245048) +#define NBL_UVN_NOTIFY_DEBUG12_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG12_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG12_DWLEN (1) +union uvn_notify_debug12_u { + struct uvn_notify_debug12 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG12_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG13_ADDR (0x24504c) +#define NBL_UVN_NOTIFY_DEBUG13_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG13_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG13_DWLEN (1) +union uvn_notify_debug13_u { + struct uvn_notify_debug13 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG13_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG14_ADDR (0x245050) +#define NBL_UVN_NOTIFY_DEBUG14_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG14_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG14_DWLEN (1) +union uvn_notify_debug14_u { + struct uvn_notify_debug14 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG14_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG15_ADDR (0x245054) +#define NBL_UVN_NOTIFY_DEBUG15_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG15_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG15_DWLEN (1) +union uvn_notify_debug15_u { + struct uvn_notify_debug15 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG15_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG16_ADDR (0x245058) +#define NBL_UVN_NOTIFY_DEBUG16_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG16_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG16_DWLEN 
(1) +union uvn_notify_debug16_u { + struct uvn_notify_debug16 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG16_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DEBUG17_ADDR (0x24505c) +#define NBL_UVN_NOTIFY_DEBUG17_DEPTH (1) +#define NBL_UVN_NOTIFY_DEBUG17_WIDTH (32) +#define NBL_UVN_NOTIFY_DEBUG17_DWLEN (1) +union uvn_notify_debug17_u { + struct uvn_notify_debug17 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DEBUG17_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_DEBUG0_ADDR (0x245060) +#define NBL_UVN_AVAIL_PRE_DEBUG0_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_DEBUG0_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_DEBUG0_DWLEN (1) +union uvn_avail_pre_debug0_u { + struct uvn_avail_pre_debug0 { + u32 status_sub:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_DEBUG1_ADDR (0x245064) +#define NBL_UVN_AVAIL_PRE_DEBUG1_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_DEBUG1_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_DEBUG1_DWLEN (1) +union uvn_avail_pre_debug1_u { + struct uvn_avail_pre_debug1 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_DEBUG1_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DEBUG0_ADDR (0x245070) +#define NBL_UVN_DESC_PRE_DEBUG0_DEPTH (1) +#define NBL_UVN_DESC_PRE_DEBUG0_WIDTH (32) +#define NBL_UVN_DESC_PRE_DEBUG0_DWLEN (1) +union uvn_desc_pre_debug0_u { + struct uvn_desc_pre_debug0 { + u32 status_sub:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DEBUG1_ADDR (0x245074) +#define NBL_UVN_DESC_PRE_DEBUG1_DEPTH (1) +#define NBL_UVN_DESC_PRE_DEBUG1_WIDTH (32) +#define NBL_UVN_DESC_PRE_DEBUG1_DWLEN (1) +union uvn_desc_pre_debug1_u { + struct uvn_desc_pre_debug1 { + u32 status:32; /* [31:0] Default:0x20 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DEBUG1_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DEBUG2_ADDR (0x245078) +#define NBL_UVN_DESC_PRE_DEBUG2_DEPTH (1) +#define NBL_UVN_DESC_PRE_DEBUG2_WIDTH (32) +#define NBL_UVN_DESC_PRE_DEBUG2_DWLEN (1) +union uvn_desc_pre_debug2_u { + struct uvn_desc_pre_debug2 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DEBUG2_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DEBUG0_ADDR (0x245080) +#define NBL_UVN_DESC_RD_DEBUG0_DEPTH (1) +#define NBL_UVN_DESC_RD_DEBUG0_WIDTH (32) +#define NBL_UVN_DESC_RD_DEBUG0_DWLEN (1) +union uvn_desc_rd_debug0_u { + struct uvn_desc_rd_debug0 { + u32 status:32; /* [31:0] Default:0x1040 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DEBUG1_ADDR (0x245084) +#define NBL_UVN_DESC_RD_DEBUG1_DEPTH (1) +#define NBL_UVN_DESC_RD_DEBUG1_WIDTH (32) +#define NBL_UVN_DESC_RD_DEBUG1_DWLEN (1) +union uvn_desc_rd_debug1_u { + struct uvn_desc_rd_debug1 { + u32 status:32; /* [31:0] Default:0xf000 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DEBUG1_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DEBUG2_ADDR (0x245088) +#define NBL_UVN_DESC_RD_DEBUG2_DEPTH (1) +#define NBL_UVN_DESC_RD_DEBUG2_WIDTH (32) +#define NBL_UVN_DESC_RD_DEBUG2_DWLEN (1) +union uvn_desc_rd_debug2_u { + struct uvn_desc_rd_debug2 { + u32 status:32; /* [31:0] Default:0xf000 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DEBUG2_DWLEN]; +} __packed; + +#define NBL_UVN_PKT_WR_DEBUG0_ADDR (0x245090) +#define NBL_UVN_PKT_WR_DEBUG0_DEPTH (1) +#define 
NBL_UVN_PKT_WR_DEBUG0_WIDTH (32) +#define NBL_UVN_PKT_WR_DEBUG0_DWLEN (1) +union uvn_pkt_wr_debug0_u { + struct uvn_pkt_wr_debug0 { + u32 status:32; /* [31:0] Default:0x20 RO */ + } __packed info; + u32 data[NBL_UVN_PKT_WR_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_WR_DEBUG0_ADDR (0x2450a0) +#define NBL_UVN_DESC_WR_DEBUG0_DEPTH (1) +#define NBL_UVN_DESC_WR_DEBUG0_WIDTH (32) +#define NBL_UVN_DESC_WR_DEBUG0_DWLEN (1) +union uvn_desc_wr_debug0_u { + struct uvn_desc_wr_debug0 { + u32 status:32; /* [31:0] Default:0x20 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_WR_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_MERGE_DEBUG0_ADDR (0x2450b0) +#define NBL_UVN_DESC_MERGE_DEBUG0_DEPTH (1) +#define NBL_UVN_DESC_MERGE_DEBUG0_WIDTH (32) +#define NBL_UVN_DESC_MERGE_DEBUG0_DWLEN (1) +union uvn_desc_merge_debug0_u { + struct uvn_desc_merge_debug0 { + u32 status:32; /* [31:0] Default:0x1f RO */ + } __packed info; + u32 data[NBL_UVN_DESC_MERGE_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_MERGE_DEBUG1_ADDR (0x2450b4) +#define NBL_UVN_DESC_MERGE_DEBUG1_DEPTH (1) +#define NBL_UVN_DESC_MERGE_DEBUG1_WIDTH (32) +#define NBL_UVN_DESC_MERGE_DEBUG1_DWLEN (1) +union uvn_desc_merge_debug1_u { + struct uvn_desc_merge_debug1 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DESC_MERGE_DEBUG1_DWLEN]; +} __packed; + +#define NBL_UVN_STAT_DEBUG0_ADDR (0x2450c0) +#define NBL_UVN_STAT_DEBUG0_DEPTH (1) +#define NBL_UVN_STAT_DEBUG0_WIDTH (32) +#define NBL_UVN_STAT_DEBUG0_DWLEN (1) +union uvn_stat_debug0_u { + struct uvn_stat_debug0 { + u32 status:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_STAT_DEBUG0_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_DELAY_REQ_ADDR (0x2450d0) +#define NBL_UVN_DIF_DELAY_REQ_DEPTH (1) +#define NBL_UVN_DIF_DELAY_REQ_WIDTH (32) +#define NBL_UVN_DIF_DELAY_REQ_DWLEN (1) +union uvn_dif_delay_req_u { + struct uvn_dif_delay_req { + u32 sum:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DIF_DELAY_REQ_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_DELAY_TIME_ADDR (0x2450d4) +#define NBL_UVN_DIF_DELAY_TIME_DEPTH (1) +#define NBL_UVN_DIF_DELAY_TIME_WIDTH (32) +#define NBL_UVN_DIF_DELAY_TIME_DWLEN (1) +union uvn_dif_delay_time_u { + struct uvn_dif_delay_time { + u32 sum:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DIF_DELAY_TIME_DWLEN]; +} __packed; + +#define NBL_UVN_DIF_DELAY_MAX_ADDR (0x2450d8) +#define NBL_UVN_DIF_DELAY_MAX_DEPTH (1) +#define NBL_UVN_DIF_DELAY_MAX_WIDTH (32) +#define NBL_UVN_DIF_DELAY_MAX_DWLEN (1) +union uvn_dif_delay_max_u { + struct uvn_dif_delay_max { + u32 num:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UVN_DIF_DELAY_MAX_DWLEN]; +} __packed; + +#define NBL_UVN_TRANSIT_PRE_DATA_HERR_ADDR (0x24521c) +#define NBL_UVN_TRANSIT_PRE_DATA_HERR_DEPTH (1) +#define NBL_UVN_TRANSIT_PRE_DATA_HERR_WIDTH (32) +#define NBL_UVN_TRANSIT_PRE_DATA_HERR_DWLEN (1) +union uvn_transit_pre_data_herr_u { + struct uvn_transit_pre_data_herr { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_TRANSIT_PRE_DATA_HERR_DWLEN]; +} __packed; + +#define NBL_UVN_HOST_NOTIFY_VLD_ADDR (0x245230) +#define NBL_UVN_HOST_NOTIFY_VLD_DEPTH (1) +#define NBL_UVN_HOST_NOTIFY_VLD_WIDTH (32) +#define NBL_UVN_HOST_NOTIFY_VLD_DWLEN (1) +union uvn_host_notify_vld_u { + struct uvn_host_notify_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_HOST_NOTIFY_VLD_DWLEN]; +} __packed; + +#define NBL_UVN_ECPU_NOTIFY_VLD_ADDR 
(0x245234) +#define NBL_UVN_ECPU_NOTIFY_VLD_DEPTH (1) +#define NBL_UVN_ECPU_NOTIFY_VLD_WIDTH (32) +#define NBL_UVN_ECPU_NOTIFY_VLD_DWLEN (1) +union uvn_ecpu_notify_vld_u { + struct uvn_ecpu_notify_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_ECPU_NOTIFY_VLD_DWLEN]; +} __packed; + +#define NBL_UVN_HOST_LEGAL_NOTIFY_VLD_ADDR (0x245238) +#define NBL_UVN_HOST_LEGAL_NOTIFY_VLD_DEPTH (1) +#define NBL_UVN_HOST_LEGAL_NOTIFY_VLD_WIDTH (32) +#define NBL_UVN_HOST_LEGAL_NOTIFY_VLD_DWLEN (1) +union uvn_host_legal_notify_vld_u { + struct uvn_host_legal_notify_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_HOST_LEGAL_NOTIFY_VLD_DWLEN]; +} __packed; + +#define NBL_UVN_ECPU_LEGAL_NOTIFY_VLD_ADDR (0x24523c) +#define NBL_UVN_ECPU_LEGAL_NOTIFY_VLD_DEPTH (1) +#define NBL_UVN_ECPU_LEGAL_NOTIFY_VLD_WIDTH (32) +#define NBL_UVN_ECPU_LEGAL_NOTIFY_VLD_DWLEN (1) +union uvn_ecpu_legal_notify_vld_u { + struct uvn_ecpu_legal_notify_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_ECPU_LEGAL_NOTIFY_VLD_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_VLD_ADDR (0x245240) +#define NBL_UVN_NOTIFY_VLD_DEPTH (1) +#define NBL_UVN_NOTIFY_VLD_WIDTH (32) +#define NBL_UVN_NOTIFY_VLD_DWLEN (1) +union uvn_notify_vld_u { + struct uvn_notify_vld { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_VLD_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_DROP_ADDR (0x245244) +#define NBL_UVN_NOTIFY_DROP_DEPTH (1) +#define NBL_UVN_NOTIFY_DROP_WIDTH (32) +#define NBL_UVN_NOTIFY_DROP_DWLEN (1) +union uvn_notify_drop_u { + struct uvn_notify_drop { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_DROP_DWLEN]; +} __packed; + +#define NBL_UVN_NOTIFY_AHEAD_QENABLE_ADDR (0x245248) +#define NBL_UVN_NOTIFY_AHEAD_QENABLE_DEPTH (1) +#define NBL_UVN_NOTIFY_AHEAD_QENABLE_WIDTH (32) +#define NBL_UVN_NOTIFY_AHEAD_QENABLE_DWLEN (1) +union uvn_notify_ahead_qenable_u { + struct uvn_notify_ahead_qenable { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_NOTIFY_AHEAD_QENABLE_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_SUB_NTF_REQ_ADDR (0x245250) +#define NBL_UVN_AVAIL_PRE_SUB_NTF_REQ_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_SUB_NTF_REQ_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_SUB_NTF_REQ_DWLEN (1) +union uvn_avail_pre_sub_ntf_req_u { + struct uvn_avail_pre_sub_ntf_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_SUB_NTF_REQ_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_SUB_NTF_ACK_ADDR (0x245254) +#define NBL_UVN_AVAIL_PRE_SUB_NTF_ACK_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_SUB_NTF_ACK_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_SUB_NTF_ACK_DWLEN (1) +union uvn_avail_pre_sub_ntf_ack_u { + struct uvn_avail_pre_sub_ntf_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_SUB_NTF_ACK_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_SUB_PKT_REQ_ADDR (0x245258) +#define NBL_UVN_AVAIL_PRE_SUB_PKT_REQ_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_SUB_PKT_REQ_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_SUB_PKT_REQ_DWLEN (1) +union uvn_avail_pre_sub_pkt_req_u { + struct uvn_avail_pre_sub_pkt_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_SUB_PKT_REQ_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_SUB_PKT_ACK_ADDR (0x24525c) +#define NBL_UVN_AVAIL_PRE_SUB_PKT_ACK_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_SUB_PKT_ACK_WIDTH 
(32) +#define NBL_UVN_AVAIL_PRE_SUB_PKT_ACK_DWLEN (1) +union uvn_avail_pre_sub_pkt_ack_u { + struct uvn_avail_pre_sub_pkt_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_SUB_PKT_ACK_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_SUB_DIF_ACK_ADDR (0x245260) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_ACK_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_ACK_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_ACK_DWLEN (1) +union uvn_avail_pre_sub_dif_ack_u { + struct uvn_avail_pre_sub_dif_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_SUB_DIF_ACK_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_SUB_DIF_REOB_ADDR (0x245264) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_REOB_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_REOB_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_REOB_DWLEN (1) +union uvn_avail_pre_sub_dif_reob_u { + struct uvn_avail_pre_sub_dif_reob { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_SUB_DIF_REOB_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_SUB_DIF_RERR_ADDR (0x245268) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_RERR_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_RERR_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_SUB_DIF_RERR_DWLEN (1) +union uvn_avail_pre_sub_dif_rerr_u { + struct uvn_avail_pre_sub_dif_rerr { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_SUB_DIF_RERR_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_REQ_EQ_0_ADDR (0x245270) +#define NBL_UVN_AVAIL_PRE_REQ_EQ_0_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_REQ_EQ_0_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_REQ_EQ_0_DWLEN (1) +union uvn_avail_pre_req_eq_0_u { + struct uvn_avail_pre_req_eq_0 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_REQ_EQ_0_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_ADDR (0x245274) +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_DWLEN (1) +union uvn_avail_pre_ack_eq_0_u { + struct uvn_avail_pre_ack_eq_0 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_ACK_EQ_0_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_NF_ADDR (0x245278) +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_NF_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_NF_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_ACK_EQ_0_NF_DWLEN (1) +union uvn_avail_pre_ack_eq_0_nf_u { + struct uvn_avail_pre_ack_eq_0_nf { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_ACK_EQ_0_NF_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_ACK_LT_REQ_NF_ADDR (0x24527c) +#define NBL_UVN_AVAIL_PRE_ACK_LT_REQ_NF_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_ACK_LT_REQ_NF_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_ACK_LT_REQ_NF_DWLEN (1) +union uvn_avail_pre_ack_lt_req_nf_u { + struct uvn_avail_pre_ack_lt_req_nf { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_ACK_LT_REQ_NF_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_ACK_GT_32_ADDR (0x245280) +#define NBL_UVN_AVAIL_PRE_ACK_GT_32_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_ACK_GT_32_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_ACK_GT_32_DWLEN (1) +union uvn_avail_pre_ack_gt_32_u { + struct uvn_avail_pre_ack_gt_32 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_ACK_GT_32_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_RERR_0_ADDR (0x245284) +#define NBL_UVN_AVAIL_PRE_RERR_0_DEPTH (1) 
+#define NBL_UVN_AVAIL_PRE_RERR_0_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_RERR_0_DWLEN (1) +union uvn_avail_pre_rerr_0_u { + struct uvn_avail_pre_rerr_0 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_RERR_0_DWLEN]; +} __packed; + +#define NBL_UVN_AVAIL_PRE_RERR_1_ADDR (0x245288) +#define NBL_UVN_AVAIL_PRE_RERR_1_DEPTH (1) +#define NBL_UVN_AVAIL_PRE_RERR_1_WIDTH (32) +#define NBL_UVN_AVAIL_PRE_RERR_1_DWLEN (1) +union uvn_avail_pre_rerr_1_u { + struct uvn_avail_pre_rerr_1 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_AVAIL_PRE_RERR_1_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_NTF_REQ_ADDR (0x245290) +#define NBL_UVN_DESC_PRE_SUB_NTF_REQ_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_NTF_REQ_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_NTF_REQ_DWLEN (1) +union uvn_desc_pre_sub_ntf_req_u { + struct uvn_desc_pre_sub_ntf_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_NTF_REQ_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_NTF_ACK_ADDR (0x245294) +#define NBL_UVN_DESC_PRE_SUB_NTF_ACK_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_NTF_ACK_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_NTF_ACK_DWLEN (1) +union uvn_desc_pre_sub_ntf_ack_u { + struct uvn_desc_pre_sub_ntf_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_NTF_ACK_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_PKT_REQ_ADDR (0x245298) +#define NBL_UVN_DESC_PRE_SUB_PKT_REQ_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_PKT_REQ_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_PKT_REQ_DWLEN (1) +union uvn_desc_pre_sub_pkt_req_u { + struct uvn_desc_pre_sub_pkt_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_PKT_REQ_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_PKT_ACK_ADDR (0x24529c) +#define NBL_UVN_DESC_PRE_SUB_PKT_ACK_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_PKT_ACK_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_PKT_ACK_DWLEN (1) +union uvn_desc_pre_sub_pkt_ack_u { + struct uvn_desc_pre_sub_pkt_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_PKT_ACK_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_SPLIT_REQ_ADDR (0x2452a0) +#define NBL_UVN_DESC_PRE_SUB_SPLIT_REQ_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_SPLIT_REQ_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_SPLIT_REQ_DWLEN (1) +union uvn_desc_pre_sub_split_req_u { + struct uvn_desc_pre_sub_split_req { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_SPLIT_REQ_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_SPLIT_ACK_ADDR (0x2452a4) +#define NBL_UVN_DESC_PRE_SUB_SPLIT_ACK_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_SPLIT_ACK_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_SPLIT_ACK_DWLEN (1) +union uvn_desc_pre_sub_split_ack_u { + struct uvn_desc_pre_sub_split_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_SPLIT_ACK_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_DIF_ACK_ADDR (0x2452a8) +#define NBL_UVN_DESC_PRE_SUB_DIF_ACK_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_DIF_ACK_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_DIF_ACK_DWLEN (1) +union uvn_desc_pre_sub_dif_ack_u { + struct uvn_desc_pre_sub_dif_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_DIF_ACK_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_DIF_REOB_ADDR (0x2452ac) +#define NBL_UVN_DESC_PRE_SUB_DIF_REOB_DEPTH (1) 
+#define NBL_UVN_DESC_PRE_SUB_DIF_REOB_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_DIF_REOB_DWLEN (1) +union uvn_desc_pre_sub_dif_reob_u { + struct uvn_desc_pre_sub_dif_reob { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_DIF_REOB_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_SUB_DIF_RERR_ADDR (0x2452b0) +#define NBL_UVN_DESC_PRE_SUB_DIF_RERR_DEPTH (1) +#define NBL_UVN_DESC_PRE_SUB_DIF_RERR_WIDTH (32) +#define NBL_UVN_DESC_PRE_SUB_DIF_RERR_DWLEN (1) +union uvn_desc_pre_sub_dif_rerr_u { + struct uvn_desc_pre_sub_dif_rerr { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_SUB_DIF_RERR_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DESC_INVALID_0_ADDR (0x2452b4) +#define NBL_UVN_DESC_PRE_DESC_INVALID_0_DEPTH (1) +#define NBL_UVN_DESC_PRE_DESC_INVALID_0_WIDTH (32) +#define NBL_UVN_DESC_PRE_DESC_INVALID_0_DWLEN (1) +union uvn_desc_pre_desc_invalid_0_u { + struct uvn_desc_pre_desc_invalid_0 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DESC_INVALID_0_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DESC_INVALID_1_ADDR (0x2452b8) +#define NBL_UVN_DESC_PRE_DESC_INVALID_1_DEPTH (1) +#define NBL_UVN_DESC_PRE_DESC_INVALID_1_WIDTH (32) +#define NBL_UVN_DESC_PRE_DESC_INVALID_1_DWLEN (1) +union uvn_desc_pre_desc_invalid_1_u { + struct uvn_desc_pre_desc_invalid_1 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DESC_INVALID_1_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DESC_INVALID_2_ADDR (0x2452bc) +#define NBL_UVN_DESC_PRE_DESC_INVALID_2_DEPTH (1) +#define NBL_UVN_DESC_PRE_DESC_INVALID_2_WIDTH (32) +#define NBL_UVN_DESC_PRE_DESC_INVALID_2_DWLEN (1) +union uvn_desc_pre_desc_invalid_2_u { + struct uvn_desc_pre_desc_invalid_2 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DESC_INVALID_2_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DESC_INVALID_3_ADDR (0x2452c0) +#define NBL_UVN_DESC_PRE_DESC_INVALID_3_DEPTH (1) +#define NBL_UVN_DESC_PRE_DESC_INVALID_3_WIDTH (32) +#define NBL_UVN_DESC_PRE_DESC_INVALID_3_DWLEN (1) +union uvn_desc_pre_desc_invalid_3_u { + struct uvn_desc_pre_desc_invalid_3 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DESC_INVALID_3_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DESC_INVALID_4_ADDR (0x2452c4) +#define NBL_UVN_DESC_PRE_DESC_INVALID_4_DEPTH (1) +#define NBL_UVN_DESC_PRE_DESC_INVALID_4_WIDTH (32) +#define NBL_UVN_DESC_PRE_DESC_INVALID_4_DWLEN (1) +union uvn_desc_pre_desc_invalid_4_u { + struct uvn_desc_pre_desc_invalid_4 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DESC_INVALID_4_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DESC_REQ_NULL_ADDR (0x2452c8) +#define NBL_UVN_DESC_PRE_DESC_REQ_NULL_DEPTH (1) +#define NBL_UVN_DESC_PRE_DESC_REQ_NULL_WIDTH (32) +#define NBL_UVN_DESC_PRE_DESC_REQ_NULL_DWLEN (1) +union uvn_desc_pre_desc_req_null_u { + struct uvn_desc_pre_desc_req_null { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_PRE_DESC_REQ_NULL_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_PRE_DESC_REQ_LACK_ADDR (0x2452cc) +#define NBL_UVN_DESC_PRE_DESC_REQ_LACK_DEPTH (1) +#define NBL_UVN_DESC_PRE_DESC_REQ_LACK_WIDTH (32) +#define NBL_UVN_DESC_PRE_DESC_REQ_LACK_DWLEN (1) +union uvn_desc_pre_desc_req_lack_u { + struct uvn_desc_pre_desc_req_lack { + u32 cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_UVN_DESC_PRE_DESC_REQ_LACK_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_ENTRY_ADDR (0x2452d0) +#define NBL_UVN_DESC_RD_ENTRY_DEPTH (1) +#define NBL_UVN_DESC_RD_ENTRY_WIDTH (32) +#define NBL_UVN_DESC_RD_ENTRY_DWLEN (1) +union uvn_desc_rd_entry_u { + struct uvn_desc_rd_entry { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_ENTRY_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DROP_QUEUE_DISABLE_ADDR (0x2452d4) +#define NBL_UVN_DESC_RD_DROP_QUEUE_DISABLE_DEPTH (1) +#define NBL_UVN_DESC_RD_DROP_QUEUE_DISABLE_WIDTH (32) +#define NBL_UVN_DESC_RD_DROP_QUEUE_DISABLE_DWLEN (1) +union uvn_desc_rd_drop_queue_disable_u { + struct uvn_desc_rd_drop_queue_disable { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DROP_QUEUE_DISABLE_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DROP_PKT_ILLEGAL_ADDR (0x2452d8) +#define NBL_UVN_DESC_RD_DROP_PKT_ILLEGAL_DEPTH (1) +#define NBL_UVN_DESC_RD_DROP_PKT_ILLEGAL_WIDTH (32) +#define NBL_UVN_DESC_RD_DROP_PKT_ILLEGAL_DWLEN (1) +union uvn_desc_rd_drop_pkt_illegal_u { + struct uvn_desc_rd_drop_pkt_illegal { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DROP_PKT_ILLEGAL_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DROP_QUEUE_ERR_ADDR (0x2452dc) +#define NBL_UVN_DESC_RD_DROP_QUEUE_ERR_DEPTH (1) +#define NBL_UVN_DESC_RD_DROP_QUEUE_ERR_WIDTH (32) +#define NBL_UVN_DESC_RD_DROP_QUEUE_ERR_DWLEN (1) +union uvn_desc_rd_drop_queue_err_u { + struct uvn_desc_rd_drop_queue_err { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DROP_QUEUE_ERR_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DROP_DESC_LACK_ADDR (0x2452e0) +#define NBL_UVN_DESC_RD_DROP_DESC_LACK_DEPTH (1) +#define NBL_UVN_DESC_RD_DROP_DESC_LACK_WIDTH (32) +#define NBL_UVN_DESC_RD_DROP_DESC_LACK_DWLEN (1) +union uvn_desc_rd_drop_desc_lack_u { + struct uvn_desc_rd_drop_desc_lack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DROP_DESC_LACK_DWLEN]; +} __packed; + +#define NBL_UVN_DESC_RD_DROP_DESC_GT_32_ADDR (0x2452e4) +#define NBL_UVN_DESC_RD_DROP_DESC_GT_32_DEPTH (1) +#define NBL_UVN_DESC_RD_DROP_DESC_GT_32_WIDTH (32) +#define NBL_UVN_DESC_RD_DROP_DESC_GT_32_DWLEN (1) +union uvn_desc_rd_drop_desc_gt_32_u { + struct uvn_desc_rd_drop_desc_gt_32 { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_DESC_RD_DROP_DESC_GT_32_DWLEN]; +} __packed; + +#define NBL_UVN_PKT_WR_EXPORT_ADDR (0x2452f0) +#define NBL_UVN_PKT_WR_EXPORT_DEPTH (1) +#define NBL_UVN_PKT_WR_EXPORT_WIDTH (32) +#define NBL_UVN_PKT_WR_EXPORT_DWLEN (1) +union uvn_pkt_wr_export_u { + struct uvn_pkt_wr_export { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_PKT_WR_EXPORT_DWLEN]; +} __packed; + +#define NBL_UVN_PKT_WR_DROP_ADDR (0x2452f4) +#define NBL_UVN_PKT_WR_DROP_DEPTH (1) +#define NBL_UVN_PKT_WR_DROP_WIDTH (32) +#define NBL_UVN_PKT_WR_DROP_DWLEN (1) +union uvn_pkt_wr_drop_u { + struct uvn_pkt_wr_drop { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_PKT_WR_DROP_DWLEN]; +} __packed; + +#define NBL_UVN_PKT_WR_DIF_ACK_ADDR (0x2452f8) +#define NBL_UVN_PKT_WR_DIF_ACK_DEPTH (1) +#define NBL_UVN_PKT_WR_DIF_ACK_WIDTH (32) +#define NBL_UVN_PKT_WR_DIF_ACK_DWLEN (1) +union uvn_pkt_wr_dif_ack_u { + struct uvn_pkt_wr_dif_ack { + u32 cnt:32; /* [31:0] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_UVN_PKT_WR_DIF_ACK_DWLEN]; +} __packed; + 
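Every block in this generated header follows one shape: a `*_u` union whose `info` struct names the bit fields of a single hardware register and whose `data` array carries the raw 32-bit words that actually cross the MMIO bus. As an illustration only (this sketch is not part of the patch; the helper names and the `hw_base` pointer, assumed to come from `pci_iomap()`, are hypothetical), a driver could consume two of the registers defined above like this -- the in-tree driver routes the same offsets through its own register-access layer instead:

/* Sketch only: "hw_base" is an assumed ioremapped register BAR. */
#include <linux/io.h>
#include <linux/types.h>

static u32 nbl_uvn_read_pkt_wr_drop(void __iomem *hw_base)
{
	union uvn_pkt_wr_drop_u reg;

	/* RCTR field: the read returns the count and clears it in hardware. */
	reg.data[0] = readl(hw_base + NBL_UVN_PKT_WR_DROP_ADDR);
	return reg.info.cnt;
}

static void nbl_uvn_set_dif_ro_flags(void __iomem *hw_base,
				     bool avail_rd, bool desc_rd)
{
	union uvn_dif_req_ro_flag_u reg;

	/*
	 * RW fields: read-modify-write through the named bit fields so the
	 * reserved (RO) bits are written back unchanged.
	 */
	reg.data[0] = readl(hw_base + NBL_UVN_DIF_REQ_RO_FLAG_ADDR);
	reg.info.avail_rd = avail_rd;
	reg.info.desc_rd = desc_rd;
	writel(reg.data[0], hw_base + NBL_UVN_DIF_REQ_RO_FLAG_ADDR);
}

The per-queue tables that follow extend the same pattern: their `_REG(r)` helpers step `r` entries of `DWLEN * 4` bytes from the table base, and multi-dword entries are moved one `data[]` word at a time.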
+#define NBL_UVN_PKT_WR_DIF_WEOB_ADDR (0x2452fc)
+#define NBL_UVN_PKT_WR_DIF_WEOB_DEPTH (1)
+#define NBL_UVN_PKT_WR_DIF_WEOB_WIDTH (32)
+#define NBL_UVN_PKT_WR_DIF_WEOB_DWLEN (1)
+union uvn_pkt_wr_dif_weob_u {
+	struct uvn_pkt_wr_dif_weob {
+		u32 cnt:32; /* [31:0] Default:0x0 RCTR */
+	} __packed info;
+	u32 data[NBL_UVN_PKT_WR_DIF_WEOB_DWLEN];
+} __packed;
+
+#define NBL_UVN_DESC_WR_DIF_ACK_ADDR (0x245300)
+#define NBL_UVN_DESC_WR_DIF_ACK_DEPTH (1)
+#define NBL_UVN_DESC_WR_DIF_ACK_WIDTH (32)
+#define NBL_UVN_DESC_WR_DIF_ACK_DWLEN (1)
+union uvn_desc_wr_dif_ack_u {
+	struct uvn_desc_wr_dif_ack {
+		u32 cnt:32; /* [31:0] Default:0x0 RCTR */
+	} __packed info;
+	u32 data[NBL_UVN_DESC_WR_DIF_ACK_DWLEN];
+} __packed;
+
+#define NBL_UVN_DESC_WR_DIF_WEOB_ADDR (0x245304)
+#define NBL_UVN_DESC_WR_DIF_WEOB_DEPTH (1)
+#define NBL_UVN_DESC_WR_DIF_WEOB_WIDTH (32)
+#define NBL_UVN_DESC_WR_DIF_WEOB_DWLEN (1)
+union uvn_desc_wr_dif_weob_u {
+	struct uvn_desc_wr_dif_weob {
+		u32 cnt:32; /* [31:0] Default:0x0 RCTR */
+	} __packed info;
+	u32 data[NBL_UVN_DESC_WR_DIF_WEOB_DWLEN];
+} __packed;
+
+#define NBL_UVN_INT_ERROR_ADDR (0x246000)
+#define NBL_UVN_INT_ERROR_DEPTH (1)
+#define NBL_UVN_INT_ERROR_WIDTH (32)
+#define NBL_UVN_INT_ERROR_DWLEN (1)
+union uvn_int_error_u {
+	struct uvn_int_error {
+		u32 status:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_INT_ERROR_DWLEN];
+} __packed;
+
+#define NBL_UVN_SHOW_WARNING_ADDR (0x246004)
+#define NBL_UVN_SHOW_WARNING_DEPTH (1)
+#define NBL_UVN_SHOW_WARNING_WIDTH (32)
+#define NBL_UVN_SHOW_WARNING_DWLEN (1)
+union uvn_show_warning_u {
+	struct uvn_show_warning {
+		u32 status:9; /* [8:0] Default:0x0 RO */
+		u32 rsv:23; /* [31:9] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_SHOW_WARNING_DWLEN];
+} __packed;
+
+#define NBL_UVN_RAM_ERR_ADDR (0x246008)
+#define NBL_UVN_RAM_ERR_DEPTH (1)
+#define NBL_UVN_RAM_ERR_WIDTH (32)
+#define NBL_UVN_RAM_ERR_DWLEN (1)
+union uvn_ram_err_u {
+	struct uvn_ram_err {
+		u32 status:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_UVN_QUEUE_TABLE_ADDR (0x254000)
+#define NBL_UVN_QUEUE_TABLE_DEPTH (2048)
+#define NBL_UVN_QUEUE_TABLE_WIDTH (256)
+#define NBL_UVN_QUEUE_TABLE_DWLEN (8)
+union uvn_queue_table_u {
+	struct uvn_queue_table {
+		u32 used_baddr_arr[2]; /* [63:0] Default:0x0 RW */
+		u32 avail_baddr_arr[2]; /* [127:64] Default:0x0 RW */
+		u32 queue_baddr_arr[2]; /* [191:128] Default:0x0 RW */
+		u32 queue_size_mask_pow:4; /* [195:192] Default:0x0 RW */
+		u32 queue_type:1; /* [196] Default:0x0 RW */
+		u32 queue_enable:1; /* [197] Default:0x0 RW */
+		u32 extend_header_en:1; /* [198] Default:0x0 RW */
+		u32 guest_csum_en:1; /* [199] Default:0x0 RW */
+		u32 half_offload_en:1; /* [200] Default:0x0 RW */
+		u32 rsv_l:32; /* [232:201] Default:0x0 RO */
+		u32 rsv_h:23; /* [255:233] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_QUEUE_TABLE_DWLEN];
+} __packed;
+#define NBL_UVN_QUEUE_TABLE_REG(r) (NBL_UVN_QUEUE_TABLE_ADDR + \
+	(NBL_UVN_QUEUE_TABLE_DWLEN * 4) * (r))
+
+#define NBL_UVN_QUEUE_CXT_ADDR (0x264000)
+#define NBL_UVN_QUEUE_CXT_DEPTH (2048)
+#define NBL_UVN_QUEUE_CXT_WIDTH (128)
+#define NBL_UVN_QUEUE_CXT_DWLEN (4)
+union uvn_queue_cxt_u {
+	struct uvn_queue_cxt {
+		u32 queue_head:16; /* [15:0] Default:0x0 RW */
+		u32 wrap_count:1; /* [16] Default:0x1 RW */
+		u32 queue_err:1; /* [17] Default:0x0 RW */
+		u32 prefetch_null_cnt:2; /* [19:18] Default:0x0 RW */
+		u32 ntf_finish:1; /* [20] Default:0x0 RW */
+		u32 spnd_flag:1; /* [21] Default:0x0 RW */
+		u32 reserve0:10; /* [31:22] Default:0x0 RO */
+		u32 avail_idx:16; /* [47:32] Default:0x0 RO */
+		u32 avail_idx_spnd_flag:1; /* [48] Default:0x0 RO */
+		u32 reserve1:15; /* [63:49] Default:0x0 RO */
+		u32 reserve1_arr[2]; /* [127:64] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_QUEUE_CXT_DWLEN];
+} __packed;
+#define NBL_UVN_QUEUE_CXT_REG(r) (NBL_UVN_QUEUE_CXT_ADDR + \
+	(NBL_UVN_QUEUE_CXT_DWLEN * 4) * (r))
+
+#define NBL_UVN_DESC_CXT_ADDR (0x26c000)
+#define NBL_UVN_DESC_CXT_DEPTH (2048)
+#define NBL_UVN_DESC_CXT_WIDTH (128)
+#define NBL_UVN_DESC_CXT_DWLEN (4)
+union uvn_desc_cxt_u {
+	struct uvn_desc_cxt {
+		u32 cache_head:9; /* [8:0] Default:0x0 RO */
+		u32 reserve0:7; /* [15:9] Default:0x0 RO */
+		u32 cache_tail:9; /* [24:16] Default:0x0 RO */
+		u32 reserve1:7; /* [31:25] Default:0x0 RO */
+		u32 cache_pref_num_prev:9; /* [40:32] Default:0x0 RO */
+		u32 reserve2:7; /* [47:41] Default:0x0 RO */
+		u32 cache_pref_num_post:9; /* [56:48] Default:0x0 RO */
+		u32 reserve3:7; /* [63:57] Default:0x0 RO */
+		u32 cache_head_byte:30; /* [93:64] Default:0x0 RO */
+		u32 reserve4:2; /* [95:94] Default:0x0 RO */
+		u32 cache_tail_byte:30; /* [125:96] Default:0x0 RO */
+		u32 reserve5:2; /* [127:126] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_DESC_CXT_DWLEN];
+} __packed;
+#define NBL_UVN_DESC_CXT_REG(r) (NBL_UVN_DESC_CXT_ADDR + \
+	(NBL_UVN_DESC_CXT_DWLEN * 4) * (r))
+
+#define NBL_UVN_STATIS_DESC_PRE_ADDR (0x274000)
+#define NBL_UVN_STATIS_DESC_PRE_DEPTH (2048)
+#define NBL_UVN_STATIS_DESC_PRE_WIDTH (32)
+#define NBL_UVN_STATIS_DESC_PRE_DWLEN (1)
+union uvn_statis_desc_pre_u {
+	struct uvn_statis_desc_pre {
+		u32 cnt:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_STATIS_DESC_PRE_DWLEN];
+} __packed;
+#define NBL_UVN_STATIS_DESC_PRE_REG(r) (NBL_UVN_STATIS_DESC_PRE_ADDR + \
+	(NBL_UVN_STATIS_DESC_PRE_DWLEN * 4) * (r))
+
+#define NBL_UVN_STATIS_DESC_WB_ADDR (0x276000)
+#define NBL_UVN_STATIS_DESC_WB_DEPTH (2048)
+#define NBL_UVN_STATIS_DESC_WB_WIDTH (32)
+#define NBL_UVN_STATIS_DESC_WB_DWLEN (1)
+union uvn_statis_desc_wb_u {
+	struct uvn_statis_desc_wb {
+		u32 cnt:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_STATIS_DESC_WB_DWLEN];
+} __packed;
+#define NBL_UVN_STATIS_DESC_WB_REG(r) (NBL_UVN_STATIS_DESC_WB_ADDR + \
+	(NBL_UVN_STATIS_DESC_WB_DWLEN * 4) * (r))
+
+#define NBL_UVN_STATIS_PKT_IN_ADDR (0x278000)
+#define NBL_UVN_STATIS_PKT_IN_DEPTH (2048)
+#define NBL_UVN_STATIS_PKT_IN_WIDTH (32)
+#define NBL_UVN_STATIS_PKT_IN_DWLEN (1)
+union uvn_statis_pkt_in_u {
+	struct uvn_statis_pkt_in {
+		u32 cnt:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_STATIS_PKT_IN_DWLEN];
+} __packed;
+#define NBL_UVN_STATIS_PKT_IN_REG(r) (NBL_UVN_STATIS_PKT_IN_ADDR + \
+	(NBL_UVN_STATIS_PKT_IN_DWLEN * 4) * (r))
+
+#define NBL_UVN_STATIS_PKT_OUT_ADDR (0x27a000)
+#define NBL_UVN_STATIS_PKT_OUT_DEPTH (2048)
+#define NBL_UVN_STATIS_PKT_OUT_WIDTH (32)
+#define NBL_UVN_STATIS_PKT_OUT_DWLEN (1)
+union uvn_statis_pkt_out_u {
+	struct uvn_statis_pkt_out {
+		u32 cnt:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_STATIS_PKT_OUT_DWLEN];
+} __packed;
+#define NBL_UVN_STATIS_PKT_OUT_REG(r) (NBL_UVN_STATIS_PKT_OUT_ADDR + \
+	(NBL_UVN_STATIS_PKT_OUT_DWLEN * 4) * (r))
+
+#define NBL_UVN_STATIS_PKT_DROP_ADDR (0x27c000)
+#define NBL_UVN_STATIS_PKT_DROP_DEPTH (2048)
+#define NBL_UVN_STATIS_PKT_DROP_WIDTH (32)
+#define NBL_UVN_STATIS_PKT_DROP_DWLEN (1)
+union uvn_statis_pkt_drop_u {
+	struct uvn_statis_pkt_drop {
+		u32 cnt:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UVN_STATIS_PKT_DROP_DWLEN];
+} __packed;
+#define NBL_UVN_STATIS_PKT_DROP_REG(r) (NBL_UVN_STATIS_PKT_DROP_ADDR + \
+	(NBL_UVN_STATIS_PKT_DROP_DWLEN * 4) * (r))
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8798808f93dee00a76a2e6bb01db7c538768d09
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf.h
@@ -0,0 +1,23 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#include "nbl_intf_cmdq.h"
+//#include "nbl_intf_mailbox.h"
+#include "nbl_intf_pcie_ecpu.h"
+#include "nbl_intf_pcie_host.h"
+#include "nbl_intf_ctrlq_host.h"
+#include "nbl_intf_ctrlq_ecpu.h"
+#include "nbl_intf_ctrlq_emp.h"
+#include "nbl_intf_vblk.h"
+#include "nbl_intf_vdpa.h"
+#include "nbl_intf_ptlp.h"
+#include "nbl_intf_pcompleter_host.h"
+#include "nbl_intf_ecpu_padpt.h"
+#include "nbl_intf_host_padpt.h"
+#include "nbl_intf_msgq_notify.h"
+#include "nbl_intf_msgq_aged.h"
+#include "nbl_intf_fifo_ch.h"
+#include "nbl_intf_host_pcap.h"
+#include "nbl_intf_ecpu_pcap.h"
+#include "nbl_intf_native_host.h"
+#include "nbl_intf_native_ecpu.h"
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b3442e5a5fceb706b03ea6154101d4380502f57
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_cmdq.h
@@ -0,0 +1,438 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_CMDQ_H
+#define NBL_CMDQ_H 1
+
+#include <linux/types.h>
+
+#define NBL_CMDQ_BASE (0x00FA0000)
+
+#define NBL_CMDQ_INT_STATUS_ADDR (0xfa0000)
+#define NBL_CMDQ_INT_STATUS_DEPTH (1)
+#define NBL_CMDQ_INT_STATUS_WIDTH (32)
+#define NBL_CMDQ_INT_STATUS_DWLEN (1)
+union cmdq_int_status_u {
+	struct cmdq_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */
+		u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */
+		u32 cif_err:1; /* [05:05] Default:0x0 RWC */
+		u32 rsv:26; /* [31:06] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CMDQ_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_CMDQ_INT_MASK_ADDR (0xfa0004)
+#define NBL_CMDQ_INT_MASK_DEPTH (1)
+#define NBL_CMDQ_INT_MASK_WIDTH (32)
+#define NBL_CMDQ_INT_MASK_DWLEN (1)
+union cmdq_int_mask_u {
+	struct cmdq_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */
+		u32 data_cor_err:1; /* [04:04] Default:0x0 RW */
+		u32 cif_err:1; /* [05:05] Default:0x0 RW */
+		u32 rsv:26; /* [31:06] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CMDQ_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_CMDQ_INT_SET_ADDR (0xfa0008)
+#define NBL_CMDQ_INT_SET_DEPTH (1)
+#define NBL_CMDQ_INT_SET_WIDTH (32)
+#define NBL_CMDQ_INT_SET_DWLEN (1)
+union cmdq_int_set_u {
+	struct cmdq_int_set {
+		u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+		u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */
+		u32 data_cor_err:1; /* [04:04] Default:0x0 WO */
+		u32 cif_err:1; /* [05:05] Default:0x0 WO */
+		u32 rsv:26; /* [31:06] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CMDQ_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_CMDQ_INIT_DONE_ADDR (0xfa000c)
+#define NBL_CMDQ_INIT_DONE_DEPTH (1)
+#define NBL_CMDQ_INIT_DONE_WIDTH (32)
+#define NBL_CMDQ_INIT_DONE_DWLEN (1)
+union cmdq_init_done_u {
+	struct cmdq_init_done {
+		u32 done:1; /* [00:00] Default:0x1 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CMDQ_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_CMDQ_WARNING_ADDR (0xfa0010)
+#define NBL_CMDQ_WARNING_DEPTH (1)
+#define NBL_CMDQ_WARNING_WIDTH (32)
+#define NBL_CMDQ_WARNING_DWLEN (1)
+union cmdq_warning_u {
+	struct cmdq_warning {
+		u32 ecpu_wr_seq_err:1; /* [00:00] Default:0x0 RO */
+		u32 ecpu_data_len_err:1; /* [01:01] Default:0x0 RO */
+		u32 ecpu_data_olen:1; /* [02:02] Default:0x0 RO */
+		u32 ecpu_desc_rdif_rerr:1; /* [03:03] Default:0x0 RO */
+		u32 ecpu_data_rdif_rerr:1; /* [04:04] Default:0x0 RO */
+		u32 host_wr_seq_err:1; /* [05:05] Default:0x0 RO */
+		u32 host_data_len_err:1; /* [06:06] Default:0x0 RO */
+		u32 host_data_olen:1; /* [07:07] Default:0x0 RO */
+		u32 host_desc_rdif_rerr:1; /* [08:08] Default:0x0 RO */
+		u32 host_data_rdif_rerr:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CMDQ_WARNING_DWLEN];
+} __packed;
+
+#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_ADDR (0xfa00ac)
+#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_DEPTH (1)
+#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_WIDTH (32)
+#define NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_DWLEN (1)
+union cmdq_ecpu_wr_seq_err_latch_u {
+	struct cmdq_ecpu_wr_seq_err_latch {
+		u32 err_latch:8; /*
[07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_WR_SEQ_ERR_LATCH_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_ADDR (0xfa00c8) +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_DEPTH (1) +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_WIDTH (32) +#define NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_DWLEN (1) +union cmdq_host_wr_seq_err_latch_u { + struct cmdq_host_wr_seq_err_latch { + u32 err_latch:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_WR_SEQ_ERR_LATCH_DWLEN]; +} __packed; + +#define NBL_CMDQ_CAR_CTRL_ADDR (0xfa00d0) +#define NBL_CMDQ_CAR_CTRL_DEPTH (1) +#define NBL_CMDQ_CAR_CTRL_WIDTH (32) +#define NBL_CMDQ_CAR_CTRL_DWLEN (1) +union cmdq_car_ctrl_u { + struct cmdq_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_CMDQ_FLOW_EN_ADDR (0xfa00d4) +#define NBL_CMDQ_FLOW_EN_DEPTH (1) +#define NBL_CMDQ_FLOW_EN_WIDTH (32) +#define NBL_CMDQ_FLOW_EN_DWLEN (1) +union cmdq_flow_en_u { + struct cmdq_flow_en { + u32 ecpu_desc_rdif_ack_cnt_en:1; /* [00:00] Default:0x1 RW */ + u32 ecpu_data_rdif_ack_cnt_en:1; /* [01:01] Default:0x1 RW */ + u32 ecpu_desc_wdif_ack_cnt_en:1; /* [02:02] Default:0x1 RW */ + u32 ecpu_data_wdif_ack_cnt_en:1; /* [03:03] Default:0x1 RW */ + u32 host_desc_rdif_ack_cnt_en:1; /* [04:04] Default:0x1 RW */ + u32 host_data_rdif_ack_cnt_en:1; /* [05:05] Default:0x1 RW */ + u32 host_desc_wdif_ack_cnt_en:1; /* [06:06] Default:0x1 RW */ + u32 host_data_wdif_ack_cnt_en:1; /* [07:07] Default:0x1 RW */ + u32 cmdq_inside_cnt_en:1; /* [08:08] Default:0x1 RW */ + u32 ecpu_cmdq_desc_rdif_rerr_cnt_en:1; /* [09:09] Default:0x1 RW */ + u32 ecpu_cmdq_data_rdif_rerr_cnt_en:1; /* [10:10] Default:0x1 RW */ + u32 ecpu_data_len_err_cnt_en:1; /* [11:11] Default:0x1 RW */ + u32 ecpu_data_olen_cnt_en:1; /* [12:12] Default:0x1 RW */ + u32 ecpu_wr_seq_err_cnt_en:1; /* [13:13] Default:0x1 RW */ + u32 ecpu_rst_cnt:1; /* [14:14] Default:0x1 RW */ + u32 host_cmdq_desc_rdif_rerr_cnt_en:1; /* [15:15] Default:0x1 RW */ + u32 host_cmdq_data_rdif_rerr_cnt_en:1; /* [16:16] Default:0x1 RW */ + u32 host_data_len_err_cnt_en:1; /* [17:17] Default:0x1 RW */ + u32 host_data_olen_cnt_en:1; /* [18:18] Default:0x1 RW */ + u32 host_wr_seq_err_cnt_en:1; /* [19:19] Default:0x1 RW */ + u32 host_rst_cnt:1; /* [20:20] Default:0x1 RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_FLOW_EN_DWLEN]; +} __packed; + +#define NBL_CMDQ_CIF_ERR_INFO_ADDR (0xfa00d8) +#define NBL_CMDQ_CIF_ERR_INFO_DEPTH (1) +#define NBL_CMDQ_CIF_ERR_INFO_WIDTH (32) +#define NBL_CMDQ_CIF_ERR_INFO_DWLEN (1) +union cmdq_cif_err_info_u { + struct cmdq_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_ADDR (0xfa1000) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_L_DWLEN (1) +union cmdq_ecpu_cmdq_baddr_l_u { + struct cmdq_ecpu_cmdq_baddr_l { + u32 baddr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 
data[NBL_CMDQ_ECPU_CMDQ_BADDR_L_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_ADDR (0xfa1004) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_BADDR_H_DWLEN (1) +union cmdq_ecpu_cmdq_baddr_h_u { + struct cmdq_ecpu_cmdq_baddr_h { + u32 baddr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_BADDR_H_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_SIZE_ADDR (0xfa1008) +#define NBL_CMDQ_ECPU_CMDQ_SIZE_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_SIZE_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_SIZE_DWLEN (1) +union cmdq_ecpu_cmdq_size_u { + struct cmdq_ecpu_cmdq_size { + u32 cmdq_size:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_SIZE_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_CURR_ADDR (0xfa100c) +#define NBL_CMDQ_ECPU_CMDQ_CURR_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_CURR_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_CURR_DWLEN (1) +union cmdq_ecpu_cmdq_curr_u { + struct cmdq_ecpu_cmdq_curr { + u32 cmdq_curr:17; /* [16:00] Default:0x0 RO */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_CURR_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_TAIL_ADDR (0xfa1010) +#define NBL_CMDQ_ECPU_CMDQ_TAIL_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_TAIL_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_TAIL_DWLEN (1) +union cmdq_ecpu_cmdq_tail_u { + struct cmdq_ecpu_cmdq_tail { + u32 cmdq_tail:17; /* [16:00] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_TAIL_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_EN_ADDR (0xfa1014) +#define NBL_CMDQ_ECPU_CMDQ_EN_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_EN_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_EN_DWLEN (1) +union cmdq_ecpu_cmdq_en_u { + struct cmdq_ecpu_cmdq_en { + u32 cmdq_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_EN_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_ADDR (0xfa1018) +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_MODE_DWLEN (1) +union cmdq_ecpu_cmdq_dif_mode_u { + struct cmdq_ecpu_cmdq_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_ADDR (0xfa101c) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INFO_DWLEN (1) +union cmdq_ecpu_cmdq_dif_info_u { + struct cmdq_ecpu_cmdq_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_INFO_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_ADDR (0xfa1020) +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_BDF_DWLEN (1) +union cmdq_ecpu_cmdq_dif_bdf_u { + struct cmdq_ecpu_cmdq_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_ADDR (0xfa1024) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_DEPTH (1) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_WIDTH (32) +#define NBL_CMDQ_ECPU_CMDQ_DIF_INT_DWLEN 
(1) +union cmdq_ecpu_cmdq_dif_int_u { + struct cmdq_ecpu_cmdq_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_ECPU_CMDQ_DIF_INT_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_ADDR (0xfa1100) +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_BADDR_L_DWLEN (1) +union cmdq_host_cmdq_baddr_l_u { + struct cmdq_host_cmdq_baddr_l { + u32 cmdq_baddr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_BADDR_L_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_ADDR (0xfa1104) +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_BADDR_H_DWLEN (1) +union cmdq_host_cmdq_baddr_h_u { + struct cmdq_host_cmdq_baddr_h { + u32 cmdq_baddr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_BADDR_H_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_SIZE_ADDR (0xfa1108) +#define NBL_CMDQ_HOST_CMDQ_SIZE_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_SIZE_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_SIZE_DWLEN (1) +union cmdq_host_cmdq_size_u { + struct cmdq_host_cmdq_size { + u32 cmdq_size:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_SIZE_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_CURR_ADDR (0xfa110c) +#define NBL_CMDQ_HOST_CMDQ_CURR_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_CURR_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_CURR_DWLEN (1) +union cmdq_host_cmdq_curr_u { + struct cmdq_host_cmdq_curr { + u32 cmdq_curr:17; /* [16:00] Default:0x0 RO */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_CURR_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_TAIL_ADDR (0xfa1110) +#define NBL_CMDQ_HOST_CMDQ_TAIL_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_TAIL_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_TAIL_DWLEN (1) +union cmdq_host_cmdq_tail_u { + struct cmdq_host_cmdq_tail { + u32 cmdq_tail:17; /* [16:00] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_TAIL_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_EN_ADDR (0xfa1114) +#define NBL_CMDQ_HOST_CMDQ_EN_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_EN_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_EN_DWLEN (1) +union cmdq_host_cmdq_en_u { + struct cmdq_host_cmdq_en { + u32 cmdq_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_EN_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_DIF_MODE_ADDR (0xfa1118) +#define NBL_CMDQ_HOST_CMDQ_DIF_MODE_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_MODE_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_DIF_MODE_DWLEN (1) +union cmdq_host_cmdq_dif_mode_u { + struct cmdq_host_cmdq_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_ADDR (0xfa111c) +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_DIF_INFO_DWLEN (1) +union cmdq_host_cmdq_dif_info_u { + struct cmdq_host_cmdq_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_INFO_DWLEN]; +} __packed; + +#define 
NBL_CMDQ_HOST_CMDQ_DIF_BDF_ADDR (0xfa1120) +#define NBL_CMDQ_HOST_CMDQ_DIF_BDF_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_BDF_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_DIF_BDF_DWLEN (1) +union cmdq_host_cmdq_dif_bdf_u { + struct cmdq_host_cmdq_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_ADDR (0xfa1124) +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_DEPTH (1) +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_WIDTH (32) +#define NBL_CMDQ_HOST_CMDQ_DIF_INT_DWLEN (1) +union cmdq_host_cmdq_dif_int_u { + struct cmdq_host_cmdq_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CMDQ_HOST_CMDQ_DIF_INT_DWLEN]; +} __packed; + +#define NBL_CMDQ_MDL_INFO_ADDR (0xfa1300) +#define NBL_CMDQ_MDL_INFO_DEPTH (1) +#define NBL_CMDQ_MDL_INFO_WIDTH (32) +#define NBL_CMDQ_MDL_INFO_DWLEN (1) +union cmdq_mdl_info_u { + struct cmdq_mdl_info { + u32 version_id:16; /* [15:00] Default:0x0001 RO */ + u32 prj_id:16; /* [31:16] Default:0x0020 RO */ + } __packed info; + u32 data[NBL_CMDQ_MDL_INFO_DWLEN]; +} __packed; + +#define NBL_CMDQ_VERSION_ADDR (0xfa1304) +#define NBL_CMDQ_VERSION_DEPTH (1) +#define NBL_CMDQ_VERSION_WIDTH (32) +#define NBL_CMDQ_VERSION_DWLEN (1) +union cmdq_version_u { + struct cmdq_version { + u32 date:32; /* [31:00] Default:0x20220803 RO */ + } __packed info; + u32 data[NBL_CMDQ_VERSION_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_ecpu.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_ecpu.h new file mode 100644 index 0000000000000000000000000000000000000000..929b45d6991a3b2a055dd5e5f0fd15cf6e801a7c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_ecpu.h @@ -0,0 +1,428 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_CTRLQ_ECPU_H
+#define NBL_CTRLQ_ECPU_H 1
+
+#include <linux/types.h>
+
+#define NBL_CTRLQ_ECPU_BASE (0x0108C000)
+
+#define NBL_CTRLQ_ECPU_INT_STATUS_ADDR (0x108c000)
+#define NBL_CTRLQ_ECPU_INT_STATUS_DEPTH (1)
+#define NBL_CTRLQ_ECPU_INT_STATUS_WIDTH (32)
+#define NBL_CTRLQ_ECPU_INT_STATUS_DWLEN (1)
+union ctrlq_ecpu_int_status_u {
+	struct ctrlq_ecpu_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_ECPU_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_ECPU_INT_MASK_ADDR (0x108c004)
+#define NBL_CTRLQ_ECPU_INT_MASK_DEPTH (1)
+#define NBL_CTRLQ_ECPU_INT_MASK_WIDTH (32)
+#define NBL_CTRLQ_ECPU_INT_MASK_DWLEN (1)
+union ctrlq_ecpu_int_mask_u {
+	struct ctrlq_ecpu_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_ECPU_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_ECPU_INT_SET_ADDR (0x108c008)
+#define NBL_CTRLQ_ECPU_INT_SET_DEPTH (1)
+#define NBL_CTRLQ_ECPU_INT_SET_WIDTH (32)
+#define NBL_CTRLQ_ECPU_INT_SET_DWLEN (1)
+union ctrlq_ecpu_int_set_u {
+	struct ctrlq_ecpu_int_set {
+		u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 WO */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_ECPU_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_ECPU_INIT_DONE_ADDR (0x108c00c)
+#define NBL_CTRLQ_ECPU_INIT_DONE_DEPTH (1)
+#define NBL_CTRLQ_ECPU_INIT_DONE_WIDTH (32)
+#define NBL_CTRLQ_ECPU_INIT_DONE_DWLEN (1)
+union ctrlq_ecpu_init_done_u {
+	struct ctrlq_ecpu_init_done {
+		u32 done:1; /* [00:00] Default:0x0 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_ECPU_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_ECPU_CIF_ERR_INFO_ADDR (0x108c040)
+#define NBL_CTRLQ_ECPU_CIF_ERR_INFO_DEPTH (1)
+#define NBL_CTRLQ_ECPU_CIF_ERR_INFO_WIDTH (32)
+#define NBL_CTRLQ_ECPU_CIF_ERR_INFO_DWLEN (1)
+union ctrlq_ecpu_cif_err_info_u {
+	struct ctrlq_ecpu_cif_err_info {
+		u32 addr:30; /* [29:00] Default:0x0 RO */
+		u32 wr_err:1; /* [30:30] Default:0x0 RO */
+		u32 ucor_err:1; /* [31:31] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_ECPU_CIF_ERR_INFO_DWLEN];
+} __packed;
+
+#define
NBL_CTRLQ_ECPU_CAR_CTRL_ADDR (0x108c100) +#define NBL_CTRLQ_ECPU_CAR_CTRL_DEPTH (1) +#define NBL_CTRLQ_ECPU_CAR_CTRL_WIDTH (32) +#define NBL_CTRLQ_ECPU_CAR_CTRL_DWLEN (1) +union ctrlq_ecpu_car_ctrl_u { + struct ctrlq_ecpu_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_QUEUE_SIZE_ADDR (0x108c200) +#define NBL_CTRLQ_ECPU_QUEUE_SIZE_DEPTH (1) +#define NBL_CTRLQ_ECPU_QUEUE_SIZE_WIDTH (32) +#define NBL_CTRLQ_ECPU_QUEUE_SIZE_DWLEN (1) +union ctrlq_ecpu_queue_size_u { + struct ctrlq_ecpu_queue_size { + u32 mask:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_QUEUE_SIZE_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_QUEUE_BASE_ADDR_ADDR (0x108c204) +#define NBL_CTRLQ_ECPU_QUEUE_BASE_ADDR_DEPTH (1) +#define NBL_CTRLQ_ECPU_QUEUE_BASE_ADDR_WIDTH (64) +#define NBL_CTRLQ_ECPU_QUEUE_BASE_ADDR_DWLEN (2) +union ctrlq_ecpu_queue_base_addr_u { + struct ctrlq_ecpu_queue_base_addr { + u32 val_arr[2]; /* [63:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_QUEUE_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_QUEUE_RESET_ADDR (0x108c20c) +#define NBL_CTRLQ_ECPU_QUEUE_RESET_DEPTH (1) +#define NBL_CTRLQ_ECPU_QUEUE_RESET_WIDTH (32) +#define NBL_CTRLQ_ECPU_QUEUE_RESET_DWLEN (1) +union ctrlq_ecpu_queue_reset_u { + struct ctrlq_ecpu_queue_reset { + u32 vld:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_QUEUE_RESET_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_QUEUE_NOTIFY_ADDR (0x108c210) +#define NBL_CTRLQ_ECPU_QUEUE_NOTIFY_DEPTH (1) +#define NBL_CTRLQ_ECPU_QUEUE_NOTIFY_WIDTH (32) +#define NBL_CTRLQ_ECPU_QUEUE_NOTIFY_DWLEN (1) +union ctrlq_ecpu_queue_notify_u { + struct ctrlq_ecpu_queue_notify { + u32 vld:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_QUEUE_NOTIFY_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_BDF_ADDR (0x108c214) +#define NBL_CTRLQ_ECPU_BDF_DEPTH (1) +#define NBL_CTRLQ_ECPU_BDF_WIDTH (32) +#define NBL_CTRLQ_ECPU_BDF_DWLEN (1) +union ctrlq_ecpu_bdf_u { + struct ctrlq_ecpu_bdf { + u32 bdf:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_BDF_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_INTTERUPT_ADDR (0x108c218) +#define NBL_CTRLQ_ECPU_INTTERUPT_DEPTH (1) +#define NBL_CTRLQ_ECPU_INTTERUPT_WIDTH (32) +#define NBL_CTRLQ_ECPU_INTTERUPT_DWLEN (1) +union ctrlq_ecpu_intterupt_u { + struct ctrlq_ecpu_intterupt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_INTTERUPT_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_MSIX_ADDR (0x108c21c) +#define NBL_CTRLQ_ECPU_MSIX_DEPTH (1) +#define NBL_CTRLQ_ECPU_MSIX_WIDTH (32) +#define NBL_CTRLQ_ECPU_MSIX_DWLEN (1) +union ctrlq_ecpu_msix_u { + struct ctrlq_ecpu_msix { + u32 idx:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_MSIX_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_QUEUE_STATE_BUSY_ADDR (0x108c900) +#define NBL_CTRLQ_ECPU_QUEUE_STATE_BUSY_DEPTH (1) +#define NBL_CTRLQ_ECPU_QUEUE_STATE_BUSY_WIDTH (32) +#define 
NBL_CTRLQ_ECPU_QUEUE_STATE_BUSY_DWLEN (1) +union ctrlq_ecpu_queue_state_busy_u { + struct ctrlq_ecpu_queue_state_busy { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_QUEUE_STATE_BUSY_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC0_ADDR (0x108c914) +#define NBL_CTRLQ_ECPU_DQDESC_DESC0_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC0_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC0_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc0_u { + struct ctrlq_ecpu_dqdesc_desc0 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC0_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC1_ADDR (0x108c918) +#define NBL_CTRLQ_ECPU_DQDESC_DESC1_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC1_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC1_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc1_u { + struct ctrlq_ecpu_dqdesc_desc1 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC1_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC2_ADDR (0x108c91c) +#define NBL_CTRLQ_ECPU_DQDESC_DESC2_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC2_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC2_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc2_u { + struct ctrlq_ecpu_dqdesc_desc2 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC2_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC3_ADDR (0x108c920) +#define NBL_CTRLQ_ECPU_DQDESC_DESC3_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC3_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC3_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc3_u { + struct ctrlq_ecpu_dqdesc_desc3 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC3_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC4_ADDR (0x108c924) +#define NBL_CTRLQ_ECPU_DQDESC_DESC4_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC4_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC4_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc4_u { + struct ctrlq_ecpu_dqdesc_desc4 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC4_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC5_ADDR (0x108c928) +#define NBL_CTRLQ_ECPU_DQDESC_DESC5_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC5_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC5_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc5_u { + struct ctrlq_ecpu_dqdesc_desc5 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC5_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC6_ADDR (0x108c92c) +#define NBL_CTRLQ_ECPU_DQDESC_DESC6_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC6_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC6_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc6_u { + struct ctrlq_ecpu_dqdesc_desc6 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC6_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_DQDESC_DESC7_ADDR (0x108c930) +#define NBL_CTRLQ_ECPU_DQDESC_DESC7_DEPTH (1) +#define NBL_CTRLQ_ECPU_DQDESC_DESC7_WIDTH (32) +#define NBL_CTRLQ_ECPU_DQDESC_DESC7_DWLEN (1) +union ctrlq_ecpu_dqdesc_desc7_u { + struct ctrlq_ecpu_dqdesc_desc7 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_DQDESC_DESC7_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_H2EDMA_RD_ACK_REQ_ADDR (0x108c938) +#define NBL_CTRLQ_ECPU_H2EDMA_RD_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_ECPU_H2EDMA_RD_ACK_REQ_WIDTH (32) +#define 
NBL_CTRLQ_ECPU_H2EDMA_RD_ACK_REQ_DWLEN (1) +union ctrlq_ecpu_h2edma_rd_ack_req_u { + struct ctrlq_ecpu_h2edma_rd_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_H2EDMA_RD_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_H2EDMA_RD_RVLD_EOB_ADDR (0x108c93c) +#define NBL_CTRLQ_ECPU_H2EDMA_RD_RVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_ECPU_H2EDMA_RD_RVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_ECPU_H2EDMA_RD_RVLD_EOB_DWLEN (1) +union ctrlq_ecpu_h2edma_rd_rvld_eob_u { + struct ctrlq_ecpu_h2edma_rd_rvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_H2EDMA_RD_RVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_H2EDMA_WR_ACK_REQ_ADDR (0x108c940) +#define NBL_CTRLQ_ECPU_H2EDMA_WR_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_ECPU_H2EDMA_WR_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_ECPU_H2EDMA_WR_ACK_REQ_DWLEN (1) +union ctrlq_ecpu_h2edma_wr_ack_req_u { + struct ctrlq_ecpu_h2edma_wr_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_H2EDMA_WR_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_H2EDMA_WR_WVLD_EOB_ADDR (0x108c944) +#define NBL_CTRLQ_ECPU_H2EDMA_WR_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_ECPU_H2EDMA_WR_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_ECPU_H2EDMA_WR_WVLD_EOB_DWLEN (1) +union ctrlq_ecpu_h2edma_wr_wvld_eob_u { + struct ctrlq_ecpu_h2edma_wr_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_H2EDMA_WR_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_ACK_REQ_ADDR (0x108c948) +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_ACK_REQ_DWLEN (1) +union ctrlq_ecpu_h2edma_wr1_ack_req_u { + struct ctrlq_ecpu_h2edma_wr1_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_H2EDMA_WR1_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_WVLD_EOB_ADDR (0x108c94c) +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_ECPU_H2EDMA_WR1_WVLD_EOB_DWLEN (1) +union ctrlq_ecpu_h2edma_wr1_wvld_eob_u { + struct ctrlq_ecpu_h2edma_wr1_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_H2EDMA_WR1_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_E2HDMA_RD_ACK_REQ_ADDR (0x108c958) +#define NBL_CTRLQ_ECPU_E2HDMA_RD_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_ECPU_E2HDMA_RD_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_ECPU_E2HDMA_RD_ACK_REQ_DWLEN (1) +union ctrlq_ecpu_e2hdma_rd_ack_req_u { + struct ctrlq_ecpu_e2hdma_rd_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_E2HDMA_RD_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_E2HDMA_RD_RVLD_EOB_ADDR (0x108c95c) +#define NBL_CTRLQ_ECPU_E2HDMA_RD_RVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_ECPU_E2HDMA_RD_RVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_ECPU_E2HDMA_RD_RVLD_EOB_DWLEN (1) +union ctrlq_ecpu_e2hdma_rd_rvld_eob_u { + struct ctrlq_ecpu_e2hdma_rd_rvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_E2HDMA_RD_RVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_E2HDMA_WR_ACK_REQ_ADDR (0x108c960) +#define NBL_CTRLQ_ECPU_E2HDMA_WR_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_ECPU_E2HDMA_WR_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_ECPU_E2HDMA_WR_ACK_REQ_DWLEN (1) +union ctrlq_ecpu_e2hdma_wr_ack_req_u { + struct ctrlq_ecpu_e2hdma_wr_ack_req 
{ + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_E2HDMA_WR_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_E2HDMA_WR_WVLD_EOB_ADDR (0x108c964) +#define NBL_CTRLQ_ECPU_E2HDMA_WR_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_ECPU_E2HDMA_WR_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_ECPU_E2HDMA_WR_WVLD_EOB_DWLEN (1) +union ctrlq_ecpu_e2hdma_wr_wvld_eob_u { + struct ctrlq_ecpu_e2hdma_wr_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_E2HDMA_WR_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_ACK_REQ_ADDR (0x108c968) +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_ACK_REQ_DWLEN (1) +union ctrlq_ecpu_e2hdma_wr1_ack_req_u { + struct ctrlq_ecpu_e2hdma_wr1_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_E2HDMA_WR1_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_WVLD_EOB_ADDR (0x108c96c) +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_ECPU_E2HDMA_WR1_WVLD_EOB_DWLEN (1) +union ctrlq_ecpu_e2hdma_wr1_wvld_eob_u { + struct ctrlq_ecpu_e2hdma_wr1_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_ECPU_E2HDMA_WR1_WVLD_EOB_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_emp.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_emp.h new file mode 100644 index 0000000000000000000000000000000000000000..9c1828ed41e3b541059e5e83e81a334bff499af6 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_emp.h @@ -0,0 +1,530 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_CTRLQ_EMP_H
+#define NBL_CTRLQ_EMP_H 1
+
+#include <linux/types.h>
+
+#define NBL_CTRLQ_EMP_BASE (0x00F90000)
+
+#define NBL_CTRLQ_EMP_INT_STATUS_ADDR (0xf90000)
+#define NBL_CTRLQ_EMP_INT_STATUS_DEPTH (1)
+#define NBL_CTRLQ_EMP_INT_STATUS_WIDTH (32)
+#define NBL_CTRLQ_EMP_INT_STATUS_DWLEN (1)
+union ctrlq_emp_int_status_u {
+	struct ctrlq_emp_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_EMP_INT_MASK_ADDR (0xf90004)
+#define NBL_CTRLQ_EMP_INT_MASK_DEPTH (1)
+#define NBL_CTRLQ_EMP_INT_MASK_WIDTH (32)
+#define NBL_CTRLQ_EMP_INT_MASK_DWLEN (1)
+union ctrlq_emp_int_mask_u {
+	struct ctrlq_emp_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_EMP_INT_SET_ADDR (0xf90008)
+#define NBL_CTRLQ_EMP_INT_SET_DEPTH (1)
+#define NBL_CTRLQ_EMP_INT_SET_WIDTH (32)
+#define NBL_CTRLQ_EMP_INT_SET_DWLEN (1)
+union ctrlq_emp_int_set_u {
+	struct ctrlq_emp_int_set {
+		u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 WO */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_EMP_INIT_DONE_ADDR (0xf9000c)
+#define NBL_CTRLQ_EMP_INIT_DONE_DEPTH (1)
+#define NBL_CTRLQ_EMP_INIT_DONE_WIDTH (32)
+#define NBL_CTRLQ_EMP_INIT_DONE_DWLEN (1)
+union ctrlq_emp_init_done_u {
+	struct ctrlq_emp_init_done {
+		u32 done:1; /* [00:00] Default:0x0 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_EMP_CIF_ERR_INFO_ADDR (0xf90040)
+#define NBL_CTRLQ_EMP_CIF_ERR_INFO_DEPTH (1)
+#define NBL_CTRLQ_EMP_CIF_ERR_INFO_WIDTH (32)
+#define NBL_CTRLQ_EMP_CIF_ERR_INFO_DWLEN (1)
+union ctrlq_emp_cif_err_info_u {
+	struct ctrlq_emp_cif_err_info {
+		u32 addr:30; /* [29:00] Default:0x0 RO */
+		u32 wr_err:1; /* [30:30] Default:0x0 RO */
+		u32 ucor_err:1; /* [31:31] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_CIF_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_EMP_CAR_CTRL_ADDR (0xf90100)
+#define NBL_CTRLQ_EMP_CAR_CTRL_DEPTH (1) +#define NBL_CTRLQ_EMP_CAR_CTRL_WIDTH (32) +#define NBL_CTRLQ_EMP_CAR_CTRL_DWLEN (1) +union ctrlq_emp_car_ctrl_u { + struct ctrlq_emp_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_QUEUE_SIZE_ADDR (0xf90200) +#define NBL_CTRLQ_EMP_QUEUE_SIZE_DEPTH (1) +#define NBL_CTRLQ_EMP_QUEUE_SIZE_WIDTH (32) +#define NBL_CTRLQ_EMP_QUEUE_SIZE_DWLEN (1) +union ctrlq_emp_queue_size_u { + struct ctrlq_emp_queue_size { + u32 mask:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_QUEUE_SIZE_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_QUEUE_BASE_ADDR_ADDR (0xf90204) +#define NBL_CTRLQ_EMP_QUEUE_BASE_ADDR_DEPTH (1) +#define NBL_CTRLQ_EMP_QUEUE_BASE_ADDR_WIDTH (64) +#define NBL_CTRLQ_EMP_QUEUE_BASE_ADDR_DWLEN (2) +union ctrlq_emp_queue_base_addr_u { + struct ctrlq_emp_queue_base_addr { + u32 val_arr[2]; /* [63:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_QUEUE_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_QUEUE_RESET_ADDR (0xf9020c) +#define NBL_CTRLQ_EMP_QUEUE_RESET_DEPTH (1) +#define NBL_CTRLQ_EMP_QUEUE_RESET_WIDTH (32) +#define NBL_CTRLQ_EMP_QUEUE_RESET_DWLEN (1) +union ctrlq_emp_queue_reset_u { + struct ctrlq_emp_queue_reset { + u32 vld:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_QUEUE_RESET_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_QUEUE_NOTIFY_ADDR (0xf90210) +#define NBL_CTRLQ_EMP_QUEUE_NOTIFY_DEPTH (1) +#define NBL_CTRLQ_EMP_QUEUE_NOTIFY_WIDTH (32) +#define NBL_CTRLQ_EMP_QUEUE_NOTIFY_DWLEN (1) +union ctrlq_emp_queue_notify_u { + struct ctrlq_emp_queue_notify { + u32 vld:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_QUEUE_NOTIFY_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_BDF_ADDR (0xf90214) +#define NBL_CTRLQ_EMP_BDF_DEPTH (1) +#define NBL_CTRLQ_EMP_BDF_WIDTH (32) +#define NBL_CTRLQ_EMP_BDF_DWLEN (1) +union ctrlq_emp_bdf_u { + struct ctrlq_emp_bdf { + u32 bdf:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_BDF_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_INTTERUPT_ADDR (0xf90218) +#define NBL_CTRLQ_EMP_INTTERUPT_DEPTH (1) +#define NBL_CTRLQ_EMP_INTTERUPT_WIDTH (32) +#define NBL_CTRLQ_EMP_INTTERUPT_DWLEN (1) +union ctrlq_emp_intterupt_u { + struct ctrlq_emp_intterupt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_INTTERUPT_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_MSIX_ADDR (0xf9021c) +#define NBL_CTRLQ_EMP_MSIX_DEPTH (1) +#define NBL_CTRLQ_EMP_MSIX_WIDTH (32) +#define NBL_CTRLQ_EMP_MSIX_DWLEN (1) +union ctrlq_emp_msix_u { + struct ctrlq_emp_msix { + u32 idx:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_MSIX_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_MODE_ADDR (0xf90220) +#define NBL_CTRLQ_EMP_MODE_DEPTH (1) +#define NBL_CTRLQ_EMP_MODE_WIDTH (32) +#define NBL_CTRLQ_EMP_MODE_DWLEN (1) +union ctrlq_emp_mode_u { + struct ctrlq_emp_mode { + u32 sel:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_MODE_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_INT_CLR_ADDR (0xf90224) +#define NBL_CTRLQ_EMP_INT_CLR_DEPTH (1) +#define NBL_CTRLQ_EMP_INT_CLR_WIDTH (32) +#define NBL_CTRLQ_EMP_INT_CLR_DWLEN (1) +union ctrlq_emp_int_clr_u { + struct ctrlq_emp_int_clr { + u32 en:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_INT_CLR_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_AXI_TIMEOUT_ADDR (0xf9022c) +#define NBL_CTRLQ_EMP_AXI_TIMEOUT_DEPTH (1) +#define NBL_CTRLQ_EMP_AXI_TIMEOUT_WIDTH (32) +#define NBL_CTRLQ_EMP_AXI_TIMEOUT_DWLEN (1) +union ctrlq_emp_axi_timeout_u { + struct ctrlq_emp_axi_timeout { + u32 val:32; /* [31:0] Default:0x5fffff RW */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_AXI_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_QUEUE_STATE_BUSY_ADDR (0xf90900) +#define NBL_CTRLQ_EMP_QUEUE_STATE_BUSY_DEPTH (1) +#define NBL_CTRLQ_EMP_QUEUE_STATE_BUSY_WIDTH (32) +#define NBL_CTRLQ_EMP_QUEUE_STATE_BUSY_DWLEN (1) +union ctrlq_emp_queue_state_busy_u { + struct ctrlq_emp_queue_state_busy { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_QUEUE_STATE_BUSY_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_DQDESC_DESC0_ADDR (0xf90914) +#define NBL_CTRLQ_EMP_DQDESC_DESC0_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC0_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC0_DWLEN (1) +union ctrlq_emp_dqdesc_desc0_u { + struct ctrlq_emp_dqdesc_desc0 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC0_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_DQDESC_DESC1_ADDR (0xf90918) +#define NBL_CTRLQ_EMP_DQDESC_DESC1_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC1_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC1_DWLEN (1) +union ctrlq_emp_dqdesc_desc1_u { + struct ctrlq_emp_dqdesc_desc1 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC1_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_DQDESC_DESC2_ADDR (0xf9091c) +#define NBL_CTRLQ_EMP_DQDESC_DESC2_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC2_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC2_DWLEN (1) +union ctrlq_emp_dqdesc_desc2_u { + struct ctrlq_emp_dqdesc_desc2 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC2_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_DQDESC_DESC3_ADDR (0xf90920) +#define NBL_CTRLQ_EMP_DQDESC_DESC3_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC3_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC3_DWLEN (1) +union ctrlq_emp_dqdesc_desc3_u { + struct ctrlq_emp_dqdesc_desc3 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC3_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_DQDESC_DESC4_ADDR (0xf90924) +#define NBL_CTRLQ_EMP_DQDESC_DESC4_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC4_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC4_DWLEN (1) +union ctrlq_emp_dqdesc_desc4_u { + struct ctrlq_emp_dqdesc_desc4 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC4_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_DQDESC_DESC5_ADDR (0xf90928) +#define NBL_CTRLQ_EMP_DQDESC_DESC5_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC5_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC5_DWLEN (1) +union ctrlq_emp_dqdesc_desc5_u { + struct ctrlq_emp_dqdesc_desc5 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC5_DWLEN]; +} __packed; + 
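+/*
+ * Illustrative usage sketch added for review purposes; it is not part of
+ * the generated register map. Every register in these headers is a union
+ * whose `info` member names the bitfields and whose `data` member is the
+ * raw dword image written to the *_ADDR offset. A caller would typically
+ * fill the bitfields and write out the dwords, roughly as below. The
+ * `hw_base` parameter and the use of writel()/lower_32_bits()/
+ * upper_32_bits() (which need <linux/io.h> and <linux/kernel.h>) are
+ * assumptions for illustration, not the driver's actual accessor API.
+ */
+static inline void nbl_ctrlq_emp_cfg_queue_example(u8 __iomem *hw_base,
+						   u64 ring_dma, u16 size_mask)
+{
+	union ctrlq_emp_queue_base_addr_u base = { 0 };
+	union ctrlq_emp_queue_size_u size = { 0 };
+
+	/* The 64-bit ring base spans the two dwords of QUEUE_BASE_ADDR */
+	base.info.val_arr[0] = lower_32_bits(ring_dma);
+	base.info.val_arr[1] = upper_32_bits(ring_dma);
+	writel(base.data[0], hw_base + NBL_CTRLQ_EMP_QUEUE_BASE_ADDR_ADDR);
+	writel(base.data[1], hw_base + NBL_CTRLQ_EMP_QUEUE_BASE_ADDR_ADDR + 4);
+
+	/* The queue size mask occupies bits [15:0]; the rest is reserved */
+	size.info.mask = size_mask;
+	writel(size.data[0], hw_base + NBL_CTRLQ_EMP_QUEUE_SIZE_ADDR);
+}
+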
+#define NBL_CTRLQ_EMP_DQDESC_DESC6_ADDR (0xf9092c) +#define NBL_CTRLQ_EMP_DQDESC_DESC6_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC6_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC6_DWLEN (1) +union ctrlq_emp_dqdesc_desc6_u { + struct ctrlq_emp_dqdesc_desc6 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC6_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_DQDESC_DESC7_ADDR (0xf90930) +#define NBL_CTRLQ_EMP_DQDESC_DESC7_DEPTH (1) +#define NBL_CTRLQ_EMP_DQDESC_DESC7_WIDTH (32) +#define NBL_CTRLQ_EMP_DQDESC_DESC7_DWLEN (1) +union ctrlq_emp_dqdesc_desc7_u { + struct ctrlq_emp_dqdesc_desc7 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_DQDESC_DESC7_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_H2EDMA_RD_ACK_REQ_ADDR (0xf90938) +#define NBL_CTRLQ_EMP_H2EDMA_RD_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_EMP_H2EDMA_RD_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_EMP_H2EDMA_RD_ACK_REQ_DWLEN (1) +union ctrlq_emp_h2edma_rd_ack_req_u { + struct ctrlq_emp_h2edma_rd_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_H2EDMA_RD_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_H2EDMA_RD_RVLD_EOB_ADDR (0xf9093c) +#define NBL_CTRLQ_EMP_H2EDMA_RD_RVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_EMP_H2EDMA_RD_RVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_EMP_H2EDMA_RD_RVLD_EOB_DWLEN (1) +union ctrlq_emp_h2edma_rd_rvld_eob_u { + struct ctrlq_emp_h2edma_rd_rvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_H2EDMA_RD_RVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_H2EDMA_WR_ACK_REQ_ADDR (0xf90940) +#define NBL_CTRLQ_EMP_H2EDMA_WR_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_EMP_H2EDMA_WR_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_EMP_H2EDMA_WR_ACK_REQ_DWLEN (1) +union ctrlq_emp_h2edma_wr_ack_req_u { + struct ctrlq_emp_h2edma_wr_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_H2EDMA_WR_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_H2EDMA_WR_WVLD_EOB_ADDR (0xf90944) +#define NBL_CTRLQ_EMP_H2EDMA_WR_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_EMP_H2EDMA_WR_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_EMP_H2EDMA_WR_WVLD_EOB_DWLEN (1) +union ctrlq_emp_h2edma_wr_wvld_eob_u { + struct ctrlq_emp_h2edma_wr_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_H2EDMA_WR_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_H2EDMA_WR1_ACK_REQ_ADDR (0xf90948) +#define NBL_CTRLQ_EMP_H2EDMA_WR1_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_EMP_H2EDMA_WR1_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_EMP_H2EDMA_WR1_ACK_REQ_DWLEN (1) +union ctrlq_emp_h2edma_wr1_ack_req_u { + struct ctrlq_emp_h2edma_wr1_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_H2EDMA_WR1_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_H2EDMA_WR1_WVLD_EOB_ADDR (0xf9094c) +#define NBL_CTRLQ_EMP_H2EDMA_WR1_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_EMP_H2EDMA_WR1_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_EMP_H2EDMA_WR1_WVLD_EOB_DWLEN (1) +union ctrlq_emp_h2edma_wr1_wvld_eob_u { + struct ctrlq_emp_h2edma_wr1_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_H2EDMA_WR1_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_E2HDMA_RD_ACK_REQ_ADDR (0xf90958) +#define NBL_CTRLQ_EMP_E2HDMA_RD_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_EMP_E2HDMA_RD_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_EMP_E2HDMA_RD_ACK_REQ_DWLEN (1) +union ctrlq_emp_e2hdma_rd_ack_req_u { + struct 
ctrlq_emp_e2hdma_rd_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_E2HDMA_RD_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_E2HDMA_RD_RVLD_EOB_ADDR (0xf9095c) +#define NBL_CTRLQ_EMP_E2HDMA_RD_RVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_EMP_E2HDMA_RD_RVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_EMP_E2HDMA_RD_RVLD_EOB_DWLEN (1) +union ctrlq_emp_e2hdma_rd_rvld_eob_u { + struct ctrlq_emp_e2hdma_rd_rvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_E2HDMA_RD_RVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_E2HDMA_WR_ACK_REQ_ADDR (0xf90960) +#define NBL_CTRLQ_EMP_E2HDMA_WR_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_EMP_E2HDMA_WR_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_EMP_E2HDMA_WR_ACK_REQ_DWLEN (1) +union ctrlq_emp_e2hdma_wr_ack_req_u { + struct ctrlq_emp_e2hdma_wr_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_E2HDMA_WR_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_E2HDMA_WR_WVLD_EOB_ADDR (0xf90964) +#define NBL_CTRLQ_EMP_E2HDMA_WR_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_EMP_E2HDMA_WR_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_EMP_E2HDMA_WR_WVLD_EOB_DWLEN (1) +union ctrlq_emp_e2hdma_wr_wvld_eob_u { + struct ctrlq_emp_e2hdma_wr_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_E2HDMA_WR_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_E2HDMA_WR1_ACK_REQ_ADDR (0xf90968) +#define NBL_CTRLQ_EMP_E2HDMA_WR1_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_EMP_E2HDMA_WR1_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_EMP_E2HDMA_WR1_ACK_REQ_DWLEN (1) +union ctrlq_emp_e2hdma_wr1_ack_req_u { + struct ctrlq_emp_e2hdma_wr1_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_E2HDMA_WR1_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_E2HDMA_WR1_WVLD_EOB_ADDR (0xf9096c) +#define NBL_CTRLQ_EMP_E2HDMA_WR1_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_EMP_E2HDMA_WR1_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_EMP_E2HDMA_WR1_WVLD_EOB_DWLEN (1) +union ctrlq_emp_e2hdma_wr1_wvld_eob_u { + struct ctrlq_emp_e2hdma_wr1_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_E2HDMA_WR1_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_WDIF_ERR_ADDR (0xf90a00) +#define NBL_CTRLQ_EMP_WDIF_ERR_DEPTH (1) +#define NBL_CTRLQ_EMP_WDIF_ERR_WIDTH (32) +#define NBL_CTRLQ_EMP_WDIF_ERR_DWLEN (1) +union ctrlq_emp_wdif_err_u { + struct ctrlq_emp_wdif_err { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_WDIF_ERR_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_WDIF_ERR_INFO_ADDR (0xf90a04) +#define NBL_CTRLQ_EMP_WDIF_ERR_INFO_DEPTH (1) +#define NBL_CTRLQ_EMP_WDIF_ERR_INFO_WIDTH (32) +#define NBL_CTRLQ_EMP_WDIF_ERR_INFO_DWLEN (1) +union ctrlq_emp_wdif_err_info_u { + struct ctrlq_emp_wdif_err_info { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_WDIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_WDIF_DEBUG_ADDR (0xf90a20) +#define NBL_CTRLQ_EMP_WDIF_DEBUG_DEPTH (1) +#define NBL_CTRLQ_EMP_WDIF_DEBUG_WIDTH (32) +#define NBL_CTRLQ_EMP_WDIF_DEBUG_DWLEN (1) +union ctrlq_emp_wdif_debug_u { + struct ctrlq_emp_wdif_debug { + u32 bdg:32; /* [31:0] Default:0x1 RO */ + } __packed info; + u32 data[NBL_CTRLQ_EMP_WDIF_DEBUG_DWLEN]; +} __packed; + +#define NBL_CTRLQ_EMP_RDIF_ERR_ADDR (0xf90a24) +#define NBL_CTRLQ_EMP_RDIF_ERR_DEPTH (1) +#define NBL_CTRLQ_EMP_RDIF_ERR_WIDTH (32) +#define NBL_CTRLQ_EMP_RDIF_ERR_DWLEN (1) 
+union ctrlq_emp_rdif_err_u {
+	struct ctrlq_emp_rdif_err {
+		u32 bdg:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_RDIF_ERR_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_EMP_RDIF_DEBUG_INFO_ADDR (0xf90a48)
+#define NBL_CTRLQ_EMP_RDIF_DEBUG_INFO_DEPTH (1)
+#define NBL_CTRLQ_EMP_RDIF_DEBUG_INFO_WIDTH (32)
+#define NBL_CTRLQ_EMP_RDIF_DEBUG_INFO_DWLEN (1)
+union ctrlq_emp_rdif_debug_info_u {
+	struct ctrlq_emp_rdif_debug_info {
+		u32 bdg:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_RDIF_DEBUG_INFO_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_EMP_DIF2AXI_ERR_ADDR (0xf90a54)
+#define NBL_CTRLQ_EMP_DIF2AXI_ERR_DEPTH (1)
+#define NBL_CTRLQ_EMP_DIF2AXI_ERR_WIDTH (32)
+#define NBL_CTRLQ_EMP_DIF2AXI_ERR_DWLEN (1)
+union ctrlq_emp_dif2axi_err_u {
+	struct ctrlq_emp_dif2axi_err {
+		u32 axi_bresp:16; /* [15:00] Default:0x0 RO */
+		u32 axi_rresp:16; /* [31:16] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_EMP_DIF2AXI_ERR_DWLEN];
+} __packed;
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_host.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_host.h
new file mode 100644
index 0000000000000000000000000000000000000000..5820057e8116711ea28cc32fa4e6d3bb30952eb2
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ctrlq_host.h
@@ -0,0 +1,428 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_CTRLQ_HOST_H
+#define NBL_CTRLQ_HOST_H 1
+
+#include <linux/types.h>
+
+#define NBL_CTRLQ_HOST_BASE (0x00F8C000)
+
+#define NBL_CTRLQ_HOST_INT_STATUS_ADDR (0xf8c000)
+#define NBL_CTRLQ_HOST_INT_STATUS_DEPTH (1)
+#define NBL_CTRLQ_HOST_INT_STATUS_WIDTH (32)
+#define NBL_CTRLQ_HOST_INT_STATUS_DWLEN (1)
+union ctrlq_host_int_status_u {
+	struct ctrlq_host_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_HOST_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_HOST_INT_MASK_ADDR (0xf8c004)
+#define NBL_CTRLQ_HOST_INT_MASK_DEPTH (1)
+#define NBL_CTRLQ_HOST_INT_MASK_WIDTH (32)
+#define NBL_CTRLQ_HOST_INT_MASK_DWLEN (1)
+union ctrlq_host_int_mask_u {
+	struct ctrlq_host_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 rsv5:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv4:1; /* [05:05] Default:0x0 RO */
+		u32 rsv3:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 rsv2:1; /* [08:08] Default:0x0 RO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_CTRLQ_HOST_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_CTRLQ_HOST_INT_SET_ADDR (0xf8c008)
+#define NBL_CTRLQ_HOST_INT_SET_DEPTH (1)
+#define NBL_CTRLQ_HOST_INT_SET_WIDTH (32)
+#define NBL_CTRLQ_HOST_INT_SET_DWLEN (1)
+union ctrlq_host_int_set_u {
+	struct ctrlq_host_int_set {
+
u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv5:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv4:1; /* [05:05] Default:0x0 RO */ + u32 rsv3:1; /* [06:06] Default:0x0 RO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_INT_SET_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_INIT_DONE_ADDR (0xf8c00c) +#define NBL_CTRLQ_HOST_INIT_DONE_DEPTH (1) +#define NBL_CTRLQ_HOST_INIT_DONE_WIDTH (32) +#define NBL_CTRLQ_HOST_INIT_DONE_DWLEN (1) +union ctrlq_host_init_done_u { + struct ctrlq_host_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_CIF_ERR_INFO_ADDR (0xf8c040) +#define NBL_CTRLQ_HOST_CIF_ERR_INFO_DEPTH (1) +#define NBL_CTRLQ_HOST_CIF_ERR_INFO_WIDTH (32) +#define NBL_CTRLQ_HOST_CIF_ERR_INFO_DWLEN (1) +union ctrlq_host_cif_err_info_u { + struct ctrlq_host_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_CAR_CTRL_ADDR (0xf8c100) +#define NBL_CTRLQ_HOST_CAR_CTRL_DEPTH (1) +#define NBL_CTRLQ_HOST_CAR_CTRL_WIDTH (32) +#define NBL_CTRLQ_HOST_CAR_CTRL_DWLEN (1) +union ctrlq_host_car_ctrl_u { + struct ctrlq_host_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_QUEUE_SIZE_ADDR (0xf8c200) +#define NBL_CTRLQ_HOST_QUEUE_SIZE_DEPTH (1) +#define NBL_CTRLQ_HOST_QUEUE_SIZE_WIDTH (32) +#define NBL_CTRLQ_HOST_QUEUE_SIZE_DWLEN (1) +union ctrlq_host_queue_size_u { + struct ctrlq_host_queue_size { + u32 mask:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_QUEUE_SIZE_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_QUEUE_BASE_ADDR_ADDR (0xf8c204) +#define NBL_CTRLQ_HOST_QUEUE_BASE_ADDR_DEPTH (1) +#define NBL_CTRLQ_HOST_QUEUE_BASE_ADDR_WIDTH (64) +#define NBL_CTRLQ_HOST_QUEUE_BASE_ADDR_DWLEN (2) +union ctrlq_host_queue_base_addr_u { + struct ctrlq_host_queue_base_addr { + u32 val_arr[2]; /* [63:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_QUEUE_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_QUEUE_RESET_ADDR (0xf8c20c) +#define NBL_CTRLQ_HOST_QUEUE_RESET_DEPTH (1) +#define NBL_CTRLQ_HOST_QUEUE_RESET_WIDTH (32) +#define NBL_CTRLQ_HOST_QUEUE_RESET_DWLEN (1) +union ctrlq_host_queue_reset_u { + struct ctrlq_host_queue_reset { + u32 vld:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_QUEUE_RESET_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_QUEUE_NOTIFY_ADDR (0xf8c210) +#define NBL_CTRLQ_HOST_QUEUE_NOTIFY_DEPTH (1) +#define NBL_CTRLQ_HOST_QUEUE_NOTIFY_WIDTH (32) +#define NBL_CTRLQ_HOST_QUEUE_NOTIFY_DWLEN (1) +union ctrlq_host_queue_notify_u { + struct ctrlq_host_queue_notify { + u32 
vld:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_QUEUE_NOTIFY_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_BDF_ADDR (0xf8c214) +#define NBL_CTRLQ_HOST_BDF_DEPTH (1) +#define NBL_CTRLQ_HOST_BDF_WIDTH (32) +#define NBL_CTRLQ_HOST_BDF_DWLEN (1) +union ctrlq_host_bdf_u { + struct ctrlq_host_bdf { + u32 bdf:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_BDF_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_INTTERUPT_ADDR (0xf8c218) +#define NBL_CTRLQ_HOST_INTTERUPT_DEPTH (1) +#define NBL_CTRLQ_HOST_INTTERUPT_WIDTH (32) +#define NBL_CTRLQ_HOST_INTTERUPT_DWLEN (1) +union ctrlq_host_intterupt_u { + struct ctrlq_host_intterupt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_INTTERUPT_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_MSIX_ADDR (0xf8c21c) +#define NBL_CTRLQ_HOST_MSIX_DEPTH (1) +#define NBL_CTRLQ_HOST_MSIX_WIDTH (32) +#define NBL_CTRLQ_HOST_MSIX_DWLEN (1) +union ctrlq_host_msix_u { + struct ctrlq_host_msix { + u32 idx:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_MSIX_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_QUEUE_STATE_BUSY_ADDR (0xf8c900) +#define NBL_CTRLQ_HOST_QUEUE_STATE_BUSY_DEPTH (1) +#define NBL_CTRLQ_HOST_QUEUE_STATE_BUSY_WIDTH (32) +#define NBL_CTRLQ_HOST_QUEUE_STATE_BUSY_DWLEN (1) +union ctrlq_host_queue_state_busy_u { + struct ctrlq_host_queue_state_busy { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_QUEUE_STATE_BUSY_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC0_ADDR (0xf8c914) +#define NBL_CTRLQ_HOST_DQDESC_DESC0_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC0_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC0_DWLEN (1) +union ctrlq_host_dqdesc_desc0_u { + struct ctrlq_host_dqdesc_desc0 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC0_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC1_ADDR (0xf8c918) +#define NBL_CTRLQ_HOST_DQDESC_DESC1_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC1_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC1_DWLEN (1) +union ctrlq_host_dqdesc_desc1_u { + struct ctrlq_host_dqdesc_desc1 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC1_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC2_ADDR (0xf8c91c) +#define NBL_CTRLQ_HOST_DQDESC_DESC2_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC2_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC2_DWLEN (1) +union ctrlq_host_dqdesc_desc2_u { + struct ctrlq_host_dqdesc_desc2 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC2_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC3_ADDR (0xf8c920) +#define NBL_CTRLQ_HOST_DQDESC_DESC3_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC3_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC3_DWLEN (1) +union ctrlq_host_dqdesc_desc3_u { + struct ctrlq_host_dqdesc_desc3 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC3_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC4_ADDR (0xf8c924) +#define NBL_CTRLQ_HOST_DQDESC_DESC4_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC4_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC4_DWLEN (1) +union ctrlq_host_dqdesc_desc4_u { + struct ctrlq_host_dqdesc_desc4 { + u32 bdg:32; 
/* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC4_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC5_ADDR (0xf8c928) +#define NBL_CTRLQ_HOST_DQDESC_DESC5_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC5_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC5_DWLEN (1) +union ctrlq_host_dqdesc_desc5_u { + struct ctrlq_host_dqdesc_desc5 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC5_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC6_ADDR (0xf8c92c) +#define NBL_CTRLQ_HOST_DQDESC_DESC6_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC6_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC6_DWLEN (1) +union ctrlq_host_dqdesc_desc6_u { + struct ctrlq_host_dqdesc_desc6 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC6_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_DQDESC_DESC7_ADDR (0xf8c930) +#define NBL_CTRLQ_HOST_DQDESC_DESC7_DEPTH (1) +#define NBL_CTRLQ_HOST_DQDESC_DESC7_WIDTH (32) +#define NBL_CTRLQ_HOST_DQDESC_DESC7_DWLEN (1) +union ctrlq_host_dqdesc_desc7_u { + struct ctrlq_host_dqdesc_desc7 { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_DQDESC_DESC7_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_H2EDMA_RD_ACK_REQ_ADDR (0xf8c938) +#define NBL_CTRLQ_HOST_H2EDMA_RD_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_HOST_H2EDMA_RD_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_HOST_H2EDMA_RD_ACK_REQ_DWLEN (1) +union ctrlq_host_h2edma_rd_ack_req_u { + struct ctrlq_host_h2edma_rd_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_H2EDMA_RD_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_H2EDMA_RD_RVLD_EOB_ADDR (0xf8c93c) +#define NBL_CTRLQ_HOST_H2EDMA_RD_RVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_HOST_H2EDMA_RD_RVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_HOST_H2EDMA_RD_RVLD_EOB_DWLEN (1) +union ctrlq_host_h2edma_rd_rvld_eob_u { + struct ctrlq_host_h2edma_rd_rvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_H2EDMA_RD_RVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_H2EDMA_WR_ACK_REQ_ADDR (0xf8c940) +#define NBL_CTRLQ_HOST_H2EDMA_WR_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_HOST_H2EDMA_WR_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_HOST_H2EDMA_WR_ACK_REQ_DWLEN (1) +union ctrlq_host_h2edma_wr_ack_req_u { + struct ctrlq_host_h2edma_wr_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_H2EDMA_WR_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_H2EDMA_WR_WVLD_EOB_ADDR (0xf8c944) +#define NBL_CTRLQ_HOST_H2EDMA_WR_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_HOST_H2EDMA_WR_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_HOST_H2EDMA_WR_WVLD_EOB_DWLEN (1) +union ctrlq_host_h2edma_wr_wvld_eob_u { + struct ctrlq_host_h2edma_wr_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_H2EDMA_WR_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_H2EDMA_WR1_ACK_REQ_ADDR (0xf8c948) +#define NBL_CTRLQ_HOST_H2EDMA_WR1_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_HOST_H2EDMA_WR1_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_HOST_H2EDMA_WR1_ACK_REQ_DWLEN (1) +union ctrlq_host_h2edma_wr1_ack_req_u { + struct ctrlq_host_h2edma_wr1_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_H2EDMA_WR1_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_H2EDMA_WR1_WVLD_EOB_ADDR (0xf8c94c) +#define NBL_CTRLQ_HOST_H2EDMA_WR1_WVLD_EOB_DEPTH (1) +#define 
NBL_CTRLQ_HOST_H2EDMA_WR1_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_HOST_H2EDMA_WR1_WVLD_EOB_DWLEN (1) +union ctrlq_host_h2edma_wr1_wvld_eob_u { + struct ctrlq_host_h2edma_wr1_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_H2EDMA_WR1_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_E2HDMA_RD_ACK_REQ_ADDR (0xf8c958) +#define NBL_CTRLQ_HOST_E2HDMA_RD_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_HOST_E2HDMA_RD_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_HOST_E2HDMA_RD_ACK_REQ_DWLEN (1) +union ctrlq_host_e2hdma_rd_ack_req_u { + struct ctrlq_host_e2hdma_rd_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_E2HDMA_RD_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_E2HDMA_RD_RVLD_EOB_ADDR (0xf8c95c) +#define NBL_CTRLQ_HOST_E2HDMA_RD_RVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_HOST_E2HDMA_RD_RVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_HOST_E2HDMA_RD_RVLD_EOB_DWLEN (1) +union ctrlq_host_e2hdma_rd_rvld_eob_u { + struct ctrlq_host_e2hdma_rd_rvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_E2HDMA_RD_RVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_E2HDMA_WR_ACK_REQ_ADDR (0xf8c960) +#define NBL_CTRLQ_HOST_E2HDMA_WR_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_HOST_E2HDMA_WR_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_HOST_E2HDMA_WR_ACK_REQ_DWLEN (1) +union ctrlq_host_e2hdma_wr_ack_req_u { + struct ctrlq_host_e2hdma_wr_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_E2HDMA_WR_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_E2HDMA_WR_WVLD_EOB_ADDR (0xf8c964) +#define NBL_CTRLQ_HOST_E2HDMA_WR_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_HOST_E2HDMA_WR_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_HOST_E2HDMA_WR_WVLD_EOB_DWLEN (1) +union ctrlq_host_e2hdma_wr_wvld_eob_u { + struct ctrlq_host_e2hdma_wr_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_E2HDMA_WR_WVLD_EOB_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_E2HDMA_WR1_ACK_REQ_ADDR (0xf8c968) +#define NBL_CTRLQ_HOST_E2HDMA_WR1_ACK_REQ_DEPTH (1) +#define NBL_CTRLQ_HOST_E2HDMA_WR1_ACK_REQ_WIDTH (32) +#define NBL_CTRLQ_HOST_E2HDMA_WR1_ACK_REQ_DWLEN (1) +union ctrlq_host_e2hdma_wr1_ack_req_u { + struct ctrlq_host_e2hdma_wr1_ack_req { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_E2HDMA_WR1_ACK_REQ_DWLEN]; +} __packed; + +#define NBL_CTRLQ_HOST_E2HDMA_WR1_WVLD_EOB_ADDR (0xf8c96c) +#define NBL_CTRLQ_HOST_E2HDMA_WR1_WVLD_EOB_DEPTH (1) +#define NBL_CTRLQ_HOST_E2HDMA_WR1_WVLD_EOB_WIDTH (32) +#define NBL_CTRLQ_HOST_E2HDMA_WR1_WVLD_EOB_DWLEN (1) +union ctrlq_host_e2hdma_wr1_wvld_eob_u { + struct ctrlq_host_e2hdma_wr1_wvld_eob { + u32 bdg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CTRLQ_HOST_E2HDMA_WR1_WVLD_EOB_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ecpu_padpt.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ecpu_padpt.h new file mode 100644 index 0000000000000000000000000000000000000000..32f90eca1bae0788465a1abbd31bb35a143f6bdd --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ecpu_padpt.h @@ -0,0 +1,2999 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_ECPU_PADPT_H
+#define NBL_ECPU_PADPT_H 1
+
+#include <linux/types.h>
+
+#define NBL_ECPU_PADPT_BASE (0x0104C000)
+
+#define NBL_ECPU_PADPT_INT_STATUS_ADDR (0x104c000)
+#define NBL_ECPU_PADPT_INT_STATUS_DEPTH (1)
+#define NBL_ECPU_PADPT_INT_STATUS_WIDTH (32)
+#define NBL_ECPU_PADPT_INT_STATUS_DWLEN (1)
+union ecpu_padpt_int_status_u {
+	struct ecpu_padpt_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 rsv2:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_ECPU_PADPT_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PADPT_INT_MASK_ADDR (0x104c004)
+#define NBL_ECPU_PADPT_INT_MASK_DEPTH (1)
+#define NBL_ECPU_PADPT_INT_MASK_WIDTH (32)
+#define NBL_ECPU_PADPT_INT_MASK_DWLEN (1)
+union ecpu_padpt_int_mask_u {
+	struct ecpu_padpt_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 rsv2:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RW */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_ECPU_PADPT_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PADPT_INT_SET_ADDR (0x104c008)
+#define NBL_ECPU_PADPT_INT_SET_DEPTH (1)
+#define NBL_ECPU_PADPT_INT_SET_WIDTH (32)
+#define NBL_ECPU_PADPT_INT_SET_DWLEN (1)
+union ecpu_padpt_int_set_u {
+	struct ecpu_padpt_int_set {
+		u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 WO */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 rsv2:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 WO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_ECPU_PADPT_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PADPT_INIT_DONE_ADDR (0x104c00c)
+#define NBL_ECPU_PADPT_INIT_DONE_DEPTH (1)
+#define NBL_ECPU_PADPT_INIT_DONE_WIDTH (32)
+#define NBL_ECPU_PADPT_INIT_DONE_DWLEN (1)
+union ecpu_padpt_init_done_u {
+	struct ecpu_padpt_init_done {
+		u32 init_done:1; /* [00:00] Default:0x0 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_ECPU_PADPT_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PADPT_CIF_ERR_INFO_ADDR (0x104c040)
+#define NBL_ECPU_PADPT_CIF_ERR_INFO_DEPTH (1)
+#define NBL_ECPU_PADPT_CIF_ERR_INFO_WIDTH (32)
+#define NBL_ECPU_PADPT_CIF_ERR_INFO_DWLEN (1)
+union ecpu_padpt_cif_err_info_u {
+	struct ecpu_padpt_cif_err_info {
+		u32 addr:30; /* [29:00] Default:0x0 RO */
+		u32 wr_err:1; /* [30:30] Default:0x0 RO */
+		u32 ucor_err:1; /* [31:31] Default:0x0 RO */
+	} __packed info;
+	u32
data[NBL_ECPU_PADPT_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_RAM_ERROR_ADDR (0x104c050) +#define NBL_ECPU_PADPT_RAM_ERROR_DEPTH (1) +#define NBL_ECPU_PADPT_RAM_ERROR_WIDTH (32) +#define NBL_ECPU_PADPT_RAM_ERROR_DWLEN (1) +union ecpu_padpt_ram_error_u { + struct ecpu_padpt_ram_error { + u32 dif0_rd_info_fifo:1; /* [00] Default:0x0 RC */ + u32 dif1_rd_info_fifo:1; /* [01] Default:0x0 RC */ + u32 dif2_rd_info_fifo:1; /* [02] Default:0x0 RC */ + u32 dif3_rd_info_fifo:1; /* [03] Default:0x0 RC */ + u32 dif0_wr_info_fifo:1; /* [04] Default:0x0 RC */ + u32 dif0_wr_data_fifo:1; /* [05] Default:0x0 RC */ + u32 dif1_wr_info_fifo:1; /* [06] Default:0x0 RC */ + u32 dif1_wr_data_fifo:1; /* [07] Default:0x0 RC */ + u32 dif2_wr_info_fifo:1; /* [08] Default:0x0 RC */ + u32 dif2_wr_data_fifo:1; /* [09] Default:0x0 RC */ + u32 dif3_wr_info_fifo:1; /* [10] Default:0x0 RC */ + u32 dif3_wr_data_fifo:1; /* [11] Default:0x0 RC */ + u32 rdma_dif0_rw_info_fifo:1; /* [12] Default:0x0 RC */ + u32 rdma_dif0_rw_data_fifo:1; /* [13] Default:0x0 RC */ + u32 rdma_dif1_rw_info_fifo:1; /* [14] Default:0x0 RC */ + u32 rdma_dif1_rw_data_fifo:1; /* [15] Default:0x0 RC */ + u32 wr_mux_dif_info_fifo:1; /* [16] Default:0x0 RC */ + u32 wr_mux_dif_data_fifo:1; /* [17] Default:0x0 RC */ + u32 rd_mux_dif_info_fifo:1; /* [18] Default:0x0 RC */ + u32 rw_mux_dif_info_fifo:1; /* [19] Default:0x0 RC */ + u32 rw_mux_dif_data_fifo:1; /* [20] Default:0x0 RC */ + u32 spl_tlp_info_fifo:1; /* [21] Default:0x0 RC */ + u32 spl_tag_fifo:1; /* [22] Default:0x0 RC */ + u32 spl_sel_fifo:1; /* [23] Default:0x0 RC */ + u32 spl_sel_tag_fifo:1; /* [24] Default:0x0 RC */ + u32 spl_msix_fifo:1; /* [25] Default:0x0 RC */ + u32 jon_rdif_data_fifo:1; /* [26] Default:0x0 RC */ + u32 jon_rdif_info_fifo:1; /* [27] Default:0x0 RC */ + u32 msix_data_fifo:1; /* [28] Default:0x0 RC */ + u32 msix_info_fifo:1; /* [29] Default:0x0 RC */ + u32 tlp_info_fifo:1; /* [30] Default:0x0 RC */ + u32 tlp_data_fifo:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_ECPU_PADPT_RAM_ERROR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_TABLE_RAM_ERROR_ADDR (0x104c080) +#define NBL_ECPU_PADPT_TABLE_RAM_ERROR_DEPTH (1) +#define NBL_ECPU_PADPT_TABLE_RAM_ERROR_WIDTH (32) +#define NBL_ECPU_PADPT_TABLE_RAM_ERROR_DWLEN (1) +union ecpu_padpt_table_ram_error_u { + struct ecpu_padpt_table_ram_error { + u32 vnet_qinfo_ram:1; /* [00] Default:0x0 RC */ + u32 vblk_qinfo_ram:1; /* [01] Default:0x0 RC */ + u32 msix_ctrl_ram:1; /* [02] Default:0x0 RC */ + u32 msix_pba_ram:1; /* [03] Default:0x0 RC */ + u32 msix_intrl_info_ram:1; /* [04] Default:0x0 RC */ + u32 msix_intrl_bdf_ram:1; /* [05] Default:0x0 RC */ + u32 msix_intrl_ram:1; /* [06] Default:0x0 RC */ + u32 cpl_dmux_high_0_ram:1; /* [07] Default:0x0 RC */ + u32 cpl_dmux_high_1_ram:1; /* [08] Default:0x0 RC */ + u32 cpl_dmux_low_0_ram:1; /* [09] Default:0x0 RC */ + u32 cpl_dmux_low_1_ram:1; /* [10] Default:0x0 RC */ + u32 cpl_dmux_tag_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_TABLE_RAM_ERROR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_TABLE_RAM_SBITERR_ADDR (0x104c084) +#define NBL_ECPU_PADPT_TABLE_RAM_SBITERR_DEPTH (1) +#define NBL_ECPU_PADPT_TABLE_RAM_SBITERR_WIDTH (32) +#define NBL_ECPU_PADPT_TABLE_RAM_SBITERR_DWLEN (1) +union ecpu_padpt_table_ram_sbiterr_u { + struct ecpu_padpt_table_ram_sbiterr { + u32 vnet_qinfo_ram:1; /* [00] Default:0x0 RC */ + u32 vblk_qinfo_ram:1; /* [01] Default:0x0 RC */ + u32 msix_ctrl_ram:1; /* [02] 
Default:0x0 RC */ + u32 msix_pba_ram:1; /* [03] Default:0x0 RC */ + u32 msix_intrl_info_ram:1; /* [04] Default:0x0 RC */ + u32 msix_intrl_bdf_ram:1; /* [05] Default:0x0 RC */ + u32 msix_intrl_ram:1; /* [06] Default:0x0 RC */ + u32 cpl_dmux_high_0_ram:1; /* [07] Default:0x0 RC */ + u32 cpl_dmux_high_1_ram:1; /* [08] Default:0x0 RC */ + u32 cpl_dmux_low_0_ram:1; /* [09] Default:0x0 RC */ + u32 cpl_dmux_low_1_ram:1; /* [10] Default:0x0 RC */ + u32 cpl_dmux_tag_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_TABLE_RAM_SBITERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_TABLE_RAM_DBITERR_ADDR (0x104c088) +#define NBL_ECPU_PADPT_TABLE_RAM_DBITERR_DEPTH (1) +#define NBL_ECPU_PADPT_TABLE_RAM_DBITERR_WIDTH (32) +#define NBL_ECPU_PADPT_TABLE_RAM_DBITERR_DWLEN (1) +union ecpu_padpt_table_ram_dbiterr_u { + struct ecpu_padpt_table_ram_dbiterr { + u32 vnet_qinfo_ram:1; /* [00] Default:0x0 RC */ + u32 vblk_qinfo_ram:1; /* [01] Default:0x0 RC */ + u32 msix_ctrl_ram:1; /* [02] Default:0x0 RC */ + u32 msix_pba_ram:1; /* [03] Default:0x0 RC */ + u32 msix_intrl_info_ram:1; /* [04] Default:0x0 RC */ + u32 msix_intrl_bdf_ram:1; /* [05] Default:0x0 RC */ + u32 msix_intrl_ram:1; /* [06] Default:0x0 RC */ + u32 cpl_dmux_high_0_ram:1; /* [07] Default:0x0 RC */ + u32 cpl_dmux_high_1_ram:1; /* [08] Default:0x0 RC */ + u32 cpl_dmux_low_0_ram:1; /* [09] Default:0x0 RC */ + u32 cpl_dmux_low_1_ram:1; /* [10] Default:0x0 RC */ + u32 cpl_dmux_tag_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_TABLE_RAM_DBITERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_CAR_CTRL_ADDR (0x104c100) +#define NBL_ECPU_PADPT_CAR_CTRL_DEPTH (1) +#define NBL_ECPU_PADPT_CAR_CTRL_WIDTH (32) +#define NBL_ECPU_PADPT_CAR_CTRL_DWLEN (1) +union ecpu_padpt_car_ctrl_u { + struct ecpu_padpt_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TAG_AGED_ADDR (0x104c104) +#define NBL_ECPU_PADPT_ECPU_TAG_AGED_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TAG_AGED_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TAG_AGED_DWLEN (1) +union ecpu_padpt_ecpu_tag_aged_u { + struct ecpu_padpt_ecpu_tag_aged { + u32 times:31; /* [30:00] Default:0xE4E1C RW */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TAG_AGED_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_GBL_CTRL_ADDR (0x104c108) +#define NBL_ECPU_PADPT_ECPU_GBL_CTRL_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_GBL_CTRL_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_GBL_CTRL_DWLEN (1) +union ecpu_padpt_ecpu_gbl_ctrl_u { + struct ecpu_padpt_ecpu_gbl_ctrl { + u32 wr_en:1; /* [0:0] Default:0x1 RW */ + u32 rd_en:1; /* [1:1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_GBL_CTRL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_CTRL_ADDR (0x104c10c) +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_CTRL_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_CTRL_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_CTRL_DWLEN (1) +union ecpu_padpt_ecpu_gbl_intrl_ctrl_u { + struct ecpu_padpt_ecpu_gbl_intrl_ctrl { + u32 valid:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } 
__packed info; + u32 data[NBL_ECPU_PADPT_ECPU_GBL_INTRL_CTRL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_ADDR (0x104c110) +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_GBL_INTRL_DWLEN (1) +union ecpu_padpt_ecpu_gbl_intrl_u { + struct ecpu_padpt_ecpu_gbl_intrl { + u32 pnum:16; /* [15:00] Default:0x0 RW */ + u32 rate:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_GBL_INTRL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_10BTAG_ADDR (0x104c114) +#define NBL_ECPU_PADPT_ECPU_10BTAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_10BTAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_10BTAG_DWLEN (1) +union ecpu_padpt_ecpu_10btag_u { + struct ecpu_padpt_ecpu_10btag { + u32 rsv1:1; /* [0:0] Default:0x0 RO */ + u32 vld:1; /* [1:1] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_10BTAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_GBL_MSIX_MASK_ADDR (0x104c158) +#define NBL_ECPU_PADPT_ECPU_GBL_MSIX_MASK_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_GBL_MSIX_MASK_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_GBL_MSIX_MASK_DWLEN (1) +union ecpu_padpt_ecpu_gbl_msix_mask_u { + struct ecpu_padpt_ecpu_gbl_msix_mask { + u32 en:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_GBL_MSIX_MASK_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CFG_UP_ADDR (0x104c15c) +#define NBL_ECPU_PADPT_ECPU_CFG_UP_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CFG_UP_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CFG_UP_DWLEN (1) +union ecpu_padpt_ecpu_cfg_up_u { + struct ecpu_padpt_ecpu_cfg_up { + u32 tag8b_th:16; /* [15:00] Default:0xF0 RW */ + u32 tag10b_th:16; /* [31:16] Default:0x1F0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CFG_UP_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PD_DN_ADDR (0x104c160) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PD_DN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PD_DN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PD_DN_DWLEN (1) +union ecpu_padpt_ecpu_cfg_fc_pd_dn_u { + struct ecpu_padpt_ecpu_cfg_fc_pd_dn { + u32 th:16; /* [15:00] Default:0x20 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CFG_FC_PD_DN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PH_DN_ADDR (0x104c164) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PH_DN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PH_DN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_PH_DN_DWLEN (1) +union ecpu_padpt_ecpu_cfg_fc_ph_dn_u { + struct ecpu_padpt_ecpu_cfg_fc_ph_dn { + u32 th:12; /* [11:00] Default:0x20 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CFG_FC_PH_DN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CFG_FC_NPD_DN_ADDR (0x104c168) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_NPD_DN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_NPD_DN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_NPD_DN_DWLEN (1) +union ecpu_padpt_ecpu_cfg_fc_npd_dn_u { + struct ecpu_padpt_ecpu_cfg_fc_npd_dn { + u32 th:16; /* [15:00] Default:0x20 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CFG_FC_NPD_DN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CFG_FC_NPH_DN_ADDR (0x104c16c) +#define 
NBL_ECPU_PADPT_ECPU_CFG_FC_NPH_DN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_NPH_DN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_NPH_DN_DWLEN (1) +union ecpu_padpt_ecpu_cfg_fc_nph_dn_u { + struct ecpu_padpt_ecpu_cfg_fc_nph_dn { + u32 th:12; /* [11:00] Default:0x20 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CFG_FC_NPH_DN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLH_UP_ADDR (0x104c170) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLH_UP_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLH_UP_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLH_UP_DWLEN (1) +union ecpu_padpt_ecpu_cfg_fc_cplh_up_u { + struct ecpu_padpt_ecpu_cfg_fc_cplh_up { + u32 th:16; /* [15:00] Default:0x80 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CFG_FC_CPLH_UP_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLD_UP_ADDR (0x104c174) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLD_UP_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLD_UP_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CFG_FC_CPLD_UP_DWLEN (1) +union ecpu_padpt_ecpu_cfg_fc_cpld_up_u { + struct ecpu_padpt_ecpu_cfg_fc_cpld_up { + u32 th:16; /* [15:00] Default:0x400 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CFG_FC_CPLD_UP_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_P_ADDR (0x104c180) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_P_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_P_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_P_DWLEN (1) +union ecpu_padpt_ecpu_native_tx_p_u { + struct ecpu_padpt_ecpu_native_tx_p { + u32 credits:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_NATIVE_TX_P_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_NP_ADDR (0x104c184) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_NP_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_NP_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_NP_DWLEN (1) +union ecpu_padpt_ecpu_native_tx_np_u { + struct ecpu_padpt_ecpu_native_tx_np { + u32 credits:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_NATIVE_TX_NP_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_CPL_ADDR (0x104c188) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_CPL_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_CPL_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_NATIVE_TX_CPL_DWLEN (1) +union ecpu_padpt_ecpu_native_tx_cpl_u { + struct ecpu_padpt_ecpu_native_tx_cpl { + u32 credits:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_NATIVE_TX_CPL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MSIX_VEC_ADDR (0x104c200) +#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MSIX_VEC_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MSIX_VEC_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MSIX_VEC_DWLEN (1) +union ecpu_padpt_ecpu_abnormal_msix_vec_u { + struct ecpu_padpt_ecpu_abnormal_msix_vec { + u32 idx:16; /* [15:0] Default:0x0 RW */ + u32 vld:1; /* [16:16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_ABNORMAL_MSIX_VEC_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MISX_TIMEOUT_ADDR (0x104c204) +#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MISX_TIMEOUT_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MISX_TIMEOUT_WIDTH (32) 
+#define NBL_ECPU_PADPT_ECPU_ABNORMAL_MISX_TIMEOUT_DWLEN (1) +union ecpu_padpt_ecpu_abnormal_misx_timeout_u { + struct ecpu_padpt_ecpu_abnormal_misx_timeout { + u32 value:32; /* [31:00] Default:0x3938700 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_ABNORMAL_MISX_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW0_ADDR (0x104c300) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW0_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW0_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW0_DWLEN (1) +union ecpu_padpt_ecpu_invld_msix_vec_dw0_u { + struct ecpu_padpt_ecpu_invld_msix_vec_dw0 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW0_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW1_ADDR (0x104c304) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW1_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW1_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW1_DWLEN (1) +union ecpu_padpt_ecpu_invld_msix_vec_dw1_u { + struct ecpu_padpt_ecpu_invld_msix_vec_dw1 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW1_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW2_ADDR (0x104c308) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW2_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW2_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW2_DWLEN (1) +union ecpu_padpt_ecpu_invld_msix_vec_dw2_u { + struct ecpu_padpt_ecpu_invld_msix_vec_dw2 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW2_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW3_ADDR (0x104c30c) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW3_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW3_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW3_DWLEN (1) +union ecpu_padpt_ecpu_invld_msix_vec_dw3_u { + struct ecpu_padpt_ecpu_invld_msix_vec_dw3 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_INVLD_MSIX_VEC_DW3_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DEBUG_ADDR (0x104c400) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_debug_u { + struct ecpu_padpt_ecpu_rd_mux_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_GET_ADDR (0x104c404) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_GET_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_GET_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_GET_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_info_get_u { + struct ecpu_padpt_ecpu_rd_mux_dif_info_get { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_GET_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_REN_ADDR (0x104c408) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_info_ren_u { + struct ecpu_padpt_ecpu_rd_mux_dif_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_REN_DWLEN]; +} __packed; + 
+#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_WEN_ADDR (0x104c40c) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_WEN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_info_wen_u { + struct ecpu_padpt_ecpu_rd_mux_dif_info_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_ERR_ADDR (0x104c410) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_info_err_u { + struct ecpu_padpt_ecpu_rd_mux_dif_info_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_REN_ADDR (0x104c414) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_REN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_sel_ren_u { + struct ecpu_padpt_ecpu_rd_mux_dif_sel_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_WEN_ADDR (0x104c418) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_WEN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_sel_wen_u { + struct ecpu_padpt_ecpu_rd_mux_dif_sel_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_SEL_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_REOB_ADDR (0x104c41c) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_REOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_total_reob_u { + struct ecpu_padpt_ecpu_rd_mux_dif_total_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_RERR_ADDR (0x104c420) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_RERR_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_total_rerr_u { + struct ecpu_padpt_ecpu_rd_mux_dif_total_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_TOTAL_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REOB_ADDR (0x104c424) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif0_reob_u { + struct ecpu_padpt_ecpu_rd_mux_dif0_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_RERR_ADDR (0x104c428) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_RERR_DWLEN (1) +union 
ecpu_padpt_ecpu_rd_mux_dif0_rerr_u { + struct ecpu_padpt_ecpu_rd_mux_dif0_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REN_ADDR (0x104c42c) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif0_ren_u { + struct ecpu_padpt_ecpu_rd_mux_dif0_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF0_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REOB_ADDR (0x104c430) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif1_reob_u { + struct ecpu_padpt_ecpu_rd_mux_dif1_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_RERR_ADDR (0x104c434) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_RERR_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif1_rerr_u { + struct ecpu_padpt_ecpu_rd_mux_dif1_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REN_ADDR (0x104c438) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif1_ren_u { + struct ecpu_padpt_ecpu_rd_mux_dif1_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF1_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REOB_ADDR (0x104c43c) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif2_reob_u { + struct ecpu_padpt_ecpu_rd_mux_dif2_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_RERR_ADDR (0x104c440) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_RERR_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif2_rerr_u { + struct ecpu_padpt_ecpu_rd_mux_dif2_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REN_ADDR (0x104c444) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif2_ren_u { + struct ecpu_padpt_ecpu_rd_mux_dif2_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF2_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REOB_ADDR (0x104c448) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REOB_WIDTH (32) +#define 
NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif3_reob_u { + struct ecpu_padpt_ecpu_rd_mux_dif3_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_RERR_ADDR (0x104c44c) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_RERR_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif3_rerr_u { + struct ecpu_padpt_ecpu_rd_mux_dif3_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REN_ADDR (0x104c450) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REN_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif3_ren_u { + struct ecpu_padpt_ecpu_rd_mux_dif3_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF3_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_SEL_INFO_RAM_ERR_ADDR (0x104c458) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_SEL_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_SEL_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_SEL_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_sel_info_ram_err_u { + struct ecpu_padpt_ecpu_rd_mux_sel_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_SEL_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_RAM_ERR_ADDR (0x104c460) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rd_mux_dif_info_ram_err_u { + struct ecpu_padpt_ecpu_rd_mux_dif_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RD_MUX_DIF_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DEBUG_ADDR (0x104c500) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_debug_u { + struct ecpu_padpt_ecpu_rw_mux_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_GET_ADDR (0x104c504) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_GET_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_GET_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_GET_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_info_get_u { + struct ecpu_padpt_ecpu_rw_mux_dif_info_get { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_GET_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_REN_ADDR (0x104c508) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_info_ren_u { + struct ecpu_padpt_ecpu_rw_mux_dif_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_REN_DWLEN]; +} __packed; + 
+#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_WEN_ADDR (0x104c50c) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_WEN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_info_wen_u { + struct ecpu_padpt_ecpu_rw_mux_dif_info_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_ERR_ADDR (0x104c510) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_info_err_u { + struct ecpu_padpt_ecpu_rw_mux_dif_info_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_WEN_ADDR (0x104c514) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_WEN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_data_wen_u { + struct ecpu_padpt_ecpu_rw_mux_dif_data_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_REN_ADDR (0x104c518) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_data_ren_u { + struct ecpu_padpt_ecpu_rw_mux_dif_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_HERR_ADDR (0x104c51c) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_HERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_HERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_HERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_data_herr_u { + struct ecpu_padpt_ecpu_rw_mux_dif_data_herr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_HERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_REN_ADDR (0x104c520) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_REN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_sel_ren_u { + struct ecpu_padpt_ecpu_rw_mux_dif_sel_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_WEN_ADDR (0x104c524) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_WEN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_sel_wen_u { + struct ecpu_padpt_ecpu_rw_mux_dif_sel_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_SEL_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_REOB_ADDR (0x104c528) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_REOB_WIDTH (32) +#define 
NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_total_reob_u { + struct ecpu_padpt_ecpu_rw_mux_dif_total_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_RERR_ADDR (0x104c52c) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_RERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_total_rerr_u { + struct ecpu_padpt_ecpu_rw_mux_dif_total_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_TOTAL_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_REOB_ADDR (0x104c530) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_REOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif0_reob_u { + struct ecpu_padpt_ecpu_rw_mux_dif0_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_RERR_ADDR (0x104c534) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_RERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif0_rerr_u { + struct ecpu_padpt_ecpu_rw_mux_dif0_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_REN_ADDR (0x104c538) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif0_info_ren_u { + struct ecpu_padpt_ecpu_rw_mux_dif0_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_RD_ADDR (0x104c53c) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_RD_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_RD_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_RD_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif0_info_rd_u { + struct ecpu_padpt_ecpu_rw_mux_dif0_info_rd { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_RD_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_WR_ADDR (0x104c540) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_WR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_WR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_WR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif0_info_wr_u { + struct ecpu_padpt_ecpu_rw_mux_dif0_info_wr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_INFO_WR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_DATA_REN_ADDR (0x104c544) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif0_data_ren_u { + struct ecpu_padpt_ecpu_rw_mux_dif0_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF0_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_REOB_ADDR (0x104c548) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_REOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_REOB_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif1_reob_u { + struct ecpu_padpt_ecpu_rw_mux_dif1_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_RERR_ADDR (0x104c54c) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_RERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif1_rerr_u { + struct ecpu_padpt_ecpu_rw_mux_dif1_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_REN_ADDR (0x104c550) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif1_info_ren_u { + struct ecpu_padpt_ecpu_rw_mux_dif1_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_RD_ADDR (0x104c554) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_RD_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_RD_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_RD_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif1_info_rd_u { + struct ecpu_padpt_ecpu_rw_mux_dif1_info_rd { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_RD_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_WR_ADDR (0x104c558) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_WR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_WR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_WR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif1_info_wr_u { + struct ecpu_padpt_ecpu_rw_mux_dif1_info_wr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_INFO_WR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_DATA_REN_ADDR (0x104c55c) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif1_data_ren_u { + struct ecpu_padpt_ecpu_rw_mux_dif1_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF1_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_SEL_INFO_RAM_ERR_ADDR (0x104c564) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_SEL_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_SEL_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_SEL_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_sel_info_ram_err_u { + struct ecpu_padpt_ecpu_rw_mux_sel_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_SEL_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_RAM_ERR_ADDR (0x104c56c) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_RAM_ERR_DEPTH (1) +#define 
NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_info_ram_err_u { + struct ecpu_padpt_ecpu_rw_mux_dif_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_RAM_ERR_ADDR (0x104c574) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rw_mux_dif_data_ram_err_u { + struct ecpu_padpt_ecpu_rw_mux_dif_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RW_MUX_DIF_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DEBUG_ADDR (0x104c600) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_debug_u { + struct ecpu_padpt_ecpu_wr_mux_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_GET_ADDR (0x104c604) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_GET_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_GET_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_GET_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_info_get_u { + struct ecpu_padpt_ecpu_wr_mux_dif_info_get { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_GET_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_REN_ADDR (0x104c608) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_info_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_WEN_ADDR (0x104c60c) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_WEN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_info_wen_u { + struct ecpu_padpt_ecpu_wr_mux_dif_info_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_ERR_ADDR (0x104c610) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_ERR_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_info_err_u { + struct ecpu_padpt_ecpu_wr_mux_dif_info_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_WEN_ADDR (0x104c614) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_WEN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_data_wen_u { + struct ecpu_padpt_ecpu_wr_mux_dif_data_wen { + u32 cnt:32; /* [31:00] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_REN_ADDR (0x104c618) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_data_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_HERR_ADDR (0x104c61c) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_HERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_HERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_HERR_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_data_herr_u { + struct ecpu_padpt_ecpu_wr_mux_dif_data_herr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_HERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_INFO_REN_ADDR (0x104c620) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif0_info_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif0_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_DATA_REN_ADDR (0x104c624) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif0_data_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif0_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF0_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_INFO_REN_ADDR (0x104c628) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif1_info_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif1_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_DATA_REN_ADDR (0x104c62c) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif1_data_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif1_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF1_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_INFO_REN_ADDR (0x104c630) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif2_info_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif2_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_DATA_REN_ADDR (0x104c634) +#define 
NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif2_data_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif2_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF2_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_INFO_REN_ADDR (0x104c638) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif3_info_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif3_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_DATA_REN_ADDR (0x104c63c) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif3_data_ren_u { + struct ecpu_padpt_ecpu_wr_mux_dif3_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF3_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_RAM_ERR_ADDR (0x104c644) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_info_ram_err_u { + struct ecpu_padpt_ecpu_wr_mux_dif_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_RAM_ERR_ADDR (0x104c64c) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_wr_mux_dif_data_ram_err_u { + struct ecpu_padpt_ecpu_wr_mux_dif_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_WR_MUX_DIF_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_DEBUG_ADDR (0x104c700) +#define NBL_ECPU_PADPT_ECPU_MSIX_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_msix_debug_u { + struct ecpu_padpt_ecpu_msix_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_IN_REN_ADDR (0x104c704) +#define NBL_ECPU_PADPT_ECPU_MSIX_IN_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_IN_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_IN_REN_DWLEN (1) +union ecpu_padpt_ecpu_msix_in_ren_u { + struct ecpu_padpt_ecpu_msix_in_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_IN_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_INT_ACK_ADDR (0x104c708) +#define NBL_ECPU_PADPT_ECPU_MSIX_INT_ACK_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_INT_ACK_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_INT_ACK_DWLEN (1) +union ecpu_padpt_ecpu_msix_int_ack_u { + struct ecpu_padpt_ecpu_msix_int_ack { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed 
info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_INT_ACK_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_OUT_WEN_ADDR (0x104c70c) +#define NBL_ECPU_PADPT_ECPU_MSIX_OUT_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_OUT_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_OUT_WEN_DWLEN (1) +union ecpu_padpt_ecpu_msix_out_wen_u { + struct ecpu_padpt_ecpu_msix_out_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_OUT_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_WR_ADDR (0x104c710) +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_WR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_WR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_WR_DWLEN (1) +union ecpu_padpt_ecpu_msix_cfg_wr_u { + struct ecpu_padpt_ecpu_msix_cfg_wr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_CFG_WR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_RD_ADDR (0x104c714) +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_RD_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_RD_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_CFG_RD_DWLEN (1) +union ecpu_padpt_ecpu_msix_cfg_rd_u { + struct ecpu_padpt_ecpu_msix_cfg_rd { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_CFG_RD_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_RAM_ERR_ADDR (0x104c71c) +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_msix_info_ram_err_u { + struct ecpu_padpt_ecpu_msix_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_DATA_RAM_ERR_ADDR (0x104c724) +#define NBL_ECPU_PADPT_ECPU_MSIX_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_msix_data_ram_err_u { + struct ecpu_padpt_ecpu_msix_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_RAM_ERR_ADDR (0x104c728) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_msix_intrl_ram_err_u { + struct ecpu_padpt_ecpu_msix_intrl_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_INTRL_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_BDF_RAM_ERR_ADDR (0x104c72c) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_BDF_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_BDF_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_BDF_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_msix_intrl_bdf_ram_err_u { + struct ecpu_padpt_ecpu_msix_intrl_bdf_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_INTRL_BDF_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_INFO_RAM_ERR_ADDR (0x104c730) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_INTRL_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_msix_intrl_info_ram_err_u { + struct 
ecpu_padpt_ecpu_msix_intrl_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_INTRL_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_PBA_RAM_ERR_ADDR (0x104c734) +#define NBL_ECPU_PADPT_ECPU_MSIX_PBA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_PBA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_PBA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_msix_pba_ram_err_u { + struct ecpu_padpt_ecpu_msix_pba_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_PBA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_RAM_ERR_ADDR (0x104c738) +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_msix_ctrl_ram_err_u { + struct ecpu_padpt_ecpu_msix_ctrl_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_CTRL_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DEBUG_ADDR (0x104c800) +#define NBL_ECPU_PADPT_ECPU_SPL_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_spl_debug_u { + struct ecpu_padpt_ecpu_spl_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_INFO_REN_ADDR (0x104c804) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_0_info_ren_u { + struct ecpu_padpt_ecpu_spl_dif_0_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_0_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_DATA_REOC_ADDR (0x104c808) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_DATA_REOC_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_DATA_REOC_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_0_DATA_REOC_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_0_data_reoc_u { + struct ecpu_padpt_ecpu_spl_dif_0_data_reoc { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_0_DATA_REOC_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_INFO_REN_ADDR (0x104c80c) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_1_info_ren_u { + struct ecpu_padpt_ecpu_spl_dif_1_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_1_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_DATA_REOC_ADDR (0x104c810) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_DATA_REOC_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_DATA_REOC_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_1_DATA_REOC_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_1_data_reoc_u { + struct ecpu_padpt_ecpu_spl_dif_1_data_reoc { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_1_DATA_REOC_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_2_INFO_REN_ADDR (0x104c814) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_2_INFO_REN_DEPTH (1) +#define 
NBL_ECPU_PADPT_ECPU_SPL_DIF_2_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_2_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_2_info_ren_u { + struct ecpu_padpt_ecpu_spl_dif_2_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_2_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_2_DATA_REOC_ADDR (0x104c818) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_2_DATA_REOC_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_2_DATA_REOC_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_2_DATA_REOC_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_2_data_reoc_u { + struct ecpu_padpt_ecpu_spl_dif_2_data_reoc { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_2_DATA_REOC_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_INFO_REN_ADDR (0x104c81c) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_3_info_ren_u { + struct ecpu_padpt_ecpu_spl_dif_3_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_3_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_DATA_REOC_ADDR (0x104c820) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_DATA_REOC_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_DATA_REOC_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_3_DATA_REOC_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_3_data_reoc_u { + struct ecpu_padpt_ecpu_spl_dif_3_data_reoc { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_3_DATA_REOC_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_INFO_TOTAL_ADDR (0x104c824) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_INFO_TOTAL_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_INFO_TOTAL_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_INFO_TOTAL_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_info_total_u { + struct ecpu_padpt_ecpu_spl_dif_info_total { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_INFO_TOTAL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_DATA_TOTAL_ADDR (0x104c828) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_DATA_TOTAL_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_DATA_TOTAL_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_DIF_DATA_TOTAL_DWLEN (1) +union ecpu_padpt_ecpu_spl_dif_data_total_u { + struct ecpu_padpt_ecpu_spl_dif_data_total { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_DIF_DATA_TOTAL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_SOB_ADDR (0x104c82c) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_SOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_SOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_SOB_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_rd_sob_u { + struct ecpu_padpt_ecpu_spl_tlp_rd_sob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_SOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_EOB_ADDR (0x104c830) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_EOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_EOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_EOB_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_rd_eob_u { + struct ecpu_padpt_ecpu_spl_tlp_rd_eob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_EOB_DWLEN]; +} 
__packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_ERR_ADDR (0x104c834) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_rd_err_u { + struct ecpu_padpt_ecpu_spl_tlp_rd_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_RD_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_SOB_ADDR (0x104c838) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_SOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_SOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_SOB_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_wr_sob_u { + struct ecpu_padpt_ecpu_spl_tlp_wr_sob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_SOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_EOB_ADDR (0x104c83c) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_EOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_EOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_EOB_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_wr_eob_u { + struct ecpu_padpt_ecpu_spl_tlp_wr_eob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_EOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_ERR_ADDR (0x104c840) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_wr_err_u { + struct ecpu_padpt_ecpu_spl_tlp_wr_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_WR_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_ADDR (0x104c844) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_info_u { + struct ecpu_padpt_ecpu_spl_tlp_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_RAM_ERR_ADDR (0x104c84c) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_tlp_info_ram_err_u { + struct ecpu_padpt_ecpu_spl_tlp_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TLP_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_ADDR (0x104c850) +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_DWLEN (1) +union ecpu_padpt_ecpu_spl_tag_u { + struct ecpu_padpt_ecpu_spl_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_RAM_ERR_ADDR (0x104c858) +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_TAG_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_tag_ram_err_u { + struct ecpu_padpt_ecpu_spl_tag_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_TAG_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_ADDR 
(0x104c85c) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_DWLEN (1) +union ecpu_padpt_ecpu_spl_sel_u { + struct ecpu_padpt_ecpu_spl_sel { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_SEL_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_RAM_ERR_ADDR (0x104c864) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_sel_ram_err_u { + struct ecpu_padpt_ecpu_spl_sel_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_SEL_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_ADDR (0x104c868) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_DWLEN (1) +union ecpu_padpt_ecpu_spl_sel_tag_u { + struct ecpu_padpt_ecpu_spl_sel_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_RAM_ERR_ADDR (0x104c874) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_sel_tag_ram_err_u { + struct ecpu_padpt_ecpu_spl_sel_tag_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_SEL_TAG_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_ADDR (0x104c878) +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_DWLEN (1) +union ecpu_padpt_ecpu_spl_msix_u { + struct ecpu_padpt_ecpu_spl_msix { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_MSIX_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_RAM_ERR_ADDR (0x104c880) +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_MSIX_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_msix_ram_err_u { + struct ecpu_padpt_ecpu_spl_msix_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_MSIX_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_VNET_QINFO_RAM_ERR_ADDR (0x104c884) +#define NBL_ECPU_PADPT_ECPU_SPL_VNET_QINFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_VNET_QINFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_VNET_QINFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_vnet_qinfo_ram_err_u { + struct ecpu_padpt_ecpu_spl_vnet_qinfo_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_VNET_QINFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_SPL_VBLK_QINFO_RAM_ERR_ADDR (0x104c888) +#define NBL_ECPU_PADPT_ECPU_SPL_VBLK_QINFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_SPL_VBLK_QINFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_SPL_VBLK_QINFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_spl_vblk_qinfo_ram_err_u { + struct ecpu_padpt_ecpu_spl_vblk_qinfo_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_SPL_VBLK_QINFO_RAM_ERR_DWLEN]; +} __packed; + +#define 
NBL_ECPU_PADPT_ECPU_TLP_DEBUG_ADDR (0x104c900) +#define NBL_ECPU_PADPT_ECPU_TLP_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_tlp_debug_u { + struct ecpu_padpt_ecpu_tlp_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_SOB_ADDR (0x104c904) +#define NBL_ECPU_PADPT_ECPU_TLP_SOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_SOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_SOB_DWLEN (1) +union ecpu_padpt_ecpu_tlp_sob_u { + struct ecpu_padpt_ecpu_tlp_sob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_SOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_EOB_ADDR (0x104c908) +#define NBL_ECPU_PADPT_ECPU_TLP_EOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_EOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_EOB_DWLEN (1) +union ecpu_padpt_ecpu_tlp_eob_u { + struct ecpu_padpt_ecpu_tlp_eob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_EOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_ERR_ADDR (0x104c90c) +#define NBL_ECPU_PADPT_ECPU_TLP_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_ERR_DWLEN (1) +union ecpu_padpt_ecpu_tlp_err_u { + struct ecpu_padpt_ecpu_tlp_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_REN_ADDR (0x104c910) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_tlp_info_ren_u { + struct ecpu_padpt_ecpu_tlp_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_WEN_ADDR (0x104c914) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_WEN_DWLEN (1) +union ecpu_padpt_ecpu_tlp_info_wen_u { + struct ecpu_padpt_ecpu_tlp_info_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_INFO_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_REN_ADDR (0x104c918) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_tlp_data_ren_u { + struct ecpu_padpt_ecpu_tlp_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_WEN_ADDR (0x104c91c) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_WEN_DWLEN (1) +union ecpu_padpt_ecpu_tlp_data_wen_u { + struct ecpu_padpt_ecpu_tlp_data_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_DATA_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_RAM_ERR_ADDR (0x104c924) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_tlp_info_ram_err_u { + struct 
ecpu_padpt_ecpu_tlp_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_RAM_ERR_ADDR (0x104c92c) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_TLP_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_tlp_data_ram_err_u { + struct ecpu_padpt_ecpu_tlp_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_TLP_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_DEBUG_ADDR (0x104ca00) +#define NBL_ECPU_PADPT_ECPU_JON_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_jon_debug_u { + struct ecpu_padpt_ecpu_jon_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_TLP_INFO_REN_ADDR (0x104ca04) +#define NBL_ECPU_PADPT_ECPU_JON_TLP_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_TLP_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_TLP_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_jon_tlp_info_ren_u { + struct ecpu_padpt_ecpu_jon_tlp_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_TLP_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_EOB_ADDR (0x104ca08) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_EOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_EOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_EOB_DWLEN (1) +union ecpu_padpt_ecpu_jon_dif_total_eob_u { + struct ecpu_padpt_ecpu_jon_dif_total_eob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_EOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_ERR_ADDR (0x104ca0c) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_ERR_DWLEN (1) +union ecpu_padpt_ecpu_jon_dif_total_err_u { + struct ecpu_padpt_ecpu_jon_dif_total_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_DIF_TOTAL_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RSOB_ADDR (0x104ca10) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RSOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RSOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RSOB_DWLEN (1) +union ecpu_padpt_ecpu_jon_dif_rsob_u { + struct ecpu_padpt_ecpu_jon_dif_rsob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_DIF_RSOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_DIF_REOB_ADDR (0x104ca14) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_REOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_REOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_REOB_DWLEN (1) +union ecpu_padpt_ecpu_jon_dif_reob_u { + struct ecpu_padpt_ecpu_jon_dif_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_DIF_REOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RERR_ADDR (0x104ca18) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_DIF_RERR_DWLEN (1) +union ecpu_padpt_ecpu_jon_dif_rerr_u { + struct ecpu_padpt_ecpu_jon_dif_rerr { + u32 
cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_DIF_RERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_LENGTH_ERR_ADDR (0x104ca1c) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_LENGTH_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_LENGTH_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_LENGTH_ERR_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif_length_err_u { + struct ecpu_padpt_ecpu_jon_rdif_length_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF_LENGTH_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_EOB_ADDR (0x104ca20) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_EOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_EOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_EOB_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif0_eob_u { + struct ecpu_padpt_ecpu_jon_rdif0_eob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF0_EOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_ERR_ADDR (0x104ca24) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF0_ERR_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif0_err_u { + struct ecpu_padpt_ecpu_jon_rdif0_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF0_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_EOB_ADDR (0x104ca28) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_EOB_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_EOB_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_EOB_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif1_eob_u { + struct ecpu_padpt_ecpu_jon_rdif1_eob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF1_EOB_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_ERR_ADDR (0x104ca2c) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF1_ERR_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif1_err_u { + struct ecpu_padpt_ecpu_jon_rdif1_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF1_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_REN_ADDR (0x104ca30) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_REN_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif_info_ren_u { + struct ecpu_padpt_ecpu_jon_rdif_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_WEN_ADDR (0x104ca34) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_WEN_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif_info_wen_u { + struct ecpu_padpt_ecpu_jon_rdif_info_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_REN_ADDR (0x104ca38) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_REN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_REN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_REN_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif_data_ren_u { + struct 
ecpu_padpt_ecpu_jon_rdif_data_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_REN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_WEN_ADDR (0x104ca3c) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_WEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_WEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_WEN_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif_data_wen_u { + struct ecpu_padpt_ecpu_jon_rdif_data_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_WEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_RAM_ERR_ADDR (0x104ca44) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif_info_ram_err_u { + struct ecpu_padpt_ecpu_jon_rdif_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_RAM_ERR_ADDR (0x104ca4c) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_jon_rdif_data_ram_err_u { + struct ecpu_padpt_ecpu_jon_rdif_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_JON_RDIF_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_DEBUG_ADDR (0x104cb00) +#define NBL_ECPU_PADPT_ECPU_DMUX_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_debug_u { + struct ecpu_padpt_ecpu_dmux_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_SEL_TAG_ADDR (0x104cb04) +#define NBL_ECPU_PADPT_ECPU_DMUX_SEL_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_SEL_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_SEL_TAG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_sel_tag_u { + struct ecpu_padpt_ecpu_dmux_sel_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_SEL_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_NULL_TAG_ADDR (0x104cb08) +#define NBL_ECPU_PADPT_ECPU_DMUX_NULL_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_NULL_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_NULL_TAG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_null_tag_u { + struct ecpu_padpt_ecpu_dmux_null_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_NULL_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_RX_TAG_ADDR (0x104cb0c) +#define NBL_ECPU_PADPT_ECPU_DMUX_RX_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_RX_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_RX_TAG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_rx_tag_u { + struct ecpu_padpt_ecpu_dmux_rx_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_RX_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_ERR_TAG_ADDR (0x104cb10) +#define NBL_ECPU_PADPT_ECPU_DMUX_ERR_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_ERR_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_ERR_TAG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_err_tag_u { + 
struct ecpu_padpt_ecpu_dmux_err_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_ERR_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_AGED_TAG_ADDR (0x104cb14) +#define NBL_ECPU_PADPT_ECPU_DMUX_AGED_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_AGED_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_AGED_TAG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_aged_tag_u { + struct ecpu_padpt_ecpu_dmux_aged_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_AGED_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_SOC_ADDR (0x104cb18) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_SOC_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_SOC_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_SOC_DWLEN (1) +union ecpu_padpt_ecpu_dmux_rdif_soc_u { + struct ecpu_padpt_ecpu_dmux_rdif_soc { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_RDIF_SOC_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_EOC_ADDR (0x104cb1c) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_EOC_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_EOC_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_EOC_DWLEN (1) +union ecpu_padpt_ecpu_dmux_rdif_eoc_u { + struct ecpu_padpt_ecpu_dmux_rdif_eoc { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_RDIF_EOC_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_ERR_ADDR (0x104cb20) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_RDIF_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dmux_rdif_err_u { + struct ecpu_padpt_ecpu_dmux_rdif_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_RDIF_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_RLS_ADDR (0x104cb24) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_RLS_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_RLS_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_RLS_DWLEN (1) +union ecpu_padpt_ecpu_dmux_tag_rls_u { + struct ecpu_padpt_ecpu_dmux_tag_rls { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_TAG_RLS_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_CPL_RLS_ADDR (0x104cb28) +#define NBL_ECPU_PADPT_ECPU_DMUX_CPL_RLS_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_CPL_RLS_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_CPL_RLS_DWLEN (1) +union ecpu_padpt_ecpu_dmux_cpl_rls_u { + struct ecpu_padpt_ecpu_dmux_cpl_rls { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_CPL_RLS_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_ADDR (0x104cb40) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_DEPTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_is_tag_u { + struct ecpu_padpt_ecpu_dmux_is_tag { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_DWLEN]; +} __packed; +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_REG(r) (NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_ADDR + \ + (NBL_ECPU_PADPT_ECPU_DMUX_IS_TAG_DWLEN * 4) * (r)) + +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_ADDR (0x104cbc0) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_DEPTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_DWLEN (1) +union 
ecpu_padpt_ecpu_dmux_is_sel_tag_u { + struct ecpu_padpt_ecpu_dmux_is_sel_tag { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_DWLEN]; +} __packed; +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_REG(r) (NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_ADDR + \ + (NBL_ECPU_PADPT_ECPU_DMUX_IS_SEL_TAG_DWLEN * 4) * (r)) + +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_ADDR (0x104cc40) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_DEPTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_DWLEN (1) +union ecpu_padpt_ecpu_dmux_is_err_tag_u { + struct ecpu_padpt_ecpu_dmux_is_err_tag { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_DWLEN]; +} __packed; +#define NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_REG(r) (NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_ADDR + \ + (NBL_ECPU_PADPT_ECPU_DMUX_IS_ERR_TAG_DWLEN * 4) * (r)) + +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_MAX_ADDR (0x104cd00) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_MAX_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_MAX_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_MAX_DWLEN (1) +union ecpu_padpt_ecpu_dmux_tag_aged_max_u { + struct ecpu_padpt_ecpu_dmux_tag_aged_max { + u32 times:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_MAX_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_AVR_ADDR (0x104cd04) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_AVR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_AVR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_AVR_DWLEN (1) +union ecpu_padpt_ecpu_dmux_tag_aged_avr_u { + struct ecpu_padpt_ecpu_dmux_tag_aged_avr { + u32 times:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DMUX_TAG_AGED_AVR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_DEBUG_ADDR (0x104ce00) +#define NBL_ECPU_PADPT_ECPU_CPL_DEBUG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_DEBUG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_DEBUG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_debug_u { + struct ecpu_padpt_ecpu_cpl_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_DEBUG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_RX_TAG_ADDR (0x104ce04) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_TAG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_rx_tag_u { + struct ecpu_padpt_ecpu_cpl_rx_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_RX_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_ERR_TAG_ADDR (0x104ce08) +#define NBL_ECPU_PADPT_ECPU_CPL_ERR_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_ERR_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_ERR_TAG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_err_tag_u { + struct ecpu_padpt_ecpu_cpl_err_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_ERR_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_EP_TAG_ADDR (0x104ce0c) +#define NBL_ECPU_PADPT_ECPU_CPL_EP_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_EP_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_EP_TAG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_ep_tag_u { + struct ecpu_padpt_ecpu_cpl_ep_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_EP_TAG_DWLEN]; +} __packed; + +#define 
NBL_ECPU_PADPT_ECPU_CPL_FAIL_TAG_ADDR (0x104ce10) +#define NBL_ECPU_PADPT_ECPU_CPL_FAIL_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_FAIL_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_FAIL_TAG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_fail_tag_u { + struct ecpu_padpt_ecpu_cpl_fail_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_FAIL_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_HERR_TAG_ADDR (0x104ce14) +#define NBL_ECPU_PADPT_ECPU_CPL_HERR_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_HERR_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_HERR_TAG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_herr_tag_u { + struct ecpu_padpt_ecpu_cpl_herr_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_HERR_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_TAG_ADDR (0x104ce18) +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_TAG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_rcb_tag_u { + struct ecpu_padpt_ecpu_cpl_rcb_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_RCB_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_ERR_ADDR (0x104ce1c) +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_RCB_ERR_DWLEN (1) +union ecpu_padpt_ecpu_cpl_rcb_err_u { + struct ecpu_padpt_ecpu_cpl_rcb_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_RCB_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_DRP_TAG_ADDR (0x104ce20) +#define NBL_ECPU_PADPT_ECPU_CPL_DRP_TAG_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_DRP_TAG_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_DRP_TAG_DWLEN (1) +union ecpu_padpt_ecpu_cpl_drp_tag_u { + struct ecpu_padpt_ecpu_cpl_drp_tag { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_DRP_TAG_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_RX_SOP_ADDR (0x104ce24) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_SOP_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_SOP_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_SOP_DWLEN (1) +union ecpu_padpt_ecpu_cpl_rx_sop_u { + struct ecpu_padpt_ecpu_cpl_rx_sop { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_RX_SOP_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_RX_EOP_ADDR (0x104ce28) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_EOP_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_EOP_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_EOP_DWLEN (1) +union ecpu_padpt_ecpu_cpl_rx_eop_u { + struct ecpu_padpt_ecpu_cpl_rx_eop { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_RX_EOP_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_RX_ERR_ADDR (0x104ce2c) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_RX_ERR_DWLEN (1) +union ecpu_padpt_ecpu_cpl_rx_err_u { + struct ecpu_padpt_ecpu_cpl_rx_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_RX_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_TAG_RAM_ERR_ADDR (0x104ce34) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_TAG_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_TAG_RAM_ERR_WIDTH (32) +#define 
NBL_ECPU_PADPT_ECPU_CPL_DMUX_TAG_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_cpl_dmux_tag_ram_err_u { + struct ecpu_padpt_ecpu_cpl_dmux_tag_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_DMUX_TAG_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_0_RAM_ADDR (0x104ce38) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_0_RAM_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_0_RAM_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_0_RAM_DWLEN (1) +union ecpu_padpt_ecpu_cpl_dmux_high_0_ram_u { + struct ecpu_padpt_ecpu_cpl_dmux_high_0_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_0_RAM_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_1_RAM_ADDR (0x104ce3c) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_1_RAM_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_1_RAM_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_1_RAM_DWLEN (1) +union ecpu_padpt_ecpu_cpl_dmux_high_1_ram_u { + struct ecpu_padpt_ecpu_cpl_dmux_high_1_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_DMUX_HIGH_1_RAM_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_0_RAM_ADDR (0x104ce40) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_0_RAM_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_0_RAM_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_0_RAM_DWLEN (1) +union ecpu_padpt_ecpu_cpl_dmux_low_0_ram_u { + struct ecpu_padpt_ecpu_cpl_dmux_low_0_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_0_RAM_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_1_RAM_ADDR (0x104ce44) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_1_RAM_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_1_RAM_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_1_RAM_DWLEN (1) +union ecpu_padpt_ecpu_cpl_dmux_low_1_ram_u { + struct ecpu_padpt_ecpu_cpl_dmux_low_1_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_CPL_DMUX_LOW_1_RAM_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_ADDR (0x104ce80) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif0_rd_info_u { + struct ecpu_padpt_ecpu_dif0_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_LEN_ADDR (0x104ce84) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif0_rd_info_len_u { + struct ecpu_padpt_ecpu_dif0_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_RAM_ERR_ADDR (0x104ce8c) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif0_rd_info_ram_err_u { + struct ecpu_padpt_ecpu_dif0_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define 
NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_ADDR (0x104ce90) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif1_rd_info_u { + struct ecpu_padpt_ecpu_dif1_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_LEN_ADDR (0x104ce94) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif1_rd_info_len_u { + struct ecpu_padpt_ecpu_dif1_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_RAM_ERR_ADDR (0x104ce9c) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif1_rd_info_ram_err_u { + struct ecpu_padpt_ecpu_dif1_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_ADDR (0x104cea0) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif2_rd_info_u { + struct ecpu_padpt_ecpu_dif2_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_LEN_ADDR (0x104cea4) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif2_rd_info_len_u { + struct ecpu_padpt_ecpu_dif2_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_RAM_ERR_ADDR (0x104ceac) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif2_rd_info_ram_err_u { + struct ecpu_padpt_ecpu_dif2_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_ADDR (0x104ceb0) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif3_rd_info_u { + struct ecpu_padpt_ecpu_dif3_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_LEN_ADDR (0x104ceb4) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif3_rd_info_len_u { + struct ecpu_padpt_ecpu_dif3_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_RAM_ERR_ADDR (0x104cebc) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif3_rd_info_ram_err_u { + struct ecpu_padpt_ecpu_dif3_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_ADDR (0x104cec0) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif0_wr_info_u { + struct ecpu_padpt_ecpu_dif0_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_LEN_ADDR (0x104cec4) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif0_wr_info_len_u { + struct ecpu_padpt_ecpu_dif0_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_RAM_ERR_ADDR (0x104cecc) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif0_wr_info_ram_err_u { + struct ecpu_padpt_ecpu_dif0_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_ADDR (0x104ced0) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_DWLEN (1) +union ecpu_padpt_ecpu_dif0_wr_data_u { + struct ecpu_padpt_ecpu_dif0_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_LEN_ADDR (0x104ced4) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif0_wr_data_len_u { + struct ecpu_padpt_ecpu_dif0_wr_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_RAM_ERR_ADDR (0x104cedc) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif0_wr_data_ram_err_u { + struct ecpu_padpt_ecpu_dif0_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF0_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_ADDR (0x104cee0) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif1_wr_info_u { + struct 
ecpu_padpt_ecpu_dif1_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_LEN_ADDR (0x104cee4) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif1_wr_info_len_u { + struct ecpu_padpt_ecpu_dif1_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_RAM_ERR_ADDR (0x104ceec) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif1_wr_info_ram_err_u { + struct ecpu_padpt_ecpu_dif1_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_ADDR (0x104cef0) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_DWLEN (1) +union ecpu_padpt_ecpu_dif1_wr_data_u { + struct ecpu_padpt_ecpu_dif1_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_LEN_ADDR (0x104cef4) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif1_wr_data_len_u { + struct ecpu_padpt_ecpu_dif1_wr_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_RAM_ERR_ADDR (0x104cefc) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif1_wr_data_ram_err_u { + struct ecpu_padpt_ecpu_dif1_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF1_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_ADDR (0x104cf00) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif2_wr_info_u { + struct ecpu_padpt_ecpu_dif2_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_LEN_ADDR (0x104cf04) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif2_wr_info_len_u { + struct ecpu_padpt_ecpu_dif2_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_RAM_ERR_ADDR (0x104cf0c) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_RAM_ERR_WIDTH (32) +#define 
NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif2_wr_info_ram_err_u { + struct ecpu_padpt_ecpu_dif2_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_ADDR (0x104cf10) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_DWLEN (1) +union ecpu_padpt_ecpu_dif2_wr_data_u { + struct ecpu_padpt_ecpu_dif2_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_LEN_ADDR (0x104cf14) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif2_wr_data_len_u { + struct ecpu_padpt_ecpu_dif2_wr_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_RAM_ERR_ADDR (0x104cf1c) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif2_wr_data_ram_err_u { + struct ecpu_padpt_ecpu_dif2_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF2_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_ADDR (0x104cf20) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_DWLEN (1) +union ecpu_padpt_ecpu_dif3_wr_info_u { + struct ecpu_padpt_ecpu_dif3_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_LEN_ADDR (0x104cf24) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif3_wr_info_len_u { + struct ecpu_padpt_ecpu_dif3_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_RAM_ERR_ADDR (0x104cf2c) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif3_wr_info_ram_err_u { + struct ecpu_padpt_ecpu_dif3_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_ADDR (0x104cf30) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_DWLEN (1) +union ecpu_padpt_ecpu_dif3_wr_data_u { + struct ecpu_padpt_ecpu_dif3_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_LEN_ADDR (0x104cf34) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_LEN_DEPTH (1) 
+#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_LEN_DWLEN (1) +union ecpu_padpt_ecpu_dif3_wr_data_len_u { + struct ecpu_padpt_ecpu_dif3_wr_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_RAM_ERR_ADDR (0x104cf3c) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_dif3_wr_data_ram_err_u { + struct ecpu_padpt_ecpu_dif3_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_DIF3_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_ADDR (0x104cf40) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif0_rw_info_u { + struct ecpu_padpt_ecpu_rdma_dif0_rw_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_LEN_ADDR (0x104cf44) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif0_rw_info_len_u { + struct ecpu_padpt_ecpu_rdma_dif0_rw_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_RAM_ERR_ADDR (0x104cf4c) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif0_rw_info_ram_err_u { + struct ecpu_padpt_ecpu_rdma_dif0_rw_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_ADDR (0x104cf50) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif0_rw_data_u { + struct ecpu_padpt_ecpu_rdma_dif0_rw_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_LEN_ADDR (0x104cf54) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_LEN_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif0_rw_data_len_u { + struct ecpu_padpt_ecpu_rdma_dif0_rw_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_RAM_ERR_ADDR (0x104cf5c) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif0_rw_data_ram_err_u { + 
struct ecpu_padpt_ecpu_rdma_dif0_rw_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF0_RW_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_ADDR (0x104cf60) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif1_rw_info_u { + struct ecpu_padpt_ecpu_rdma_dif1_rw_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_LEN_ADDR (0x104cf64) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_LEN_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif1_rw_info_len_u { + struct ecpu_padpt_ecpu_rdma_dif1_rw_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_RAM_ERR_ADDR (0x104cf6c) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif1_rw_info_ram_err_u { + struct ecpu_padpt_ecpu_rdma_dif1_rw_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_ADDR (0x104cf70) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif1_rw_data_u { + struct ecpu_padpt_ecpu_rdma_dif1_rw_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_LEN_ADDR (0x104cf74) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_LEN_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_LEN_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_LEN_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif1_rw_data_len_u { + struct ecpu_padpt_ecpu_rdma_dif1_rw_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_RAM_ERR_ADDR (0x104cf7c) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_RAM_ERR_DEPTH (1) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_RAM_ERR_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_RAM_ERR_DWLEN (1) +union ecpu_padpt_ecpu_rdma_dif1_rw_data_ram_err_u { + struct ecpu_padpt_ecpu_rdma_dif1_rw_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_RDMA_DIF1_RW_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_ECPU_PADPT_ECPU_MSIX_PENDING_ADDR (0x104d000) +#define NBL_ECPU_PADPT_ECPU_MSIX_PENDING_DEPTH (16) +#define NBL_ECPU_PADPT_ECPU_MSIX_PENDING_WIDTH (64) +#define NBL_ECPU_PADPT_ECPU_MSIX_PENDING_DWLEN (2) +union ecpu_padpt_ecpu_msix_pending_u { + struct ecpu_padpt_ecpu_msix_pending { + u32 array_arr[2]; /* [63:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_PENDING_DWLEN]; +} __packed; 
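+
+/*
+ * Illustrative sketch, added for review and not emitted by the generator:
+ * every register above pairs a raw dword view (data[]) with a bitfield
+ * view (info), and the *_REG(r) macros below step through multi-entry
+ * registers in DWLEN * 4 byte strides.  Assuming a hypothetical `hw_addr`
+ * from ioremap() that maps this register space at offset 0 (the driver's
+ * real accessor helpers may differ), one MSI-X pending entry could be
+ * read like this:
+ *
+ *	static u64 nbl_read_msix_pending(void __iomem *hw_addr, u32 r)
+ *	{
+ *		union ecpu_padpt_ecpu_msix_pending_u reg;
+ *		int i;
+ *
+ *		/* fetch DWLEN dwords of entry r via the stride macro */
+ *		for (i = 0; i < NBL_ECPU_PADPT_ECPU_MSIX_PENDING_DWLEN; i++)
+ *			reg.data[i] = readl(hw_addr +
+ *				NBL_ECPU_PADPT_ECPU_MSIX_PENDING_REG(r) +
+ *				i * sizeof(u32));
+ *
+ *		/* the info view then decodes the 64-bit pending bitmap */
+ *		return ((u64)reg.info.array_arr[1] << 32) |
+ *		       reg.info.array_arr[0];
+ *	}
+ */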
+#define NBL_ECPU_PADPT_ECPU_MSIX_PENDING_REG(r) (NBL_ECPU_PADPT_ECPU_MSIX_PENDING_ADDR + \ + (NBL_ECPU_PADPT_ECPU_MSIX_PENDING_DWLEN * 4) * (r)) + +#define NBL_ECPU_PADPT_ECPU_VBLK_QINFO_ADDR (0x1050000) +#define NBL_ECPU_PADPT_ECPU_VBLK_QINFO_DEPTH (512) +#define NBL_ECPU_PADPT_ECPU_VBLK_QINFO_WIDTH (32) +#define NBL_ECPU_PADPT_ECPU_VBLK_QINFO_DWLEN (1) +union ecpu_padpt_ecpu_vblk_qinfo_u { + struct ecpu_padpt_ecpu_vblk_qinfo { + u32 fuction_id:3; /* [2:0] Default:0x0 RW */ + u32 device_id:5; /* [7:3] Default:0x0 RW */ + u32 bus_id:8; /* [15:8] Default:0x0 RW */ + u32 msix_idx:13; /* [28:16] Default:0x0 RW */ + u32 msix_idx_valid:1; /* [29] Default:0x0 RW */ + u32 log_en:1; /* [30] Default:0x0 RW */ + u32 valid:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_VBLK_QINFO_DWLEN]; +} __packed; +#define NBL_ECPU_PADPT_ECPU_VBLK_QINFO_REG(r) (NBL_ECPU_PADPT_ECPU_VBLK_QINFO_ADDR + \ + (NBL_ECPU_PADPT_ECPU_VBLK_QINFO_DWLEN * 4) * (r)) + +#define NBL_ECPU_PADPT_ECPU_VNET_QINFO_ADDR (0x1054000) +#define NBL_ECPU_PADPT_ECPU_VNET_QINFO_DEPTH (4096) +#define NBL_ECPU_PADPT_ECPU_VNET_QINFO_WIDTH (64) +#define NBL_ECPU_PADPT_ECPU_VNET_QINFO_DWLEN (2) +union ecpu_padpt_ecpu_vnet_qinfo_u { + struct ecpu_padpt_ecpu_vnet_qinfo { + u32 fuction_id:3; /* [2:0] Default:0x0 RW */ + u32 device_id:5; /* [7:3] Default:0x0 RW */ + u32 bus_id:8; /* [15:8] Default:0x0 RW */ + u32 msix_idx:13; /* [28:16] Default:0x0 RW */ + u32 msix_idx_valid:1; /* [29] Default:0x0 RW */ + u32 log_en:1; /* [30] Default:0x0 RW */ + u32 valid:1; /* [31] Default:0x0 RW */ + u32 tph_en:1; /* [32] Default:0x0 RW */ + u32 ido_en:1; /* [33] Default:0x0 RW */ + u32 rlo_en:1; /* [34] Default:0x0 RW */ + u32 rsv:29; /* [63:35] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_VNET_QINFO_DWLEN]; +} __packed; +#define NBL_ECPU_PADPT_ECPU_VNET_QINFO_REG(r) (NBL_ECPU_PADPT_ECPU_VNET_QINFO_ADDR + \ + (NBL_ECPU_PADPT_ECPU_VNET_QINFO_DWLEN * 4) * (r)) + +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_ADDR (0x105c000) +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_DEPTH (1024) +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_WIDTH (64) +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_DWLEN (2) +union ecpu_padpt_ecpu_msix_info_u { + struct ecpu_padpt_ecpu_msix_info { + u32 intrl_pnum:16; /* [15:0] Default:0x0 RW */ + u32 intrl_rate:16; /* [31:16] Default:0x0 RW */ + u32 fuction_id:3; /* [34:32] Default:0x0 RW */ + u32 device_id:5; /* [39:35] Default:0x0 RW */ + u32 bus_id:8; /* [47:40] Default:0x0 RW */ + u32 valid:1; /* [48:48] Default:0x0 RW */ + u32 msix_mask_en:1; /* [49:49] Default:0x0 RW */ + u32 rsv:14; /* [63:50] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_INFO_DWLEN]; +} __packed; +#define NBL_ECPU_PADPT_ECPU_MSIX_INFO_REG(r) (NBL_ECPU_PADPT_ECPU_MSIX_INFO_ADDR + \ + (NBL_ECPU_PADPT_ECPU_MSIX_INFO_DWLEN * 4) * (r)) + +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_ADDR (0x106c000) +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_DEPTH (1024) +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_WIDTH (128) +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_DWLEN (4) +union ecpu_padpt_ecpu_msix_ctrl_u { + struct ecpu_padpt_ecpu_msix_ctrl { + u32 lower_address:32; /* [31:0] Default:0x0 RW */ + u32 upper_address:32; /* [63:32] Default:0x0 RW */ + u32 message_data:32; /* [95:64] Default:0x0 RW */ + u32 vector_control_mask:1; /* [96:96] Default:0x0 RW */ + u32 vector_control_rsv:31; /* [127:97] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ECPU_PADPT_ECPU_MSIX_CTRL_DWLEN]; +} __packed; +#define NBL_ECPU_PADPT_ECPU_MSIX_CTRL_REG(r) 
(NBL_ECPU_PADPT_ECPU_MSIX_CTRL_ADDR + \
+ (NBL_ECPU_PADPT_ECPU_MSIX_CTRL_DWLEN * 4) * (r))
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ecpu_pcap.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ecpu_pcap.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef425d4c079b3107f600a9f0c4447ded96867beb
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ecpu_pcap.h
@@ -0,0 +1,529 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_ECPU_PCAP_H
+#define NBL_ECPU_PCAP_H 1
+
+#include <linux/types.h>
+
+#define NBL_ECPU_PCAP_BASE (0x014A4000)
+
+#define NBL_ECPU_PCAP_INT_STATUS_ADDR (0x14a4000)
+#define NBL_ECPU_PCAP_INT_STATUS_DEPTH (1)
+#define NBL_ECPU_PCAP_INT_STATUS_WIDTH (32)
+#define NBL_ECPU_PCAP_INT_STATUS_DWLEN (1)
+union ecpu_pcap_int_status_u {
+ struct ecpu_pcap_int_status {
+ u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RWC */
+ u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RWC */
+ u32 rsv2:2; /* [03:02] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+ u32 rsv1:2; /* [06:05] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */
+ u32 rsv:23; /* [31:09] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_INT_MASK_ADDR (0x14a4004)
+#define NBL_ECPU_PCAP_INT_MASK_DEPTH (1)
+#define NBL_ECPU_PCAP_INT_MASK_WIDTH (32)
+#define NBL_ECPU_PCAP_INT_MASK_DWLEN (1)
+union ecpu_pcap_int_mask_u {
+ struct ecpu_pcap_int_mask {
+ u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RW */
+ u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RW */
+ u32 rsv2:2; /* [03:02] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 RW */
+ u32 rsv1:2; /* [06:05] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 RW */
+ u32 rsv:23; /* [31:09] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_INT_SET_ADDR (0x14a4008)
+#define NBL_ECPU_PCAP_INT_SET_DEPTH (1)
+#define NBL_ECPU_PCAP_INT_SET_WIDTH (32)
+#define NBL_ECPU_PCAP_INT_SET_DWLEN (1)
+union ecpu_pcap_int_set_u {
+ struct ecpu_pcap_int_set {
+ u32 fifo_uflw_err:1; /* [00:00] Default:0x0 WO */
+ u32 fifo_dflw_err:1; /* [01:01] Default:0x0 WO */
+ u32 rsv2:2; /* [03:02] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 WO */
+ u32 rsv1:2; /* [06:05] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 WO */
+ u32 rsv:23; /* [31:09] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_INT_SET_DWLEN];
+} __packed;
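+
+/*
+ * Editor's usage sketch, not generated code: the *_INT_STATUS bits above
+ * are RWC (write-1-to-clear), *_INT_MASK gates them, and *_INT_SET can
+ * inject them for testing. `hw` is a hypothetical ioremap()'d BAR base;
+ * readl()/writel() come from <linux/io.h>.
+ */
+static inline void nbl_ecpu_pcap_ack_int(void __iomem *hw)
+{
+ u32 sts = readl(hw + NBL_ECPU_PCAP_INT_STATUS_ADDR);
+
+ /* write the latched bits back to clear them */
+ writel(sts, hw + NBL_ECPU_PCAP_INT_STATUS_ADDR);
+}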
+
+#define NBL_ECPU_PCAP_CIF_ERR_INFO_ADDR (0x14a4040)
+#define NBL_ECPU_PCAP_CIF_ERR_INFO_DEPTH (1)
+#define NBL_ECPU_PCAP_CIF_ERR_INFO_WIDTH (32)
+#define NBL_ECPU_PCAP_CIF_ERR_INFO_DWLEN (1)
+union ecpu_pcap_cif_err_info_u {
+ struct ecpu_pcap_cif_err_info {
+ u32 addr:30; /* [29:00] Default:0x0 RO */
+ u32 wr_err:1; /* [30:30] Default:0x0 RO */
+ u32 ucor_err:1; /* [31:31] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_CIF_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_CAR_CTRL_ADDR (0x14a4100)
+#define NBL_ECPU_PCAP_CAR_CTRL_DEPTH (1)
+#define NBL_ECPU_PCAP_CAR_CTRL_WIDTH (32)
+#define NBL_ECPU_PCAP_CAR_CTRL_DWLEN (1)
+union ecpu_pcap_car_ctrl_u {
+ struct ecpu_pcap_car_ctrl {
+ u32 sctr_car:1; /* [00:00] Default:0x1 RW */
+ u32 rctr_car:1; /* [01:01] Default:0x1 RW */
+ u32 rc_car:1; /* [02:02] Default:0x1 RW */
+ u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */
+ u32 rsv:28; /* [31:04] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_CAR_CTRL_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_CAP_EN_ADDR (0x14a4200)
+#define NBL_ECPU_PCAP_TX_CAP_EN_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_EN_WIDTH (32)
+#define NBL_ECPU_PCAP_TX_CAP_EN_DWLEN (1)
+union ecpu_pcap_tx_cap_en_u {
+ struct ecpu_pcap_tx_cap_en {
+ u32 force_en:1; /* [00:00] Default:0x1 RW */
+ u32 pattern_trigger_en:1; /* [01:01] Default:0x0 RW */
+ u32 err_trigger_en:1; /* [02:02] Default:0x0 RW */
+ u32 rsv:29; /* [31:03] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_EN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_CAP_STORE_ADDR (0x14a4204)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_WIDTH (32)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_DWLEN (1)
+union ecpu_pcap_tx_cap_store_u {
+ struct ecpu_pcap_tx_cap_store {
+ u32 match_mode:2; /* [01:00] Default:0x1 RW */
+ u32 match_only_en:1; /* [02:02] Default:0x1 RW */
+ u32 rsv:29; /* [31:03] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_STORE_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_CAP_STALL_ADDR (0x14a4208)
+#define NBL_ECPU_PCAP_TX_CAP_STALL_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_STALL_WIDTH (32)
+#define NBL_ECPU_PCAP_TX_CAP_STALL_DWLEN (1)
+union ecpu_pcap_tx_cap_stall_u {
+ struct ecpu_pcap_tx_cap_stall {
+ u32 error_full_stall_ena:1; /* [00:00] Default:0x0 RW */
+ u32 error_dly_stall_ena:1; /* [01:01] Default:0x0 RW */
+ u32 matched_full_stall_ena:1; /* [02:02] Default:0x0 RW */
+ u32 matched_dly_stall_ena:1; /* [03:03] Default:0x0 RW */
+ u32 ex_stall_ena:1; /* [04:04] Default:0x1 RW */
+ u32 aged_stall_ena:1; /* [05:05] Default:0x0 RW */
+ u32 rsv:26; /* [31:06] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_STALL_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_CAP_CLR_ADDR (0x14a420c)
+#define NBL_ECPU_PCAP_TX_CAP_CLR_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_CLR_WIDTH (32)
+#define NBL_ECPU_PCAP_TX_CAP_CLR_DWLEN (1)
+union ecpu_pcap_tx_cap_clr_u {
+ struct ecpu_pcap_tx_cap_clr {
+ u32 tlp_clr:1; /* [00:00] Default:0x0 RW */
+ u32 ltssm_clr:1; /* [01:01] Default:0x0 RW */
+ u32 timer_clr:1; /* [02:02] Default:0x0 RW */
+ u32 rsv:29; /* [31:03] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_CLR_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_EN_ADDR (0x14a4300)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_EN_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_EN_WIDTH (512)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_EN_DWLEN (16)
+union ecpu_pcap_tx_cap_store_pattern_en_u {
+ struct ecpu_pcap_tx_cap_store_pattern_en {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_EN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_ADDR (0x14a4340)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_WIDTH (512)
+#define NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_DWLEN (16)
+union ecpu_pcap_tx_cap_store_pattern_u {
+ struct ecpu_pcap_tx_cap_store_pattern {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_DWLEN];
+} __packed;
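+
+/*
+ * Editor's usage sketch, not generated code: the 512-bit store-match
+ * pattern is written as 16 consecutive dwords; *_PATTERN_EN selects the
+ * bits that take part in the compare. `hw` is a hypothetical BAR base;
+ * pat/msk point at 16-dword arrays supplied by the caller.
+ */
+static inline void nbl_ecpu_pcap_set_tx_store_pattern(void __iomem *hw, const u32 *pat, const u32 *msk)
+{
+ int i;
+
+ for (i = 0; i < NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_DWLEN; i++) {
+ writel(msk[i], hw + NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_EN_ADDR + 4 * i);
+ writel(pat[i], hw + NBL_ECPU_PCAP_TX_CAP_STORE_PATTERN_ADDR + 4 * i);
+ }
+}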
+
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_EN_ADDR (0x14a4380)
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_EN_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_EN_WIDTH (512)
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_EN_DWLEN (16)
+union ecpu_pcap_tx_cap_trigger_pattern_en_u {
+ struct ecpu_pcap_tx_cap_trigger_pattern_en {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_EN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_ADDR (0x14a43c0)
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_WIDTH (512)
+#define NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_DWLEN (16)
+union ecpu_pcap_tx_cap_trigger_pattern_u {
+ struct ecpu_pcap_tx_cap_trigger_pattern {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_CAP_TRIGGER_PATTERN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_CAP_EN_ADDR (0x14a4800)
+#define NBL_ECPU_PCAP_RX_CAP_EN_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_EN_WIDTH (32)
+#define NBL_ECPU_PCAP_RX_CAP_EN_DWLEN (1)
+union ecpu_pcap_rx_cap_en_u {
+ struct ecpu_pcap_rx_cap_en {
+ u32 force_en:1; /* [00:00] Default:0x1 RW */
+ u32 pattern_trigger_en:1; /* [01:01] Default:0x0 RW */
+ u32 err_trigger_en:1; /* [02:02] Default:0x0 RW */
+ u32 rsv:29; /* [31:03] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_EN_DWLEN];
+} __packed;
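+
+/*
+ * Editor's usage sketch, not generated code: flip RX capture from the
+ * default always-on mode to pattern-triggered mode through the bitfield
+ * overlay. `hw` is a hypothetical BAR base.
+ */
+static inline void nbl_ecpu_pcap_arm_rx_trigger(void __iomem *hw)
+{
+ union ecpu_pcap_rx_cap_en_u en;
+
+ en.data[0] = readl(hw + NBL_ECPU_PCAP_RX_CAP_EN_ADDR);
+ en.info.force_en = 0;
+ en.info.pattern_trigger_en = 1;
+ writel(en.data[0], hw + NBL_ECPU_PCAP_RX_CAP_EN_ADDR);
+}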
+
+#define NBL_ECPU_PCAP_RX_CAP_STORE_ADDR (0x14a4804)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_WIDTH (32)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_DWLEN (1)
+union ecpu_pcap_rx_cap_store_u {
+ struct ecpu_pcap_rx_cap_store {
+ u32 match_mode:2; /* [01:00] Default:0x1 RW */
+ u32 match_only_en:1; /* [02:02] Default:0x1 RW */
+ u32 rsv:29; /* [31:03] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_STORE_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_CAP_STALL_ADDR (0x14a4808)
+#define NBL_ECPU_PCAP_RX_CAP_STALL_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_STALL_WIDTH (32)
+#define NBL_ECPU_PCAP_RX_CAP_STALL_DWLEN (1)
+union ecpu_pcap_rx_cap_stall_u {
+ struct ecpu_pcap_rx_cap_stall {
+ u32 error_full_stall_ena:1; /* [00:00] Default:0x0 RW */
+ u32 error_dly_stall_ena:1; /* [01:01] Default:0x1 RW */
+ u32 matched_full_stall_ena:1; /* [02:02] Default:0x0 RW */
+ u32 matched_dly_stall_ena:1; /* [03:03] Default:0x0 RW */
+ u32 ex_stall_ena:1; /* [04:04] Default:0x0 RW */
+ u32 aged_stall_ena:1; /* [05:05] Default:0x0 RW */
+ u32 rsv:26; /* [31:06] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_STALL_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_CAP_CLR_ADDR (0x14a480c)
+#define NBL_ECPU_PCAP_RX_CAP_CLR_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_CLR_WIDTH (32)
+#define NBL_ECPU_PCAP_RX_CAP_CLR_DWLEN (1)
+union ecpu_pcap_rx_cap_clr_u {
+ struct ecpu_pcap_rx_cap_clr {
+ u32 tlp_clr:1; /* [00:00] Default:0x0 RW */
+ u32 ltssm_clr:1; /* [01:01] Default:0x0 RW */
+ u32 timer_clr:1; /* [02:02] Default:0x0 RW */
+ u32 rsv:29; /* [31:03] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_CLR_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_EN_ADDR (0x14a4900)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_EN_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_EN_WIDTH (512)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_EN_DWLEN (16)
+union ecpu_pcap_rx_cap_store_pattern_en_u {
+ struct ecpu_pcap_rx_cap_store_pattern_en {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_EN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_ADDR (0x14a4940)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_WIDTH (512)
+#define NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_DWLEN (16)
+union ecpu_pcap_rx_cap_store_pattern_u {
+ struct ecpu_pcap_rx_cap_store_pattern {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_STORE_PATTERN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_EN_ADDR (0x14a4980)
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_EN_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_EN_WIDTH (512)
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_EN_DWLEN (16)
+union ecpu_pcap_rx_cap_trigger_pattern_en_u {
+ struct ecpu_pcap_rx_cap_trigger_pattern_en {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_EN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_ADDR (0x14a49c0)
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_WIDTH (512)
+#define NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_DWLEN (16)
+union ecpu_pcap_rx_cap_trigger_pattern_u {
+ struct ecpu_pcap_rx_cap_trigger_pattern {
+ u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_CAP_TRIGGER_PATTERN_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_RAM_ERR_ADDR (0x14a5000)
+#define NBL_ECPU_PCAP_TX_RAM_ERR_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_RAM_ERR_WIDTH (32)
+#define NBL_ECPU_PCAP_TX_RAM_ERR_DWLEN (1)
+union ecpu_pcap_tx_ram_err_u {
+ struct ecpu_pcap_tx_ram_err {
+ u32 tlp_cap:1; /* [00:00] Default:0x0 RO */
+ u32 timer_cap:1; /* [01:01] Default:0x0 RO */
+ u32 rsv:30; /* [31:02] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_RX_RAM_ERR_ADDR (0x14a5004)
+#define NBL_ECPU_PCAP_RX_RAM_ERR_DEPTH (1)
+#define NBL_ECPU_PCAP_RX_RAM_ERR_WIDTH (32)
+#define NBL_ECPU_PCAP_RX_RAM_ERR_DWLEN (1)
+union ecpu_pcap_rx_ram_err_u {
+ struct ecpu_pcap_rx_ram_err {
+ u32 tlp_cap:1; /* [00:00] Default:0x0 RO */
+ u32 timer_cap:1; /* [01:01] Default:0x0 RO */
+ u32 rsv:30; /* [31:02] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_RX_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_LTSSM_RAM_ERR_ADDR (0x14a5008)
+#define NBL_ECPU_PCAP_LTSSM_RAM_ERR_DEPTH (1)
+#define NBL_ECPU_PCAP_LTSSM_RAM_ERR_WIDTH (32)
+#define NBL_ECPU_PCAP_LTSSM_RAM_ERR_DWLEN (1)
+union ecpu_pcap_ltssm_ram_err_u {
+ struct ecpu_pcap_ltssm_ram_err {
+ u32 ltssm_cap:1; /* [00:00] Default:0x0 RO */
+ u32 rsv:31; /* [31:01] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_LTSSM_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_RAM_TLP_CAP_ADDR (0x14a500c)
+#define NBL_ECPU_PCAP_TX_RAM_TLP_CAP_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_RAM_TLP_CAP_WIDTH (32)
+#define NBL_ECPU_PCAP_TX_RAM_TLP_CAP_DWLEN (1)
+union ecpu_pcap_tx_ram_tlp_cap_u {
+ struct ecpu_pcap_tx_ram_tlp_cap {
+ u32 ram_err_info:32; /* [31:00] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_ECPU_PCAP_TX_RAM_TLP_CAP_DWLEN];
+} __packed;
+
+#define NBL_ECPU_PCAP_TX_RAM_TIMER_CAP_ADDR (0x14a5010)
+#define NBL_ECPU_PCAP_TX_RAM_TIMER_CAP_DEPTH (1)
+#define NBL_ECPU_PCAP_TX_RAM_TIMER_CAP_WIDTH (32)
+#define NBL_ECPU_PCAP_TX_RAM_TIMER_CAP_DWLEN (1)
+union ecpu_pcap_tx_ram_timer_cap_u {
+ struct ecpu_pcap_tx_ram_timer_cap {
+ u32 ram_err_info:32; /* [31:00]
Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_TX_RAM_TIMER_CAP_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_RX_RAM_TLP_CAP_ADDR (0x14a5014) +#define NBL_ECPU_PCAP_RX_RAM_TLP_CAP_DEPTH (1) +#define NBL_ECPU_PCAP_RX_RAM_TLP_CAP_WIDTH (32) +#define NBL_ECPU_PCAP_RX_RAM_TLP_CAP_DWLEN (1) +union ecpu_pcap_rx_ram_tlp_cap_u { + struct ecpu_pcap_rx_ram_tlp_cap { + u32 ram_err_info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_RX_RAM_TLP_CAP_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_RX_RAM_TIMER_CAP_ADDR (0x14a5018) +#define NBL_ECPU_PCAP_RX_RAM_TIMER_CAP_DEPTH (1) +#define NBL_ECPU_PCAP_RX_RAM_TIMER_CAP_WIDTH (32) +#define NBL_ECPU_PCAP_RX_RAM_TIMER_CAP_DWLEN (1) +union ecpu_pcap_rx_ram_timer_cap_u { + struct ecpu_pcap_rx_ram_timer_cap { + u32 ram_err_info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_RX_RAM_TIMER_CAP_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_RAM_LTSSM_CAP_ADDR (0x14a501c) +#define NBL_ECPU_PCAP_RAM_LTSSM_CAP_DEPTH (1) +#define NBL_ECPU_PCAP_RAM_LTSSM_CAP_WIDTH (32) +#define NBL_ECPU_PCAP_RAM_LTSSM_CAP_DWLEN (1) +union ecpu_pcap_ram_ltssm_cap_u { + struct ecpu_pcap_ram_ltssm_cap { + u32 ram_err_info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_RAM_LTSSM_CAP_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_DEBUG_REG_LTSSM_WADDR_ADDR (0x14a5040) +#define NBL_ECPU_PCAP_DEBUG_REG_LTSSM_WADDR_DEPTH (1) +#define NBL_ECPU_PCAP_DEBUG_REG_LTSSM_WADDR_WIDTH (32) +#define NBL_ECPU_PCAP_DEBUG_REG_LTSSM_WADDR_DWLEN (1) +union ecpu_pcap_debug_reg_ltssm_waddr_u { + struct ecpu_pcap_debug_reg_ltssm_waddr { + u32 dbg:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_DEBUG_REG_LTSSM_WADDR_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_ADDR (0x14a5100) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_DEPTH (4) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_WIDTH (32) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_DWLEN (1) +union ecpu_pcap_tx_debug_reg_cur_timer_u { + struct ecpu_pcap_tx_debug_reg_cur_timer { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_DWLEN]; +} __packed; +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_REG(r) (NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_ADDR + \ + (NBL_ECPU_PCAP_TX_DEBUG_REG_CUR_TIMER_DWLEN * 4) * (r)) + +#define NBL_ECPU_PCAP_TX_DEBUG_REG_TLP_WADDR_ADDR (0x14a5110) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_TLP_WADDR_DEPTH (1) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_TLP_WADDR_WIDTH (32) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_TLP_WADDR_DWLEN (1) +union ecpu_pcap_tx_debug_reg_tlp_waddr_u { + struct ecpu_pcap_tx_debug_reg_tlp_waddr { + u32 dbg:7; /* [06:00] Default:0x0 RO */ + u32 rsv:25; /* [31:07] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_TX_DEBUG_REG_TLP_WADDR_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CAP_ADDR (0x14a5114) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CAP_DEPTH (1) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CAP_WIDTH (32) +#define NBL_ECPU_PCAP_TX_DEBUG_REG_CAP_DWLEN (1) +union ecpu_pcap_tx_debug_reg_cap_u { + struct ecpu_pcap_tx_debug_reg_cap { + u32 cap_en_dbg:1; /* [00:00] Default:0x0 RO */ + u32 cap_stalled_dbg:1; /* [01:01] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_TX_DEBUG_REG_CAP_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_ADDR (0x14a5200) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_DEPTH (4) 
+#define NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_WIDTH (32) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_DWLEN (1) +union ecpu_pcap_rx_debug_reg_cur_timer_u { + struct ecpu_pcap_rx_debug_reg_cur_timer { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_DWLEN]; +} __packed; +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_REG(r) (NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_ADDR + \ + (NBL_ECPU_PCAP_RX_DEBUG_REG_CUR_TIMER_DWLEN * 4) * (r)) + +#define NBL_ECPU_PCAP_RX_DEBUG_REG_TLP_WADDR_ADDR (0x14a5210) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_TLP_WADDR_DEPTH (1) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_TLP_WADDR_WIDTH (32) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_TLP_WADDR_DWLEN (1) +union ecpu_pcap_rx_debug_reg_tlp_waddr_u { + struct ecpu_pcap_rx_debug_reg_tlp_waddr { + u32 dbg:7; /* [06:00] Default:0x0 RO */ + u32 rsv:25; /* [31:07] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_RX_DEBUG_REG_TLP_WADDR_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CAP_ADDR (0x14a5214) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CAP_DEPTH (1) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CAP_WIDTH (32) +#define NBL_ECPU_PCAP_RX_DEBUG_REG_CAP_DWLEN (1) +union ecpu_pcap_rx_debug_reg_cap_u { + struct ecpu_pcap_rx_debug_reg_cap { + u32 cap_en_dbg:1; /* [00:00] Default:0x0 RO */ + u32 cap_stalled_dbg:1; /* [01:01] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_RX_DEBUG_REG_CAP_DWLEN]; +} __packed; + +#define NBL_ECPU_PCAP_LTSSM_RAM_TABLE_ADDR (0x14a6000) +#define NBL_ECPU_PCAP_LTSSM_RAM_TABLE_DEPTH (256) +#define NBL_ECPU_PCAP_LTSSM_RAM_TABLE_WIDTH (128) +#define NBL_ECPU_PCAP_LTSSM_RAM_TABLE_DWLEN (4) +union ecpu_pcap_ltssm_ram_table_u { + struct ecpu_pcap_ltssm_ram_table { + u32 ltssm:5; /* [4:0] Default:0x0 RO */ + u32 ltssm_rsv:3; /* [7:5] Default:0x0 RO */ + u32 timer:32; /* [103:8] Default:0x0 RO */ + u32 timer_arr[2]; /* [103:8] Default:0x0 RO */ + u32 rsv:24; /* [127:104] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_LTSSM_RAM_TABLE_DWLEN]; +} __packed; +#define NBL_ECPU_PCAP_LTSSM_RAM_TABLE_REG(r) (NBL_ECPU_PCAP_LTSSM_RAM_TABLE_ADDR + \ + (NBL_ECPU_PCAP_LTSSM_RAM_TABLE_DWLEN * 4) * (r)) + +#define NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_ADDR (0x14b4000) +#define NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_DEPTH (128) +#define NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_WIDTH (1024) +#define NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_DWLEN (32) +union ecpu_pcap_tx_tlp_ram_table_u { + struct ecpu_pcap_tx_tlp_ram_table { + u32 native_data_arr[16]; /* [511:0] Default:0x0 RO */ + u32 timer:32; /* [607:512] Default:0x0 RO */ + u32 timer_arr[2]; /* [607:512] Default:0x0 RO */ + u32 rsv:32; /* [1023:608] Default:0x0 RO */ + u32 rsv_arr[12]; /* [1023:608] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_DWLEN]; +} __packed; +#define NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_REG(r) (NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_ADDR + \ + (NBL_ECPU_PCAP_TX_TLP_RAM_TABLE_DWLEN * 4) * (r)) + +#define NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_ADDR (0x14bc000) +#define NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_DEPTH (128) +#define NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_WIDTH (1024) +#define NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_DWLEN (32) +union ecpu_pcap_rx_tlp_ram_table_u { + struct ecpu_pcap_rx_tlp_ram_table { + u32 native_data_arr[16]; /* [511:0] Default:0x0 RO */ + u32 timer:32; /* [607:512] Default:0x0 RO */ + u32 timer_arr[2]; /* [607:512] Default:0x0 RO */ + u32 rsv:32; /* [1023:608] Default:0x0 RO */ + u32 rsv_arr[12]; /* [1023:608] Default:0x0 RO */ + } __packed info; + 
u32 data[NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_DWLEN];
+} __packed;
+#define NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_REG(r) (NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_ADDR + \
+ (NBL_ECPU_PCAP_RX_TLP_RAM_TABLE_DWLEN * 4) * (r))
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_fifo_ch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_fifo_ch.h
new file mode 100644
index 0000000000000000000000000000000000000000..835c2978e7f10336d4e44edbc26c77a5aa9f6550
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_fifo_ch.h
@@ -0,0 +1,167 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_FIFO_CH_H
+#define NBL_FIFO_CH_H 1
+
+#include <linux/types.h>
+
+#define NBL_FIFO_CH_BASE (0x00FAC000)
+
+#define NBL_FIFO_CH_INT_STATUS_ADDR (0xfac000)
+#define NBL_FIFO_CH_INT_STATUS_DEPTH (1)
+#define NBL_FIFO_CH_INT_STATUS_WIDTH (32)
+#define NBL_FIFO_CH_INT_STATUS_DWLEN (1)
+union fifo_ch_int_status_u {
+ struct fifo_ch_int_status {
+ u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+ u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+ u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+ u32 rsv4:1; /* [03:03] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+ u32 rsv3:1; /* [05:05] Default:0x0 RO */
+ u32 rsv2:1; /* [06:06] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */
+ u32 rsv1:1; /* [09:09] Default:0x0 RO */
+ u32 rsv:22; /* [31:10] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_FIFO_CH_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_FIFO_CH_INT_MASK_ADDR (0xfac004)
+#define NBL_FIFO_CH_INT_MASK_DEPTH (1)
+#define NBL_FIFO_CH_INT_MASK_WIDTH (32)
+#define NBL_FIFO_CH_INT_MASK_DWLEN (1)
+union fifo_ch_int_mask_u {
+ struct fifo_ch_int_mask {
+ u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+ u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+ u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+ u32 rsv4:1; /* [03:03] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 RW */
+ u32 rsv3:1; /* [05:05] Default:0x0 RO */
+ u32 rsv2:1; /* [06:06] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 RW */
+ u32 rsv1:1; /* [09:09] Default:0x0 RO */
+ u32 rsv:22; /* [31:10] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_FIFO_CH_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_FIFO_CH_INT_SET_ADDR (0xfac008)
+#define NBL_FIFO_CH_INT_SET_DEPTH (1)
+#define NBL_FIFO_CH_INT_SET_WIDTH (32)
+#define NBL_FIFO_CH_INT_SET_DWLEN (1)
+union fifo_ch_int_set_u {
+ struct fifo_ch_int_set {
+ u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+ u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+ u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+ u32 rsv4:1; /* [03:03] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 WO */
+ u32 rsv3:1; /* [05:05] Default:0x0 RO */
+ u32 rsv2:1; /* [06:06] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 WO */
+ u32 rsv1:1; /* [09:09] Default:0x0 RO */
+ u32 rsv:22; /* [31:10] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_FIFO_CH_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_FIFO_CH_INIT_DONE_ADDR (0xfac00c)
+#define NBL_FIFO_CH_INIT_DONE_DEPTH (1)
+#define NBL_FIFO_CH_INIT_DONE_WIDTH (32)
+#define NBL_FIFO_CH_INIT_DONE_DWLEN (1)
+union fifo_ch_init_done_u {
+ struct fifo_ch_init_done {
+ u32 done:1; /* [00:00] Default:0x0 RO */
+ u32
rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FIFO_CH_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_FIFO_CH_CIF_ERR_INFO_ADDR (0xfac040) +#define NBL_FIFO_CH_CIF_ERR_INFO_DEPTH (1) +#define NBL_FIFO_CH_CIF_ERR_INFO_WIDTH (32) +#define NBL_FIFO_CH_CIF_ERR_INFO_DWLEN (1) +union fifo_ch_cif_err_info_u { + struct fifo_ch_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FIFO_CH_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_FIFO_CH_CAR_CTRL_ADDR (0xfac100) +#define NBL_FIFO_CH_CAR_CTRL_DEPTH (1) +#define NBL_FIFO_CH_CAR_CTRL_WIDTH (32) +#define NBL_FIFO_CH_CAR_CTRL_DWLEN (1) +union fifo_ch_car_ctrl_u { + struct fifo_ch_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FIFO_CH_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_FIFO_CH_EMULATOR_READY_ADDR (0xfac214) +#define NBL_FIFO_CH_EMULATOR_READY_DEPTH (1) +#define NBL_FIFO_CH_EMULATOR_READY_WIDTH (32) +#define NBL_FIFO_CH_EMULATOR_READY_DWLEN (1) +union fifo_ch_emulator_ready_u { + struct fifo_ch_emulator_ready { + u32 ecpu_emulater_ready:1; /* [0] Default:0x0 RW */ + u32 rsv1:15; /* [15:1] Default:0x0 RO */ + u32 debug_flag:8; /* [23:16] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FIFO_CH_EMULATOR_READY_DWLEN]; +} __packed; + +#define NBL_FIFO_CH_EMULATOR_BIOS_WAIT_ADDR (0xfac218) +#define NBL_FIFO_CH_EMULATOR_BIOS_WAIT_DEPTH (1) +#define NBL_FIFO_CH_EMULATOR_BIOS_WAIT_WIDTH (32) +#define NBL_FIFO_CH_EMULATOR_BIOS_WAIT_DWLEN (1) +union fifo_ch_emulator_bios_wait_u { + struct fifo_ch_emulator_bios_wait { + u32 cnt:8; /* [7:0] Default:0x0 RW */ + u32 rsv1:8; /* [15:8] Default:0x0 RO */ + u32 debug_flag:8; /* [23:16] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FIFO_CH_EMULATOR_BIOS_WAIT_DWLEN]; +} __packed; + +#define NBL_FIFO_CH_SHARE_BUF_ADDR (0xfac21c) +#define NBL_FIFO_CH_SHARE_BUF_DEPTH (1) +#define NBL_FIFO_CH_SHARE_BUF_WIDTH (32) +#define NBL_FIFO_CH_SHARE_BUF_DWLEN (1) +union fifo_ch_share_buf_u { + struct fifo_ch_share_buf { + u32 intr:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FIFO_CH_SHARE_BUF_DWLEN]; +} __packed; + +#define NBL_FIFO_CH_SHARE_RF_ADDR (0xfac400) +#define NBL_FIFO_CH_SHARE_RF_DEPTH (64) +#define NBL_FIFO_CH_SHARE_RF_WIDTH (32) +#define NBL_FIFO_CH_SHARE_RF_DWLEN (1) +union fifo_ch_share_rf_u { + struct fifo_ch_share_rf { + u32 val:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_FIFO_CH_SHARE_RF_DWLEN]; +} __packed; +#define NBL_FIFO_CH_SHARE_RF_REG(r) (NBL_FIFO_CH_SHARE_RF_ADDR + \ + (NBL_FIFO_CH_SHARE_RF_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_host_padpt.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_host_padpt.h new file mode 100644 index 0000000000000000000000000000000000000000..14110962f58daafb7b0dc24981fe610c6d64a0db --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_host_padpt.h @@ -0,0 +1,2999 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_HOST_PADPT_H
+#define NBL_HOST_PADPT_H 1
+
+#include <linux/types.h>
+
+#define NBL_HOST_PADPT_BASE (0x00F4C000)
+
+#define NBL_HOST_PADPT_INT_STATUS_ADDR (0xf4c000)
+#define NBL_HOST_PADPT_INT_STATUS_DEPTH (1)
+#define NBL_HOST_PADPT_INT_STATUS_WIDTH (32)
+#define NBL_HOST_PADPT_INT_STATUS_DWLEN (1)
+union host_padpt_int_status_u {
+ struct host_padpt_int_status {
+ u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+ u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+ u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+ u32 rsv4:1; /* [03:03] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+ u32 rsv3:1; /* [05:05] Default:0x0 RO */
+ u32 rsv2:1; /* [06:06] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */
+ u32 rsv1:1; /* [09:09] Default:0x0 RO */
+ u32 rsv:22; /* [31:10] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_HOST_PADPT_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_INT_MASK_ADDR (0xf4c004)
+#define NBL_HOST_PADPT_INT_MASK_DEPTH (1)
+#define NBL_HOST_PADPT_INT_MASK_WIDTH (32)
+#define NBL_HOST_PADPT_INT_MASK_DWLEN (1)
+union host_padpt_int_mask_u {
+ struct host_padpt_int_mask {
+ u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+ u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+ u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+ u32 rsv4:1; /* [03:03] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 RW */
+ u32 rsv3:1; /* [05:05] Default:0x0 RO */
+ u32 rsv2:1; /* [06:06] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 RW */
+ u32 rsv1:1; /* [09:09] Default:0x0 RO */
+ u32 rsv:22; /* [31:10] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_HOST_PADPT_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_INT_SET_ADDR (0xf4c008)
+#define NBL_HOST_PADPT_INT_SET_DEPTH (1)
+#define NBL_HOST_PADPT_INT_SET_WIDTH (32)
+#define NBL_HOST_PADPT_INT_SET_DWLEN (1)
+union host_padpt_int_set_u {
+ struct host_padpt_int_set {
+ u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+ u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+ u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+ u32 rsv4:1; /* [03:03] Default:0x0 RO */
+ u32 cif_err:1; /* [04:04] Default:0x0 WO */
+ u32 rsv3:1; /* [05:05] Default:0x0 RO */
+ u32 rsv2:1; /* [06:06] Default:0x0 RO */
+ u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+ u32 data_cor_err:1; /* [08:08] Default:0x0 WO */
+ u32 rsv1:1; /* [09:09] Default:0x0 RO */
+ u32 rsv:22; /* [31:10] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_HOST_PADPT_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_INIT_DONE_ADDR (0xf4c00c)
+#define NBL_HOST_PADPT_INIT_DONE_DEPTH (1)
+#define NBL_HOST_PADPT_INIT_DONE_WIDTH (32)
+#define NBL_HOST_PADPT_INIT_DONE_DWLEN (1)
+union host_padpt_init_done_u {
+ struct host_padpt_init_done {
+ u32 init_done:1; /* [00:00] Default:0x0 RO */
+ u32 rsv:31; /* [31:01] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_HOST_PADPT_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_CIF_ERR_INFO_ADDR (0xf4c040)
+#define NBL_HOST_PADPT_CIF_ERR_INFO_DEPTH (1)
+#define NBL_HOST_PADPT_CIF_ERR_INFO_WIDTH (32)
+#define NBL_HOST_PADPT_CIF_ERR_INFO_DWLEN (1)
+union host_padpt_cif_err_info_u {
+ struct host_padpt_cif_err_info {
+ u32 addr:30; /* [29:00] Default:0x0 RO */
+ u32 wr_err:1; /* [30:30] Default:0x0 RO */
+ u32 ucor_err:1; /* [31:31] Default:0x0 RO */
+ } __packed info;
+ u32 data[NBL_HOST_PADPT_CIF_ERR_INFO_DWLEN];
+}
__packed; + +#define NBL_HOST_PADPT_RAM_ERROR_ADDR (0xf4c050) +#define NBL_HOST_PADPT_RAM_ERROR_DEPTH (1) +#define NBL_HOST_PADPT_RAM_ERROR_WIDTH (32) +#define NBL_HOST_PADPT_RAM_ERROR_DWLEN (1) +union host_padpt_ram_error_u { + struct host_padpt_ram_error { + u32 dif0_rd_info_fifo:1; /* [00] Default:0x0 RC */ + u32 dif1_rd_info_fifo:1; /* [01] Default:0x0 RC */ + u32 dif2_rd_info_fifo:1; /* [02] Default:0x0 RC */ + u32 dif3_rd_info_fifo:1; /* [03] Default:0x0 RC */ + u32 dif0_wr_info_fifo:1; /* [04] Default:0x0 RC */ + u32 dif0_wr_data_fifo:1; /* [05] Default:0x0 RC */ + u32 dif1_wr_info_fifo:1; /* [06] Default:0x0 RC */ + u32 dif1_wr_data_fifo:1; /* [07] Default:0x0 RC */ + u32 dif2_wr_info_fifo:1; /* [08] Default:0x0 RC */ + u32 dif2_wr_data_fifo:1; /* [09] Default:0x0 RC */ + u32 dif3_wr_info_fifo:1; /* [10] Default:0x0 RC */ + u32 dif3_wr_data_fifo:1; /* [11] Default:0x0 RC */ + u32 rdma_dif0_rw_info_fifo:1; /* [12] Default:0x0 RC */ + u32 rdma_dif0_rw_data_fifo:1; /* [13] Default:0x0 RC */ + u32 rdma_dif1_rw_info_fifo:1; /* [14] Default:0x0 RC */ + u32 rdma_dif1_rw_data_fifo:1; /* [15] Default:0x0 RC */ + u32 wr_mux_dif_info_fifo:1; /* [16] Default:0x0 RC */ + u32 wr_mux_dif_data_fifo:1; /* [17] Default:0x0 RC */ + u32 rd_mux_dif_info_fifo:1; /* [18] Default:0x0 RC */ + u32 rw_mux_dif_info_fifo:1; /* [19] Default:0x0 RC */ + u32 rw_mux_dif_data_fifo:1; /* [20] Default:0x0 RC */ + u32 spl_tlp_info_fifo:1; /* [21] Default:0x0 RC */ + u32 spl_tag_fifo:1; /* [22] Default:0x0 RC */ + u32 spl_sel_fifo:1; /* [23] Default:0x0 RC */ + u32 spl_sel_tag_fifo:1; /* [24] Default:0x0 RC */ + u32 spl_msix_fifo:1; /* [25] Default:0x0 RC */ + u32 jon_rdif_data_fifo:1; /* [26] Default:0x0 RC */ + u32 jon_rdif_info_fifo:1; /* [27] Default:0x0 RC */ + u32 msix_data_fifo:1; /* [28] Default:0x0 RC */ + u32 msix_info_fifo:1; /* [29] Default:0x0 RC */ + u32 tlp_info_fifo:1; /* [30] Default:0x0 RC */ + u32 tlp_data_fifo:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_HOST_PADPT_RAM_ERROR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_TABLE_RAM_ERROR_ADDR (0xf4c080) +#define NBL_HOST_PADPT_TABLE_RAM_ERROR_DEPTH (1) +#define NBL_HOST_PADPT_TABLE_RAM_ERROR_WIDTH (32) +#define NBL_HOST_PADPT_TABLE_RAM_ERROR_DWLEN (1) +union host_padpt_table_ram_error_u { + struct host_padpt_table_ram_error { + u32 vnet_qinfo_ram:1; /* [00] Default:0x0 RC */ + u32 vblk_qinfo_ram:1; /* [01] Default:0x0 RC */ + u32 msix_ctrl_ram:1; /* [02] Default:0x0 RC */ + u32 msix_pba_ram:1; /* [03] Default:0x0 RC */ + u32 msix_intrl_info_ram:1; /* [04] Default:0x0 RC */ + u32 msix_intrl_bdf_ram:1; /* [05] Default:0x0 RC */ + u32 msix_intrl_ram:1; /* [06] Default:0x0 RC */ + u32 cpl_dmux_high_0_ram:1; /* [07] Default:0x0 RC */ + u32 cpl_dmux_high_1_ram:1; /* [08] Default:0x0 RC */ + u32 cpl_dmux_low_0_ram:1; /* [09] Default:0x0 RC */ + u32 cpl_dmux_low_1_ram:1; /* [10] Default:0x0 RC */ + u32 cpl_dmux_tag_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_TABLE_RAM_ERROR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_TABLE_RAM_SBITERR_ADDR (0xf4c084) +#define NBL_HOST_PADPT_TABLE_RAM_SBITERR_DEPTH (1) +#define NBL_HOST_PADPT_TABLE_RAM_SBITERR_WIDTH (32) +#define NBL_HOST_PADPT_TABLE_RAM_SBITERR_DWLEN (1) +union host_padpt_table_ram_sbiterr_u { + struct host_padpt_table_ram_sbiterr { + u32 vnet_qinfo_ram:1; /* [00] Default:0x0 RC */ + u32 vblk_qinfo_ram:1; /* [01] Default:0x0 RC */ + u32 msix_ctrl_ram:1; /* [02] Default:0x0 RC */ + u32 msix_pba_ram:1; /* 
[03] Default:0x0 RC */ + u32 msix_intrl_info_ram:1; /* [04] Default:0x0 RC */ + u32 msix_intrl_bdf_ram:1; /* [05] Default:0x0 RC */ + u32 msix_intrl_ram:1; /* [06] Default:0x0 RC */ + u32 cpl_dmux_high_0_ram:1; /* [07] Default:0x0 RC */ + u32 cpl_dmux_high_1_ram:1; /* [08] Default:0x0 RC */ + u32 cpl_dmux_low_0_ram:1; /* [09] Default:0x0 RC */ + u32 cpl_dmux_low_1_ram:1; /* [10] Default:0x0 RC */ + u32 cpl_dmux_tag_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_TABLE_RAM_SBITERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_TABLE_RAM_DBITERR_ADDR (0xf4c088) +#define NBL_HOST_PADPT_TABLE_RAM_DBITERR_DEPTH (1) +#define NBL_HOST_PADPT_TABLE_RAM_DBITERR_WIDTH (32) +#define NBL_HOST_PADPT_TABLE_RAM_DBITERR_DWLEN (1) +union host_padpt_table_ram_dbiterr_u { + struct host_padpt_table_ram_dbiterr { + u32 vnet_qinfo_ram:1; /* [00] Default:0x0 RC */ + u32 vblk_qinfo_ram:1; /* [01] Default:0x0 RC */ + u32 msix_ctrl_ram:1; /* [02] Default:0x0 RC */ + u32 msix_pba_ram:1; /* [03] Default:0x0 RC */ + u32 msix_intrl_info_ram:1; /* [04] Default:0x0 RC */ + u32 msix_intrl_bdf_ram:1; /* [05] Default:0x0 RC */ + u32 msix_intrl_ram:1; /* [06] Default:0x0 RC */ + u32 cpl_dmux_high_0_ram:1; /* [07] Default:0x0 RC */ + u32 cpl_dmux_high_1_ram:1; /* [08] Default:0x0 RC */ + u32 cpl_dmux_low_0_ram:1; /* [09] Default:0x0 RC */ + u32 cpl_dmux_low_1_ram:1; /* [10] Default:0x0 RC */ + u32 cpl_dmux_tag_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_TABLE_RAM_DBITERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_CAR_CTRL_ADDR (0xf4c100) +#define NBL_HOST_PADPT_CAR_CTRL_DEPTH (1) +#define NBL_HOST_PADPT_CAR_CTRL_WIDTH (32) +#define NBL_HOST_PADPT_CAR_CTRL_DWLEN (1) +union host_padpt_car_ctrl_u { + struct host_padpt_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_TAG_AGED_ADDR (0xf4c104) +#define NBL_HOST_PADPT_HOST_TAG_AGED_DEPTH (1) +#define NBL_HOST_PADPT_HOST_TAG_AGED_WIDTH (32) +#define NBL_HOST_PADPT_HOST_TAG_AGED_DWLEN (1) +union host_padpt_host_tag_aged_u { + struct host_padpt_host_tag_aged { + u32 times:31; /* [30:00] Default:0xE4E1C RW */ + u32 en:1; /* [31] Default:0x1 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_TAG_AGED_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_GBL_CTRL_ADDR (0xf4c108) +#define NBL_HOST_PADPT_HOST_GBL_CTRL_DEPTH (1) +#define NBL_HOST_PADPT_HOST_GBL_CTRL_WIDTH (32) +#define NBL_HOST_PADPT_HOST_GBL_CTRL_DWLEN (1) +union host_padpt_host_gbl_ctrl_u { + struct host_padpt_host_gbl_ctrl { + u32 wr_en:1; /* [0:0] Default:0x1 RW */ + u32 rd_en:1; /* [1:1] Default:0x1 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_GBL_CTRL_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_GBL_INTRL_CTRL_ADDR (0xf4c10c) +#define NBL_HOST_PADPT_HOST_GBL_INTRL_CTRL_DEPTH (1) +#define NBL_HOST_PADPT_HOST_GBL_INTRL_CTRL_WIDTH (32) +#define NBL_HOST_PADPT_HOST_GBL_INTRL_CTRL_DWLEN (1) +union host_padpt_host_gbl_intrl_ctrl_u { + struct host_padpt_host_gbl_intrl_ctrl { + u32 valid:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_HOST_PADPT_HOST_GBL_INTRL_CTRL_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_GBL_INTRL_ADDR (0xf4c110) +#define NBL_HOST_PADPT_HOST_GBL_INTRL_DEPTH (1) +#define NBL_HOST_PADPT_HOST_GBL_INTRL_WIDTH (32) +#define NBL_HOST_PADPT_HOST_GBL_INTRL_DWLEN (1) +union host_padpt_host_gbl_intrl_u { + struct host_padpt_host_gbl_intrl { + u32 pnum:16; /* [15:00] Default:0x0 RW */ + u32 rate:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_GBL_INTRL_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_10BTAG_ADDR (0xf4c114) +#define NBL_HOST_PADPT_HOST_10BTAG_DEPTH (1) +#define NBL_HOST_PADPT_HOST_10BTAG_WIDTH (32) +#define NBL_HOST_PADPT_HOST_10BTAG_DWLEN (1) +union host_padpt_host_10btag_u { + struct host_padpt_host_10btag { + u32 en:1; /* [0:0] Default:0x0 RW */ + u32 vld:1; /* [1:1] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_10BTAG_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_GBL_MSIX_MASK_ADDR (0xf4c158) +#define NBL_HOST_PADPT_HOST_GBL_MSIX_MASK_DEPTH (1) +#define NBL_HOST_PADPT_HOST_GBL_MSIX_MASK_WIDTH (32) +#define NBL_HOST_PADPT_HOST_GBL_MSIX_MASK_DWLEN (1) +union host_padpt_host_gbl_msix_mask_u { + struct host_padpt_host_gbl_msix_mask { + u32 en:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_GBL_MSIX_MASK_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CFG_UP_ADDR (0xf4c15c) +#define NBL_HOST_PADPT_HOST_CFG_UP_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CFG_UP_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CFG_UP_DWLEN (1) +union host_padpt_host_cfg_up_u { + struct host_padpt_host_cfg_up { + u32 tag8b_th:16; /* [15:00] Default:0xF0 RW */ + u32 tag10b_th:16; /* [31:16] Default:0x2F0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CFG_UP_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN_ADDR (0xf4c160) +#define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN_DWLEN (1) +union host_padpt_host_cfg_fc_pd_dn_u { + struct host_padpt_host_cfg_fc_pd_dn { + u32 th:16; /* [15:00] Default:0x20 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CFG_FC_PD_DN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CFG_FC_PH_DN_ADDR (0xf4c164) +#define NBL_HOST_PADPT_HOST_CFG_FC_PH_DN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CFG_FC_PH_DN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CFG_FC_PH_DN_DWLEN (1) +union host_padpt_host_cfg_fc_ph_dn_u { + struct host_padpt_host_cfg_fc_ph_dn { + u32 th:12; /* [11:00] Default:0x20 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CFG_FC_PH_DN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CFG_FC_NPD_DN_ADDR (0xf4c168) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPD_DN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPD_DN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPD_DN_DWLEN (1) +union host_padpt_host_cfg_fc_npd_dn_u { + struct host_padpt_host_cfg_fc_npd_dn { + u32 th:16; /* [15:00] Default:0x20 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CFG_FC_NPD_DN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN_ADDR (0xf4c16c) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN_DEPTH 
(1) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN_DWLEN (1) +union host_padpt_host_cfg_fc_nph_dn_u { + struct host_padpt_host_cfg_fc_nph_dn { + u32 th:12; /* [11:00] Default:0x20 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP_ADDR (0xf4c170) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP_DWLEN (1) +union host_padpt_host_cfg_fc_cplh_up_u { + struct host_padpt_host_cfg_fc_cplh_up { + u32 th:16; /* [15:00] Default:0x80 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLD_UP_ADDR (0xf4c174) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLD_UP_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLD_UP_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLD_UP_DWLEN (1) +union host_padpt_host_cfg_fc_cpld_up_u { + struct host_padpt_host_cfg_fc_cpld_up { + u32 th:16; /* [15:00] Default:0x400 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CFG_FC_CPLD_UP_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_NATIVE_TX_P_ADDR (0xf4c180) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_P_DEPTH (1) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_P_WIDTH (32) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_P_DWLEN (1) +union host_padpt_host_native_tx_p_u { + struct host_padpt_host_native_tx_p { + u32 credits:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_NATIVE_TX_P_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_NATIVE_TX_NP_ADDR (0xf4c184) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_NP_DEPTH (1) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_NP_WIDTH (32) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_NP_DWLEN (1) +union host_padpt_host_native_tx_np_u { + struct host_padpt_host_native_tx_np { + u32 credits:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_NATIVE_TX_NP_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_NATIVE_TX_CPL_ADDR (0xf4c188) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_CPL_DEPTH (1) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_CPL_WIDTH (32) +#define NBL_HOST_PADPT_HOST_NATIVE_TX_CPL_DWLEN (1) +union host_padpt_host_native_tx_cpl_u { + struct host_padpt_host_native_tx_cpl { + u32 credits:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_NATIVE_TX_CPL_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_ABNORMAL_MSIX_VEC_ADDR (0xf4c200) +#define NBL_HOST_PADPT_HOST_ABNORMAL_MSIX_VEC_DEPTH (1) +#define NBL_HOST_PADPT_HOST_ABNORMAL_MSIX_VEC_WIDTH (32) +#define NBL_HOST_PADPT_HOST_ABNORMAL_MSIX_VEC_DWLEN (1) +union host_padpt_host_abnormal_msix_vec_u { + struct host_padpt_host_abnormal_msix_vec { + u32 idx:16; /* [15:0] Default:0x0 RW */ + u32 vld:1; /* [16:16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_ABNORMAL_MSIX_VEC_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_ABNORMAL_MISX_TIMEOUT_ADDR (0xf4c204) +#define NBL_HOST_PADPT_HOST_ABNORMAL_MISX_TIMEOUT_DEPTH (1) +#define NBL_HOST_PADPT_HOST_ABNORMAL_MISX_TIMEOUT_WIDTH (32) +#define 
NBL_HOST_PADPT_HOST_ABNORMAL_MISX_TIMEOUT_DWLEN (1) +union host_padpt_host_abnormal_misx_timeout_u { + struct host_padpt_host_abnormal_misx_timeout { + u32 value:32; /* [31:00] Default:0x3938700 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_ABNORMAL_MISX_TIMEOUT_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW0_ADDR (0xf4c300) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW0_DEPTH (1) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW0_WIDTH (32) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW0_DWLEN (1) +union host_padpt_host_invld_msix_vec_dw0_u { + struct host_padpt_host_invld_msix_vec_dw0 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW0_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW1_ADDR (0xf4c304) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW1_DEPTH (1) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW1_WIDTH (32) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW1_DWLEN (1) +union host_padpt_host_invld_msix_vec_dw1_u { + struct host_padpt_host_invld_msix_vec_dw1 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW1_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW2_ADDR (0xf4c308) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW2_DEPTH (1) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW2_WIDTH (32) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW2_DWLEN (1) +union host_padpt_host_invld_msix_vec_dw2_u { + struct host_padpt_host_invld_msix_vec_dw2 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW2_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW3_ADDR (0xf4c30c) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW3_DEPTH (1) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW3_WIDTH (32) +#define NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW3_DWLEN (1) +union host_padpt_host_invld_msix_vec_dw3_u { + struct host_padpt_host_invld_msix_vec_dw3 { + u32 value:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_INVLD_MSIX_VEC_DW3_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DEBUG_ADDR (0xf4c400) +#define NBL_HOST_PADPT_HOST_RD_MUX_DEBUG_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DEBUG_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DEBUG_DWLEN (1) +union host_padpt_host_rd_mux_debug_u { + struct host_padpt_host_rd_mux_debug { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DEBUG_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_GET_ADDR (0xf4c404) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_GET_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_GET_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_GET_DWLEN (1) +union host_padpt_host_rd_mux_dif_info_get_u { + struct host_padpt_host_rd_mux_dif_info_get { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_GET_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_REN_ADDR (0xf4c408) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_REN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_REN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_REN_DWLEN (1) +union host_padpt_host_rd_mux_dif_info_ren_u { + struct host_padpt_host_rd_mux_dif_info_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_REN_DWLEN]; +} __packed; + +#define 
NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_WEN_ADDR (0xf4c40c) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_WEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_WEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_WEN_DWLEN (1) +union host_padpt_host_rd_mux_dif_info_wen_u { + struct host_padpt_host_rd_mux_dif_info_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_WEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_ERR_ADDR (0xf4c410) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_ERR_DWLEN (1) +union host_padpt_host_rd_mux_dif_info_err_u { + struct host_padpt_host_rd_mux_dif_info_err { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_REN_ADDR (0xf4c414) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_REN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_REN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_REN_DWLEN (1) +union host_padpt_host_rd_mux_dif_sel_ren_u { + struct host_padpt_host_rd_mux_dif_sel_ren { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_REN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_WEN_ADDR (0xf4c418) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_WEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_WEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_WEN_DWLEN (1) +union host_padpt_host_rd_mux_dif_sel_wen_u { + struct host_padpt_host_rd_mux_dif_sel_wen { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_SEL_WEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_REOB_ADDR (0xf4c41c) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_REOB_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_REOB_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_REOB_DWLEN (1) +union host_padpt_host_rd_mux_dif_total_reob_u { + struct host_padpt_host_rd_mux_dif_total_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_REOB_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_RERR_ADDR (0xf4c420) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_RERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_RERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_RERR_DWLEN (1) +union host_padpt_host_rd_mux_dif_total_rerr_u { + struct host_padpt_host_rd_mux_dif_total_rerr { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_TOTAL_RERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REOB_ADDR (0xf4c424) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REOB_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REOB_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REOB_DWLEN (1) +union host_padpt_host_rd_mux_dif0_reob_u { + struct host_padpt_host_rd_mux_dif0_reob { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REOB_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_RERR_ADDR (0xf4c428) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_RERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_RERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_RERR_DWLEN (1) +union 
host_padpt_host_rd_mux_dif0_rerr_u {
+	struct host_padpt_host_rd_mux_dif0_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF0_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REN_ADDR (0xf4c42c)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REN_DWLEN (1)
+union host_padpt_host_rd_mux_dif0_ren_u {
+	struct host_padpt_host_rd_mux_dif0_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF0_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REOB_ADDR (0xf4c430)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REOB_DWLEN (1)
+union host_padpt_host_rd_mux_dif1_reob_u {
+	struct host_padpt_host_rd_mux_dif1_reob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_RERR_ADDR (0xf4c434)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_RERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_RERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_RERR_DWLEN (1)
+union host_padpt_host_rd_mux_dif1_rerr_u {
+	struct host_padpt_host_rd_mux_dif1_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF1_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REN_ADDR (0xf4c438)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REN_DWLEN (1)
+union host_padpt_host_rd_mux_dif1_ren_u {
+	struct host_padpt_host_rd_mux_dif1_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF1_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REOB_ADDR (0xf4c43c)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REOB_DWLEN (1)
+union host_padpt_host_rd_mux_dif2_reob_u {
+	struct host_padpt_host_rd_mux_dif2_reob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_RERR_ADDR (0xf4c440)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_RERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_RERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_RERR_DWLEN (1)
+union host_padpt_host_rd_mux_dif2_rerr_u {
+	struct host_padpt_host_rd_mux_dif2_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF2_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REN_ADDR (0xf4c444)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REN_DWLEN (1)
+union host_padpt_host_rd_mux_dif2_ren_u {
+	struct host_padpt_host_rd_mux_dif2_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF2_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REOB_ADDR (0xf4c448)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REOB_DWLEN (1)
+union host_padpt_host_rd_mux_dif3_reob_u {
+	struct host_padpt_host_rd_mux_dif3_reob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_RERR_ADDR (0xf4c44c)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_RERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_RERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_RERR_DWLEN (1)
+union host_padpt_host_rd_mux_dif3_rerr_u {
+	struct host_padpt_host_rd_mux_dif3_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF3_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REN_ADDR (0xf4c450)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REN_DWLEN (1)
+union host_padpt_host_rd_mux_dif3_ren_u {
+	struct host_padpt_host_rd_mux_dif3_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF3_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_SEL_INFO_RAM_ERR_ADDR (0xf4c458)
+#define NBL_HOST_PADPT_HOST_RD_MUX_SEL_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_SEL_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_SEL_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_rd_mux_sel_info_ram_err_u {
+	struct host_padpt_host_rd_mux_sel_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_SEL_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_RAM_ERR_ADDR (0xf4c460)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_rd_mux_dif_info_ram_err_u {
+	struct host_padpt_host_rd_mux_dif_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RD_MUX_DIF_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DEBUG_ADDR (0xf4c500)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DEBUG_DWLEN (1)
+union host_padpt_host_rw_mux_debug_u {
+	struct host_padpt_host_rw_mux_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_GET_ADDR (0xf4c504)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_GET_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_GET_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_GET_DWLEN (1)
+union host_padpt_host_rw_mux_dif_info_get_u {
+	struct host_padpt_host_rw_mux_dif_info_get {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_GET_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_REN_ADDR (0xf4c508)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_REN_DWLEN (1)
+union host_padpt_host_rw_mux_dif_info_ren_u {
+	struct host_padpt_host_rw_mux_dif_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_WEN_ADDR (0xf4c50c)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_WEN_DWLEN (1)
+union host_padpt_host_rw_mux_dif_info_wen_u {
+	struct host_padpt_host_rw_mux_dif_info_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_ERR_ADDR (0xf4c510)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_ERR_DWLEN (1)
+union host_padpt_host_rw_mux_dif_info_err_u {
+	struct host_padpt_host_rw_mux_dif_info_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_WEN_ADDR (0xf4c514)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_WEN_DWLEN (1)
+union host_padpt_host_rw_mux_dif_data_wen_u {
+	struct host_padpt_host_rw_mux_dif_data_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_REN_ADDR (0xf4c518)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_REN_DWLEN (1)
+union host_padpt_host_rw_mux_dif_data_ren_u {
+	struct host_padpt_host_rw_mux_dif_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_HERR_ADDR (0xf4c51c)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_HERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_HERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_HERR_DWLEN (1)
+union host_padpt_host_rw_mux_dif_data_herr_u {
+	struct host_padpt_host_rw_mux_dif_data_herr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_HERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_REN_ADDR (0xf4c520)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_REN_DWLEN (1)
+union host_padpt_host_rw_mux_dif_sel_ren_u {
+	struct host_padpt_host_rw_mux_dif_sel_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_WEN_ADDR (0xf4c524)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_WEN_DWLEN (1)
+union host_padpt_host_rw_mux_dif_sel_wen_u {
+	struct host_padpt_host_rw_mux_dif_sel_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_SEL_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_REOB_ADDR (0xf4c528)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_REOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_REOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_REOB_DWLEN (1)
+union host_padpt_host_rw_mux_dif_total_reob_u {
+	struct host_padpt_host_rw_mux_dif_total_reob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_REOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_RERR_ADDR (0xf4c52c)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_RERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_RERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_RERR_DWLEN (1)
+union host_padpt_host_rw_mux_dif_total_rerr_u {
+	struct host_padpt_host_rw_mux_dif_total_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_TOTAL_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_REOB_ADDR (0xf4c530)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_REOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_REOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_REOB_DWLEN (1)
+union host_padpt_host_rw_mux_dif0_reob_u {
+	struct host_padpt_host_rw_mux_dif0_reob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF0_REOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_RERR_ADDR (0xf4c534)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_RERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_RERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_RERR_DWLEN (1)
+union host_padpt_host_rw_mux_dif0_rerr_u {
+	struct host_padpt_host_rw_mux_dif0_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF0_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_REN_ADDR (0xf4c538)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_REN_DWLEN (1)
+union host_padpt_host_rw_mux_dif0_info_ren_u {
+	struct host_padpt_host_rw_mux_dif0_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_RD_ADDR (0xf4c53c)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_RD_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_RD_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_RD_DWLEN (1)
+union host_padpt_host_rw_mux_dif0_info_rd_u {
+	struct host_padpt_host_rw_mux_dif0_info_rd {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_RD_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_WR_ADDR (0xf4c540)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_WR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_WR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_WR_DWLEN (1)
+union host_padpt_host_rw_mux_dif0_info_wr_u {
+	struct host_padpt_host_rw_mux_dif0_info_wr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF0_INFO_WR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_DATA_REN_ADDR (0xf4c544)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF0_DATA_REN_DWLEN (1)
+union host_padpt_host_rw_mux_dif0_data_ren_u {
+	struct host_padpt_host_rw_mux_dif0_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF0_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_REOB_ADDR (0xf4c548)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_REOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_REOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_REOB_DWLEN (1)
+union host_padpt_host_rw_mux_dif1_reob_u {
+	struct host_padpt_host_rw_mux_dif1_reob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF1_REOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_RERR_ADDR (0xf4c54c)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_RERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_RERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_RERR_DWLEN (1)
+union host_padpt_host_rw_mux_dif1_rerr_u {
+	struct host_padpt_host_rw_mux_dif1_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF1_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_REN_ADDR (0xf4c550)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_REN_DWLEN (1)
+union host_padpt_host_rw_mux_dif1_info_ren_u {
+	struct host_padpt_host_rw_mux_dif1_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_RD_ADDR (0xf4c554)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_RD_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_RD_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_RD_DWLEN (1)
+union host_padpt_host_rw_mux_dif1_info_rd_u {
+	struct host_padpt_host_rw_mux_dif1_info_rd {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_RD_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_WR_ADDR (0xf4c558)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_WR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_WR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_WR_DWLEN (1)
+union host_padpt_host_rw_mux_dif1_info_wr_u {
+	struct host_padpt_host_rw_mux_dif1_info_wr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF1_INFO_WR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_DATA_REN_ADDR (0xf4c55c)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF1_DATA_REN_DWLEN (1)
+union host_padpt_host_rw_mux_dif1_data_ren_u {
+	struct host_padpt_host_rw_mux_dif1_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF1_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_SEL_INFO_RAM_ERR_ADDR (0xf4c564)
+#define NBL_HOST_PADPT_HOST_RW_MUX_SEL_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_SEL_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_SEL_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_rw_mux_sel_info_ram_err_u {
+	struct host_padpt_host_rw_mux_sel_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_SEL_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_RAM_ERR_ADDR (0xf4c56c)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_rw_mux_dif_info_ram_err_u {
+	struct host_padpt_host_rw_mux_dif_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_RAM_ERR_ADDR (0xf4c574)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_RAM_ERR_DWLEN (1)
+union host_padpt_host_rw_mux_dif_data_ram_err_u {
+	struct host_padpt_host_rw_mux_dif_data_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RW_MUX_DIF_DATA_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DEBUG_ADDR (0xf4c600)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DEBUG_DWLEN (1)
+union host_padpt_host_wr_mux_debug_u {
+	struct host_padpt_host_wr_mux_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_GET_ADDR (0xf4c604)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_GET_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_GET_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_GET_DWLEN (1)
+union host_padpt_host_wr_mux_dif_info_get_u {
+	struct host_padpt_host_wr_mux_dif_info_get {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_GET_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_REN_ADDR (0xf4c608)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif_info_ren_u {
+	struct host_padpt_host_wr_mux_dif_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_WEN_ADDR (0xf4c60c)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_WEN_DWLEN (1)
+union host_padpt_host_wr_mux_dif_info_wen_u {
+	struct host_padpt_host_wr_mux_dif_info_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_ERR_ADDR (0xf4c610)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_ERR_DWLEN (1)
+union host_padpt_host_wr_mux_dif_info_err_u {
+	struct host_padpt_host_wr_mux_dif_info_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_WEN_ADDR (0xf4c614)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_WEN_DWLEN (1)
+union host_padpt_host_wr_mux_dif_data_wen_u {
+	struct host_padpt_host_wr_mux_dif_data_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_REN_ADDR (0xf4c618)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif_data_ren_u {
+	struct host_padpt_host_wr_mux_dif_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_HERR_ADDR (0xf4c61c)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_HERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_HERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_HERR_DWLEN (1)
+union host_padpt_host_wr_mux_dif_data_herr_u {
+	struct host_padpt_host_wr_mux_dif_data_herr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_HERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_INFO_REN_ADDR (0xf4c620)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_INFO_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif0_info_ren_u {
+	struct host_padpt_host_wr_mux_dif0_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF0_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_DATA_REN_ADDR (0xf4c624)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF0_DATA_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif0_data_ren_u {
+	struct host_padpt_host_wr_mux_dif0_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF0_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_INFO_REN_ADDR (0xf4c628)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_INFO_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif1_info_ren_u {
+	struct host_padpt_host_wr_mux_dif1_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF1_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_DATA_REN_ADDR (0xf4c62c)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF1_DATA_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif1_data_ren_u {
+	struct host_padpt_host_wr_mux_dif1_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF1_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_INFO_REN_ADDR (0xf4c630)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_INFO_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif2_info_ren_u {
+	struct host_padpt_host_wr_mux_dif2_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF2_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_DATA_REN_ADDR (0xf4c634)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF2_DATA_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif2_data_ren_u {
+	struct host_padpt_host_wr_mux_dif2_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF2_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_INFO_REN_ADDR (0xf4c638)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_INFO_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif3_info_ren_u {
+	struct host_padpt_host_wr_mux_dif3_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF3_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_DATA_REN_ADDR (0xf4c63c)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF3_DATA_REN_DWLEN (1)
+union host_padpt_host_wr_mux_dif3_data_ren_u {
+	struct host_padpt_host_wr_mux_dif3_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF3_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_RAM_ERR_ADDR (0xf4c644)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_wr_mux_dif_info_ram_err_u {
+	struct host_padpt_host_wr_mux_dif_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_RAM_ERR_ADDR (0xf4c64c)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_RAM_ERR_DWLEN (1)
+union host_padpt_host_wr_mux_dif_data_ram_err_u {
+	struct host_padpt_host_wr_mux_dif_data_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_WR_MUX_DIF_DATA_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_DEBUG_ADDR (0xf4c700)
+#define NBL_HOST_PADPT_HOST_MSIX_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_DEBUG_DWLEN (1)
+union host_padpt_host_msix_debug_u {
+	struct host_padpt_host_msix_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_IN_REN_ADDR (0xf4c704)
+#define NBL_HOST_PADPT_HOST_MSIX_IN_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_IN_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_IN_REN_DWLEN (1)
+union host_padpt_host_msix_in_ren_u {
+	struct host_padpt_host_msix_in_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_IN_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_INT_ACK_ADDR (0xf4c708)
+#define NBL_HOST_PADPT_HOST_MSIX_INT_ACK_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_INT_ACK_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_INT_ACK_DWLEN (1)
+union host_padpt_host_msix_int_ack_u {
+	struct host_padpt_host_msix_int_ack {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_INT_ACK_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_OUT_WEN_ADDR (0xf4c70c)
+#define NBL_HOST_PADPT_HOST_MSIX_OUT_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_OUT_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_OUT_WEN_DWLEN (1)
+union host_padpt_host_msix_out_wen_u {
+	struct host_padpt_host_msix_out_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_OUT_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_WR_ADDR (0xf4c710)
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_WR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_WR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_WR_DWLEN (1)
+union host_padpt_host_msix_cfg_wr_u {
+	struct host_padpt_host_msix_cfg_wr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_CFG_WR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_RD_ADDR (0xf4c714)
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_RD_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_RD_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_CFG_RD_DWLEN (1)
+union host_padpt_host_msix_cfg_rd_u {
+	struct host_padpt_host_msix_cfg_rd {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_CFG_RD_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_INFO_RAM_ERR_ADDR (0xf4c71c)
+#define NBL_HOST_PADPT_HOST_MSIX_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_msix_info_ram_err_u {
+	struct host_padpt_host_msix_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_DATA_RAM_ERR_ADDR (0xf4c724)
+#define NBL_HOST_PADPT_HOST_MSIX_DATA_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_DATA_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_DATA_RAM_ERR_DWLEN (1)
+union host_padpt_host_msix_data_ram_err_u {
+	struct host_padpt_host_msix_data_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_DATA_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_RAM_ERR_ADDR (0xf4c728)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_RAM_ERR_DWLEN (1)
+union host_padpt_host_msix_intrl_ram_err_u {
+	struct host_padpt_host_msix_intrl_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_INTRL_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_BDF_RAM_ERR_ADDR (0xf4c72c)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_BDF_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_BDF_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_BDF_RAM_ERR_DWLEN (1)
+union host_padpt_host_msix_intrl_bdf_ram_err_u {
+	struct host_padpt_host_msix_intrl_bdf_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_INTRL_BDF_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_INFO_RAM_ERR_ADDR (0xf4c730)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_INTRL_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_msix_intrl_info_ram_err_u {
+	struct host_padpt_host_msix_intrl_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_INTRL_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_PBA_RAM_ERR_ADDR (0xf4c734)
+#define NBL_HOST_PADPT_HOST_MSIX_PBA_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_PBA_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_PBA_RAM_ERR_DWLEN (1)
+union host_padpt_host_msix_pba_ram_err_u {
+	struct host_padpt_host_msix_pba_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_PBA_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_CTRL_RAM_ERR_ADDR (0xf4c738)
+#define NBL_HOST_PADPT_HOST_MSIX_CTRL_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_MSIX_CTRL_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_MSIX_CTRL_RAM_ERR_DWLEN (1)
+union host_padpt_host_msix_ctrl_ram_err_u {
+	struct host_padpt_host_msix_ctrl_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_CTRL_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DEBUG_ADDR (0xf4c800)
+#define NBL_HOST_PADPT_HOST_SPL_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DEBUG_DWLEN (1)
+union host_padpt_host_spl_debug_u {
+	struct host_padpt_host_spl_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_INFO_REN_ADDR (0xf4c804)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_INFO_REN_DWLEN (1)
+union host_padpt_host_spl_dif_0_info_ren_u {
+	struct host_padpt_host_spl_dif_0_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_0_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_DATA_REOC_ADDR (0xf4c808)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_DATA_REOC_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_DATA_REOC_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_0_DATA_REOC_DWLEN (1)
+union host_padpt_host_spl_dif_0_data_reoc_u {
+	struct host_padpt_host_spl_dif_0_data_reoc {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_0_DATA_REOC_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_INFO_REN_ADDR (0xf4c80c)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_INFO_REN_DWLEN (1)
+union host_padpt_host_spl_dif_1_info_ren_u {
+	struct host_padpt_host_spl_dif_1_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_1_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_DATA_REOC_ADDR (0xf4c810)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_DATA_REOC_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_DATA_REOC_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_1_DATA_REOC_DWLEN (1)
+union host_padpt_host_spl_dif_1_data_reoc_u {
+	struct host_padpt_host_spl_dif_1_data_reoc {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_1_DATA_REOC_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_INFO_REN_ADDR (0xf4c814)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_INFO_REN_DWLEN (1)
+union host_padpt_host_spl_dif_2_info_ren_u {
+	struct host_padpt_host_spl_dif_2_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_2_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_DATA_REOC_ADDR (0xf4c818)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_DATA_REOC_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_DATA_REOC_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_2_DATA_REOC_DWLEN (1)
+union host_padpt_host_spl_dif_2_data_reoc_u {
+	struct host_padpt_host_spl_dif_2_data_reoc {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_2_DATA_REOC_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_INFO_REN_ADDR (0xf4c81c)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_INFO_REN_DWLEN (1)
+union host_padpt_host_spl_dif_3_info_ren_u {
+	struct host_padpt_host_spl_dif_3_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_3_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_DATA_REOC_ADDR (0xf4c820)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_DATA_REOC_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_DATA_REOC_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_3_DATA_REOC_DWLEN (1)
+union host_padpt_host_spl_dif_3_data_reoc_u {
+	struct host_padpt_host_spl_dif_3_data_reoc {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_3_DATA_REOC_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_INFO_TOTAL_ADDR (0xf4c824)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_INFO_TOTAL_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_INFO_TOTAL_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_INFO_TOTAL_DWLEN (1)
+union host_padpt_host_spl_dif_info_total_u {
+	struct host_padpt_host_spl_dif_info_total {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_INFO_TOTAL_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_DIF_DATA_TOTAL_ADDR (0xf4c828)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_DATA_TOTAL_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_DATA_TOTAL_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_DIF_DATA_TOTAL_DWLEN (1)
+union host_padpt_host_spl_dif_data_total_u {
+	struct host_padpt_host_spl_dif_data_total {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_DIF_DATA_TOTAL_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_SOB_ADDR (0xf4c82c)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_SOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_SOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_SOB_DWLEN (1)
+union host_padpt_host_spl_tlp_rd_sob_u {
+	struct host_padpt_host_spl_tlp_rd_sob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_RD_SOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_EOB_ADDR (0xf4c830)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_EOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_EOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_EOB_DWLEN (1)
+union host_padpt_host_spl_tlp_rd_eob_u {
+	struct host_padpt_host_spl_tlp_rd_eob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_RD_EOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_ERR_ADDR (0xf4c834)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_RD_ERR_DWLEN (1)
+union host_padpt_host_spl_tlp_rd_err_u {
+	struct host_padpt_host_spl_tlp_rd_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_RD_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_SOB_ADDR (0xf4c838)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_SOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_SOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_SOB_DWLEN (1)
+union host_padpt_host_spl_tlp_wr_sob_u {
+	struct host_padpt_host_spl_tlp_wr_sob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_WR_SOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_EOB_ADDR (0xf4c83c)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_EOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_EOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_EOB_DWLEN (1)
+union host_padpt_host_spl_tlp_wr_eob_u {
+	struct host_padpt_host_spl_tlp_wr_eob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_WR_EOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_ERR_ADDR (0xf4c840)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_WR_ERR_DWLEN (1)
+union host_padpt_host_spl_tlp_wr_err_u {
+	struct host_padpt_host_spl_tlp_wr_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_WR_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_ADDR (0xf4c844)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_DWLEN (1)
+union host_padpt_host_spl_tlp_info_u {
+	struct host_padpt_host_spl_tlp_info {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_INFO_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_RAM_ERR_ADDR (0xf4c84c)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TLP_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_spl_tlp_info_ram_err_u {
+	struct host_padpt_host_spl_tlp_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TLP_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TAG_ADDR (0xf4c850)
+#define NBL_HOST_PADPT_HOST_SPL_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TAG_DWLEN (1)
+union host_padpt_host_spl_tag_u {
+	struct host_padpt_host_spl_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_TAG_RAM_ERR_ADDR (0xf4c858)
+#define NBL_HOST_PADPT_HOST_SPL_TAG_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_TAG_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_TAG_RAM_ERR_DWLEN (1)
+union host_padpt_host_spl_tag_ram_err_u {
+	struct host_padpt_host_spl_tag_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_TAG_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_SEL_ADDR (0xf4c85c)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_DWLEN (1)
+union host_padpt_host_spl_sel_u {
+	struct host_padpt_host_spl_sel {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_SEL_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_SEL_RAM_ERR_ADDR (0xf4c864)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_RAM_ERR_DWLEN (1)
+union host_padpt_host_spl_sel_ram_err_u {
+	struct host_padpt_host_spl_sel_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_SEL_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_ADDR (0xf4c868)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_DWLEN (1)
+union host_padpt_host_spl_sel_tag_u {
+	struct host_padpt_host_spl_sel_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_SEL_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_RAM_ERR_ADDR (0xf4c874)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_SEL_TAG_RAM_ERR_DWLEN (1)
+union host_padpt_host_spl_sel_tag_ram_err_u {
+	struct host_padpt_host_spl_sel_tag_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_SEL_TAG_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_ADDR (0xf4c878)
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_DWLEN (1)
+union host_padpt_host_spl_msix_u {
+	struct host_padpt_host_spl_msix {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_MSIX_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_RAM_ERR_ADDR (0xf4c880)
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_MSIX_RAM_ERR_DWLEN (1)
+union host_padpt_host_spl_msix_ram_err_u {
+	struct host_padpt_host_spl_msix_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_MSIX_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_VNET_QINFO_RAM_ERR_ADDR (0xf4c884)
+#define NBL_HOST_PADPT_HOST_SPL_VNET_QINFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_VNET_QINFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_VNET_QINFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_spl_vnet_qinfo_ram_err_u {
+	struct host_padpt_host_spl_vnet_qinfo_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_VNET_QINFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_SPL_VBLK_QINFO_RAM_ERR_ADDR (0xf4c888)
+#define NBL_HOST_PADPT_HOST_SPL_VBLK_QINFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_SPL_VBLK_QINFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_SPL_VBLK_QINFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_spl_vblk_qinfo_ram_err_u {
+	struct host_padpt_host_spl_vblk_qinfo_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_SPL_VBLK_QINFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_DEBUG_ADDR (0xf4c900)
+#define NBL_HOST_PADPT_HOST_TLP_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_DEBUG_DWLEN (1)
+union host_padpt_host_tlp_debug_u {
+	struct host_padpt_host_tlp_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_SOB_ADDR (0xf4c904)
+#define NBL_HOST_PADPT_HOST_TLP_SOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_SOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_SOB_DWLEN (1)
+union host_padpt_host_tlp_sob_u {
+	struct host_padpt_host_tlp_sob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_SOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_EOB_ADDR (0xf4c908)
+#define NBL_HOST_PADPT_HOST_TLP_EOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_EOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_EOB_DWLEN (1)
+union host_padpt_host_tlp_eob_u {
+	struct host_padpt_host_tlp_eob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_EOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_ERR_ADDR (0xf4c90c)
+#define NBL_HOST_PADPT_HOST_TLP_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_ERR_DWLEN (1)
+union host_padpt_host_tlp_err_u {
+	struct host_padpt_host_tlp_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_INFO_REN_ADDR (0xf4c910)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_REN_DWLEN (1)
+union host_padpt_host_tlp_info_ren_u {
+	struct host_padpt_host_tlp_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_INFO_WEN_ADDR (0xf4c914)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_WEN_DWLEN (1)
+union host_padpt_host_tlp_info_wen_u {
+	struct host_padpt_host_tlp_info_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_INFO_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_DATA_REN_ADDR (0xf4c918)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_REN_DWLEN (1)
+union host_padpt_host_tlp_data_ren_u {
+	struct host_padpt_host_tlp_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_DATA_WEN_ADDR (0xf4c91c)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_WEN_DWLEN (1)
+union host_padpt_host_tlp_data_wen_u {
+	struct host_padpt_host_tlp_data_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_DATA_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_INFO_RAM_ERR_ADDR (0xf4c924)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_tlp_info_ram_err_u {
+	struct host_padpt_host_tlp_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_TLP_DATA_RAM_ERR_ADDR (0xf4c92c)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_TLP_DATA_RAM_ERR_DWLEN (1)
+union host_padpt_host_tlp_data_ram_err_u {
+	struct host_padpt_host_tlp_data_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_TLP_DATA_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_DEBUG_ADDR (0xf4ca00)
+#define NBL_HOST_PADPT_HOST_JON_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_DEBUG_DWLEN (1)
+union host_padpt_host_jon_debug_u {
+	struct host_padpt_host_jon_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_TLP_INFO_REN_ADDR (0xf4ca04)
+#define NBL_HOST_PADPT_HOST_JON_TLP_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_TLP_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_TLP_INFO_REN_DWLEN (1)
+union host_padpt_host_jon_tlp_info_ren_u {
+	struct host_padpt_host_jon_tlp_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_TLP_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_EOB_ADDR (0xf4ca08)
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_EOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_EOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_EOB_DWLEN (1)
+union host_padpt_host_jon_dif_total_eob_u {
+	struct host_padpt_host_jon_dif_total_eob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_EOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_ERR_ADDR (0xf4ca0c)
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_ERR_DWLEN (1)
+union host_padpt_host_jon_dif_total_err_u {
+	struct host_padpt_host_jon_dif_total_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_DIF_TOTAL_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_DIF_RSOB_ADDR (0xf4ca10)
+#define NBL_HOST_PADPT_HOST_JON_DIF_RSOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_DIF_RSOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_DIF_RSOB_DWLEN (1)
+union host_padpt_host_jon_dif_rsob_u {
+	struct host_padpt_host_jon_dif_rsob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_DIF_RSOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_DIF_REOB_ADDR (0xf4ca14)
+#define NBL_HOST_PADPT_HOST_JON_DIF_REOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_DIF_REOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_DIF_REOB_DWLEN (1)
+union host_padpt_host_jon_dif_reob_u {
+	struct host_padpt_host_jon_dif_reob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_DIF_REOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_DIF_RERR_ADDR (0xf4ca18)
+#define NBL_HOST_PADPT_HOST_JON_DIF_RERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_DIF_RERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_DIF_RERR_DWLEN (1)
+union host_padpt_host_jon_dif_rerr_u {
+	struct host_padpt_host_jon_dif_rerr {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_DIF_RERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF_LENGTH_ERR_ADDR (0xf4ca1c)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_LENGTH_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_LENGTH_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_LENGTH_ERR_DWLEN (1)
+union host_padpt_host_jon_rdif_length_err_u {
+	struct host_padpt_host_jon_rdif_length_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF_LENGTH_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_EOB_ADDR (0xf4ca20)
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_EOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_EOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_EOB_DWLEN (1)
+union host_padpt_host_jon_rdif0_eob_u {
+	struct host_padpt_host_jon_rdif0_eob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF0_EOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_ERR_ADDR (0xf4ca24)
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF0_ERR_DWLEN (1)
+union host_padpt_host_jon_rdif0_err_u {
+	struct host_padpt_host_jon_rdif0_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF0_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_EOB_ADDR (0xf4ca28)
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_EOB_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_EOB_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_EOB_DWLEN (1)
+union host_padpt_host_jon_rdif1_eob_u {
+	struct host_padpt_host_jon_rdif1_eob {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF1_EOB_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_ERR_ADDR (0xf4ca2c)
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF1_ERR_DWLEN (1)
+union host_padpt_host_jon_rdif1_err_u {
+	struct host_padpt_host_jon_rdif1_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF1_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_REN_ADDR (0xf4ca30)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_REN_DWLEN (1)
+union host_padpt_host_jon_rdif_info_ren_u {
+	struct host_padpt_host_jon_rdif_info_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF_INFO_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_WEN_ADDR (0xf4ca34)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_WEN_DWLEN (1)
+union host_padpt_host_jon_rdif_info_wen_u {
+	struct host_padpt_host_jon_rdif_info_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF_INFO_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_REN_ADDR (0xf4ca38)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_REN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_REN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_REN_DWLEN (1)
+union host_padpt_host_jon_rdif_data_ren_u {
+	struct host_padpt_host_jon_rdif_data_ren {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF_DATA_REN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_WEN_ADDR (0xf4ca3c)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_WEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_WEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_WEN_DWLEN (1)
+union host_padpt_host_jon_rdif_data_wen_u {
+	struct host_padpt_host_jon_rdif_data_wen {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF_DATA_WEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_RAM_ERR_ADDR (0xf4ca44)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_jon_rdif_info_ram_err_u {
+	struct host_padpt_host_jon_rdif_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_RAM_ERR_ADDR (0xf4ca4c)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_JON_RDIF_DATA_RAM_ERR_DWLEN (1)
+union host_padpt_host_jon_rdif_data_ram_err_u {
+	struct host_padpt_host_jon_rdif_data_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_JON_RDIF_DATA_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_DEBUG_ADDR (0xf4cb00)
+#define NBL_HOST_PADPT_HOST_DMUX_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_DEBUG_DWLEN (1)
+union host_padpt_host_dmux_debug_u {
+	struct host_padpt_host_dmux_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_SEL_TAG_ADDR (0xf4cb04)
+#define NBL_HOST_PADPT_HOST_DMUX_SEL_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_SEL_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_SEL_TAG_DWLEN (1)
+union host_padpt_host_dmux_sel_tag_u {
+	struct host_padpt_host_dmux_sel_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_SEL_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_NULL_TAG_ADDR (0xf4cb08)
+#define NBL_HOST_PADPT_HOST_DMUX_NULL_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_NULL_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_NULL_TAG_DWLEN (1)
+union host_padpt_host_dmux_null_tag_u {
+	struct host_padpt_host_dmux_null_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_NULL_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_RX_TAG_ADDR (0xf4cb0c)
+#define NBL_HOST_PADPT_HOST_DMUX_RX_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_RX_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_RX_TAG_DWLEN (1)
+union host_padpt_host_dmux_rx_tag_u {
+	struct host_padpt_host_dmux_rx_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_RX_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_ERR_TAG_ADDR (0xf4cb10)
+#define NBL_HOST_PADPT_HOST_DMUX_ERR_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_ERR_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_ERR_TAG_DWLEN (1)
+union host_padpt_host_dmux_err_tag_u {
+	struct host_padpt_host_dmux_err_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_ERR_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_AGED_TAG_ADDR (0xf4cb14)
+#define NBL_HOST_PADPT_HOST_DMUX_AGED_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_AGED_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_AGED_TAG_DWLEN (1)
+union host_padpt_host_dmux_aged_tag_u {
+	struct host_padpt_host_dmux_aged_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_AGED_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_SOC_ADDR (0xf4cb18)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_SOC_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_SOC_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_SOC_DWLEN (1)
+union host_padpt_host_dmux_rdif_soc_u {
+	struct host_padpt_host_dmux_rdif_soc {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_RDIF_SOC_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_EOC_ADDR (0xf4cb1c)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_EOC_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_EOC_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_EOC_DWLEN (1)
+union host_padpt_host_dmux_rdif_eoc_u {
+	struct host_padpt_host_dmux_rdif_eoc {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_RDIF_EOC_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_ERR_ADDR (0xf4cb20)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_RDIF_ERR_DWLEN (1)
+union host_padpt_host_dmux_rdif_err_u {
+	struct host_padpt_host_dmux_rdif_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_RDIF_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_RLS_ADDR (0xf4cb24)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_RLS_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_RLS_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_RLS_DWLEN (1)
+union host_padpt_host_dmux_tag_rls_u {
+	struct host_padpt_host_dmux_tag_rls {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_TAG_RLS_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_CPL_RLS_ADDR (0xf4cb28)
+#define NBL_HOST_PADPT_HOST_DMUX_CPL_RLS_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_CPL_RLS_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_CPL_RLS_DWLEN (1)
+union host_padpt_host_dmux_cpl_rls_u {
+	struct host_padpt_host_dmux_cpl_rls {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_CPL_RLS_DWLEN];
+} __packed;
+
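+/*
+ * The three HOST_DMUX_IS_* registers below have DEPTH (32) rather than (1):
+ * each is an array of 32 single-dword entries. The matching *_REG(r) macro
+ * computes the address of entry r as the base address plus r * DWLEN * 4
+ * bytes.
+ */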
+#define NBL_HOST_PADPT_HOST_DMUX_IS_TAG_ADDR (0xf4cb40)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_TAG_DEPTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_TAG_DWLEN (1)
+union host_padpt_host_dmux_is_tag_u {
+	struct host_padpt_host_dmux_is_tag {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_IS_TAG_DWLEN];
+} __packed;
+#define NBL_HOST_PADPT_HOST_DMUX_IS_TAG_REG(r) (NBL_HOST_PADPT_HOST_DMUX_IS_TAG_ADDR + \
+		(NBL_HOST_PADPT_HOST_DMUX_IS_TAG_DWLEN * 4) * (r))
+
+#define NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_ADDR (0xf4cbc0)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_DEPTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_DWLEN (1)
+union host_padpt_host_dmux_is_sel_tag_u {
+	struct host_padpt_host_dmux_is_sel_tag {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_DWLEN];
+} __packed;
+#define NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_REG(r) (NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_ADDR + \
+		(NBL_HOST_PADPT_HOST_DMUX_IS_SEL_TAG_DWLEN * 4) * (r))
+
+#define NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_ADDR (0xf4cc40)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_DEPTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_DWLEN (1)
+union host_padpt_host_dmux_is_err_tag_u {
+	struct host_padpt_host_dmux_is_err_tag {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_DWLEN];
+} __packed;
+#define NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_REG(r) (NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_ADDR + \
+		(NBL_HOST_PADPT_HOST_DMUX_IS_ERR_TAG_DWLEN * 4) * (r))
+
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_MAX_ADDR (0xf4cd00)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_MAX_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_MAX_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_MAX_DWLEN (1)
+union host_padpt_host_dmux_tag_aged_max_u {
+	struct host_padpt_host_dmux_tag_aged_max {
+		u32 times:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_MAX_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_AVR_ADDR (0xf4cd04)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_AVR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_AVR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_AVR_DWLEN (1)
+union host_padpt_host_dmux_tag_aged_avr_u {
+	struct host_padpt_host_dmux_tag_aged_avr {
+		u32 times:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_DMUX_TAG_AGED_AVR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_DEBUG_ADDR (0xf4ce00)
+#define NBL_HOST_PADPT_HOST_CPL_DEBUG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_DEBUG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_DEBUG_DWLEN (1)
+union host_padpt_host_cpl_debug_u {
+	struct host_padpt_host_cpl_debug {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_DEBUG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_RX_TAG_ADDR (0xf4ce04)
+#define NBL_HOST_PADPT_HOST_CPL_RX_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_RX_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_RX_TAG_DWLEN (1)
+union host_padpt_host_cpl_rx_tag_u {
+	struct host_padpt_host_cpl_rx_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_RX_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_ERR_TAG_ADDR (0xf4ce08)
+#define NBL_HOST_PADPT_HOST_CPL_ERR_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_ERR_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_ERR_TAG_DWLEN (1)
+union host_padpt_host_cpl_err_tag_u {
+	struct host_padpt_host_cpl_err_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_ERR_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_EP_TAG_ADDR (0xf4ce0c)
+#define NBL_HOST_PADPT_HOST_CPL_EP_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_EP_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_EP_TAG_DWLEN (1)
+union host_padpt_host_cpl_ep_tag_u {
+	struct host_padpt_host_cpl_ep_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_EP_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_FAIL_TAG_ADDR (0xf4ce10)
+#define NBL_HOST_PADPT_HOST_CPL_FAIL_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_FAIL_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_FAIL_TAG_DWLEN (1)
+union host_padpt_host_cpl_fail_tag_u {
+	struct host_padpt_host_cpl_fail_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_FAIL_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_HERR_TAG_ADDR (0xf4ce14)
+#define NBL_HOST_PADPT_HOST_CPL_HERR_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_HERR_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_HERR_TAG_DWLEN (1)
+union host_padpt_host_cpl_herr_tag_u {
+	struct host_padpt_host_cpl_herr_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_HERR_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_RCB_TAG_ADDR (0xf4ce18)
+#define NBL_HOST_PADPT_HOST_CPL_RCB_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_RCB_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_RCB_TAG_DWLEN (1)
+union host_padpt_host_cpl_rcb_tag_u {
+	struct host_padpt_host_cpl_rcb_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_RCB_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_RCB_ERR_ADDR (0xf4ce1c)
+#define NBL_HOST_PADPT_HOST_CPL_RCB_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_RCB_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_RCB_ERR_DWLEN (1)
+union host_padpt_host_cpl_rcb_err_u {
+	struct host_padpt_host_cpl_rcb_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_RCB_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_DRP_TAG_ADDR (0xf4ce20)
+#define NBL_HOST_PADPT_HOST_CPL_DRP_TAG_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_DRP_TAG_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_DRP_TAG_DWLEN (1)
+union host_padpt_host_cpl_drp_tag_u {
+	struct host_padpt_host_cpl_drp_tag {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_DRP_TAG_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_RX_SOP_ADDR (0xf4ce24)
+#define NBL_HOST_PADPT_HOST_CPL_RX_SOP_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_RX_SOP_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_RX_SOP_DWLEN (1)
+union host_padpt_host_cpl_rx_sop_u {
+	struct host_padpt_host_cpl_rx_sop {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_RX_SOP_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_RX_EOP_ADDR (0xf4ce28)
+#define NBL_HOST_PADPT_HOST_CPL_RX_EOP_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_RX_EOP_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_RX_EOP_DWLEN (1)
+union host_padpt_host_cpl_rx_eop_u {
+	struct host_padpt_host_cpl_rx_eop {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_RX_EOP_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_RX_ERR_ADDR (0xf4ce2c)
+#define NBL_HOST_PADPT_HOST_CPL_RX_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_RX_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_RX_ERR_DWLEN (1)
+union host_padpt_host_cpl_rx_err_u {
+	struct host_padpt_host_cpl_rx_err {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_CPL_RX_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_CPL_DMUX_TAG_RAM_ERR_ADDR (0xf4ce34)
+#define NBL_HOST_PADPT_HOST_CPL_DMUX_TAG_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_CPL_DMUX_TAG_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_CPL_DMUX_TAG_RAM_ERR_DWLEN (1)
+union host_padpt_host_cpl_dmux_tag_ram_err_u {
+	struct host_padpt_host_cpl_dmux_tag_ram_err {
+
u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CPL_DMUX_TAG_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_0_RAM_ADDR (0xf4ce38) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_0_RAM_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_0_RAM_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_0_RAM_DWLEN (1) +union host_padpt_host_cpl_dmux_high_0_ram_u { + struct host_padpt_host_cpl_dmux_high_0_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_0_RAM_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_1_RAM_ADDR (0xf4ce3c) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_1_RAM_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_1_RAM_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_1_RAM_DWLEN (1) +union host_padpt_host_cpl_dmux_high_1_ram_u { + struct host_padpt_host_cpl_dmux_high_1_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CPL_DMUX_HIGH_1_RAM_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_0_RAM_ADDR (0xf4ce40) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_0_RAM_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_0_RAM_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_0_RAM_DWLEN (1) +union host_padpt_host_cpl_dmux_low_0_ram_u { + struct host_padpt_host_cpl_dmux_low_0_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_0_RAM_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_1_RAM_ADDR (0xf4ce44) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_1_RAM_DEPTH (1) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_1_RAM_WIDTH (32) +#define NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_1_RAM_DWLEN (1) +union host_padpt_host_cpl_dmux_low_1_ram_u { + struct host_padpt_host_cpl_dmux_low_1_ram { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_CPL_DMUX_LOW_1_RAM_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_ADDR (0xf4ce80) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_DWLEN (1) +union host_padpt_host_dif0_rd_info_u { + struct host_padpt_host_dif0_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_RD_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_LEN_ADDR (0xf4ce84) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_LEN_DWLEN (1) +union host_padpt_host_dif0_rd_info_len_u { + struct host_padpt_host_dif0_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_RAM_ERR_ADDR (0xf4ce8c) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_RD_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif0_rd_info_ram_err_u { + struct host_padpt_host_dif0_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_ADDR (0xf4ce90) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_WIDTH (32) +#define 
NBL_HOST_PADPT_HOST_DIF1_RD_INFO_DWLEN (1) +union host_padpt_host_dif1_rd_info_u { + struct host_padpt_host_dif1_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_RD_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_LEN_ADDR (0xf4ce94) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_LEN_DWLEN (1) +union host_padpt_host_dif1_rd_info_len_u { + struct host_padpt_host_dif1_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_RAM_ERR_ADDR (0xf4ce9c) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_RD_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif1_rd_info_ram_err_u { + struct host_padpt_host_dif1_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_ADDR (0xf4cea0) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_DWLEN (1) +union host_padpt_host_dif2_rd_info_u { + struct host_padpt_host_dif2_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_RD_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_LEN_ADDR (0xf4cea4) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_LEN_DWLEN (1) +union host_padpt_host_dif2_rd_info_len_u { + struct host_padpt_host_dif2_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_RAM_ERR_ADDR (0xf4ceac) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_RD_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif2_rd_info_ram_err_u { + struct host_padpt_host_dif2_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_ADDR (0xf4ceb0) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_DWLEN (1) +union host_padpt_host_dif3_rd_info_u { + struct host_padpt_host_dif3_rd_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_RD_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_LEN_ADDR (0xf4ceb4) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_LEN_DWLEN (1) +union host_padpt_host_dif3_rd_info_len_u { + struct host_padpt_host_dif3_rd_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_RD_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_RAM_ERR_ADDR (0xf4cebc) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_RAM_ERR_DEPTH (1) +#define 
NBL_HOST_PADPT_HOST_DIF3_RD_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_RD_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif3_rd_info_ram_err_u { + struct host_padpt_host_dif3_rd_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_RD_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_ADDR (0xf4cec0) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_DWLEN (1) +union host_padpt_host_dif0_wr_info_u { + struct host_padpt_host_dif0_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_WR_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_LEN_ADDR (0xf4cec4) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_LEN_DWLEN (1) +union host_padpt_host_dif0_wr_info_len_u { + struct host_padpt_host_dif0_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_RAM_ERR_ADDR (0xf4cecc) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_WR_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif0_wr_info_ram_err_u { + struct host_padpt_host_dif0_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_ADDR (0xf4ced0) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_DWLEN (1) +union host_padpt_host_dif0_wr_data_u { + struct host_padpt_host_dif0_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_WR_DATA_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_LEN_ADDR (0xf4ced4) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_LEN_DWLEN (1) +union host_padpt_host_dif0_wr_data_len_u { + struct host_padpt_host_dif0_wr_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_RAM_ERR_ADDR (0xf4cedc) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF0_WR_DATA_RAM_ERR_DWLEN (1) +union host_padpt_host_dif0_wr_data_ram_err_u { + struct host_padpt_host_dif0_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF0_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_ADDR (0xf4cee0) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_DWLEN (1) +union host_padpt_host_dif1_wr_info_u { + struct host_padpt_host_dif1_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_WR_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_LEN_ADDR (0xf4cee4) +#define 
NBL_HOST_PADPT_HOST_DIF1_WR_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_LEN_DWLEN (1) +union host_padpt_host_dif1_wr_info_len_u { + struct host_padpt_host_dif1_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_RAM_ERR_ADDR (0xf4ceec) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_WR_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif1_wr_info_ram_err_u { + struct host_padpt_host_dif1_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_ADDR (0xf4cef0) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_DWLEN (1) +union host_padpt_host_dif1_wr_data_u { + struct host_padpt_host_dif1_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_WR_DATA_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_LEN_ADDR (0xf4cef4) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_LEN_DWLEN (1) +union host_padpt_host_dif1_wr_data_len_u { + struct host_padpt_host_dif1_wr_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_RAM_ERR_ADDR (0xf4cefc) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF1_WR_DATA_RAM_ERR_DWLEN (1) +union host_padpt_host_dif1_wr_data_ram_err_u { + struct host_padpt_host_dif1_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF1_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_ADDR (0xf4cf00) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_DWLEN (1) +union host_padpt_host_dif2_wr_info_u { + struct host_padpt_host_dif2_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_WR_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_LEN_ADDR (0xf4cf04) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_LEN_DWLEN (1) +union host_padpt_host_dif2_wr_info_len_u { + struct host_padpt_host_dif2_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_RAM_ERR_ADDR (0xf4cf0c) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_WR_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif2_wr_info_ram_err_u { + struct host_padpt_host_dif2_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_HOST_PADPT_HOST_DIF2_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_ADDR (0xf4cf10) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_DWLEN (1) +union host_padpt_host_dif2_wr_data_u { + struct host_padpt_host_dif2_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_WR_DATA_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_LEN_ADDR (0xf4cf14) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_LEN_DWLEN (1) +union host_padpt_host_dif2_wr_data_len_u { + struct host_padpt_host_dif2_wr_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_RAM_ERR_ADDR (0xf4cf1c) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF2_WR_DATA_RAM_ERR_DWLEN (1) +union host_padpt_host_dif2_wr_data_ram_err_u { + struct host_padpt_host_dif2_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF2_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_ADDR (0xf4cf20) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_DWLEN (1) +union host_padpt_host_dif3_wr_info_u { + struct host_padpt_host_dif3_wr_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_WR_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_LEN_ADDR (0xf4cf24) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_LEN_DWLEN (1) +union host_padpt_host_dif3_wr_info_len_u { + struct host_padpt_host_dif3_wr_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_WR_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_RAM_ERR_ADDR (0xf4cf2c) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_WR_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_dif3_wr_info_ram_err_u { + struct host_padpt_host_dif3_wr_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_WR_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_ADDR (0xf4cf30) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_DWLEN (1) +union host_padpt_host_dif3_wr_data_u { + struct host_padpt_host_dif3_wr_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_WR_DATA_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_LEN_ADDR (0xf4cf34) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_LEN_DWLEN (1) +union host_padpt_host_dif3_wr_data_len_u { + struct host_padpt_host_dif3_wr_data_len { + u32 cnt:32; /* [31:00] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_WR_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_RAM_ERR_ADDR (0xf4cf3c) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_DIF3_WR_DATA_RAM_ERR_DWLEN (1) +union host_padpt_host_dif3_wr_data_ram_err_u { + struct host_padpt_host_dif3_wr_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_DIF3_WR_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_ADDR (0xf4cf40) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_DWLEN (1) +union host_padpt_host_rdma_dif0_rw_info_u { + struct host_padpt_host_rdma_dif0_rw_info { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_LEN_ADDR (0xf4cf44) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_LEN_DWLEN (1) +union host_padpt_host_rdma_dif0_rw_info_len_u { + struct host_padpt_host_rdma_dif0_rw_info_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_RAM_ERR_ADDR (0xf4cf4c) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_RAM_ERR_DWLEN (1) +union host_padpt_host_rdma_dif0_rw_info_ram_err_u { + struct host_padpt_host_rdma_dif0_rw_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_ADDR (0xf4cf50) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_DWLEN (1) +union host_padpt_host_rdma_dif0_rw_data_u { + struct host_padpt_host_rdma_dif0_rw_data { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_LEN_ADDR (0xf4cf54) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_LEN_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_LEN_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_LEN_DWLEN (1) +union host_padpt_host_rdma_dif0_rw_data_len_u { + struct host_padpt_host_rdma_dif0_rw_data_len { + u32 cnt:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_LEN_DWLEN]; +} __packed; + +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_RAM_ERR_ADDR (0xf4cf5c) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_RAM_ERR_DEPTH (1) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_RAM_ERR_WIDTH (32) +#define NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_RAM_ERR_DWLEN (1) +union host_padpt_host_rdma_dif0_rw_data_ram_err_u { + struct host_padpt_host_rdma_dif0_rw_data_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF0_RW_DATA_RAM_ERR_DWLEN]; +} __packed; + +#define 
NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_ADDR (0xf4cf60)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_DWLEN (1)
+union host_padpt_host_rdma_dif1_rw_info_u {
+	struct host_padpt_host_rdma_dif1_rw_info {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_LEN_ADDR (0xf4cf64)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_LEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_LEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_LEN_DWLEN (1)
+union host_padpt_host_rdma_dif1_rw_info_len_u {
+	struct host_padpt_host_rdma_dif1_rw_info_len {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_LEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_RAM_ERR_ADDR (0xf4cf6c)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_RAM_ERR_DWLEN (1)
+union host_padpt_host_rdma_dif1_rw_info_ram_err_u {
+	struct host_padpt_host_rdma_dif1_rw_info_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_INFO_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_ADDR (0xf4cf70)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_DWLEN (1)
+union host_padpt_host_rdma_dif1_rw_data_u {
+	struct host_padpt_host_rdma_dif1_rw_data {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_LEN_ADDR (0xf4cf74)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_LEN_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_LEN_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_LEN_DWLEN (1)
+union host_padpt_host_rdma_dif1_rw_data_len_u {
+	struct host_padpt_host_rdma_dif1_rw_data_len {
+		u32 cnt:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_LEN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_RAM_ERR_ADDR (0xf4cf7c)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_RAM_ERR_DWLEN (1)
+union host_padpt_host_rdma_dif1_rw_data_ram_err_u {
+	struct host_padpt_host_rdma_dif1_rw_data_ram_err {
+		u32 info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_RDMA_DIF1_RW_DATA_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PADPT_HOST_MSIX_PENDING_ADDR (0xf4d000)
+#define NBL_HOST_PADPT_HOST_MSIX_PENDING_DEPTH (80)
+#define NBL_HOST_PADPT_HOST_MSIX_PENDING_WIDTH (64)
+#define NBL_HOST_PADPT_HOST_MSIX_PENDING_DWLEN (2)
+union host_padpt_host_msix_pending_u {
+	struct host_padpt_host_msix_pending {
+		u32 array_arr[2]; /* [63:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PADPT_HOST_MSIX_PENDING_DWLEN];
+} __packed;
+#define NBL_HOST_PADPT_HOST_MSIX_PENDING_REG(r) (NBL_HOST_PADPT_HOST_MSIX_PENDING_ADDR + \
+	(NBL_HOST_PADPT_HOST_MSIX_PENDING_DWLEN * 4) * (r))
+
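+/*
+ * Usage sketch (editor's illustration, not generated code): every block in
+ * this header pairs a bitfield view ("info") with a raw dword view ("data"),
+ * and tables with DEPTH > 1 get a *_REG(r) macro giving the byte offset of
+ * entry r. Assuming a hypothetical ioremap()'d BAR base "hw_addr" (the base
+ * name is an assumption, not a driver API), entry r of the 64-bit MSI-X
+ * pending table above could be read dword by dword:
+ *
+ *	union host_padpt_host_msix_pending_u pend;
+ *	int i;
+ *
+ *	for (i = 0; i < NBL_HOST_PADPT_HOST_MSIX_PENDING_DWLEN; i++)
+ *		pend.data[i] = readl(hw_addr +
+ *				     NBL_HOST_PADPT_HOST_MSIX_PENDING_REG(r) +
+ *				     4 * i);
+ *
+ * after which pend.info.array_arr[0] and [1] hold the pending bitmap.
+ */
+
+#define NBL_HOST_PADPT_HOST_VBLK_QINFO_ADDR (0xf50000)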
+#define NBL_HOST_PADPT_HOST_VBLK_QINFO_DEPTH (512) +#define NBL_HOST_PADPT_HOST_VBLK_QINFO_WIDTH (32) +#define NBL_HOST_PADPT_HOST_VBLK_QINFO_DWLEN (1) +union host_padpt_host_vblk_qinfo_u { + struct host_padpt_host_vblk_qinfo { + u32 fuction_id:3; /* [2:0] Default:0x0 RW */ + u32 device_id:5; /* [7:3] Default:0x0 RW */ + u32 bus_id:8; /* [15:8] Default:0x0 RW */ + u32 msix_idx:13; /* [28:16] Default:0x0 RW */ + u32 msix_idx_valid:1; /* [29] Default:0x0 RW */ + u32 log_en:1; /* [30] Default:0x0 RW */ + u32 valid:1; /* [31] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_VBLK_QINFO_DWLEN]; +} __packed; +#define NBL_HOST_PADPT_HOST_VBLK_QINFO_REG(r) (NBL_HOST_PADPT_HOST_VBLK_QINFO_ADDR + \ + (NBL_HOST_PADPT_HOST_VBLK_QINFO_DWLEN * 4) * (r)) + +#define NBL_HOST_PADPT_HOST_VNET_QINFO_ADDR (0xf54000) +#define NBL_HOST_PADPT_HOST_VNET_QINFO_DEPTH (4096) +#define NBL_HOST_PADPT_HOST_VNET_QINFO_WIDTH (64) +#define NBL_HOST_PADPT_HOST_VNET_QINFO_DWLEN (2) +union host_padpt_host_vnet_qinfo_u { + struct host_padpt_host_vnet_qinfo { + u32 fuction_id:3; /* [2:0] Default:0x0 RW */ + u32 device_id:5; /* [7:3] Default:0x0 RW */ + u32 bus_id:8; /* [15:8] Default:0x0 RW */ + u32 msix_idx:13; /* [28:16] Default:0x0 RW */ + u32 msix_idx_valid:1; /* [29] Default:0x0 RW */ + u32 log_en:1; /* [30] Default:0x0 RW */ + u32 valid:1; /* [31] Default:0x0 RW */ + u32 tph_en:1; /* [32] Default:0x0 RW */ + u32 ido_en:1; /* [33] Default:0x0 RW */ + u32 rlo_en:1; /* [34] Default:0x0 RW */ + u32 rsv:29; /* [63:35] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_VNET_QINFO_DWLEN]; +} __packed; +#define NBL_HOST_PADPT_HOST_VNET_QINFO_REG(r) (NBL_HOST_PADPT_HOST_VNET_QINFO_ADDR + \ + (NBL_HOST_PADPT_HOST_VNET_QINFO_DWLEN * 4) * (r)) + +#define NBL_HOST_PADPT_HOST_MSIX_INFO_ADDR (0xf5c000) +#define NBL_HOST_PADPT_HOST_MSIX_INFO_DEPTH (5120) +#define NBL_HOST_PADPT_HOST_MSIX_INFO_WIDTH (64) +#define NBL_HOST_PADPT_HOST_MSIX_INFO_DWLEN (2) +union host_padpt_host_msix_info_u { + struct host_padpt_host_msix_info { + u32 intrl_pnum:16; /* [15:0] Default:0x0 RW */ + u32 intrl_rate:16; /* [31:16] Default:0x0 RW */ + u32 fuction_id:3; /* [34:32] Default:0x0 RW */ + u32 device_id:5; /* [39:35] Default:0x0 RW */ + u32 bus_id:8; /* [47:40] Default:0x0 RW */ + u32 valid:1; /* [48:48] Default:0x0 RW */ + u32 msix_mask_en:1; /* [49:49] Default:0x0 RW */ + u32 rsv:14; /* [63:50] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_MSIX_INFO_DWLEN]; +} __packed; +#define NBL_HOST_PADPT_HOST_MSIX_INFO_REG(r) (NBL_HOST_PADPT_HOST_MSIX_INFO_ADDR + \ + (NBL_HOST_PADPT_HOST_MSIX_INFO_DWLEN * 4) * (r)) + +#define NBL_HOST_PADPT_HOST_MSIX_CTRL_ADDR (0xf6c000) +#define NBL_HOST_PADPT_HOST_MSIX_CTRL_DEPTH (5120) +#define NBL_HOST_PADPT_HOST_MSIX_CTRL_WIDTH (128) +#define NBL_HOST_PADPT_HOST_MSIX_CTRL_DWLEN (4) +union host_padpt_host_msix_ctrl_u { + struct host_padpt_host_msix_ctrl { + u32 lower_address:32; /* [31:0] Default:0x0 RW */ + u32 upper_address:32; /* [63:32] Default:0x0 RW */ + u32 message_data:32; /* [95:64] Default:0x0 RW */ + u32 vector_control_mask:1; /* [96:96] Default:0x0 RW */ + u32 vector_control_rsv:31; /* [127:97] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PADPT_HOST_MSIX_CTRL_DWLEN]; +} __packed; +#define NBL_HOST_PADPT_HOST_MSIX_CTRL_REG(r) (NBL_HOST_PADPT_HOST_MSIX_CTRL_ADDR + \ + (NBL_HOST_PADPT_HOST_MSIX_CTRL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_host_pcap.h 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_host_pcap.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2adbe7d2dc7ad0c4c47b3631be684066e62043a
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_host_pcap.h
@@ -0,0 +1,529 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_HOST_PCAP_H
+#define NBL_HOST_PCAP_H 1
+
+#include <linux/types.h>
+
+#define NBL_HOST_PCAP_BASE (0x015A4000)
+
+#define NBL_HOST_PCAP_INT_STATUS_ADDR (0x15a4000)
+#define NBL_HOST_PCAP_INT_STATUS_DEPTH (1)
+#define NBL_HOST_PCAP_INT_STATUS_WIDTH (32)
+#define NBL_HOST_PCAP_INT_STATUS_DWLEN (1)
+union host_pcap_int_status_u {
+	struct host_pcap_int_status {
+		u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 rsv2:2; /* [03:02] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv1:2; /* [06:05] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */
+		u32 rsv:23; /* [31:09] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_INT_MASK_ADDR (0x15a4004)
+#define NBL_HOST_PCAP_INT_MASK_DEPTH (1)
+#define NBL_HOST_PCAP_INT_MASK_WIDTH (32)
+#define NBL_HOST_PCAP_INT_MASK_DWLEN (1)
+union host_pcap_int_mask_u {
+	struct host_pcap_int_mask {
+		u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 rsv2:2; /* [03:02] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv1:2; /* [06:05] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RW */
+		u32 rsv:23; /* [31:09] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_INT_SET_ADDR (0x15a4008)
+#define NBL_HOST_PCAP_INT_SET_DEPTH (1)
+#define NBL_HOST_PCAP_INT_SET_WIDTH (32)
+#define NBL_HOST_PCAP_INT_SET_DWLEN (1)
+union host_pcap_int_set_u {
+	struct host_pcap_int_set {
+		u32 fifo_uflw_err:1; /* [00:00] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 rsv2:2; /* [03:02] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 WO */
+		u32 rsv1:2; /* [06:05] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 WO */
+		u32 rsv:23; /* [31:09] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_CIF_ERR_INFO_ADDR (0x15a4040)
+#define NBL_HOST_PCAP_CIF_ERR_INFO_DEPTH (1)
+#define NBL_HOST_PCAP_CIF_ERR_INFO_WIDTH (32)
+#define NBL_HOST_PCAP_CIF_ERR_INFO_DWLEN (1)
+union host_pcap_cif_err_info_u {
+	struct host_pcap_cif_err_info {
+		u32 addr:30; /* [29:00] Default:0x0 RO */
+		u32 wr_err:1; /* [30:30] Default:0x0 RO */
+		u32 ucor_err:1; /* [31:31] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_CIF_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_CAR_CTRL_ADDR (0x15a4100)
+#define NBL_HOST_PCAP_CAR_CTRL_DEPTH (1)
+#define NBL_HOST_PCAP_CAR_CTRL_WIDTH (32)
+#define NBL_HOST_PCAP_CAR_CTRL_DWLEN (1)
+union host_pcap_car_ctrl_u {
+	struct host_pcap_car_ctrl {
+		u32 sctr_car:1; /* [00:00] Default:0x1 RW */
+		u32 rctr_car:1; /* [01:01] Default:0x1 RW */
+		u32 rc_car:1; /* [02:02] Default:0x1 RW */
+		u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */
+		u32 rsv:28; /* [31:04] Default:0x0
RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_EN_ADDR (0x15a4200) +#define NBL_HOST_PCAP_TX_CAP_EN_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_EN_WIDTH (32) +#define NBL_HOST_PCAP_TX_CAP_EN_DWLEN (1) +union host_pcap_tx_cap_en_u { + struct host_pcap_tx_cap_en { + u32 force_en:1; /* [00:00] Default:0x1 RW */ + u32 pattern_trigger_en:1; /* [01:01] Default:0x0 RW */ + u32 err_trigger_en:1; /* [02:02] Default:0x0 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_EN_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_STORE_ADDR (0x15a4204) +#define NBL_HOST_PCAP_TX_CAP_STORE_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_STORE_WIDTH (32) +#define NBL_HOST_PCAP_TX_CAP_STORE_DWLEN (1) +union host_pcap_tx_cap_store_u { + struct host_pcap_tx_cap_store { + u32 match_mode:2; /* [01:00] Default:0x1 RW */ + u32 match_only_en:1; /* [02:02] Default:0x1 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_STORE_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_STALL_ADDR (0x15a4208) +#define NBL_HOST_PCAP_TX_CAP_STALL_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_STALL_WIDTH (32) +#define NBL_HOST_PCAP_TX_CAP_STALL_DWLEN (1) +union host_pcap_tx_cap_stall_u { + struct host_pcap_tx_cap_stall { + u32 error_full_stall_ena:1; /* [00:00] Default:0x0 RW */ + u32 error_dly_stall_ena:1; /* [01:01] Default:0x0 RW */ + u32 matched_full_stall_ena:1; /* [02:02] Default:0x0 RW */ + u32 matched_dly_stall_ena:1; /* [03:03] Default:0x0 RW */ + u32 ex_stall_ena:1; /* [04:04] Default:0x1 RW */ + u32 aged_stall_ena:1; /* [05:05] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_STALL_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_CLR_ADDR (0x15a420c) +#define NBL_HOST_PCAP_TX_CAP_CLR_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_CLR_WIDTH (32) +#define NBL_HOST_PCAP_TX_CAP_CLR_DWLEN (1) +union host_pcap_tx_cap_clr_u { + struct host_pcap_tx_cap_clr { + u32 tlp_clr:1; /* [00:00] Default:0x0 RW */ + u32 ltssm_clr:1; /* [01:01] Default:0x0 RW */ + u32 timer_clr:1; /* [02:02] Default:0x0 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_CLR_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_EN_ADDR (0x15a4300) +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_EN_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_EN_WIDTH (512) +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_EN_DWLEN (16) +union host_pcap_tx_cap_store_pattern_en_u { + struct host_pcap_tx_cap_store_pattern_en { + u32 match_arr[16]; /* [511:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_EN_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_ADDR (0x15a4340) +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_WIDTH (512) +#define NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_DWLEN (16) +union host_pcap_tx_cap_store_pattern_u { + struct host_pcap_tx_cap_store_pattern { + u32 match_arr[16]; /* [511:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_STORE_PATTERN_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_EN_ADDR (0x15a4380) +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_EN_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_EN_WIDTH (512) +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_EN_DWLEN (16) +union host_pcap_tx_cap_trigger_pattern_en_u { + struct 
host_pcap_tx_cap_trigger_pattern_en { + u32 match_arr[16]; /* [511:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_EN_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_ADDR (0x15a43c0) +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_DEPTH (1) +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_WIDTH (512) +#define NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_DWLEN (16) +union host_pcap_tx_cap_trigger_pattern_u { + struct host_pcap_tx_cap_trigger_pattern { + u32 match_arr[16]; /* [511:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_CAP_TRIGGER_PATTERN_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_CAP_EN_ADDR (0x15a4800) +#define NBL_HOST_PCAP_RX_CAP_EN_DEPTH (1) +#define NBL_HOST_PCAP_RX_CAP_EN_WIDTH (32) +#define NBL_HOST_PCAP_RX_CAP_EN_DWLEN (1) +union host_pcap_rx_cap_en_u { + struct host_pcap_rx_cap_en { + u32 force_en:1; /* [00:00] Default:0x1 RW */ + u32 pattern_trigger_en:1; /* [01:01] Default:0x0 RW */ + u32 err_trigger_en:1; /* [02:02] Default:0x0 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_RX_CAP_EN_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_CAP_STORE_ADDR (0x15a4804) +#define NBL_HOST_PCAP_RX_CAP_STORE_DEPTH (1) +#define NBL_HOST_PCAP_RX_CAP_STORE_WIDTH (32) +#define NBL_HOST_PCAP_RX_CAP_STORE_DWLEN (1) +union host_pcap_rx_cap_store_u { + struct host_pcap_rx_cap_store { + u32 match_mode:2; /* [01:00] Default:0x1 RW */ + u32 match_only_en:1; /* [02:02] Default:0x1 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_RX_CAP_STORE_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_CAP_STALL_ADDR (0x15a4808) +#define NBL_HOST_PCAP_RX_CAP_STALL_DEPTH (1) +#define NBL_HOST_PCAP_RX_CAP_STALL_WIDTH (32) +#define NBL_HOST_PCAP_RX_CAP_STALL_DWLEN (1) +union host_pcap_rx_cap_stall_u { + struct host_pcap_rx_cap_stall { + u32 error_full_stall_ena:1; /* [00:00] Default:0x0 RW */ + u32 error_dly_stall_ena:1; /* [01:01] Default:0x1 RW */ + u32 matched_full_stall_ena:1; /* [02:02] Default:0x0 RW */ + u32 matched_dly_stall_ena:1; /* [03:03] Default:0x0 RW */ + u32 ex_stall_ena:1; /* [04:04] Default:0x0 RW */ + u32 aged_stall_ena:1; /* [05:05] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_RX_CAP_STALL_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_CAP_CLR_ADDR (0x15a480c) +#define NBL_HOST_PCAP_RX_CAP_CLR_DEPTH (1) +#define NBL_HOST_PCAP_RX_CAP_CLR_WIDTH (32) +#define NBL_HOST_PCAP_RX_CAP_CLR_DWLEN (1) +union host_pcap_rx_cap_clr_u { + struct host_pcap_rx_cap_clr { + u32 tlp_clr:1; /* [00:00] Default:0x0 RW */ + u32 ltssm_clr:1; /* [01:01] Default:0x0 RW */ + u32 timer_clr:1; /* [02:02] Default:0x0 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_RX_CAP_CLR_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_EN_ADDR (0x15a4900) +#define NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_EN_DEPTH (1) +#define NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_EN_WIDTH (512) +#define NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_EN_DWLEN (16) +union host_pcap_rx_cap_store_pattern_en_u { + struct host_pcap_rx_cap_store_pattern_en { + u32 match_arr[16]; /* [511:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_EN_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_ADDR (0x15a4940) +#define NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_DEPTH (1) +#define NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_WIDTH (512) +#define 
NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_DWLEN (16)
+union host_pcap_rx_cap_store_pattern_u {
+	struct host_pcap_rx_cap_store_pattern {
+		u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_RX_CAP_STORE_PATTERN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_EN_ADDR (0x15a4980)
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_EN_DEPTH (1)
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_EN_WIDTH (512)
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_EN_DWLEN (16)
+union host_pcap_rx_cap_trigger_pattern_en_u {
+	struct host_pcap_rx_cap_trigger_pattern_en {
+		u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_EN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_ADDR (0x15a49c0)
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_DEPTH (1)
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_WIDTH (512)
+#define NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_DWLEN (16)
+union host_pcap_rx_cap_trigger_pattern_u {
+	struct host_pcap_rx_cap_trigger_pattern {
+		u32 match_arr[16]; /* [511:00] Default:0x0 RW */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_RX_CAP_TRIGGER_PATTERN_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_TX_RAM_ERR_ADDR (0x15a5000)
+#define NBL_HOST_PCAP_TX_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PCAP_TX_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PCAP_TX_RAM_ERR_DWLEN (1)
+union host_pcap_tx_ram_err_u {
+	struct host_pcap_tx_ram_err {
+		u32 tlp_cap:1; /* [00:00] Default:0x0 RO */
+		u32 timer_cap:1; /* [01:01] Default:0x0 RO */
+		u32 rsv:30; /* [31:02] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_TX_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_RX_RAM_ERR_ADDR (0x15a5004)
+#define NBL_HOST_PCAP_RX_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PCAP_RX_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PCAP_RX_RAM_ERR_DWLEN (1)
+union host_pcap_rx_ram_err_u {
+	struct host_pcap_rx_ram_err {
+		u32 tlp_cap:1; /* [00:00] Default:0x0 RO */
+		u32 timer_cap:1; /* [01:01] Default:0x0 RO */
+		u32 rsv:30; /* [31:02] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_RX_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_LTSSM_RAM_ERR_ADDR (0x15a5008)
+#define NBL_HOST_PCAP_LTSSM_RAM_ERR_DEPTH (1)
+#define NBL_HOST_PCAP_LTSSM_RAM_ERR_WIDTH (32)
+#define NBL_HOST_PCAP_LTSSM_RAM_ERR_DWLEN (1)
+union host_pcap_ltssm_ram_err_u {
+	struct host_pcap_ltssm_ram_err {
+		u32 ltssm_cap:1; /* [00:00] Default:0x0 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_LTSSM_RAM_ERR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_TX_RAM_TLP_CAP_ADDR (0x15a500c)
+#define NBL_HOST_PCAP_TX_RAM_TLP_CAP_DEPTH (1)
+#define NBL_HOST_PCAP_TX_RAM_TLP_CAP_WIDTH (32)
+#define NBL_HOST_PCAP_TX_RAM_TLP_CAP_DWLEN (1)
+union host_pcap_tx_ram_tlp_cap_u {
+	struct host_pcap_tx_ram_tlp_cap {
+		u32 ram_err_info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_TX_RAM_TLP_CAP_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_TX_RAM_TIMER_CAP_ADDR (0x15a5010)
+#define NBL_HOST_PCAP_TX_RAM_TIMER_CAP_DEPTH (1)
+#define NBL_HOST_PCAP_TX_RAM_TIMER_CAP_WIDTH (32)
+#define NBL_HOST_PCAP_TX_RAM_TIMER_CAP_DWLEN (1)
+union host_pcap_tx_ram_timer_cap_u {
+	struct host_pcap_tx_ram_timer_cap {
+		u32 ram_err_info:32; /* [31:00] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_TX_RAM_TIMER_CAP_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_RX_RAM_TLP_CAP_ADDR (0x15a5014)
+#define NBL_HOST_PCAP_RX_RAM_TLP_CAP_DEPTH (1)
+#define NBL_HOST_PCAP_RX_RAM_TLP_CAP_WIDTH
(32) +#define NBL_HOST_PCAP_RX_RAM_TLP_CAP_DWLEN (1) +union host_pcap_rx_ram_tlp_cap_u { + struct host_pcap_rx_ram_tlp_cap { + u32 ram_err_info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_RX_RAM_TLP_CAP_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_RAM_TIMER_CAP_ADDR (0x15a5018) +#define NBL_HOST_PCAP_RX_RAM_TIMER_CAP_DEPTH (1) +#define NBL_HOST_PCAP_RX_RAM_TIMER_CAP_WIDTH (32) +#define NBL_HOST_PCAP_RX_RAM_TIMER_CAP_DWLEN (1) +union host_pcap_rx_ram_timer_cap_u { + struct host_pcap_rx_ram_timer_cap { + u32 ram_err_info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_RX_RAM_TIMER_CAP_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RAM_LTSSM_CAP_ADDR (0x15a501c) +#define NBL_HOST_PCAP_RAM_LTSSM_CAP_DEPTH (1) +#define NBL_HOST_PCAP_RAM_LTSSM_CAP_WIDTH (32) +#define NBL_HOST_PCAP_RAM_LTSSM_CAP_DWLEN (1) +union host_pcap_ram_ltssm_cap_u { + struct host_pcap_ram_ltssm_cap { + u32 ram_err_info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_RAM_LTSSM_CAP_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_DEBUG_REG_LTSSM_WADDR_ADDR (0x15a5040) +#define NBL_HOST_PCAP_DEBUG_REG_LTSSM_WADDR_DEPTH (1) +#define NBL_HOST_PCAP_DEBUG_REG_LTSSM_WADDR_WIDTH (32) +#define NBL_HOST_PCAP_DEBUG_REG_LTSSM_WADDR_DWLEN (1) +union host_pcap_debug_reg_ltssm_waddr_u { + struct host_pcap_debug_reg_ltssm_waddr { + u32 dbg:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_DEBUG_REG_LTSSM_WADDR_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_ADDR (0x15a5100) +#define NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_DEPTH (4) +#define NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_WIDTH (32) +#define NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_DWLEN (1) +union host_pcap_tx_debug_reg_cur_timer_u { + struct host_pcap_tx_debug_reg_cur_timer { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_DWLEN]; +} __packed; +#define NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_REG(r) (NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_ADDR + \ + (NBL_HOST_PCAP_TX_DEBUG_REG_CUR_TIMER_DWLEN * 4) * (r)) + +#define NBL_HOST_PCAP_TX_DEBUG_REG_TLP_WADDR_ADDR (0x15a5110) +#define NBL_HOST_PCAP_TX_DEBUG_REG_TLP_WADDR_DEPTH (1) +#define NBL_HOST_PCAP_TX_DEBUG_REG_TLP_WADDR_WIDTH (32) +#define NBL_HOST_PCAP_TX_DEBUG_REG_TLP_WADDR_DWLEN (1) +union host_pcap_tx_debug_reg_tlp_waddr_u { + struct host_pcap_tx_debug_reg_tlp_waddr { + u32 dbg:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_DEBUG_REG_TLP_WADDR_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_TX_DEBUG_REG_CAP_ADDR (0x15a5114) +#define NBL_HOST_PCAP_TX_DEBUG_REG_CAP_DEPTH (1) +#define NBL_HOST_PCAP_TX_DEBUG_REG_CAP_WIDTH (32) +#define NBL_HOST_PCAP_TX_DEBUG_REG_CAP_DWLEN (1) +union host_pcap_tx_debug_reg_cap_u { + struct host_pcap_tx_debug_reg_cap { + u32 cap_en_dbg:1; /* [00:00] Default:0x0 RO */ + u32 cap_stalled_dbg:1; /* [01:01] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_HOST_PCAP_TX_DEBUG_REG_CAP_DWLEN]; +} __packed; + +#define NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_ADDR (0x15a5200) +#define NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_DEPTH (4) +#define NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_WIDTH (32) +#define NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_DWLEN (1) +union host_pcap_rx_debug_reg_cur_timer_u { + struct host_pcap_rx_debug_reg_cur_timer { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } 
__packed info;
+	u32 data[NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_DWLEN];
+} __packed;
+#define NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_REG(r) (NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_ADDR + \
+	(NBL_HOST_PCAP_RX_DEBUG_REG_CUR_TIMER_DWLEN * 4) * (r))
+
+#define NBL_HOST_PCAP_RX_DEBUG_REG_TLP_WADDR_ADDR (0x15a5210)
+#define NBL_HOST_PCAP_RX_DEBUG_REG_TLP_WADDR_DEPTH (1)
+#define NBL_HOST_PCAP_RX_DEBUG_REG_TLP_WADDR_WIDTH (32)
+#define NBL_HOST_PCAP_RX_DEBUG_REG_TLP_WADDR_DWLEN (1)
+union host_pcap_rx_debug_reg_tlp_waddr_u {
+	struct host_pcap_rx_debug_reg_tlp_waddr {
+		u32 dbg:8; /* [07:00] Default:0x0 RO */
+		u32 rsv:24; /* [31:08] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_RX_DEBUG_REG_TLP_WADDR_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_RX_DEBUG_REG_CAP_ADDR (0x15a5214)
+#define NBL_HOST_PCAP_RX_DEBUG_REG_CAP_DEPTH (1)
+#define NBL_HOST_PCAP_RX_DEBUG_REG_CAP_WIDTH (32)
+#define NBL_HOST_PCAP_RX_DEBUG_REG_CAP_DWLEN (1)
+union host_pcap_rx_debug_reg_cap_u {
+	struct host_pcap_rx_debug_reg_cap {
+		u32 cap_en_dbg:1; /* [00:00] Default:0x0 RO */
+		u32 cap_stalled_dbg:1; /* [01:01] Default:0x0 RO */
+		u32 rsv:30; /* [31:02] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_RX_DEBUG_REG_CAP_DWLEN];
+} __packed;
+
+#define NBL_HOST_PCAP_LTSSM_RAM_TABLE_ADDR (0x15a6000)
+#define NBL_HOST_PCAP_LTSSM_RAM_TABLE_DEPTH (256)
+#define NBL_HOST_PCAP_LTSSM_RAM_TABLE_WIDTH (128)
+#define NBL_HOST_PCAP_LTSSM_RAM_TABLE_DWLEN (4)
+union host_pcap_ltssm_ram_table_u {
+	struct host_pcap_ltssm_ram_table {
+		u32 ltssm:5; /* [4:0] Default:0x0 RO */
+		u32 ltssm_rsv:3; /* [7:5] Default:0x0 RO */
+		u32 timer:32; /* [103:8] Default:0x0 RO */
+		u32 timer_arr[2]; /* [103:8] Default:0x0 RO */
+		u32 rsv:24; /* [127:104] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_LTSSM_RAM_TABLE_DWLEN];
+} __packed;
+#define NBL_HOST_PCAP_LTSSM_RAM_TABLE_REG(r) (NBL_HOST_PCAP_LTSSM_RAM_TABLE_ADDR + \
+	(NBL_HOST_PCAP_LTSSM_RAM_TABLE_DWLEN * 4) * (r))
+
+#define NBL_HOST_PCAP_TX_TLP_RAM_TABLE_ADDR (0x15b4000)
+#define NBL_HOST_PCAP_TX_TLP_RAM_TABLE_DEPTH (256)
+#define NBL_HOST_PCAP_TX_TLP_RAM_TABLE_WIDTH (1024)
+#define NBL_HOST_PCAP_TX_TLP_RAM_TABLE_DWLEN (32)
+union host_pcap_tx_tlp_ram_table_u {
+	struct host_pcap_tx_tlp_ram_table {
+		u32 native_data_arr[16]; /* [511:0] Default:0x0 RO */
+		u32 timer:32; /* [607:512] Default:0x0 RO */
+		u32 timer_arr[2]; /* [607:512] Default:0x0 RO */
+		u32 rsv:32; /* [1023:608] Default:0x0 RO */
+		u32 rsv_arr[12]; /* [1023:608] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_TX_TLP_RAM_TABLE_DWLEN];
+} __packed;
+#define NBL_HOST_PCAP_TX_TLP_RAM_TABLE_REG(r) (NBL_HOST_PCAP_TX_TLP_RAM_TABLE_ADDR + \
+	(NBL_HOST_PCAP_TX_TLP_RAM_TABLE_DWLEN * 4) * (r))
+
+#define NBL_HOST_PCAP_RX_TLP_RAM_TABLE_ADDR (0x15bc000)
+#define NBL_HOST_PCAP_RX_TLP_RAM_TABLE_DEPTH (256)
+#define NBL_HOST_PCAP_RX_TLP_RAM_TABLE_WIDTH (1024)
+#define NBL_HOST_PCAP_RX_TLP_RAM_TABLE_DWLEN (32)
+union host_pcap_rx_tlp_ram_table_u {
+	struct host_pcap_rx_tlp_ram_table {
+		u32 native_data_arr[16]; /* [511:0] Default:0x0 RO */
+		u32 timer:32; /* [607:512] Default:0x0 RO */
+		u32 timer_arr[2]; /* [607:512] Default:0x0 RO */
+		u32 rsv:32; /* [1023:608] Default:0x0 RO */
+		u32 rsv_arr[12]; /* [1023:608] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_HOST_PCAP_RX_TLP_RAM_TABLE_DWLEN];
+} __packed;
+#define NBL_HOST_PCAP_RX_TLP_RAM_TABLE_REG(r) (NBL_HOST_PCAP_RX_TLP_RAM_TABLE_ADDR + \
+	(NBL_HOST_PCAP_RX_TLP_RAM_TABLE_DWLEN * 4) * (r))
+
+#endif
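As a usage sketch for the pcap header above (assuming a hypothetical ioremap()'d BAR base hw_addr; the flow is illustrative, not the driver's actual code path), the capture-control registers are driven with a read-modify-write through the union's two views, for example arming the TX capture on a pattern trigger:

	union host_pcap_tx_cap_en_u en;

	/* read the current dword, switch from forced to pattern-triggered capture */
	en.data[0] = readl(hw_addr + NBL_HOST_PCAP_TX_CAP_EN_ADDR);
	en.info.force_en = 0;
	en.info.pattern_trigger_en = 1;
	writel(en.data[0], hw_addr + NBL_HOST_PCAP_TX_CAP_EN_ADDR);

Captured TLPs can then be read back entry by entry through NBL_HOST_PCAP_TX_TLP_RAM_TABLE_REG(r).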
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_mailbox.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_mailbox.h
new file mode 100644
index 0000000000000000000000000000000000000000..69cf1a94226f1b301fec4925650c5157b3b222e2
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_mailbox.h
@@ -0,0 +1,992 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_MAILBOX_H
+#define NBL_MAILBOX_H 1
+
+#include <linux/types.h>
+
+#define NBL_MAILBOX_BASE (0x00FB0000)
+
+#define NBL_MAILBOX_INT_STATUS_ADDR (0xfb0000)
+#define NBL_MAILBOX_INT_STATUS_DEPTH (1)
+#define NBL_MAILBOX_INT_STATUS_WIDTH (32)
+#define NBL_MAILBOX_INT_STATUS_DWLEN (1)
+union mailbox_int_status_u {
+	struct mailbox_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */
+		u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */
+		u32 cif_err:1; /* [05:05] Default:0x0 RWC */
+		u32 dmail_rdif_err:1; /* [06:06] Default:0x0 RWC */
+		u32 umail_rdif_err:1; /* [07:07] Default:0x0 RWC */
+		u32 rsv:24; /* [31:08] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_MAILBOX_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_MAILBOX_INT_MASK_ADDR (0xfb0004)
+#define NBL_MAILBOX_INT_MASK_DEPTH (1)
+#define NBL_MAILBOX_INT_MASK_WIDTH (32)
+#define NBL_MAILBOX_INT_MASK_DWLEN (1)
+union mailbox_int_mask_u {
+	struct mailbox_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */
+		u32 data_cor_err:1; /* [04:04] Default:0x0 RW */
+		u32 cif_err:1; /* [05:05] Default:0x0 RW */
+		u32 dmail_rdif_err:1; /* [06:06] Default:0x0 RW */
+		u32 umail_rdif_err:1; /* [07:07] Default:0x0 RW */
+		u32 rsv:24; /* [31:08] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_MAILBOX_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_MAILBOX_INT_SET_ADDR (0xfb0008)
+#define NBL_MAILBOX_INT_SET_DEPTH (1)
+#define NBL_MAILBOX_INT_SET_WIDTH (32)
+#define NBL_MAILBOX_INT_SET_DWLEN (1)
+union mailbox_int_set_u {
+	struct mailbox_int_set {
+		u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+		u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */
+		u32 data_cor_err:1; /* [04:04] Default:0x0 WO */
+		u32 cif_err:1; /* [05:05] Default:0x0 WO */
+		u32 dmail_rdif_err:1; /* [06:06] Default:0x0 WO */
+		u32 umail_rdif_err:1; /* [07:07] Default:0x0 WO */
+		u32 rsv:24; /* [31:08] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_MAILBOX_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_MAILBOX_INIT_DONE_ADDR (0xfb000c)
+#define NBL_MAILBOX_INIT_DONE_DEPTH (1)
+#define NBL_MAILBOX_INIT_DONE_WIDTH (32)
+#define NBL_MAILBOX_INIT_DONE_DWLEN (1)
+union mailbox_init_done_u {
+	struct mailbox_init_done {
+		u32 done:1; /* [00:00] Default:0x0 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_MAILBOX_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_MAILBOX_RAM_SBITERR_ADDR (0xfb0040)
+#define NBL_MAILBOX_RAM_SBITERR_DEPTH (1)
+#define NBL_MAILBOX_RAM_SBITERR_WIDTH (32)
+#define NBL_MAILBOX_RAM_SBITERR_DWLEN (1)
+union mailbox_ram_sbiterr_u {
+	struct mailbox_ram_sbiterr {
+		u32 rx_notify_ram:1; /* [0] Default:0x0 RC */
+		u32
rx_hd_ptr_ram:1; /* [1] Default:0x0 RC */ + u32 rx_cfg_ram:1; /* [2] Default:0x0 RC */ + u32 tx_notify_ram:1; /* [3] Default:0x0 RC */ + u32 tx_hd_ptr_ram:1; /* [4] Default:0x0 RC */ + u32 tx_cfg_ram:1; /* [5] Default:0x0 RC */ + u32 map_ram:1; /* [6] Default:0x0 RC */ + u32 tx_queue_dbg_ram:1; /* [7] Default:0x0 RC */ + u32 rx_queue_dbg_ram:1; /* [8] Default:0x0 RC */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_RAM_SBITERR_DWLEN]; +} __packed; + +#define NBL_MAILBOX_RAM_MBITERR_ADDR (0xfb0044) +#define NBL_MAILBOX_RAM_MBITERR_DEPTH (1) +#define NBL_MAILBOX_RAM_MBITERR_WIDTH (32) +#define NBL_MAILBOX_RAM_MBITERR_DWLEN (1) +union mailbox_ram_mbiterr_u { + struct mailbox_ram_mbiterr { + u32 rx_notify_ram:1; /* [0] Default:0x0 RC */ + u32 rx_hd_ptr_ram:1; /* [1] Default:0x0 RC */ + u32 rx_cfg_ram:1; /* [2] Default:0x0 RC */ + u32 tx_notify_ram:1; /* [3] Default:0x0 RC */ + u32 tx_hd_ptr_ram:1; /* [4] Default:0x0 RC */ + u32 tx_cfg_ram:1; /* [5] Default:0x0 RC */ + u32 map_ram:1; /* [6] Default:0x0 RC */ + u32 tx_queue_dbg_ram:1; /* [7] Default:0x0 RC */ + u32 rx_queue_dbg_ram:1; /* [8] Default:0x0 RC */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_RAM_MBITERR_DWLEN]; +} __packed; + +#define NBL_MAILBOX_CAR_CTRL_ADDR (0xfb0098) +#define NBL_MAILBOX_CAR_CTRL_DEPTH (1) +#define NBL_MAILBOX_CAR_CTRL_WIDTH (32) +#define NBL_MAILBOX_CAR_CTRL_DWLEN (1) +union mailbox_car_ctrl_u { + struct mailbox_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_MAILBOX_FLOW_EN_ADDR (0xfb009c) +#define NBL_MAILBOX_FLOW_EN_DEPTH (1) +#define NBL_MAILBOX_FLOW_EN_WIDTH (32) +#define NBL_MAILBOX_FLOW_EN_DWLEN (1) +union mailbox_flow_en_u { + struct mailbox_flow_en { + u32 mbx_umail_dif_ack_cnt_en:1; /* [00:00] Default:0x1 RW */ + u32 mbx_dmail_dif_ack_cnt_en:1; /* [01:01] Default:0x1 RW */ + u32 mbx_umail_dif_rerr_cnt_en:1; /* [02:02] Default:0x1 RW */ + u32 mbx_dmail_dif_rerr_cnt_en:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_FLOW_EN_DWLEN]; +} __packed; + +#define NBL_MAILBOX_CIF_ERR_INFO_ADDR (0xfb00a0) +#define NBL_MAILBOX_CIF_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_CIF_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_CIF_ERR_INFO_DWLEN (1) +union mailbox_cif_err_info_u { + struct mailbox_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_ADDR (0xfb00a8) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DWLEN (1) +union mailbox_umail_rdif_addr_l_u { + struct mailbox_umail_rdif_addr_l { + u32 mbx_umail_rdif_addr_l:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_ADDR (0xfb00ac) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DWLEN (1) +union mailbox_umail_rdif_addr_h_u { + struct mailbox_umail_rdif_addr_h { + 
u32 mbx_umail_rdif_addr_h:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_ADDR (0xfb00b0) +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_DWLEN (1) +union mailbox_umail_rdif_bnum_u { + struct mailbox_umail_rdif_bnum { + u32 mbx_umail_rdif_bnum:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_BNUM_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_INFO_ADDR (0xfb00b4) +#define NBL_MAILBOX_UMAIL_RDIF_INFO_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_INFO_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_INFO_DWLEN (1) +union mailbox_umail_rdif_info_u { + struct mailbox_umail_rdif_info { + u32 mbx_umail_rdif_info:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_BDF_ADDR (0xfb00b8) +#define NBL_MAILBOX_UMAIL_RDIF_BDF_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_BDF_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_BDF_DWLEN (1) +union mailbox_umail_rdif_bdf_u { + struct mailbox_umail_rdif_bdf { + u32 mbx_umail_rdif_bdf:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_BDF_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW0_ADDR (0xfb00bc) +#define NBL_MAILBOX_UMAIL_RDESC_DW0_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW0_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW0_DWLEN (1) +union mailbox_umail_rdesc_dw0_u { + struct mailbox_umail_rdesc_dw0 { + u32 mbx_umail_rdesc_dw0:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW0_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW1_ADDR (0xfb00c0) +#define NBL_MAILBOX_UMAIL_RDESC_DW1_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW1_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW1_DWLEN (1) +union mailbox_umail_rdesc_dw1_u { + struct mailbox_umail_rdesc_dw1 { + u32 mbx_umail_rdesc_dw1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW1_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW2_ADDR (0xfb00c4) +#define NBL_MAILBOX_UMAIL_RDESC_DW2_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW2_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW2_DWLEN (1) +union mailbox_umail_rdesc_dw2_u { + struct mailbox_umail_rdesc_dw2 { + u32 mbx_umail_rdesc_dw2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW2_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW3_ADDR (0xfb00c8) +#define NBL_MAILBOX_UMAIL_RDESC_DW3_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW3_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW3_DWLEN (1) +union mailbox_umail_rdesc_dw3_u { + struct mailbox_umail_rdesc_dw3 { + u32 mbx_umail_rdesc_dw3:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW3_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_ADDR (0xfb00cc) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DWLEN (1) +union mailbox_umail_wdif_addr_l_u { + struct mailbox_umail_wdif_addr_l { + u32 mbx_umail_wdif_addr_l:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_ADDR 
(0xfb00d0) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DWLEN (1) +union mailbox_umail_wdif_addr_h_u { + struct mailbox_umail_wdif_addr_h { + u32 mbx_umail_wdif_addr_h:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_ADDR (0xfb00d4) +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_DWLEN (1) +union mailbox_umail_wdif_bnum_u { + struct mailbox_umail_wdif_bnum { + u32 mbx_umail_wdif_bnum:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_BNUM_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_INFO_ADDR (0xfb00d8) +#define NBL_MAILBOX_UMAIL_WDIF_INFO_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_INFO_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_INFO_DWLEN (1) +union mailbox_umail_wdif_info_u { + struct mailbox_umail_wdif_info { + u32 mbx_umail_wdif_info:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_BDF_ADDR (0xfb00dc) +#define NBL_MAILBOX_UMAIL_WDIF_BDF_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_BDF_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_BDF_DWLEN (1) +union mailbox_umail_wdif_bdf_u { + struct mailbox_umail_wdif_bdf { + u32 mbx_umail_wdif_bdf:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_BDF_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW0_ADDR (0xfb00e0) +#define NBL_MAILBOX_UMAIL_WDESC_DW0_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW0_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW0_DWLEN (1) +union mailbox_umail_wdesc_dw0_u { + struct mailbox_umail_wdesc_dw0 { + u32 mbx_umail_wdesc_dw0:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW0_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW1_ADDR (0xfb00e4) +#define NBL_MAILBOX_UMAIL_WDESC_DW1_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW1_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW1_DWLEN (1) +union mailbox_umail_wdesc_dw1_u { + struct mailbox_umail_wdesc_dw1 { + u32 mbx_umail_wdesc_dw1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW1_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW2_ADDR (0xfb00e8) +#define NBL_MAILBOX_UMAIL_WDESC_DW2_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW2_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW2_DWLEN (1) +union mailbox_umail_wdesc_dw2_u { + struct mailbox_umail_wdesc_dw2 { + u32 mbx_umail_wdesc_dw2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW2_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW3_ADDR (0xfb00ec) +#define NBL_MAILBOX_UMAIL_WDESC_DW3_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW3_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW3_DWLEN (1) +union mailbox_umail_wdesc_dw3_u { + struct mailbox_umail_wdesc_dw3 { + u32 mbx_umail_wdesc_dw3:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW3_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_L_ADDR (0xfb00f0) +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_L_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_L_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_L_DWLEN (1) +union 
mailbox_dmail_rdif_addr_l_u { + struct mailbox_dmail_rdif_addr_l { + u32 mbx_dmail_rdif_addr_l:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDIF_ADDR_L_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_H_ADDR (0xfb00f4) +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_H_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_H_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDIF_ADDR_H_DWLEN (1) +union mailbox_dmail_rdif_addr_h_u { + struct mailbox_dmail_rdif_addr_h { + u32 mbx_dmail_rdif_addr_h:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDIF_ADDR_H_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDIF_BNUM_ADDR (0xfb00f8) +#define NBL_MAILBOX_DMAIL_RDIF_BNUM_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDIF_BNUM_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDIF_BNUM_DWLEN (1) +union mailbox_dmail_rdif_bnum_u { + struct mailbox_dmail_rdif_bnum { + u32 mbx_dmail_rdif_bnum:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDIF_BNUM_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDIF_INFO_ADDR (0xfb00fc) +#define NBL_MAILBOX_DMAIL_RDIF_INFO_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDIF_INFO_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDIF_INFO_DWLEN (1) +union mailbox_dmail_rdif_info_u { + struct mailbox_dmail_rdif_info { + u32 mbx_dmail_rdif_info:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDIF_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDIF_BDF_ADDR (0xfb0100) +#define NBL_MAILBOX_DMAIL_RDIF_BDF_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDIF_BDF_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDIF_BDF_DWLEN (1) +union mailbox_dmail_rdif_bdf_u { + struct mailbox_dmail_rdif_bdf { + u32 mbx_dmail_rdif_bdf:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDIF_BDF_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW0_ADDR (0xfb0104) +#define NBL_MAILBOX_DMAIL_RDESC_DW0_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW0_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW0_DWLEN (1) +union mailbox_dmail_rdesc_dw0_u { + struct mailbox_dmail_rdesc_dw0 { + u32 mbx_dmail_rdesc_dw0:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW0_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW1_ADDR (0xfb0108) +#define NBL_MAILBOX_DMAIL_RDESC_DW1_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW1_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW1_DWLEN (1) +union mailbox_dmail_rdesc_dw1_u { + struct mailbox_dmail_rdesc_dw1 { + u32 mbx_dmail_rdesc_dw1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW1_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW2_ADDR (0xfb010c) +#define NBL_MAILBOX_DMAIL_RDESC_DW2_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW2_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW2_DWLEN (1) +union mailbox_dmail_rdesc_dw2_u { + struct mailbox_dmail_rdesc_dw2 { + u32 mbx_dmail_rdesc_dw2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW2_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW3_ADDR (0xfb0110) +#define NBL_MAILBOX_DMAIL_RDESC_DW3_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW3_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW3_DWLEN (1) +union mailbox_dmail_rdesc_dw3_u { + struct mailbox_dmail_rdesc_dw3 { + u32 mbx_dmail_rdesc_dw3:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_MAILBOX_DMAIL_RDESC_DW3_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW4_ADDR (0xfb0114) +#define NBL_MAILBOX_DMAIL_RDESC_DW4_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW4_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW4_DWLEN (1) +union mailbox_dmail_rdesc_dw4_u { + struct mailbox_dmail_rdesc_dw4 { + u32 mbx_dmail_rdesc_dw4:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW4_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW5_ADDR (0xfb0118) +#define NBL_MAILBOX_DMAIL_RDESC_DW5_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW5_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW5_DWLEN (1) +union mailbox_dmail_rdesc_dw5_u { + struct mailbox_dmail_rdesc_dw5 { + u32 mbx_dmail_rdesc_dw5:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW5_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW6_ADDR (0xfb011c) +#define NBL_MAILBOX_DMAIL_RDESC_DW6_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW6_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW6_DWLEN (1) +union mailbox_dmail_rdesc_dw6_u { + struct mailbox_dmail_rdesc_dw6 { + u32 mbx_dmail_rdesc_dw6:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW6_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW7_ADDR (0xfb0120) +#define NBL_MAILBOX_DMAIL_RDESC_DW7_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW7_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW7_DWLEN (1) +union mailbox_dmail_rdesc_dw7_u { + struct mailbox_dmail_rdesc_dw7 { + u32 mbx_dmail_rdesc_dw7:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW7_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_RDESC_DW8_ADDR (0xfb0124) +#define NBL_MAILBOX_DMAIL_RDESC_DW8_DEPTH (1) +#define NBL_MAILBOX_DMAIL_RDESC_DW8_WIDTH (32) +#define NBL_MAILBOX_DMAIL_RDESC_DW8_DWLEN (1) +union mailbox_dmail_rdesc_dw8_u { + struct mailbox_dmail_rdesc_dw8 { + u32 mbx_dmail_rdesc_dw8:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_RDESC_DW8_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_L_ADDR (0xfb0128) +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_L_DEPTH (1) +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_L_WIDTH (32) +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_L_DWLEN (1) +union mailbox_dmail_wdif_addr_l_u { + struct mailbox_dmail_wdif_addr_l { + u32 mbx_dmail_wdif_addr_l:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_WDIF_ADDR_L_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_H_ADDR (0xfb012c) +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_H_DEPTH (1) +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_H_WIDTH (32) +#define NBL_MAILBOX_DMAIL_WDIF_ADDR_H_DWLEN (1) +union mailbox_dmail_wdif_addr_h_u { + struct mailbox_dmail_wdif_addr_h { + u32 mbx_dmail_wdif_addr_h:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_WDIF_ADDR_H_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_WDIF_BNUM_ADDR (0xfb0130) +#define NBL_MAILBOX_DMAIL_WDIF_BNUM_DEPTH (1) +#define NBL_MAILBOX_DMAIL_WDIF_BNUM_WIDTH (32) +#define NBL_MAILBOX_DMAIL_WDIF_BNUM_DWLEN (1) +union mailbox_dmail_wdif_bnum_u { + struct mailbox_dmail_wdif_bnum { + u32 mbx_dmail_wdif_bnum:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_WDIF_BNUM_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_WDIF_INFO_ADDR (0xfb0134) +#define NBL_MAILBOX_DMAIL_WDIF_INFO_DEPTH (1) +#define NBL_MAILBOX_DMAIL_WDIF_INFO_WIDTH (32) +#define NBL_MAILBOX_DMAIL_WDIF_INFO_DWLEN 
(1) +union mailbox_dmail_wdif_info_u { + struct mailbox_dmail_wdif_info { + u32 mbx_dmail_wdif_info:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_WDIF_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_WDIF_BDF_ADDR (0xfb0138) +#define NBL_MAILBOX_DMAIL_WDIF_BDF_DEPTH (1) +#define NBL_MAILBOX_DMAIL_WDIF_BDF_WIDTH (32) +#define NBL_MAILBOX_DMAIL_WDIF_BDF_DWLEN (1) +union mailbox_dmail_wdif_bdf_u { + struct mailbox_dmail_wdif_bdf { + u32 mbx_dmail_wdif_bdf:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_WDIF_BDF_DWLEN]; +} __packed; + +#define NBL_MAILBOX_DMAIL_WDESC_DW0_ADDR (0xfb013c) +#define NBL_MAILBOX_DMAIL_WDESC_DW0_DEPTH (1) +#define NBL_MAILBOX_DMAIL_WDESC_DW0_WIDTH (32) +#define NBL_MAILBOX_DMAIL_WDESC_DW0_DWLEN (1) +union mailbox_dmail_wdesc_dw0_u { + struct mailbox_dmail_wdesc_dw0 { + u32 mbx_dmail_wdesc_dw0:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_DMAIL_WDESC_DW0_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DROP_ADDR (0xfb0140) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DROP_DWLEN (1) +union mailbox_umail_rdif_addr_l_drop_u { + struct mailbox_umail_rdif_addr_l_drop { + u32 mbx_umail_rdif_addr_l_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_ADDR_L_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DROP_ADDR (0xfb0144) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DROP_DWLEN (1) +union mailbox_umail_rdif_addr_h_drop_u { + struct mailbox_umail_rdif_addr_h_drop { + u32 mbx_umail_rdif_addr_h_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_ADDR_H_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_DROP_ADDR (0xfb0148) +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_BNUM_DROP_DWLEN (1) +union mailbox_umail_rdif_bnum_drop_u { + struct mailbox_umail_rdif_bnum_drop { + u32 mbx_umail_rdif_bnum_drop:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_BNUM_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_INFO_DROP_ADDR (0xfb014c) +#define NBL_MAILBOX_UMAIL_RDIF_INFO_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_INFO_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_INFO_DROP_DWLEN (1) +union mailbox_umail_rdif_info_drop_u { + struct mailbox_umail_rdif_info_drop { + u32 mbx_umail_rdif_info_drop:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_INFO_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDIF_BDF_DROP_ADDR (0xfb0150) +#define NBL_MAILBOX_UMAIL_RDIF_BDF_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDIF_BDF_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDIF_BDF_DROP_DWLEN (1) +union mailbox_umail_rdif_bdf_drop_u { + struct mailbox_umail_rdif_bdf_drop { + u32 mbx_umail_rdif_bdf_drop:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDIF_BDF_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW0_DROP_ADDR (0xfb0154) +#define 
NBL_MAILBOX_UMAIL_RDESC_DW0_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW0_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW0_DROP_DWLEN (1) +union mailbox_umail_rdesc_dw0_drop_u { + struct mailbox_umail_rdesc_dw0_drop { + u32 mbx_umail_rdesc_dw0_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW0_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW1_DROP_ADDR (0xfb0158) +#define NBL_MAILBOX_UMAIL_RDESC_DW1_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW1_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW1_DROP_DWLEN (1) +union mailbox_umail_rdesc_dw1_drop_u { + struct mailbox_umail_rdesc_dw1_drop { + u32 mbx_umail_rdesc_dw1_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW1_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW2_DROP_ADDR (0xfb015c) +#define NBL_MAILBOX_UMAIL_RDESC_DW2_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW2_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW2_DROP_DWLEN (1) +union mailbox_umail_rdesc_dw2_drop_u { + struct mailbox_umail_rdesc_dw2_drop { + u32 mbx_umail_rdesc_dw2_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW2_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_RDESC_DW3_DROP_ADDR (0xfb0160) +#define NBL_MAILBOX_UMAIL_RDESC_DW3_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_RDESC_DW3_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_RDESC_DW3_DROP_DWLEN (1) +union mailbox_umail_rdesc_dw3_drop_u { + struct mailbox_umail_rdesc_dw3_drop { + u32 mbx_umail_rdesc_dw3_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_RDESC_DW3_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DROP_ADDR (0xfb0164) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DROP_DWLEN (1) +union mailbox_umail_wdif_addr_l_drop_u { + struct mailbox_umail_wdif_addr_l_drop { + u32 mbx_umail_wdif_addr_l_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_ADDR_L_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DROP_ADDR (0xfb0168) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DROP_DWLEN (1) +union mailbox_umail_wdif_addr_h_drop_u { + struct mailbox_umail_wdif_addr_h_drop { + u32 mbx_umail_wdif_addr_h_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_ADDR_H_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_DROP_ADDR (0xfb016c) +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_BNUM_DROP_DWLEN (1) +union mailbox_umail_wdif_bnum_drop_u { + struct mailbox_umail_wdif_bnum_drop { + u32 mbx_umail_wdif_bnum_drop:10; /* [9:0] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_BNUM_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_INFO_DROP_ADDR (0xfb0170) +#define NBL_MAILBOX_UMAIL_WDIF_INFO_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_INFO_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_INFO_DROP_DWLEN (1) +union mailbox_umail_wdif_info_drop_u { + struct mailbox_umail_wdif_info_drop { + u32 mbx_umail_wdif_info_drop:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_MAILBOX_UMAIL_WDIF_INFO_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDIF_BDF_DROP_ADDR (0xfb0174) +#define NBL_MAILBOX_UMAIL_WDIF_BDF_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDIF_BDF_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDIF_BDF_DROP_DWLEN (1) +union mailbox_umail_wdif_bdf_drop_u { + struct mailbox_umail_wdif_bdf_drop { + u32 mbx_umail_wdif_bdf_drop:16; /* [15:0] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDIF_BDF_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW0_DROP_ADDR (0xfb0178) +#define NBL_MAILBOX_UMAIL_WDESC_DW0_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW0_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW0_DROP_DWLEN (1) +union mailbox_umail_wdesc_dw0_drop_u { + struct mailbox_umail_wdesc_dw0_drop { + u32 mbx_umail_wdesc_dw0_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW0_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW1_DROP_ADDR (0xfb017c) +#define NBL_MAILBOX_UMAIL_WDESC_DW1_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW1_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW1_DROP_DWLEN (1) +union mailbox_umail_wdesc_dw1_drop_u { + struct mailbox_umail_wdesc_dw1_drop { + u32 mbx_umail_wdesc_dw1_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW1_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW2_DROP_ADDR (0xfb0180) +#define NBL_MAILBOX_UMAIL_WDESC_DW2_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW2_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW2_DROP_DWLEN (1) +union mailbox_umail_wdesc_dw2_drop_u { + struct mailbox_umail_wdesc_dw2_drop { + u32 mbx_umail_wdesc_dw2_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW2_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_UMAIL_WDESC_DW3_DROP_ADDR (0xfb0184) +#define NBL_MAILBOX_UMAIL_WDESC_DW3_DROP_DEPTH (1) +#define NBL_MAILBOX_UMAIL_WDESC_DW3_DROP_WIDTH (32) +#define NBL_MAILBOX_UMAIL_WDESC_DW3_DROP_DWLEN (1) +union mailbox_umail_wdesc_dw3_drop_u { + struct mailbox_umail_wdesc_dw3_drop { + u32 mbx_umail_wdesc_dw3_drop:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_UMAIL_WDESC_DW3_DROP_DWLEN]; +} __packed; + +#define NBL_MAILBOX_NOTIFY_INFO_ADDR (0xfb0188) +#define NBL_MAILBOX_NOTIFY_INFO_DEPTH (1) +#define NBL_MAILBOX_NOTIFY_INFO_WIDTH (32) +#define NBL_MAILBOX_NOTIFY_INFO_DWLEN (1) +union mailbox_notify_info_u { + struct mailbox_notify_info { + u32 notify_data:16; /* [15:0] Default:0x0 RC */ + u32 notify_qid:1; /* [16:16] Default:0x0 RC */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_NOTIFY_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_RX_NOTIFY_RAM_ERR_INFO_ADDR (0xfb0198) +#define NBL_MAILBOX_RX_NOTIFY_RAM_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_RX_NOTIFY_RAM_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_RX_NOTIFY_RAM_ERR_INFO_DWLEN (1) +union mailbox_rx_notify_ram_err_info_u { + struct mailbox_rx_notify_ram_err_info { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_RX_NOTIFY_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_RX_HD_PTR_RAM_ERR_INFO_ADDR (0xfb019c) +#define NBL_MAILBOX_RX_HD_PTR_RAM_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_RX_HD_PTR_RAM_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_RX_HD_PTR_RAM_ERR_INFO_DWLEN (1) +union mailbox_rx_hd_ptr_ram_err_info_u { + struct mailbox_rx_hd_ptr_ram_err_info { + u32 info:32; /* [31:0] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_RX_HD_PTR_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_RX_CFG_RAM_ERR_INFO_ADDR (0xfb01a0) +#define NBL_MAILBOX_RX_CFG_RAM_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_RX_CFG_RAM_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_RX_CFG_RAM_ERR_INFO_DWLEN (1) +union mailbox_rx_cfg_ram_err_info_u { + struct mailbox_rx_cfg_ram_err_info { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_RX_CFG_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_TX_NOTIFY_RAM_ERR_INFO_ADDR (0xfb01a4) +#define NBL_MAILBOX_TX_NOTIFY_RAM_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_TX_NOTIFY_RAM_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_TX_NOTIFY_RAM_ERR_INFO_DWLEN (1) +union mailbox_tx_notify_ram_err_info_u { + struct mailbox_tx_notify_ram_err_info { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_TX_NOTIFY_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_TX_HD_PTR_RAM_ERR_INFO_ADDR (0xfb01a8) +#define NBL_MAILBOX_TX_HD_PTR_RAM_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_TX_HD_PTR_RAM_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_TX_HD_PTR_RAM_ERR_INFO_DWLEN (1) +union mailbox_tx_hd_ptr_ram_err_info_u { + struct mailbox_tx_hd_ptr_ram_err_info { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_TX_HD_PTR_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_TX_CFG_RAM_ERR_INFO_ADDR (0xfb01ac) +#define NBL_MAILBOX_TX_CFG_RAM_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_TX_CFG_RAM_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_TX_CFG_RAM_ERR_INFO_DWLEN (1) +union mailbox_tx_cfg_ram_err_info_u { + struct mailbox_tx_cfg_ram_err_info { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_TX_CFG_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_MAP_RAM_ERR_INFO_ADDR (0xfb01b0) +#define NBL_MAILBOX_MAP_RAM_ERR_INFO_DEPTH (1) +#define NBL_MAILBOX_MAP_RAM_ERR_INFO_WIDTH (32) +#define NBL_MAILBOX_MAP_RAM_ERR_INFO_DWLEN (1) +union mailbox_map_ram_err_info_u { + struct mailbox_map_ram_err_info { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_MAP_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_INIT_START_ADDR (0xfb0800) +#define NBL_MAILBOX_INIT_START_DEPTH (1) +#define NBL_MAILBOX_INIT_START_WIDTH (32) +#define NBL_MAILBOX_INIT_START_DWLEN (1) +union mailbox_init_start_u { + struct mailbox_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_INIT_START_DWLEN]; +} __packed; + +#define NBL_MAILBOX_EN_ADDR (0xfb0804) +#define NBL_MAILBOX_EN_DEPTH (1) +#define NBL_MAILBOX_EN_WIDTH (32) +#define NBL_MAILBOX_EN_DWLEN (1) +union mailbox_en_u { + struct mailbox_en { + u32 en:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_EN_DWLEN]; +} __packed; + +#define NBL_MAILBOX_MDL_INFO_ADDR (0xfb0f10) +#define NBL_MAILBOX_MDL_INFO_DEPTH (1) +#define NBL_MAILBOX_MDL_INFO_WIDTH (32) +#define NBL_MAILBOX_MDL_INFO_DWLEN (1) +union mailbox_mdl_info_u { + struct mailbox_mdl_info { + u32 version_id:16; /* [15:00] Default:0x0001 RO */ + u32 prj_id:16; /* [31:16] Default:0x0020 RO */ + } __packed info; + u32 data[NBL_MAILBOX_MDL_INFO_DWLEN]; +} __packed; + +#define NBL_MAILBOX_VERSION_ADDR (0xfb0f14) +#define NBL_MAILBOX_VERSION_DEPTH (1) +#define NBL_MAILBOX_VERSION_WIDTH (32) +#define NBL_MAILBOX_VERSION_DWLEN (1) +union mailbox_version_u { + struct 
mailbox_version { + u32 date:32; /* [31:00] Default:0x20220803 RO */ + } __packed info; + u32 data[NBL_MAILBOX_VERSION_DWLEN]; +} __packed; + +#define NBL_MAILBOX_QINFO_MAP_TABLE_ADDR (0xfb1000) +#define NBL_MAILBOX_QINFO_MAP_TABLE_DEPTH (520) +#define NBL_MAILBOX_QINFO_MAP_TABLE_WIDTH (32) +#define NBL_MAILBOX_QINFO_MAP_TABLE_DWLEN (1) +union mailbox_qinfo_map_table_u { + struct mailbox_qinfo_map_table { + u32 bdf:16; /* [15:0] Default:0x0 RW */ + u32 msix_idx:13; /* [28:16] Default:0x0 RW */ + u32 msix_idx_valid:1; /* [29] Default:0x0 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_QINFO_MAP_TABLE_DWLEN]; +} __packed; +#define NBL_MAILBOX_QINFO_MAP_TABLE_REG(r) (NBL_MAILBOX_QINFO_MAP_TABLE_ADDR + \ + (NBL_MAILBOX_QINFO_MAP_TABLE_DWLEN * 4) * (r)) + +#define NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR (0xfb5000) +#define NBL_MAILBOX_QINFO_CFG_TX_TABLE_DEPTH (520) +#define NBL_MAILBOX_QINFO_CFG_TX_TABLE_WIDTH (128) +#define NBL_MAILBOX_QINFO_CFG_TX_TABLE_DWLEN (4) +union mailbox_qinfo_cfg_tx_table_u { + struct mailbox_qinfo_cfg_tx_table { + u32 tx_queue_base_addr_l:32; /* [31:00] Default:0x0 RW */ + u32 tx_queue_base_addr_h:32; /* [63:32] Default:0x0 RW */ + u32 tx_queue_size_bwid:4; /* [67:64] Default:0x0 RW */ + u32 RESERVE0:28; /* [95:68] Default:0x0 RO */ + u32 tx_queue_rst:1; /* [96] Default:0x0 RW */ + u32 tx_queue_en:1; /* [97] Default:0x0 RW */ + u32 tx_rdif_err:1; /* [98] Default:0x0 RO */ + u32 tx_ptr_err:1; /* [99] Default:0x0 RO */ + u32 RESERVE1:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_QINFO_CFG_TX_TABLE_DWLEN]; +} __packed; +#define NBL_MAILBOX_QINFO_CFG_TX_TABLE_REG(r) (NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR + \ + (NBL_MAILBOX_QINFO_CFG_TX_TABLE_DWLEN * 4) * (r)) + +#define NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR (0xfb8000) +#define NBL_MAILBOX_QINFO_CFG_RX_TABLE_DEPTH (520) +#define NBL_MAILBOX_QINFO_CFG_RX_TABLE_WIDTH (128) +#define NBL_MAILBOX_QINFO_CFG_RX_TABLE_DWLEN (4) +union mailbox_qinfo_cfg_rx_table_u { + struct mailbox_qinfo_cfg_rx_table { + u32 rx_queue_base_addr_l:32; /* [31:00] Default:0x0 RW */ + u32 rx_queue_base_addr_h:32; /* [63:32] Default:0x0 RW */ + u32 rx_queue_size_bwid:4; /* [67:64] Default:0x0 RW */ + u32 RESERVE0:28; /* [95:68] Default:0x0 RO */ + u32 rx_queue_rst:1; /* [96] Default:0x0 RW */ + u32 rx_queue_en:1; /* [97] Default:0x0 RW */ + u32 rx_rdif_err:1; /* [98] Default:0x0 RO */ + u32 rx_ptr_err:1; /* [99] Default:0x0 RO */ + u32 RESERVE1:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MAILBOX_QINFO_CFG_RX_TABLE_DWLEN]; +} __packed; +#define NBL_MAILBOX_QINFO_CFG_RX_TABLE_REG(r) (NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR + \ + (NBL_MAILBOX_QINFO_CFG_RX_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_msgq_aged.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_msgq_aged.h new file mode 100644 index 0000000000000000000000000000000000000000..fc9fb67bceb483baa767f30ef6f23fdf81ac495c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_msgq_aged.h @@ -0,0 +1,316 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_MSGQ_AGED_H +#define NBL_MSGQ_AGED_H 1 + +#include <linux/types.h> + +#define NBL_MSGQ_AGED_BASE (0x00FA8000) + +#define NBL_MSGQ_AGED_INT_STATUS_ADDR (0xfa8000) +#define NBL_MSGQ_AGED_INT_STATUS_DEPTH (1) +#define NBL_MSGQ_AGED_INT_STATUS_WIDTH (32) +#define NBL_MSGQ_AGED_INT_STATUS_DWLEN (1) +union msgq_aged_int_status_u { + struct msgq_aged_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */ + u32 cif_err:1; /* [05:05] Default:0x0 RWC */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_INT_MASK_ADDR (0xfa8004) +#define NBL_MSGQ_AGED_INT_MASK_DEPTH (1) +#define NBL_MSGQ_AGED_INT_MASK_WIDTH (32) +#define NBL_MSGQ_AGED_INT_MASK_DWLEN (1) +union msgq_aged_int_mask_u { + struct msgq_aged_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RW */ + u32 cif_err:1; /* [05:05] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_INT_MASK_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_INT_SET_ADDR (0xfa8008) +#define NBL_MSGQ_AGED_INT_SET_DEPTH (1) +#define NBL_MSGQ_AGED_INT_SET_WIDTH (32) +#define NBL_MSGQ_AGED_INT_SET_DWLEN (1) +union msgq_aged_int_set_u { + struct msgq_aged_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */ + u32 data_cor_err:1; /* [04:04] Default:0x0 WO */ + u32 cif_err:1; /* [05:05] Default:0x0 WO */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_INT_SET_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_INIT_DONE_ADDR (0xfa800c) +#define NBL_MSGQ_AGED_INIT_DONE_DEPTH (1) +#define NBL_MSGQ_AGED_INIT_DONE_WIDTH (32) +#define NBL_MSGQ_AGED_INIT_DONE_DWLEN (1) +union msgq_aged_init_done_u { + struct msgq_aged_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_IDROP_ADDR (0xfa8058) +#define NBL_MSGQ_AGED_IDROP_DEPTH (1) +#define NBL_MSGQ_AGED_IDROP_WIDTH (32) +#define NBL_MSGQ_AGED_IDROP_DWLEN (1) +union msgq_aged_idrop_u { + struct msgq_aged_idrop { + u32 wen_drop_cnt:16; /* [15:00] Default:0x0 SCTR */ + u32 weoc_drop_cnt:16; /* [31:16] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_MSGQ_AGED_IDROP_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_CAR_CTRL_ADDR (0xfa805c) +#define NBL_MSGQ_AGED_CAR_CTRL_DEPTH (1) +#define NBL_MSGQ_AGED_CAR_CTRL_WIDTH (32) +#define NBL_MSGQ_AGED_CAR_CTRL_DWLEN (1) +union msgq_aged_car_ctrl_u { + struct msgq_aged_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_FLOW_EN_ADDR (0xfa8060) +#define 
NBL_MSGQ_AGED_FLOW_EN_DEPTH (1) +#define NBL_MSGQ_AGED_FLOW_EN_WIDTH (32) +#define NBL_MSGQ_AGED_FLOW_EN_DWLEN (1) +union msgq_aged_flow_en_u { + struct msgq_aged_flow_en { + u32 imsgq_aged_cnt_en:1; /* [00:00] Default:0x1 RW */ + u32 omsgq_aged_cnt_en:1; /* [01:01] Default:0x1 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_FLOW_EN_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_CIF_ERR_INFO_ADDR (0xfa8064) +#define NBL_MSGQ_AGED_CIF_ERR_INFO_DEPTH (1) +#define NBL_MSGQ_AGED_CIF_ERR_INFO_WIDTH (32) +#define NBL_MSGQ_AGED_CIF_ERR_INFO_DWLEN (1) +union msgq_aged_cif_err_info_u { + struct msgq_aged_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_MODE_ADDR (0xfa8100) +#define NBL_MSGQ_AGED_MODE_DEPTH (1) +#define NBL_MSGQ_AGED_MODE_WIDTH (32) +#define NBL_MSGQ_AGED_MODE_DWLEN (1) +union msgq_aged_mode_u { + struct msgq_aged_mode { + u32 msgq_aged_mode:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_MODE_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_EN_ADDR (0xfa8104) +#define NBL_MSGQ_AGED_EN_DEPTH (1) +#define NBL_MSGQ_AGED_EN_WIDTH (32) +#define NBL_MSGQ_AGED_EN_DWLEN (1) +union msgq_aged_en_u { + struct msgq_aged_en { + u32 msgq_aged_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_EN_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_RING_BASE_ADDR_L_ADDR (0xfa8108) +#define NBL_MSGQ_AGED_RING_BASE_ADDR_L_DEPTH (1) +#define NBL_MSGQ_AGED_RING_BASE_ADDR_L_WIDTH (32) +#define NBL_MSGQ_AGED_RING_BASE_ADDR_L_DWLEN (1) +union msgq_aged_ring_base_addr_l_u { + struct msgq_aged_ring_base_addr_l { + u32 base_addr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MSGQ_AGED_RING_BASE_ADDR_L_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_RING_BASE_ADDR_H_ADDR (0xfa810c) +#define NBL_MSGQ_AGED_RING_BASE_ADDR_H_DEPTH (1) +#define NBL_MSGQ_AGED_RING_BASE_ADDR_H_WIDTH (32) +#define NBL_MSGQ_AGED_RING_BASE_ADDR_H_DWLEN (1) +union msgq_aged_ring_base_addr_h_u { + struct msgq_aged_ring_base_addr_h { + u32 base_addr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MSGQ_AGED_RING_BASE_ADDR_H_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_RING_SIZE_MASK_ADDR (0xfa8110) +#define NBL_MSGQ_AGED_RING_SIZE_MASK_DEPTH (1) +#define NBL_MSGQ_AGED_RING_SIZE_MASK_WIDTH (32) +#define NBL_MSGQ_AGED_RING_SIZE_MASK_DWLEN (1) +union msgq_aged_ring_size_mask_u { + struct msgq_aged_ring_size_mask { + u32 size_mask:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_RING_SIZE_MASK_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_RING_TPNTR_ADDR (0xfa8114) +#define NBL_MSGQ_AGED_RING_TPNTR_DEPTH (1) +#define NBL_MSGQ_AGED_RING_TPNTR_WIDTH (32) +#define NBL_MSGQ_AGED_RING_TPNTR_DWLEN (1) +union msgq_aged_ring_tpntr_u { + struct msgq_aged_ring_tpntr { + u32 tpntr:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_RING_TPNTR_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_RING_HPNTR_ADDR (0xfa8118) +#define NBL_MSGQ_AGED_RING_HPNTR_DEPTH (1) +#define NBL_MSGQ_AGED_RING_HPNTR_WIDTH (32) +#define NBL_MSGQ_AGED_RING_HPNTR_DWLEN (1) +union msgq_aged_ring_hpntr_u { + struct 
msgq_aged_ring_hpntr { + u32 hpntr:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_RING_HPNTR_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_RING_HPNTR_RST_ADDR (0xfa811c) +#define NBL_MSGQ_AGED_RING_HPNTR_RST_DEPTH (1) +#define NBL_MSGQ_AGED_RING_HPNTR_RST_WIDTH (32) +#define NBL_MSGQ_AGED_RING_HPNTR_RST_DWLEN (1) +union msgq_aged_ring_hpntr_rst_u { + struct msgq_aged_ring_hpntr_rst { + u32 hpntr_rst:1; /* [00:00] Default:0x0 WO */ + u32 rdy:1; /* [01:01] Default:0x1 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_RING_HPNTR_RST_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_BURST_LEN_ADDR (0xfa8120) +#define NBL_MSGQ_AGED_BURST_LEN_DEPTH (1) +#define NBL_MSGQ_AGED_BURST_LEN_WIDTH (32) +#define NBL_MSGQ_AGED_BURST_LEN_DWLEN (1) +union msgq_aged_burst_len_u { + struct msgq_aged_burst_len { + u32 burst_len:6; /* [05:00] Default:0x1 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_BURST_LEN_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_TIMEOUT_VALUE_ADDR (0xfa8124) +#define NBL_MSGQ_AGED_TIMEOUT_VALUE_DEPTH (1) +#define NBL_MSGQ_AGED_TIMEOUT_VALUE_WIDTH (32) +#define NBL_MSGQ_AGED_TIMEOUT_VALUE_DWLEN (1) +union msgq_aged_timeout_value_u { + struct msgq_aged_timeout_value { + u32 timeout_value:32; /* [31:00] Default:0x190 RW */ + } __packed info; + u32 data[NBL_MSGQ_AGED_TIMEOUT_VALUE_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_DIF_MODE_ADDR (0xfa8128) +#define NBL_MSGQ_AGED_DIF_MODE_DEPTH (1) +#define NBL_MSGQ_AGED_DIF_MODE_WIDTH (32) +#define NBL_MSGQ_AGED_DIF_MODE_DWLEN (1) +union msgq_aged_dif_mode_u { + struct msgq_aged_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_DIF_INFO_ADDR (0xfa812c) +#define NBL_MSGQ_AGED_DIF_INFO_DEPTH (1) +#define NBL_MSGQ_AGED_DIF_INFO_WIDTH (32) +#define NBL_MSGQ_AGED_DIF_INFO_DWLEN (1) +union msgq_aged_dif_info_u { + struct msgq_aged_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_DIF_INFO_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_DIF_BDF_ADDR (0xfa8130) +#define NBL_MSGQ_AGED_DIF_BDF_DEPTH (1) +#define NBL_MSGQ_AGED_DIF_BDF_WIDTH (32) +#define NBL_MSGQ_AGED_DIF_BDF_DWLEN (1) +union msgq_aged_dif_bdf_u { + struct msgq_aged_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_DIF_INT_ADDR (0xfa8134) +#define NBL_MSGQ_AGED_DIF_INT_DEPTH (1) +#define NBL_MSGQ_AGED_DIF_INT_WIDTH (32) +#define NBL_MSGQ_AGED_DIF_INT_DWLEN (1) +union msgq_aged_dif_int_u { + struct msgq_aged_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_DIF_INT_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_MDL_INFO_ADDR (0xfa8138) +#define NBL_MSGQ_AGED_MDL_INFO_DEPTH (1) +#define NBL_MSGQ_AGED_MDL_INFO_WIDTH (32) +#define NBL_MSGQ_AGED_MDL_INFO_DWLEN (1) +union msgq_aged_mdl_info_u { + struct msgq_aged_mdl_info { + u32 version_id:16; /* [15:00] Default:0x0001 RO */ + u32 prj_id:16; /* [31:16] Default:0x0020 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_MDL_INFO_DWLEN]; +} __packed; + +#define NBL_MSGQ_AGED_VERSION_ADDR (0xfa813c) +#define 
NBL_MSGQ_AGED_VERSION_DEPTH (1) +#define NBL_MSGQ_AGED_VERSION_WIDTH (32) +#define NBL_MSGQ_AGED_VERSION_DWLEN (1) +union msgq_aged_version_u { + struct msgq_aged_version { + u32 date:32; /* [31:00] Default:0x20220615 RO */ + } __packed info; + u32 data[NBL_MSGQ_AGED_VERSION_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_msgq_notify.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_msgq_notify.h new file mode 100644 index 0000000000000000000000000000000000000000..46e8fd059265f858de408308cfe8dbeba3f640cb --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_msgq_notify.h @@ -0,0 +1,304 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_MSGQ_NOTIFY_H +#define NBL_MSGQ_NOTIFY_H 1 + +#include <linux/types.h> + +#define NBL_MSGQ_NOTIFY_BASE (0x00F94000) + +#define NBL_MSGQ_NOTIFY_INT_STATUS_ADDR (0xf94000) +#define NBL_MSGQ_NOTIFY_INT_STATUS_DEPTH (1) +#define NBL_MSGQ_NOTIFY_INT_STATUS_WIDTH (32) +#define NBL_MSGQ_NOTIFY_INT_STATUS_DWLEN (1) +union msgq_notify_int_status_u { + struct msgq_notify_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */ + u32 cif_err:1; /* [05:05] Default:0x0 RWC */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_INT_MASK_ADDR (0xf94004) +#define NBL_MSGQ_NOTIFY_INT_MASK_DEPTH (1) +#define NBL_MSGQ_NOTIFY_INT_MASK_WIDTH (32) +#define NBL_MSGQ_NOTIFY_INT_MASK_DWLEN (1) +union msgq_notify_int_mask_u { + struct msgq_notify_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RW */ + u32 cif_err:1; /* [05:05] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_INT_MASK_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_INT_SET_ADDR (0xf94008) +#define NBL_MSGQ_NOTIFY_INT_SET_DEPTH (1) +#define NBL_MSGQ_NOTIFY_INT_SET_WIDTH (32) +#define NBL_MSGQ_NOTIFY_INT_SET_DWLEN (1) +union msgq_notify_int_set_u { + struct msgq_notify_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */ + u32 data_cor_err:1; /* [04:04] Default:0x0 WO */ + u32 cif_err:1; /* [05:05] Default:0x0 WO */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_INT_SET_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_INIT_DONE_ADDR (0xf9400c) +#define NBL_MSGQ_NOTIFY_INIT_DONE_DEPTH (1) +#define NBL_MSGQ_NOTIFY_INIT_DONE_WIDTH (32) +#define NBL_MSGQ_NOTIFY_INIT_DONE_DWLEN (1) +union msgq_notify_init_done_u { + struct msgq_notify_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_WEN_DROP_ADDR (0xf94058) +#define NBL_MSGQ_NOTIFY_WEN_DROP_DEPTH (1) +#define NBL_MSGQ_NOTIFY_WEN_DROP_WIDTH (32) +#define 
NBL_MSGQ_NOTIFY_WEN_DROP_DWLEN (1) +union msgq_notify_wen_drop_u { + struct msgq_notify_wen_drop { + u32 for_en:16; /* [15:00] Default:0x0 SCTR */ + u32 for_afull:16; /* [31:16] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_WEN_DROP_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_CAR_CTRL_ADDR (0xf9405c) +#define NBL_MSGQ_NOTIFY_CAR_CTRL_DEPTH (1) +#define NBL_MSGQ_NOTIFY_CAR_CTRL_WIDTH (32) +#define NBL_MSGQ_NOTIFY_CAR_CTRL_DWLEN (1) +union msgq_notify_car_ctrl_u { + struct msgq_notify_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_FLOW_EN_ADDR (0xf94060) +#define NBL_MSGQ_NOTIFY_FLOW_EN_DEPTH (1) +#define NBL_MSGQ_NOTIFY_FLOW_EN_WIDTH (32) +#define NBL_MSGQ_NOTIFY_FLOW_EN_DWLEN (1) +union msgq_notify_flow_en_u { + struct msgq_notify_flow_en { + u32 imsgq_notify_cnt_en:1; /* [00:00] Default:0x1 RW */ + u32 omsgq_notify_cnt_en:1; /* [01:01] Default:0x1 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_FLOW_EN_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_CIF_ERR_INFO_ADDR (0xf94064) +#define NBL_MSGQ_NOTIFY_CIF_ERR_INFO_DEPTH (1) +#define NBL_MSGQ_NOTIFY_CIF_ERR_INFO_WIDTH (32) +#define NBL_MSGQ_NOTIFY_CIF_ERR_INFO_DWLEN (1) +union msgq_notify_cif_err_info_u { + struct msgq_notify_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_EN_ADDR (0xf94100) +#define NBL_MSGQ_NOTIFY_EN_DEPTH (1) +#define NBL_MSGQ_NOTIFY_EN_WIDTH (32) +#define NBL_MSGQ_NOTIFY_EN_DWLEN (1) +union msgq_notify_en_u { + struct msgq_notify_en { + u32 msgq_notify_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_EN_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_L_ADDR (0xf94104) +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_L_DEPTH (1) +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_L_WIDTH (32) +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_L_DWLEN (1) +union msgq_notify_ring_base_addr_l_u { + struct msgq_notify_ring_base_addr_l { + u32 base_addr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_RING_BASE_ADDR_L_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_H_ADDR (0xf94108) +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_H_DEPTH (1) +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_H_WIDTH (32) +#define NBL_MSGQ_NOTIFY_RING_BASE_ADDR_H_DWLEN (1) +union msgq_notify_ring_base_addr_h_u { + struct msgq_notify_ring_base_addr_h { + u32 base_addr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_RING_BASE_ADDR_H_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_RING_SIZE_MASK_ADDR (0xf9410c) +#define NBL_MSGQ_NOTIFY_RING_SIZE_MASK_DEPTH (1) +#define NBL_MSGQ_NOTIFY_RING_SIZE_MASK_WIDTH (32) +#define NBL_MSGQ_NOTIFY_RING_SIZE_MASK_DWLEN (1) +union msgq_notify_ring_size_mask_u { + struct msgq_notify_ring_size_mask { + u32 size_mask:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_RING_SIZE_MASK_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_RING_TPNTR_ADDR (0xf94110) 
+#define NBL_MSGQ_NOTIFY_RING_TPNTR_DEPTH (1) +#define NBL_MSGQ_NOTIFY_RING_TPNTR_WIDTH (32) +#define NBL_MSGQ_NOTIFY_RING_TPNTR_DWLEN (1) +union msgq_notify_ring_tpntr_u { + struct msgq_notify_ring_tpntr { + u32 tpntr:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_RING_TPNTR_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_RING_HPNTR_ADDR (0xf94114) +#define NBL_MSGQ_NOTIFY_RING_HPNTR_DEPTH (1) +#define NBL_MSGQ_NOTIFY_RING_HPNTR_WIDTH (32) +#define NBL_MSGQ_NOTIFY_RING_HPNTR_DWLEN (1) +union msgq_notify_ring_hpntr_u { + struct msgq_notify_ring_hpntr { + u32 hpntr:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_RING_HPNTR_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_RING_HPNTR_RST_ADDR (0xf94118) +#define NBL_MSGQ_NOTIFY_RING_HPNTR_RST_DEPTH (1) +#define NBL_MSGQ_NOTIFY_RING_HPNTR_RST_WIDTH (32) +#define NBL_MSGQ_NOTIFY_RING_HPNTR_RST_DWLEN (1) +union msgq_notify_ring_hpntr_rst_u { + struct msgq_notify_ring_hpntr_rst { + u32 hpntr_rst:1; /* [00:00] Default:0x0 WO */ + u32 rdy:1; /* [01:01] Default:0x1 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_RING_HPNTR_RST_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_BURST_LEN_ADDR (0xf9411c) +#define NBL_MSGQ_NOTIFY_BURST_LEN_DEPTH (1) +#define NBL_MSGQ_NOTIFY_BURST_LEN_WIDTH (32) +#define NBL_MSGQ_NOTIFY_BURST_LEN_DWLEN (1) +union msgq_notify_burst_len_u { + struct msgq_notify_burst_len { + u32 burst_len:6; /* [05:00] Default:0x1 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_BURST_LEN_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_TIMEOUT_VALUE_ADDR (0xf94120) +#define NBL_MSGQ_NOTIFY_TIMEOUT_VALUE_DEPTH (1) +#define NBL_MSGQ_NOTIFY_TIMEOUT_VALUE_WIDTH (32) +#define NBL_MSGQ_NOTIFY_TIMEOUT_VALUE_DWLEN (1) +union msgq_notify_timeout_value_u { + struct msgq_notify_timeout_value { + u32 timeout_value:32; /* [31:00] Default:0x190 RW */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_TIMEOUT_VALUE_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_DIF_MODE_ADDR (0xf94124) +#define NBL_MSGQ_NOTIFY_DIF_MODE_DEPTH (1) +#define NBL_MSGQ_NOTIFY_DIF_MODE_WIDTH (32) +#define NBL_MSGQ_NOTIFY_DIF_MODE_DWLEN (1) +union msgq_notify_dif_mode_u { + struct msgq_notify_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_DIF_INFO_ADDR (0xf94128) +#define NBL_MSGQ_NOTIFY_DIF_INFO_DEPTH (1) +#define NBL_MSGQ_NOTIFY_DIF_INFO_WIDTH (32) +#define NBL_MSGQ_NOTIFY_DIF_INFO_DWLEN (1) +union msgq_notify_dif_info_u { + struct msgq_notify_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_DIF_INFO_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_DIF_BDF_ADDR (0xf9412c) +#define NBL_MSGQ_NOTIFY_DIF_BDF_DEPTH (1) +#define NBL_MSGQ_NOTIFY_DIF_BDF_WIDTH (32) +#define NBL_MSGQ_NOTIFY_DIF_BDF_DWLEN (1) +union msgq_notify_dif_bdf_u { + struct msgq_notify_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_DIF_INT_ADDR (0xf94130) +#define NBL_MSGQ_NOTIFY_DIF_INT_DEPTH (1) +#define NBL_MSGQ_NOTIFY_DIF_INT_WIDTH (32) +#define NBL_MSGQ_NOTIFY_DIF_INT_DWLEN 
(1) +union msgq_notify_dif_int_u { + struct msgq_notify_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_DIF_INT_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_MDL_INFO_ADDR (0xf94134) +#define NBL_MSGQ_NOTIFY_MDL_INFO_DEPTH (1) +#define NBL_MSGQ_NOTIFY_MDL_INFO_WIDTH (32) +#define NBL_MSGQ_NOTIFY_MDL_INFO_DWLEN (1) +union msgq_notify_mdl_info_u { + struct msgq_notify_mdl_info { + u32 version_id:16; /* [15:00] Default:0x0001 RO */ + u32 prj_id:16; /* [31:16] Default:0x0020 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_MDL_INFO_DWLEN]; +} __packed; + +#define NBL_MSGQ_NOTIFY_VERSION_ADDR (0xf94138) +#define NBL_MSGQ_NOTIFY_VERSION_DEPTH (1) +#define NBL_MSGQ_NOTIFY_VERSION_WIDTH (32) +#define NBL_MSGQ_NOTIFY_VERSION_DWLEN (1) +union msgq_notify_version_u { + struct msgq_notify_version { + u32 date:32; /* [31:00] Default:0x20220615 RO */ + } __packed info; + u32 data[NBL_MSGQ_NOTIFY_VERSION_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_native_ecpu.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_native_ecpu.h new file mode 100644 index 0000000000000000000000000000000000000000..d36d90d4715365afa124d60f5d3510b882ff3792 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_native_ecpu.h @@ -0,0 +1,326 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_NATIVE_ECPU_H +#define NBL_NATIVE_ECPU_H 1 + +#include <linux/types.h> + +#define NBL_NATIVE_ECPU_BASE (0x01004000) + +#define NBL_NATIVE_ECPU_INT_STATUS_ADDR (0x1004000) +#define NBL_NATIVE_ECPU_INT_STATUS_DEPTH (1) +#define NBL_NATIVE_ECPU_INT_STATUS_WIDTH (32) +#define NBL_NATIVE_ECPU_INT_STATUS_DWLEN (1) +union native_ecpu_int_status_u { + struct native_ecpu_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 rsv2:1; /* [06:06] Default:0x0 RO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_INT_MASK_ADDR (0x1004004) +#define NBL_NATIVE_ECPU_INT_MASK_DEPTH (1) +#define NBL_NATIVE_ECPU_INT_MASK_WIDTH (32) +#define NBL_NATIVE_ECPU_INT_MASK_DWLEN (1) +union native_ecpu_int_mask_u { + struct native_ecpu_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 rsv2:1; /* [06:06] Default:0x0 RO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RW */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_INT_MASK_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_INT_SET_ADDR (0x1004008) +#define NBL_NATIVE_ECPU_INT_SET_DEPTH (1) +#define NBL_NATIVE_ECPU_INT_SET_WIDTH (32) +#define NBL_NATIVE_ECPU_INT_SET_DWLEN (1) 
+union native_ecpu_int_set_u { + struct native_ecpu_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 rsv2:1; /* [06:06] Default:0x0 RO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 data_cor_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_INT_SET_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_INIT_DONE_ADDR (0x100400c) +#define NBL_NATIVE_ECPU_INIT_DONE_DEPTH (1) +#define NBL_NATIVE_ECPU_INIT_DONE_WIDTH (32) +#define NBL_NATIVE_ECPU_INIT_DONE_DWLEN (1) +union native_ecpu_init_done_u { + struct native_ecpu_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_CIF_ERR_INFO_ADDR (0x1004040) +#define NBL_NATIVE_ECPU_CIF_ERR_INFO_DEPTH (1) +#define NBL_NATIVE_ECPU_CIF_ERR_INFO_WIDTH (32) +#define NBL_NATIVE_ECPU_CIF_ERR_INFO_DWLEN (1) +union native_ecpu_cif_err_info_u { + struct native_ecpu_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_CAR_CTRL_ADDR (0x1004100) +#define NBL_NATIVE_ECPU_CAR_CTRL_DEPTH (1) +#define NBL_NATIVE_ECPU_CAR_CTRL_WIDTH (32) +#define NBL_NATIVE_ECPU_CAR_CTRL_DWLEN (1) +union native_ecpu_car_ctrl_u { + struct native_ecpu_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_ECRC_GEN_ADDR (0x1005104) +#define NBL_NATIVE_ECPU_ECRC_GEN_DEPTH (1) +#define NBL_NATIVE_ECPU_ECRC_GEN_WIDTH (32) +#define NBL_NATIVE_ECPU_ECRC_GEN_DWLEN (1) +union native_ecpu_ecrc_gen_u { + struct native_ecpu_ecrc_gen { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_ECRC_GEN_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_NATIVE_RX_MAXPLAYLOAD_ADDR (0x1005108) +#define NBL_NATIVE_ECPU_NATIVE_RX_MAXPLAYLOAD_DEPTH (1) +#define NBL_NATIVE_ECPU_NATIVE_RX_MAXPLAYLOAD_WIDTH (32) +#define NBL_NATIVE_ECPU_NATIVE_RX_MAXPLAYLOAD_DWLEN (1) +union native_ecpu_native_rx_maxplayload_u { + struct native_ecpu_native_rx_maxplayload { + u32 dbg:10; /* [09:00] Default:0x214 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_NATIVE_RX_MAXPLAYLOAD_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_ERR_GEN_ADDR (0x100510c) +#define NBL_NATIVE_ECPU_TX_ERR_GEN_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_ERR_GEN_WIDTH (32) +#define NBL_NATIVE_ECPU_TX_ERR_GEN_DWLEN (1) +union native_ecpu_tx_err_gen_u { + struct native_ecpu_tx_err_gen { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_ERR_GEN_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_ERROR_ADDR (0x1006000) +#define NBL_NATIVE_ECPU_ERROR_DEPTH (1) 
+#define NBL_NATIVE_ECPU_ERROR_WIDTH (32) +#define NBL_NATIVE_ECPU_ERROR_DWLEN (1) +union native_ecpu_error_u { + struct native_ecpu_error { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_ERROR_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_WARNING_ADDR (0x1006004) +#define NBL_NATIVE_ECPU_WARNING_DEPTH (1) +#define NBL_NATIVE_ECPU_WARNING_WIDTH (32) +#define NBL_NATIVE_ECPU_WARNING_DWLEN (1) +union native_ecpu_warning_u { + struct native_ecpu_warning { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_WARNING_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_RX_SPEED_ADDR (0x1006180) +#define NBL_NATIVE_ECPU_RX_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_RX_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_RX_SPEED_DWLEN (1) +union native_ecpu_rx_speed_u { + struct native_ecpu_rx_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_RX_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_RX_RC_SPEED_ADDR (0x1006184) +#define NBL_NATIVE_ECPU_RX_RC_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_RX_RC_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_RX_RC_SPEED_DWLEN (1) +union native_ecpu_rx_rc_speed_u { + struct native_ecpu_rx_rc_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_RX_RC_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_RX_CQ_SPEED_ADDR (0x1006188) +#define NBL_NATIVE_ECPU_RX_CQ_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_RX_CQ_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_RX_CQ_SPEED_DWLEN (1) +union native_ecpu_rx_cq_speed_u { + struct native_ecpu_rx_cq_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_RX_CQ_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_RX_VLD_SPEED_ADDR (0x100618c) +#define NBL_NATIVE_ECPU_RX_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_RX_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_RX_VLD_SPEED_DWLEN (1) +union native_ecpu_rx_vld_speed_u { + struct native_ecpu_rx_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_RX_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_RX_RC_VLD_SPEED_ADDR (0x1006190) +#define NBL_NATIVE_ECPU_RX_RC_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_RX_RC_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_RX_RC_VLD_SPEED_DWLEN (1) +union native_ecpu_rx_rc_vld_speed_u { + struct native_ecpu_rx_rc_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_RX_RC_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_RX_CQ_VLD_SPEED_ADDR (0x1006194) +#define NBL_NATIVE_ECPU_RX_CQ_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_RX_CQ_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_RX_CQ_VLD_SPEED_DWLEN (1) +union native_ecpu_rx_cq_vld_speed_u { + struct native_ecpu_rx_cq_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_RX_CQ_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_DEBUG_INFO_ADDR (0x1006218) +#define NBL_NATIVE_ECPU_DEBUG_INFO_DEPTH (1) +#define NBL_NATIVE_ECPU_DEBUG_INFO_WIDTH (32) +#define NBL_NATIVE_ECPU_DEBUG_INFO_DWLEN (1) +union native_ecpu_debug_info_u { + struct native_ecpu_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_SPEED_ADDR (0x1006280) +#define NBL_NATIVE_ECPU_TX_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_SPEED_WIDTH (32) +#define 
NBL_NATIVE_ECPU_TX_SPEED_DWLEN (1) +union native_ecpu_tx_speed_u { + struct native_ecpu_tx_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_RQ_SPEED_ADDR (0x1006284) +#define NBL_NATIVE_ECPU_TX_RQ_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_RQ_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_TX_RQ_SPEED_DWLEN (1) +union native_ecpu_tx_rq_speed_u { + struct native_ecpu_tx_rq_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_RQ_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_CC_SPEED_ADDR (0x1006288) +#define NBL_NATIVE_ECPU_TX_CC_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_CC_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_TX_CC_SPEED_DWLEN (1) +union native_ecpu_tx_cc_speed_u { + struct native_ecpu_tx_cc_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_CC_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_NORDY_SPEED_ADDR (0x100628c) +#define NBL_NATIVE_ECPU_TX_NORDY_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_NORDY_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_TX_NORDY_SPEED_DWLEN (1) +union native_ecpu_tx_nordy_speed_u { + struct native_ecpu_tx_nordy_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_NORDY_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_VLD_SPEED_ADDR (0x1006290) +#define NBL_NATIVE_ECPU_TX_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_TX_VLD_SPEED_DWLEN (1) +union native_ecpu_tx_vld_speed_u { + struct native_ecpu_tx_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_RQ_VLD_SPEED_ADDR (0x1006294) +#define NBL_NATIVE_ECPU_TX_RQ_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_RQ_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_TX_RQ_VLD_SPEED_DWLEN (1) +union native_ecpu_tx_rq_vld_speed_u { + struct native_ecpu_tx_rq_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_RQ_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_ECPU_TX_CC_VLD_SPEED_ADDR (0x1006298) +#define NBL_NATIVE_ECPU_TX_CC_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_ECPU_TX_CC_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_ECPU_TX_CC_VLD_SPEED_DWLEN (1) +union native_ecpu_tx_cc_vld_speed_u { + struct native_ecpu_tx_cc_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_ECPU_TX_CC_VLD_SPEED_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_native_host.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_native_host.h new file mode 100644 index 0000000000000000000000000000000000000000..8e1cd253c2a4b201bf713a371326776b72d9d781 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_native_host.h @@ -0,0 +1,326 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_NATIVE_HOST_H
+#define NBL_NATIVE_HOST_H 1
+
+#include <linux/types.h>
+
+#define NBL_NATIVE_HOST_BASE (0x00F04000)
+
+#define NBL_NATIVE_HOST_INT_STATUS_ADDR (0xf04000)
+#define NBL_NATIVE_HOST_INT_STATUS_DEPTH (1)
+#define NBL_NATIVE_HOST_INT_STATUS_WIDTH (32)
+#define NBL_NATIVE_HOST_INT_STATUS_DWLEN (1)
+union native_host_int_status_u {
+	struct native_host_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 rsv2:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_NATIVE_HOST_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_NATIVE_HOST_INT_MASK_ADDR (0xf04004)
+#define NBL_NATIVE_HOST_INT_MASK_DEPTH (1)
+#define NBL_NATIVE_HOST_INT_MASK_WIDTH (32)
+#define NBL_NATIVE_HOST_INT_MASK_DWLEN (1)
+union native_host_int_mask_u {
+	struct native_host_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 rsv2:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RW */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_NATIVE_HOST_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_NATIVE_HOST_INT_SET_ADDR (0xf04008)
+#define NBL_NATIVE_HOST_INT_SET_DEPTH (1)
+#define NBL_NATIVE_HOST_INT_SET_WIDTH (32)
+#define NBL_NATIVE_HOST_INT_SET_DWLEN (1)
+union native_host_int_set_u {
+	struct native_host_int_set {
+		u32 fatal_err:1; /* [00:00] Default:0x0 WO */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */
+		u32 rsv4:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 WO */
+		u32 rsv3:1; /* [05:05] Default:0x0 RO */
+		u32 rsv2:1; /* [06:06] Default:0x0 RO */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 WO */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_NATIVE_HOST_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_NATIVE_HOST_INIT_DONE_ADDR (0xf0400c)
+#define NBL_NATIVE_HOST_INIT_DONE_DEPTH (1)
+#define NBL_NATIVE_HOST_INIT_DONE_WIDTH (32)
+#define NBL_NATIVE_HOST_INIT_DONE_DWLEN (1)
+union native_host_init_done_u {
+	struct native_host_init_done {
+		u32 done:1; /* [00:00] Default:0x0 RO */
+		u32 rsv:31; /* [31:01] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_NATIVE_HOST_INIT_DONE_DWLEN];
+} __packed;
+
+#define NBL_NATIVE_HOST_CIF_ERR_INFO_ADDR (0xf04040)
+#define NBL_NATIVE_HOST_CIF_ERR_INFO_DEPTH (1)
+#define NBL_NATIVE_HOST_CIF_ERR_INFO_WIDTH (32)
+#define NBL_NATIVE_HOST_CIF_ERR_INFO_DWLEN (1)
+union native_host_cif_err_info_u {
+	struct native_host_cif_err_info {
+		u32 addr:30; /* [29:00] Default:0x0 RO */
+		u32 wr_err:1; /* [30:30] Default:0x0 RO */
+		u32 ucor_err:1; /* [31:31] Default:0x0 RO */
+	} __packed info;
+	u32
data[NBL_NATIVE_HOST_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_CAR_CTRL_ADDR (0xf04100) +#define NBL_NATIVE_HOST_CAR_CTRL_DEPTH (1) +#define NBL_NATIVE_HOST_CAR_CTRL_WIDTH (32) +#define NBL_NATIVE_HOST_CAR_CTRL_DWLEN (1) +union native_host_car_ctrl_u { + struct native_host_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_ECRC_GEN_ADDR (0xf05104) +#define NBL_NATIVE_HOST_ECRC_GEN_DEPTH (1) +#define NBL_NATIVE_HOST_ECRC_GEN_WIDTH (32) +#define NBL_NATIVE_HOST_ECRC_GEN_DWLEN (1) +union native_host_ecrc_gen_u { + struct native_host_ecrc_gen { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_ECRC_GEN_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_NATIVE_RX_MAXPLAYLOAD_ADDR (0xf05108) +#define NBL_NATIVE_HOST_NATIVE_RX_MAXPLAYLOAD_DEPTH (1) +#define NBL_NATIVE_HOST_NATIVE_RX_MAXPLAYLOAD_WIDTH (32) +#define NBL_NATIVE_HOST_NATIVE_RX_MAXPLAYLOAD_DWLEN (1) +union native_host_native_rx_maxplayload_u { + struct native_host_native_rx_maxplayload { + u32 dbg:10; /* [09:00] Default:0x214 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_NATIVE_RX_MAXPLAYLOAD_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_TX_ERR_GEN_ADDR (0xf0510c) +#define NBL_NATIVE_HOST_TX_ERR_GEN_DEPTH (1) +#define NBL_NATIVE_HOST_TX_ERR_GEN_WIDTH (32) +#define NBL_NATIVE_HOST_TX_ERR_GEN_DWLEN (1) +union native_host_tx_err_gen_u { + struct native_host_tx_err_gen { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_TX_ERR_GEN_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_ERROR_ADDR (0xf06000) +#define NBL_NATIVE_HOST_ERROR_DEPTH (1) +#define NBL_NATIVE_HOST_ERROR_WIDTH (32) +#define NBL_NATIVE_HOST_ERROR_DWLEN (1) +union native_host_error_u { + struct native_host_error { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_ERROR_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_WARNING_ADDR (0xf06004) +#define NBL_NATIVE_HOST_WARNING_DEPTH (1) +#define NBL_NATIVE_HOST_WARNING_WIDTH (32) +#define NBL_NATIVE_HOST_WARNING_DWLEN (1) +union native_host_warning_u { + struct native_host_warning { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_WARNING_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_RX_SPEED_ADDR (0xf06180) +#define NBL_NATIVE_HOST_RX_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_RX_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_RX_SPEED_DWLEN (1) +union native_host_rx_speed_u { + struct native_host_rx_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_RX_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_RX_RC_SPEED_ADDR (0xf06184) +#define NBL_NATIVE_HOST_RX_RC_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_RX_RC_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_RX_RC_SPEED_DWLEN (1) +union native_host_rx_rc_speed_u { + struct native_host_rx_rc_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_RX_RC_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_RX_CQ_SPEED_ADDR (0xf06188) +#define NBL_NATIVE_HOST_RX_CQ_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_RX_CQ_SPEED_WIDTH 
(32) +#define NBL_NATIVE_HOST_RX_CQ_SPEED_DWLEN (1) +union native_host_rx_cq_speed_u { + struct native_host_rx_cq_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_RX_CQ_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_RX_VLD_SPEED_ADDR (0xf0618c) +#define NBL_NATIVE_HOST_RX_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_RX_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_RX_VLD_SPEED_DWLEN (1) +union native_host_rx_vld_speed_u { + struct native_host_rx_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_RX_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_RX_RC_VLD_SPEED_ADDR (0xf06190) +#define NBL_NATIVE_HOST_RX_RC_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_RX_RC_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_RX_RC_VLD_SPEED_DWLEN (1) +union native_host_rx_rc_vld_speed_u { + struct native_host_rx_rc_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_RX_RC_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_RX_CQ_VLD_SPEED_ADDR (0xf06194) +#define NBL_NATIVE_HOST_RX_CQ_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_RX_CQ_VLD_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_RX_CQ_VLD_SPEED_DWLEN (1) +union native_host_rx_cq_vld_speed_u { + struct native_host_rx_cq_vld_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_RX_CQ_VLD_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_DEBUG_INFO_ADDR (0xf06218) +#define NBL_NATIVE_HOST_DEBUG_INFO_DEPTH (1) +#define NBL_NATIVE_HOST_DEBUG_INFO_WIDTH (32) +#define NBL_NATIVE_HOST_DEBUG_INFO_DWLEN (1) +union native_host_debug_info_u { + struct native_host_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_TX_SPEED_ADDR (0xf06280) +#define NBL_NATIVE_HOST_TX_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_TX_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_TX_SPEED_DWLEN (1) +union native_host_tx_speed_u { + struct native_host_tx_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_TX_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_TX_RQ_SPEED_ADDR (0xf06284) +#define NBL_NATIVE_HOST_TX_RQ_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_TX_RQ_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_TX_RQ_SPEED_DWLEN (1) +union native_host_tx_rq_speed_u { + struct native_host_tx_rq_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_TX_RQ_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_TX_CC_SPEED_ADDR (0xf06288) +#define NBL_NATIVE_HOST_TX_CC_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_TX_CC_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_TX_CC_SPEED_DWLEN (1) +union native_host_tx_cc_speed_u { + struct native_host_tx_cc_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_TX_CC_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_TX_NORDY_SPEED_ADDR (0xf0628c) +#define NBL_NATIVE_HOST_TX_NORDY_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_TX_NORDY_SPEED_WIDTH (32) +#define NBL_NATIVE_HOST_TX_NORDY_SPEED_DWLEN (1) +union native_host_tx_nordy_speed_u { + struct native_host_tx_nordy_speed { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_NATIVE_HOST_TX_NORDY_SPEED_DWLEN]; +} __packed; + +#define NBL_NATIVE_HOST_TX_VLD_SPEED_ADDR (0xf06290) +#define NBL_NATIVE_HOST_TX_VLD_SPEED_DEPTH (1) +#define NBL_NATIVE_HOST_TX_VLD_SPEED_WIDTH 
(32)
+#define NBL_NATIVE_HOST_TX_VLD_SPEED_DWLEN (1)
+union native_host_tx_vld_speed_u {
+	struct native_host_tx_vld_speed {
+		u32 dbg:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_NATIVE_HOST_TX_VLD_SPEED_DWLEN];
+} __packed;
+
+#define NBL_NATIVE_HOST_TX_RQ_VLD_SPEED_ADDR (0xf06294)
+#define NBL_NATIVE_HOST_TX_RQ_VLD_SPEED_DEPTH (1)
+#define NBL_NATIVE_HOST_TX_RQ_VLD_SPEED_WIDTH (32)
+#define NBL_NATIVE_HOST_TX_RQ_VLD_SPEED_DWLEN (1)
+union native_host_tx_rq_vld_speed_u {
+	struct native_host_tx_rq_vld_speed {
+		u32 dbg:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_NATIVE_HOST_TX_RQ_VLD_SPEED_DWLEN];
+} __packed;
+
+#define NBL_NATIVE_HOST_TX_CC_VLD_SPEED_ADDR (0xf06298)
+#define NBL_NATIVE_HOST_TX_CC_VLD_SPEED_DEPTH (1)
+#define NBL_NATIVE_HOST_TX_CC_VLD_SPEED_WIDTH (32)
+#define NBL_NATIVE_HOST_TX_CC_VLD_SPEED_DWLEN (1)
+union native_host_tx_cc_vld_speed_u {
+	struct native_host_tx_cc_vld_speed {
+		u32 dbg:32; /* [31:0] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_NATIVE_HOST_TX_CC_VLD_SPEED_DWLEN];
+} __packed;
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcie_ecpu.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcie_ecpu.h
new file mode 100644
index 0000000000000000000000000000000000000000..8af4af65035d3a0bdcff127eb505c2a99bf9d0f3
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcie_ecpu.h
@@ -0,0 +1,1774 @@
+// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_PCIE_ECPU_H
+#define NBL_PCIE_ECPU_H 1
+
+#include <linux/types.h>
+
+#define NBL_PCIE_ECPU_BASE (0x01404000)
+
+#define NBL_PCIE_ECPU_INT_STATUS_ADDR (0x1404000)
+#define NBL_PCIE_ECPU_INT_STATUS_DEPTH (1)
+#define NBL_PCIE_ECPU_INT_STATUS_WIDTH (32)
+#define NBL_PCIE_ECPU_INT_STATUS_DWLEN (1)
+union pcie_ecpu_int_status_u {
+	struct pcie_ecpu_int_status {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RWC */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */
+		u32 rsv3:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RWC */
+		u32 rsv2:1; /* [05:05] Default:0x0 RO */
+		u32 cfg_err:1; /* [06:06] Default:0x0 RWC */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_PCIE_ECPU_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_PCIE_ECPU_INT_MASK_ADDR (0x1404004)
+#define NBL_PCIE_ECPU_INT_MASK_DEPTH (1)
+#define NBL_PCIE_ECPU_INT_MASK_WIDTH (32)
+#define NBL_PCIE_ECPU_INT_MASK_DWLEN (1)
+union pcie_ecpu_int_mask_u {
+	struct pcie_ecpu_int_mask {
+		u32 fatal_err:1; /* [00:00] Default:0x0 RW */
+		u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */
+		u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */
+		u32 rsv3:1; /* [03:03] Default:0x0 RO */
+		u32 cif_err:1; /* [04:04] Default:0x0 RW */
+		u32 rsv2:1; /* [05:05] Default:0x0 RO */
+		u32 cfg_err:1; /* [06:06] Default:0x0 RW */
+		u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */
+		u32 data_cor_err:1; /* [08:08] Default:0x0 RW */
+		u32 rsv1:1; /* [09:09] Default:0x0 RO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_PCIE_ECPU_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_PCIE_ECPU_INT_SET_ADDR (0x1404008)
+#define NBL_PCIE_ECPU_INT_SET_DEPTH (1)
+#define NBL_PCIE_ECPU_INT_SET_WIDTH (32)
+#define
NBL_PCIE_ECPU_INT_SET_DWLEN (1) +union pcie_ecpu_int_set_u { + struct pcie_ecpu_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 data_cor_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_INT_SET_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_INIT_DONE_ADDR (0x140400c) +#define NBL_PCIE_ECPU_INIT_DONE_DEPTH (1) +#define NBL_PCIE_ECPU_INIT_DONE_WIDTH (32) +#define NBL_PCIE_ECPU_INIT_DONE_DWLEN (1) +union pcie_ecpu_init_done_u { + struct pcie_ecpu_init_done { + u32 done:1; /* [00:00] Default:0x1 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_CIF_ERR_INFO_ADDR (0x1404040) +#define NBL_PCIE_ECPU_CIF_ERR_INFO_DEPTH (1) +#define NBL_PCIE_ECPU_CIF_ERR_INFO_WIDTH (32) +#define NBL_PCIE_ECPU_CIF_ERR_INFO_DWLEN (1) +union pcie_ecpu_cif_err_info_u { + struct pcie_ecpu_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_CAR_CTRL_ADDR (0x1404100) +#define NBL_PCIE_ECPU_CAR_CTRL_DEPTH (1) +#define NBL_PCIE_ECPU_CAR_CTRL_WIDTH (32) +#define NBL_PCIE_ECPU_CAR_CTRL_DWLEN (1) +union pcie_ecpu_car_ctrl_u { + struct pcie_ecpu_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_CLOCK_FREQ_ADDR (0x1404200) +#define NBL_PCIE_ECPU_TL_CLOCK_FREQ_DEPTH (1) +#define NBL_PCIE_ECPU_TL_CLOCK_FREQ_WIDTH (32) +#define NBL_PCIE_ECPU_TL_CLOCK_FREQ_DWLEN (1) +union pcie_ecpu_tl_clock_freq_u { + struct pcie_ecpu_tl_clock_freq { + u32 dbg:32; /* [31:00] Default:0x258 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_CLOCK_FREQ_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_OBS_SEL_EN_ADDR (0x1404304) +#define NBL_PCIE_ECPU_OBS_SEL_EN_DEPTH (1) +#define NBL_PCIE_ECPU_OBS_SEL_EN_WIDTH (32) +#define NBL_PCIE_ECPU_OBS_SEL_EN_DWLEN (1) +union pcie_ecpu_obs_sel_en_u { + struct pcie_ecpu_obs_sel_en { + u32 dbg:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_OBS_SEL_EN_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF_MASK_ADDR (0x1405004) +#define NBL_PCIE_ECPU_K_PF_MASK_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF_MASK_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF_MASK_DWLEN (1) +union pcie_ecpu_k_pf_mask_u { + struct pcie_ecpu_k_pf_mask { + u32 dbg:6; /* [05:00] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF_MASK_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF_VPD_ADDR (0x1405008) +#define NBL_PCIE_ECPU_K_PF_VPD_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF_VPD_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF_VPD_DWLEN (1) +union pcie_ecpu_k_pf_vpd_u { + struct pcie_ecpu_k_pf_vpd { + 
u32 dbg:6; /* [05:00] Default:0x3f RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF0_VENDOR_DEVICE_ADDR (0x140500c) +#define NBL_PCIE_ECPU_K_PF0_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF0_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF0_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf0_vendor_device_u { + struct pcie_ecpu_k_pf0_vendor_device { + u32 id:32; /* [31:00] Default:0x30001F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF0_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF0_REV_CLASS_CODE_ADDR (0x1405010) +#define NBL_PCIE_ECPU_K_PF0_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF0_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF0_REV_CLASS_CODE_DWLEN (1) +union pcie_ecpu_k_pf0_rev_class_code_u { + struct pcie_ecpu_k_pf0_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF0_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF0_SUB_VENDOR_DEVICE_ADDR (0x1405014) +#define NBL_PCIE_ECPU_K_PF0_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF0_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF0_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf0_sub_vendor_device_u { + struct pcie_ecpu_k_pf0_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF0_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF1_VENDOR_DEVICE_ADDR (0x1405018) +#define NBL_PCIE_ECPU_K_PF1_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF1_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF1_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf1_vendor_device_u { + struct pcie_ecpu_k_pf1_vendor_device { + u32 id:32; /* [31:00] Default:0x30011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF1_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF1_REV_CLASS_CODE_ADDR (0x140501c) +#define NBL_PCIE_ECPU_K_PF1_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF1_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF1_REV_CLASS_CODE_DWLEN (1) +union pcie_ecpu_k_pf1_rev_class_code_u { + struct pcie_ecpu_k_pf1_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF1_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF1_SUB_VENDOR_DEVICE_ADDR (0x1405020) +#define NBL_PCIE_ECPU_K_PF1_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF1_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF1_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf1_sub_vendor_device_u { + struct pcie_ecpu_k_pf1_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF1_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF2_VENDOR_DEVICE_ADDR (0x1405024) +#define NBL_PCIE_ECPU_K_PF2_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF2_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF2_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf2_vendor_device_u { + struct pcie_ecpu_k_pf2_vendor_device { + u32 id:32; /* [31:00] Default:0x30021F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF2_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF2_REV_CLASS_CODE_ADDR (0x1405028) +#define NBL_PCIE_ECPU_K_PF2_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF2_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF2_REV_CLASS_CODE_DWLEN (1) +union 
pcie_ecpu_k_pf2_rev_class_code_u { + struct pcie_ecpu_k_pf2_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF2_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF2_SUB_VENDOR_DEVICE_ADDR (0x140502c) +#define NBL_PCIE_ECPU_K_PF2_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF2_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF2_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf2_sub_vendor_device_u { + struct pcie_ecpu_k_pf2_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF2_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF3_VENDOR_DEVICE_ADDR (0x1405030) +#define NBL_PCIE_ECPU_K_PF3_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF3_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF3_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf3_vendor_device_u { + struct pcie_ecpu_k_pf3_vendor_device { + u32 id:32; /* [31:00] Default:0x30031F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF3_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF3_REV_CLASS_CODE_ADDR (0x1405034) +#define NBL_PCIE_ECPU_K_PF3_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF3_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF3_REV_CLASS_CODE_DWLEN (1) +union pcie_ecpu_k_pf3_rev_class_code_u { + struct pcie_ecpu_k_pf3_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF3_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF3_SUB_VENDOR_DEVICE_ADDR (0x1405038) +#define NBL_PCIE_ECPU_K_PF3_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF3_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF3_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf3_sub_vendor_device_u { + struct pcie_ecpu_k_pf3_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF3_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF4_VENDOR_DEVICE_ADDR (0x140503c) +#define NBL_PCIE_ECPU_K_PF4_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF4_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF4_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf4_vendor_device_u { + struct pcie_ecpu_k_pf4_vendor_device { + u32 id:32; /* [31:00] Default:0x30041F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF4_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF4_REV_CLASS_CODE_ADDR (0x1405040) +#define NBL_PCIE_ECPU_K_PF4_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF4_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF4_REV_CLASS_CODE_DWLEN (1) +union pcie_ecpu_k_pf4_rev_class_code_u { + struct pcie_ecpu_k_pf4_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF4_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF4_SUB_VENDOR_DEVICE_ADDR (0x1405044) +#define NBL_PCIE_ECPU_K_PF4_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF4_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF4_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf4_sub_vendor_device_u { + struct pcie_ecpu_k_pf4_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF4_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF5_VENDOR_DEVICE_ADDR (0x1405048) +#define NBL_PCIE_ECPU_K_PF5_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF5_VENDOR_DEVICE_WIDTH (32) +#define 
NBL_PCIE_ECPU_K_PF5_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf5_vendor_device_u { + struct pcie_ecpu_k_pf5_vendor_device { + u32 id:32; /* [31:00] Default:0x30051F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF5_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF5_REV_CLASS_CODE_ADDR (0x140504c) +#define NBL_PCIE_ECPU_K_PF5_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF5_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF5_REV_CLASS_CODE_DWLEN (1) +union pcie_ecpu_k_pf5_rev_class_code_u { + struct pcie_ecpu_k_pf5_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF5_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PF5_SUB_VENDOR_DEVICE_ADDR (0x1405050) +#define NBL_PCIE_ECPU_K_PF5_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PF5_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PF5_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_ecpu_k_pf5_sub_vendor_device_u { + struct pcie_ecpu_k_pf5_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PF5_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_MEM_DEEPSLEEP_ADDR (0x14050a0) +#define NBL_PCIE_ECPU_PMA0_MEM_DEEPSLEEP_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_MEM_DEEPSLEEP_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_MEM_DEEPSLEEP_DWLEN (1) +union pcie_ecpu_pma0_mem_deepsleep_u { + struct pcie_ecpu_pma0_mem_deepsleep { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_MEM_DEEPSLEEP_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ENABLE_ADDR (0x14050a4) +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ENABLE_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ENABLE_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ENABLE_DWLEN (1) +union pcie_ecpu_pma0_prgm_parity_enable_u { + struct pcie_ecpu_pma0_prgm_parity_enable { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ENABLE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA_PMLD_LOCK_REG_ADDR (0x14050c0) +#define NBL_PCIE_ECPU_PMA_PMLD_LOCK_REG_DEPTH (1) +#define NBL_PCIE_ECPU_PMA_PMLD_LOCK_REG_WIDTH (32) +#define NBL_PCIE_ECPU_PMA_PMLD_LOCK_REG_DWLEN (1) +union pcie_ecpu_pma_pmld_lock_reg_u { + struct pcie_ecpu_pma_pmld_lock_reg { + u32 dbg:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA_PMLD_LOCK_REG_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_REFCLK_ADDR (0x14050c8) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_REFCLK_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_REFCLK_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_REFCLK_DWLEN (1) +union pcie_ecpu_pma0_phy_ctrl_refclk_u { + struct pcie_ecpu_pma0_phy_ctrl_refclk { + u32 dbg:5; /* [04:00] Default:0x14 RW */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_PHY_CTRL_REFCLK_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDA_SEL_ADDR (0x14050cc) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDA_SEL_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDA_SEL_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDA_SEL_DWLEN (1) +union pcie_ecpu_pma0_phy_ctrl_vdda_sel_u { + struct pcie_ecpu_pma0_phy_ctrl_vdda_sel { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDA_SEL_DWLEN]; +} __packed; + +#define 
NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDHA_SEL_ADDR (0x14050d0) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDHA_SEL_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDHA_SEL_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDHA_SEL_DWLEN (1) +union pcie_ecpu_pma0_phy_ctrl_vddha_sel_u { + struct pcie_ecpu_pma0_phy_ctrl_vddha_sel { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_PHY_CTRL_VDDHA_SEL_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PM_EVENT_ADDR (0x1406100) +#define NBL_PCIE_ECPU_TL_PM_EVENT_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PM_EVENT_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PM_EVENT_DWLEN (1) +union pcie_ecpu_tl_pm_event_u { + struct pcie_ecpu_tl_pm_event { + u32 dbg:6; /* [05:00] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PM_EVENT_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_CLK_GATE_ADDR (0x1406104) +#define NBL_PCIE_ECPU_TL_CLK_GATE_DEPTH (1) +#define NBL_PCIE_ECPU_TL_CLK_GATE_WIDTH (32) +#define NBL_PCIE_ECPU_TL_CLK_GATE_DWLEN (1) +union pcie_ecpu_tl_clk_gate_u { + struct pcie_ecpu_tl_clk_gate { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_CLK_GATE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_LINK_UP_ADDR (0x1406108) +#define NBL_PCIE_ECPU_LINK_UP_DEPTH (1) +#define NBL_PCIE_ECPU_LINK_UP_WIDTH (32) +#define NBL_PCIE_ECPU_LINK_UP_DWLEN (1) +union pcie_ecpu_link_up_u { + struct pcie_ecpu_link_up { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_LINK_UP_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_MPERST_N_ADDR (0x140610c) +#define NBL_PCIE_ECPU_MPERST_N_DEPTH (1) +#define NBL_PCIE_ECPU_MPERST_N_WIDTH (32) +#define NBL_PCIE_ECPU_MPERST_N_DWLEN (1) +union pcie_ecpu_mperst_n_u { + struct pcie_ecpu_mperst_n { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_MPERST_N_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PM_L2_STATUS_ADDR (0x1406110) +#define NBL_PCIE_ECPU_TL_PM_L2_STATUS_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PM_L2_STATUS_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PM_L2_STATUS_DWLEN (1) +union pcie_ecpu_tl_pm_l2_status_u { + struct pcie_ecpu_tl_pm_l2_status { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PM_L2_STATUS_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PL_LTSSM_ADDR (0x1406114) +#define NBL_PCIE_ECPU_PL_LTSSM_DEPTH (1) +#define NBL_PCIE_ECPU_PL_LTSSM_WIDTH (32) +#define NBL_PCIE_ECPU_PL_LTSSM_DWLEN (1) +union pcie_ecpu_pl_ltssm_u { + struct pcie_ecpu_pl_ltssm { + u32 dbg:5; /* [04:00] Default:0x0 RO */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PL_LTSSM_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_LTSSM_HISTORY0_ADDR (0x1406118) +#define NBL_PCIE_ECPU_LTSSM_HISTORY0_DEPTH (1) +#define NBL_PCIE_ECPU_LTSSM_HISTORY0_WIDTH (32) +#define NBL_PCIE_ECPU_LTSSM_HISTORY0_DWLEN (1) +union pcie_ecpu_ltssm_history0_u { + struct pcie_ecpu_ltssm_history0 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_LTSSM_HISTORY0_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_LTSSM_HISTORY1_ADDR (0x140611c) +#define NBL_PCIE_ECPU_LTSSM_HISTORY1_DEPTH (1) +#define NBL_PCIE_ECPU_LTSSM_HISTORY1_WIDTH (32) +#define NBL_PCIE_ECPU_LTSSM_HISTORY1_DWLEN 
(1) +union pcie_ecpu_ltssm_history1_u { + struct pcie_ecpu_ltssm_history1 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_LTSSM_HISTORY1_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_LTSSM_HISTORY2_ADDR (0x1406120) +#define NBL_PCIE_ECPU_LTSSM_HISTORY2_DEPTH (1) +#define NBL_PCIE_ECPU_LTSSM_HISTORY2_WIDTH (32) +#define NBL_PCIE_ECPU_LTSSM_HISTORY2_DWLEN (1) +union pcie_ecpu_ltssm_history2_u { + struct pcie_ecpu_ltssm_history2 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_LTSSM_HISTORY2_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_LTSSM_HISTORY3_ADDR (0x1406124) +#define NBL_PCIE_ECPU_LTSSM_HISTORY3_DEPTH (1) +#define NBL_PCIE_ECPU_LTSSM_HISTORY3_WIDTH (32) +#define NBL_PCIE_ECPU_LTSSM_HISTORY3_DWLEN (1) +union pcie_ecpu_ltssm_history3_u { + struct pcie_ecpu_ltssm_history3 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_LTSSM_HISTORY3_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_CFG_REGS0_ADDR (0x1406130) +#define NBL_PCIE_ECPU_TL_CFG_REGS0_DEPTH (1) +#define NBL_PCIE_ECPU_TL_CFG_REGS0_WIDTH (32) +#define NBL_PCIE_ECPU_TL_CFG_REGS0_DWLEN (1) +union pcie_ecpu_tl_cfg_regs0_u { + struct pcie_ecpu_tl_cfg_regs0 { + u32 dbg:28; /* [27:00] Default:0x0 RO */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_CFG_REGS0_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_CFG_REGS1_ADDR (0x1406134) +#define NBL_PCIE_ECPU_TL_CFG_REGS1_DEPTH (1) +#define NBL_PCIE_ECPU_TL_CFG_REGS1_WIDTH (32) +#define NBL_PCIE_ECPU_TL_CFG_REGS1_DWLEN (1) +union pcie_ecpu_tl_cfg_regs1_u { + struct pcie_ecpu_tl_cfg_regs1 { + u32 dbg:12; /* [11:00] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_CFG_REGS1_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_CFG_REGS2_ADDR (0x1406138) +#define NBL_PCIE_ECPU_TL_CFG_REGS2_DEPTH (1) +#define NBL_PCIE_ECPU_TL_CFG_REGS2_WIDTH (32) +#define NBL_PCIE_ECPU_TL_CFG_REGS2_DWLEN (1) +union pcie_ecpu_tl_cfg_regs2_u { + struct pcie_ecpu_tl_cfg_regs2 { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_CFG_REGS2_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF0_VPD_ADDR (0x1406200) +#define NBL_PCIE_ECPU_TL_PF0_VPD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF0_VPD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF0_VPD_DWLEN (1) +union pcie_ecpu_tl_pf0_vpd_u { + struct pcie_ecpu_tl_pf0_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF0_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF0_VPD_WR_ADDR (0x1406204) +#define NBL_PCIE_ECPU_TL_PF0_VPD_WR_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF0_VPD_WR_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF0_VPD_WR_DWLEN (1) +union pcie_ecpu_tl_pf0_vpd_wr_u { + struct pcie_ecpu_tl_pf0_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF0_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF0_VPD_RD_ADDR (0x1406208) +#define NBL_PCIE_ECPU_TL_PF0_VPD_RD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF0_VPD_RD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF0_VPD_RD_DWLEN (1) +union pcie_ecpu_tl_pf0_vpd_rd_u { + struct pcie_ecpu_tl_pf0_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 
data[NBL_PCIE_ECPU_TL_PF0_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF0_VPD_R_ADDR (0x140620c) +#define NBL_PCIE_ECPU_TL_PF0_VPD_R_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF0_VPD_R_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF0_VPD_R_DWLEN (1) +union pcie_ecpu_tl_pf0_vpd_r_u { + struct pcie_ecpu_tl_pf0_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF0_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF1_VPD_ADDR (0x1406210) +#define NBL_PCIE_ECPU_TL_PF1_VPD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF1_VPD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF1_VPD_DWLEN (1) +union pcie_ecpu_tl_pf1_vpd_u { + struct pcie_ecpu_tl_pf1_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF1_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF1_VPD_WR_ADDR (0x1406214) +#define NBL_PCIE_ECPU_TL_PF1_VPD_WR_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF1_VPD_WR_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF1_VPD_WR_DWLEN (1) +union pcie_ecpu_tl_pf1_vpd_wr_u { + struct pcie_ecpu_tl_pf1_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF1_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF1_VPD_RD_ADDR (0x1406218) +#define NBL_PCIE_ECPU_TL_PF1_VPD_RD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF1_VPD_RD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF1_VPD_RD_DWLEN (1) +union pcie_ecpu_tl_pf1_vpd_rd_u { + struct pcie_ecpu_tl_pf1_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF1_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF1_VPD_R_ADDR (0x140621c) +#define NBL_PCIE_ECPU_TL_PF1_VPD_R_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF1_VPD_R_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF1_VPD_R_DWLEN (1) +union pcie_ecpu_tl_pf1_vpd_r_u { + struct pcie_ecpu_tl_pf1_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF1_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF2_VPD_ADDR (0x1406220) +#define NBL_PCIE_ECPU_TL_PF2_VPD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF2_VPD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF2_VPD_DWLEN (1) +union pcie_ecpu_tl_pf2_vpd_u { + struct pcie_ecpu_tl_pf2_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF2_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF2_VPD_WR_ADDR (0x1406224) +#define NBL_PCIE_ECPU_TL_PF2_VPD_WR_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF2_VPD_WR_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF2_VPD_WR_DWLEN (1) +union pcie_ecpu_tl_pf2_vpd_wr_u { + struct pcie_ecpu_tl_pf2_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF2_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF2_VPD_RD_ADDR (0x1406228) +#define NBL_PCIE_ECPU_TL_PF2_VPD_RD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF2_VPD_RD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF2_VPD_RD_DWLEN (1) +union pcie_ecpu_tl_pf2_vpd_rd_u { + struct pcie_ecpu_tl_pf2_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF2_VPD_RD_DWLEN]; +} __packed; + 
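[Editor's aside, not part of the generated file: every register in these headers follows one pattern, a union pairing the raw dword image (.data) with a decoded bit-field view (.info). A driver reads the register into .data[0] and then picks fields out of .info; writes fill .info and post .data[0] back. A minimal sketch of that pattern against the TL_PF0_VPD register defined above, assuming a hypothetical struct nbl_hw handle and nbl_hw_read32() MMIO helper; the driver's real accessors are defined elsewhere in this series.]

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical handle and MMIO read helper, stand-ins for the
 * driver's own register-access layer.
 */
struct nbl_hw;
u32 nbl_hw_read32(struct nbl_hw *hw, u32 addr);

static void nbl_dump_pf0_vpd(struct nbl_hw *hw)
{
	union pcie_ecpu_tl_pf0_vpd_u vpd;

	/* Load the raw 32-bit register image; the .info bit-field view
	 * aliases the same storage, so its fields decode the value.
	 */
	vpd.data[0] = nbl_hw_read32(hw, NBL_PCIE_ECPU_TL_PF0_VPD_ADDR);

	pr_info("pf0 vpd: addr=0x%x flag=%u enable=%u status=0x%x\n",
		vpd.info.addr, vpd.info.flag, vpd.info.enable,
		vpd.info.status);
}

[The multi-dword case works the same way: DWLEN gives the number of dwords in .data, so wider registers are read dword by dword into successive .data[] slots before decoding.]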
+#define NBL_PCIE_ECPU_TL_PF2_VPD_R_ADDR (0x140622c) +#define NBL_PCIE_ECPU_TL_PF2_VPD_R_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF2_VPD_R_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF2_VPD_R_DWLEN (1) +union pcie_ecpu_tl_pf2_vpd_r_u { + struct pcie_ecpu_tl_pf2_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF2_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF3_VPD_ADDR (0x1406230) +#define NBL_PCIE_ECPU_TL_PF3_VPD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF3_VPD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF3_VPD_DWLEN (1) +union pcie_ecpu_tl_pf3_vpd_u { + struct pcie_ecpu_tl_pf3_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF3_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF3_VPD_WR_ADDR (0x1406234) +#define NBL_PCIE_ECPU_TL_PF3_VPD_WR_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF3_VPD_WR_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF3_VPD_WR_DWLEN (1) +union pcie_ecpu_tl_pf3_vpd_wr_u { + struct pcie_ecpu_tl_pf3_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF3_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF3_VPD_RD_ADDR (0x1406238) +#define NBL_PCIE_ECPU_TL_PF3_VPD_RD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF3_VPD_RD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF3_VPD_RD_DWLEN (1) +union pcie_ecpu_tl_pf3_vpd_rd_u { + struct pcie_ecpu_tl_pf3_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF3_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF3_VPD_R_ADDR (0x140623c) +#define NBL_PCIE_ECPU_TL_PF3_VPD_R_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF3_VPD_R_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF3_VPD_R_DWLEN (1) +union pcie_ecpu_tl_pf3_vpd_r_u { + struct pcie_ecpu_tl_pf3_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF3_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF4_VPD_ADDR (0x1406240) +#define NBL_PCIE_ECPU_TL_PF4_VPD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF4_VPD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF4_VPD_DWLEN (1) +union pcie_ecpu_tl_pf4_vpd_u { + struct pcie_ecpu_tl_pf4_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF4_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF4_VPD_WR_ADDR (0x1406244) +#define NBL_PCIE_ECPU_TL_PF4_VPD_WR_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF4_VPD_WR_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF4_VPD_WR_DWLEN (1) +union pcie_ecpu_tl_pf4_vpd_wr_u { + struct pcie_ecpu_tl_pf4_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF4_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF4_VPD_RD_ADDR (0x1406248) +#define NBL_PCIE_ECPU_TL_PF4_VPD_RD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF4_VPD_RD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF4_VPD_RD_DWLEN (1) +union pcie_ecpu_tl_pf4_vpd_rd_u { + struct pcie_ecpu_tl_pf4_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF4_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF4_VPD_R_ADDR (0x140624c) 
+#define NBL_PCIE_ECPU_TL_PF4_VPD_R_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF4_VPD_R_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF4_VPD_R_DWLEN (1) +union pcie_ecpu_tl_pf4_vpd_r_u { + struct pcie_ecpu_tl_pf4_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF4_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF5_VPD_ADDR (0x1406250) +#define NBL_PCIE_ECPU_TL_PF5_VPD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF5_VPD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF5_VPD_DWLEN (1) +union pcie_ecpu_tl_pf5_vpd_u { + struct pcie_ecpu_tl_pf5_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF5_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF5_VPD_WR_ADDR (0x1406254) +#define NBL_PCIE_ECPU_TL_PF5_VPD_WR_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF5_VPD_WR_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF5_VPD_WR_DWLEN (1) +union pcie_ecpu_tl_pf5_vpd_wr_u { + struct pcie_ecpu_tl_pf5_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF5_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF5_VPD_RD_ADDR (0x1406258) +#define NBL_PCIE_ECPU_TL_PF5_VPD_RD_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF5_VPD_RD_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF5_VPD_RD_DWLEN (1) +union pcie_ecpu_tl_pf5_vpd_rd_u { + struct pcie_ecpu_tl_pf5_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF5_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_PF5_VPD_R_ADDR (0x140625c) +#define NBL_PCIE_ECPU_TL_PF5_VPD_R_DEPTH (1) +#define NBL_PCIE_ECPU_TL_PF5_VPD_R_WIDTH (32) +#define NBL_PCIE_ECPU_TL_PF5_VPD_R_DWLEN (1) +union pcie_ecpu_tl_pf5_vpd_r_u { + struct pcie_ecpu_tl_pf5_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_PF5_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_GEN_SPEED_ADDR (0x1407000) +#define NBL_PCIE_ECPU_K_GEN_SPEED_DEPTH (1) +#define NBL_PCIE_ECPU_K_GEN_SPEED_WIDTH (32) +#define NBL_PCIE_ECPU_K_GEN_SPEED_DWLEN (1) +union pcie_ecpu_k_gen_speed_u { + struct pcie_ecpu_k_gen_speed { + u32 dbg:4; /* [03:00] Default:0xF RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_GEN_SPEED_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_GEN_LANE_REVERSAL_ADDR (0x1407004) +#define NBL_PCIE_ECPU_K_GEN_LANE_REVERSAL_DEPTH (1) +#define NBL_PCIE_ECPU_K_GEN_LANE_REVERSAL_WIDTH (32) +#define NBL_PCIE_ECPU_K_GEN_LANE_REVERSAL_DWLEN (1) +union pcie_ecpu_k_gen_lane_reversal_u { + struct pcie_ecpu_k_gen_lane_reversal { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_GEN_LANE_REVERSAL_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_GEN_EQU_PHA23_ADDR (0x1407008) +#define NBL_PCIE_ECPU_K_GEN_EQU_PHA23_DEPTH (1) +#define NBL_PCIE_ECPU_K_GEN_EQU_PHA23_WIDTH (32) +#define NBL_PCIE_ECPU_K_GEN_EQU_PHA23_DWLEN (1) +union pcie_ecpu_k_gen_equ_pha23_u { + struct pcie_ecpu_k_gen_equ_pha23 { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_GEN_EQU_PHA23_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_GEN_LANE_DISABLE_ADDR (0x140700c) +#define 
NBL_PCIE_ECPU_K_GEN_LANE_DISABLE_DEPTH (1) +#define NBL_PCIE_ECPU_K_GEN_LANE_DISABLE_WIDTH (32) +#define NBL_PCIE_ECPU_K_GEN_LANE_DISABLE_DWLEN (1) +union pcie_ecpu_k_gen_lane_disable_u { + struct pcie_ecpu_k_gen_lane_disable { + u32 dbg:4; /* [03:00] Default:0x0 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_GEN_LANE_DISABLE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_GEN_USE_RXELECILDE_ADDR (0x1407010) +#define NBL_PCIE_ECPU_K_GEN_USE_RXELECILDE_DEPTH (1) +#define NBL_PCIE_ECPU_K_GEN_USE_RXELECILDE_WIDTH (32) +#define NBL_PCIE_ECPU_K_GEN_USE_RXELECILDE_DWLEN (1) +union pcie_ecpu_k_gen_use_rxelecilde_u { + struct pcie_ecpu_k_gen_use_rxelecilde { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_GEN_USE_RXELECILDE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_GEN_ERR_MODE_ADDR (0x1407014) +#define NBL_PCIE_ECPU_K_GEN_ERR_MODE_DEPTH (1) +#define NBL_PCIE_ECPU_K_GEN_ERR_MODE_WIDTH (32) +#define NBL_PCIE_ECPU_K_GEN_ERR_MODE_DWLEN (1) +union pcie_ecpu_k_gen_err_mode_u { + struct pcie_ecpu_k_gen_err_mode { + u32 dbg:2; /* [01:00] Default:0x1 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_GEN_ERR_MODE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_GEN_RX_VLD_FILTER_EN_ADDR (0x1407018) +#define NBL_PCIE_ECPU_K_GEN_RX_VLD_FILTER_EN_DEPTH (1) +#define NBL_PCIE_ECPU_K_GEN_RX_VLD_FILTER_EN_WIDTH (32) +#define NBL_PCIE_ECPU_K_GEN_RX_VLD_FILTER_EN_DWLEN (1) +union pcie_ecpu_k_gen_rx_vld_filter_en_u { + struct pcie_ecpu_k_gen_rx_vld_filter_en { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_GEN_RX_VLD_FILTER_EN_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_10B_TAG_REQUESTER_SUPPORT_ADDR (0x140701c) +#define NBL_PCIE_ECPU_K_10B_TAG_REQUESTER_SUPPORT_DEPTH (1) +#define NBL_PCIE_ECPU_K_10B_TAG_REQUESTER_SUPPORT_WIDTH (32) +#define NBL_PCIE_ECPU_K_10B_TAG_REQUESTER_SUPPORT_DWLEN (1) +union pcie_ecpu_k_10b_tag_requester_support_u { + struct pcie_ecpu_k_10b_tag_requester_support { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_10B_TAG_REQUESTER_SUPPORT_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_10B_TAG_COMPLETER_SUPPORT_ADDR (0x1407020) +#define NBL_PCIE_ECPU_K_10B_TAG_COMPLETER_SUPPORT_DEPTH (1) +#define NBL_PCIE_ECPU_K_10B_TAG_COMPLETER_SUPPORT_WIDTH (32) +#define NBL_PCIE_ECPU_K_10B_TAG_COMPLETER_SUPPORT_DWLEN (1) +union pcie_ecpu_k_10b_tag_completer_support_u { + struct pcie_ecpu_k_10b_tag_completer_support { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_10B_TAG_COMPLETER_SUPPORT_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PCI_PM_ADDR (0x1407028) +#define NBL_PCIE_ECPU_K_PCI_PM_DEPTH (1) +#define NBL_PCIE_ECPU_K_PCI_PM_WIDTH (32) +#define NBL_PCIE_ECPU_K_PCI_PM_DWLEN (1) +union pcie_ecpu_k_pci_pm_u { + struct pcie_ecpu_k_pci_pm { + u32 dbg:10; /* [09:00] Default:0x320 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PCI_PM_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PEX_EP_ACCEPT_LATENCY_ADDR (0x140702c) +#define NBL_PCIE_ECPU_K_PEX_EP_ACCEPT_LATENCY_DEPTH (1) +#define NBL_PCIE_ECPU_K_PEX_EP_ACCEPT_LATENCY_WIDTH (32) +#define NBL_PCIE_ECPU_K_PEX_EP_ACCEPT_LATENCY_DWLEN (1) +union pcie_ecpu_k_pex_ep_accept_latency_u { 
+ struct pcie_ecpu_k_pex_ep_accept_latency { + u32 dbg:6; /* [05:00] Default:0x33 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PEX_EP_ACCEPT_LATENCY_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PEX_COMPTO_ADDR (0x1407030) +#define NBL_PCIE_ECPU_K_PEX_COMPTO_DEPTH (1) +#define NBL_PCIE_ECPU_K_PEX_COMPTO_WIDTH (32) +#define NBL_PCIE_ECPU_K_PEX_COMPTO_DWLEN (1) +union pcie_ecpu_k_pex_compto_u { + struct pcie_ecpu_k_pex_compto { + u32 dbg:5; /* [04:00] Default:0x13 RW */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PEX_COMPTO_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PEX_LINK_CAP_ADDR (0x1407034) +#define NBL_PCIE_ECPU_K_PEX_LINK_CAP_DEPTH (1) +#define NBL_PCIE_ECPU_K_PEX_LINK_CAP_WIDTH (32) +#define NBL_PCIE_ECPU_K_PEX_LINK_CAP_DWLEN (1) +union pcie_ecpu_k_pex_link_cap_u { + struct pcie_ecpu_k_pex_link_cap { + u32 dbg:11; /* [10:00] Default:0x57 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PEX_LINK_CAP_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PEX_EC_VECTOR_SIZE_ADDR (0x1407038) +#define NBL_PCIE_ECPU_K_PEX_EC_VECTOR_SIZE_DEPTH (1) +#define NBL_PCIE_ECPU_K_PEX_EC_VECTOR_SIZE_WIDTH (32) +#define NBL_PCIE_ECPU_K_PEX_EC_VECTOR_SIZE_DWLEN (1) +union pcie_ecpu_k_pex_ec_vector_size_u { + struct pcie_ecpu_k_pex_ec_vector_size { + u32 dbg:7; /* [06:00] Default:0x3f RW */ + u32 rsv:25; /* [31:07] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PEX_EC_VECTOR_SIZE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PEX_ASPM_ENTRY_DELAY_ADDR (0x140703c) +#define NBL_PCIE_ECPU_K_PEX_ASPM_ENTRY_DELAY_DEPTH (1) +#define NBL_PCIE_ECPU_K_PEX_ASPM_ENTRY_DELAY_WIDTH (32) +#define NBL_PCIE_ECPU_K_PEX_ASPM_ENTRY_DELAY_DWLEN (1) +union pcie_ecpu_k_pex_aspm_entry_delay_u { + struct pcie_ecpu_k_pex_aspm_entry_delay { + u32 dbg:15; /* [14:00] Default:0x190c RW */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PEX_ASPM_ENTRY_DELAY_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PEX_FTS_NUM_ADDR (0x1407040) +#define NBL_PCIE_ECPU_K_PEX_FTS_NUM_DEPTH (1) +#define NBL_PCIE_ECPU_K_PEX_FTS_NUM_WIDTH (32) +#define NBL_PCIE_ECPU_K_PEX_FTS_NUM_DWLEN (1) +union pcie_ecpu_k_pex_fts_num_u { + struct pcie_ecpu_k_pex_fts_num { + u32 dbg:32; /* [31:0] Default:0x3c3cd2d2 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PEX_FTS_NUM_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_PEX_L1_PM_ADDR (0x1407044) +#define NBL_PCIE_ECPU_K_PEX_L1_PM_DEPTH (1) +#define NBL_PCIE_ECPU_K_PEX_L1_PM_WIDTH (32) +#define NBL_PCIE_ECPU_K_PEX_L1_PM_DWLEN (1) +union pcie_ecpu_k_pex_l1_pm_u { + struct pcie_ecpu_k_pex_l1_pm { + u32 dbg:28; /* [27:00] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_PEX_L1_PM_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER1_ADDR (0x1407048) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER1_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER1_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER1_DWLEN (1) +union pcie_ecpu_k_equpreset_user1_u { + struct pcie_ecpu_k_equpreset_user1 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER1_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER2_ADDR (0x140704c) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER2_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER2_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER2_DWLEN (1) +union pcie_ecpu_k_equpreset_user2_u { + 
struct pcie_ecpu_k_equpreset_user2 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER2_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER3_ADDR (0x1407050) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER3_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER3_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER3_DWLEN (1) +union pcie_ecpu_k_equpreset_user3_u { + struct pcie_ecpu_k_equpreset_user3 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER3_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER4_ADDR (0x1407054) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER4_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER4_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER4_DWLEN (1) +union pcie_ecpu_k_equpreset_user4_u { + struct pcie_ecpu_k_equpreset_user4 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER4_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER5_ADDR (0x1407058) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER5_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER5_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER5_DWLEN (1) +union pcie_ecpu_k_equpreset_user5_u { + struct pcie_ecpu_k_equpreset_user5 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER5_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER6_ADDR (0x140705c) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER6_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER6_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER6_DWLEN (1) +union pcie_ecpu_k_equpreset_user6_u { + struct pcie_ecpu_k_equpreset_user6 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER6_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER7_ADDR (0x1407060) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER7_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER7_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER7_DWLEN (1) +union pcie_ecpu_k_equpreset_user7_u { + struct pcie_ecpu_k_equpreset_user7 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER7_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET_USER8_ADDR (0x1407064) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER8_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER8_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET_USER8_DWLEN (1) +union pcie_ecpu_k_equpreset_user8_u { + struct pcie_ecpu_k_equpreset_user8 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET_USER8_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER1_ADDR (0x1407068) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER1_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER1_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER1_DWLEN (1) +union pcie_ecpu_k_equpreset16_user1_u { + struct pcie_ecpu_k_equpreset16_user1 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET16_USER1_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER2_ADDR (0x140706c) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER2_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER2_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER2_DWLEN (1) +union pcie_ecpu_k_equpreset16_user2_u { + struct pcie_ecpu_k_equpreset16_user2 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET16_USER2_DWLEN]; +} __packed; + +#define 
NBL_PCIE_ECPU_K_EQUPRESET16_USER3_ADDR (0x1407070) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER3_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER3_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER3_DWLEN (1) +union pcie_ecpu_k_equpreset16_user3_u { + struct pcie_ecpu_k_equpreset16_user3 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET16_USER3_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER4_ADDR (0x1407074) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER4_DEPTH (1) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER4_WIDTH (32) +#define NBL_PCIE_ECPU_K_EQUPRESET16_USER4_DWLEN (1) +union pcie_ecpu_k_equpreset16_user4_u { + struct pcie_ecpu_k_equpreset16_user4 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_EQUPRESET16_USER4_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER1_ADDR (0x1407078) +#define NBL_PCIE_ECPU_K_TIMER_USER1_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER1_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER1_DWLEN (1) +union pcie_ecpu_k_timer_user1_u { + struct pcie_ecpu_k_timer_user1 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER1_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER2_ADDR (0x140707c) +#define NBL_PCIE_ECPU_K_TIMER_USER2_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER2_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER2_DWLEN (1) +union pcie_ecpu_k_timer_user2_u { + struct pcie_ecpu_k_timer_user2 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER2_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER3_ADDR (0x1407080) +#define NBL_PCIE_ECPU_K_TIMER_USER3_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER3_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER3_DWLEN (1) +union pcie_ecpu_k_timer_user3_u { + struct pcie_ecpu_k_timer_user3 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER3_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER4_ADDR (0x1407084) +#define NBL_PCIE_ECPU_K_TIMER_USER4_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER4_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER4_DWLEN (1) +union pcie_ecpu_k_timer_user4_u { + struct pcie_ecpu_k_timer_user4 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER4_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER5_ADDR (0x1407088) +#define NBL_PCIE_ECPU_K_TIMER_USER5_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER5_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER5_DWLEN (1) +union pcie_ecpu_k_timer_user5_u { + struct pcie_ecpu_k_timer_user5 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER5_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER6_ADDR (0x140708c) +#define NBL_PCIE_ECPU_K_TIMER_USER6_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER6_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER6_DWLEN (1) +union pcie_ecpu_k_timer_user6_u { + struct pcie_ecpu_k_timer_user6 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER6_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER7_ADDR (0x1407090) +#define NBL_PCIE_ECPU_K_TIMER_USER7_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER7_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER7_DWLEN (1) +union pcie_ecpu_k_timer_user7_u { + struct pcie_ecpu_k_timer_user7 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 
data[NBL_PCIE_ECPU_K_TIMER_USER7_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER8_ADDR (0x1407094) +#define NBL_PCIE_ECPU_K_TIMER_USER8_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER8_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER8_DWLEN (1) +union pcie_ecpu_k_timer_user8_u { + struct pcie_ecpu_k_timer_user8 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER8_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER9_ADDR (0x1407098) +#define NBL_PCIE_ECPU_K_TIMER_USER9_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER9_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER9_DWLEN (1) +union pcie_ecpu_k_timer_user9_u { + struct pcie_ecpu_k_timer_user9 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER9_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER10_ADDR (0x140709c) +#define NBL_PCIE_ECPU_K_TIMER_USER10_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER10_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER10_DWLEN (1) +union pcie_ecpu_k_timer_user10_u { + struct pcie_ecpu_k_timer_user10 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER10_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER11_ADDR (0x14070a0) +#define NBL_PCIE_ECPU_K_TIMER_USER11_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER11_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER11_DWLEN (1) +union pcie_ecpu_k_timer_user11_u { + struct pcie_ecpu_k_timer_user11 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER11_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER12_ADDR (0x14070a4) +#define NBL_PCIE_ECPU_K_TIMER_USER12_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER12_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER12_DWLEN (1) +union pcie_ecpu_k_timer_user12_u { + struct pcie_ecpu_k_timer_user12 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER12_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER13_ADDR (0x14070a8) +#define NBL_PCIE_ECPU_K_TIMER_USER13_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER13_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER13_DWLEN (1) +union pcie_ecpu_k_timer_user13_u { + struct pcie_ecpu_k_timer_user13 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER13_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER14_ADDR (0x14070ac) +#define NBL_PCIE_ECPU_K_TIMER_USER14_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER14_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER14_DWLEN (1) +union pcie_ecpu_k_timer_user14_u { + struct pcie_ecpu_k_timer_user14 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER14_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER15_ADDR (0x14070b0) +#define NBL_PCIE_ECPU_K_TIMER_USER15_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER15_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER15_DWLEN (1) +union pcie_ecpu_k_timer_user15_u { + struct pcie_ecpu_k_timer_user15 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER15_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER16_ADDR (0x14070b4) +#define NBL_PCIE_ECPU_K_TIMER_USER16_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER16_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER16_DWLEN (1) +union pcie_ecpu_k_timer_user16_u { + struct pcie_ecpu_k_timer_user16 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 
data[NBL_PCIE_ECPU_K_TIMER_USER16_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER17_ADDR (0x14070b8) +#define NBL_PCIE_ECPU_K_TIMER_USER17_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER17_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER17_DWLEN (1) +union pcie_ecpu_k_timer_user17_u { + struct pcie_ecpu_k_timer_user17 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER17_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER18_ADDR (0x14070bc) +#define NBL_PCIE_ECPU_K_TIMER_USER18_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER18_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER18_DWLEN (1) +union pcie_ecpu_k_timer_user18_u { + struct pcie_ecpu_k_timer_user18 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER18_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER19_ADDR (0x14070c0) +#define NBL_PCIE_ECPU_K_TIMER_USER19_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER19_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER19_DWLEN (1) +union pcie_ecpu_k_timer_user19_u { + struct pcie_ecpu_k_timer_user19 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER19_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER20_ADDR (0x14070c4) +#define NBL_PCIE_ECPU_K_TIMER_USER20_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER20_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER20_DWLEN (1) +union pcie_ecpu_k_timer_user20_u { + struct pcie_ecpu_k_timer_user20 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER20_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER21_ADDR (0x14070c8) +#define NBL_PCIE_ECPU_K_TIMER_USER21_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER21_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER21_DWLEN (1) +union pcie_ecpu_k_timer_user21_u { + struct pcie_ecpu_k_timer_user21 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER21_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER22_ADDR (0x14070cc) +#define NBL_PCIE_ECPU_K_TIMER_USER22_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER22_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER22_DWLEN (1) +union pcie_ecpu_k_timer_user22_u { + struct pcie_ecpu_k_timer_user22 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER22_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER23_ADDR (0x14070d0) +#define NBL_PCIE_ECPU_K_TIMER_USER23_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER23_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER23_DWLEN (1) +union pcie_ecpu_k_timer_user23_u { + struct pcie_ecpu_k_timer_user23 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER23_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_K_TIMER_USER24_ADDR (0x14070d4) +#define NBL_PCIE_ECPU_K_TIMER_USER24_DEPTH (1) +#define NBL_PCIE_ECPU_K_TIMER_USER24_WIDTH (32) +#define NBL_PCIE_ECPU_K_TIMER_USER24_DWLEN (1) +union pcie_ecpu_k_timer_user24_u { + struct pcie_ecpu_k_timer_user24 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_K_TIMER_USER24_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TEST_OUT_CLR_ADDR (0x1408000) +#define NBL_PCIE_ECPU_TEST_OUT_CLR_DEPTH (1) +#define NBL_PCIE_ECPU_TEST_OUT_CLR_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_CLR_DWLEN (1) +union pcie_ecpu_test_out_clr_u { + struct pcie_ecpu_test_out_clr { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:1] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_CLR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_ERROR_ADDR (0x1414000) +#define NBL_PCIE_ECPU_ERROR_DEPTH (1) +#define NBL_PCIE_ECPU_ERROR_WIDTH (32) +#define NBL_PCIE_ECPU_ERROR_DWLEN (1) +union pcie_ecpu_error_u { + struct pcie_ecpu_error { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_ERROR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_WARNING_ADDR (0x1414004) +#define NBL_PCIE_ECPU_WARNING_DEPTH (1) +#define NBL_PCIE_ECPU_WARNING_WIDTH (32) +#define NBL_PCIE_ECPU_WARNING_DWLEN (1) +union pcie_ecpu_warning_u { + struct pcie_ecpu_warning { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_WARNING_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_LAST_ERR_ADDR (0x1414090) +#define NBL_PCIE_ECPU_LAST_ERR_DEPTH (1) +#define NBL_PCIE_ECPU_LAST_ERR_WIDTH (32) +#define NBL_PCIE_ECPU_LAST_ERR_DWLEN (1) +union pcie_ecpu_last_err_u { + struct pcie_ecpu_last_err { + u32 index:18; /* [17:0] Default:0x0 RO */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_LAST_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TEST_IN_ADDR (0x1415028) +#define NBL_PCIE_ECPU_TEST_IN_DEPTH (1) +#define NBL_PCIE_ECPU_TEST_IN_WIDTH (64) +#define NBL_PCIE_ECPU_TEST_IN_DWLEN (2) +union pcie_ecpu_test_in_u { + struct pcie_ecpu_test_in { + u32 dbg_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_IN_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TEST_IN_ERRINJ_ADDR (0x1415030) +#define NBL_PCIE_ECPU_TEST_IN_ERRINJ_DEPTH (1) +#define NBL_PCIE_ECPU_TEST_IN_ERRINJ_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_IN_ERRINJ_DWLEN (1) +union pcie_ecpu_test_in_errinj_u { + struct pcie_ecpu_test_in_errinj { + u32 dbg:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_IN_ERRINJ_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PL_LTSSM_ENABLE_ADDR (0x1415034) +#define NBL_PCIE_ECPU_PL_LTSSM_ENABLE_DEPTH (1) +#define NBL_PCIE_ECPU_PL_LTSSM_ENABLE_WIDTH (32) +#define NBL_PCIE_ECPU_PL_LTSSM_ENABLE_DWLEN (1) +union pcie_ecpu_pl_ltssm_enable_u { + struct pcie_ecpu_pl_ltssm_enable { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PL_LTSSM_ENABLE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PL_EQU_PHASE_ADDR (0x141503c) +#define NBL_PCIE_ECPU_PL_EQU_PHASE_DEPTH (1) +#define NBL_PCIE_ECPU_PL_EQU_PHASE_WIDTH (32) +#define NBL_PCIE_ECPU_PL_EQU_PHASE_DWLEN (1) +union pcie_ecpu_pl_equ_phase_u { + struct pcie_ecpu_pl_equ_phase { + u32 dbg:2; /* [01:00] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PL_EQU_PHASE_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_CFG_BUSDEV_ADDR (0x1415040) +#define NBL_PCIE_ECPU_TL_CFG_BUSDEV_DEPTH (1) +#define NBL_PCIE_ECPU_TL_CFG_BUSDEV_WIDTH (32) +#define NBL_PCIE_ECPU_TL_CFG_BUSDEV_DWLEN (1) +union pcie_ecpu_tl_cfg_busdev_u { + struct pcie_ecpu_tl_cfg_busdev { + u32 dbg:13; /* [12:00] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_CFG_BUSDEV_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TL_REPORT_TIMER_ADDR (0x1415044) +#define NBL_PCIE_ECPU_TL_REPORT_TIMER_DEPTH (1) +#define NBL_PCIE_ECPU_TL_REPORT_TIMER_WIDTH (32) +#define NBL_PCIE_ECPU_TL_REPORT_TIMER_DWLEN (1) +union pcie_ecpu_tl_report_timer_u { + struct pcie_ecpu_tl_report_timer { + u32 dbg:4; /* [03:00] Default:0x0 RO */ + u32 
rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TL_REPORT_TIMER_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TEST_OUT_TL_ADDR (0x1415048) +#define NBL_PCIE_ECPU_TEST_OUT_TL_DEPTH (8) +#define NBL_PCIE_ECPU_TEST_OUT_TL_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_TL_DWLEN (1) +union pcie_ecpu_test_out_tl_u { + struct pcie_ecpu_test_out_tl { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_TL_DWLEN]; +} __packed; +#define NBL_PCIE_ECPU_TEST_OUT_TL_REG(r) (NBL_PCIE_ECPU_TEST_OUT_TL_ADDR + \ + (NBL_PCIE_ECPU_TEST_OUT_TL_DWLEN * 4) * (r)) + +#define NBL_PCIE_ECPU_TEST_OUT_PL_ADDR (0x1415068) +#define NBL_PCIE_ECPU_TEST_OUT_PL_DEPTH (8) +#define NBL_PCIE_ECPU_TEST_OUT_PL_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_PL_DWLEN (1) +union pcie_ecpu_test_out_pl_u { + struct pcie_ecpu_test_out_pl { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_PL_DWLEN]; +} __packed; +#define NBL_PCIE_ECPU_TEST_OUT_PL_REG(r) (NBL_PCIE_ECPU_TEST_OUT_PL_ADDR + \ + (NBL_PCIE_ECPU_TEST_OUT_PL_DWLEN * 4) * (r)) + +#define NBL_PCIE_ECPU_TEST_OUT_EQU_ADDR (0x1415088) +#define NBL_PCIE_ECPU_TEST_OUT_EQU_DEPTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_EQU_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_EQU_DWLEN (1) +union pcie_ecpu_test_out_equ_u { + struct pcie_ecpu_test_out_equ { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_EQU_DWLEN]; +} __packed; +#define NBL_PCIE_ECPU_TEST_OUT_EQU_REG(r) (NBL_PCIE_ECPU_TEST_OUT_EQU_ADDR + \ + (NBL_PCIE_ECPU_TEST_OUT_EQU_DWLEN * 4) * (r)) + +#define NBL_PCIE_ECPU_TEST_OUT_PERF_ADDR (0x1415188) +#define NBL_PCIE_ECPU_TEST_OUT_PERF_DEPTH (4) +#define NBL_PCIE_ECPU_TEST_OUT_PERF_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_PERF_DWLEN (1) +union pcie_ecpu_test_out_perf_u { + struct pcie_ecpu_test_out_perf { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_PERF_DWLEN]; +} __packed; +#define NBL_PCIE_ECPU_TEST_OUT_PERF_REG(r) (NBL_PCIE_ECPU_TEST_OUT_PERF_ADDR + \ + (NBL_PCIE_ECPU_TEST_OUT_PERF_DWLEN * 4) * (r)) + +#define NBL_PCIE_ECPU_TEST_OUT_RXVAL_ADDR (0x1415198) +#define NBL_PCIE_ECPU_TEST_OUT_RXVAL_DEPTH (1) +#define NBL_PCIE_ECPU_TEST_OUT_RXVAL_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_RXVAL_DWLEN (1) +union pcie_ecpu_test_out_rxval_u { + struct pcie_ecpu_test_out_rxval { + u32 dbg:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_RXVAL_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TEST_OUT_RXDATA_ADDR (0x141519c) +#define NBL_PCIE_ECPU_TEST_OUT_RXDATA_DEPTH (8) +#define NBL_PCIE_ECPU_TEST_OUT_RXDATA_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_RXDATA_DWLEN (1) +union pcie_ecpu_test_out_rxdata_u { + struct pcie_ecpu_test_out_rxdata { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_RXDATA_DWLEN]; +} __packed; +#define NBL_PCIE_ECPU_TEST_OUT_RXDATA_REG(r) (NBL_PCIE_ECPU_TEST_OUT_RXDATA_ADDR + \ + (NBL_PCIE_ECPU_TEST_OUT_RXDATA_DWLEN * 4) * (r)) + +#define NBL_PCIE_ECPU_TEST_OUT_RXDATAK_ADDR (0x14151dc) +#define NBL_PCIE_ECPU_TEST_OUT_RXDATAK_DEPTH (1) +#define NBL_PCIE_ECPU_TEST_OUT_RXDATAK_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_RXDATAK_DWLEN (1) +union pcie_ecpu_test_out_rxdatak_u { + struct pcie_ecpu_test_out_rxdatak { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_RXDATAK_DWLEN]; +} __packed; + +#define 
NBL_PCIE_ECPU_TEST_OUT_TXVAL_ADDR (0x14151e4) +#define NBL_PCIE_ECPU_TEST_OUT_TXVAL_DEPTH (1) +#define NBL_PCIE_ECPU_TEST_OUT_TXVAL_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_TXVAL_DWLEN (1) +union pcie_ecpu_test_out_txval_u { + struct pcie_ecpu_test_out_txval { + u32 dbg:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_TXVAL_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_TEST_OUT_TXDATA_ADDR (0x14151e8) +#define NBL_PCIE_ECPU_TEST_OUT_TXDATA_DEPTH (8) +#define NBL_PCIE_ECPU_TEST_OUT_TXDATA_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_TXDATA_DWLEN (1) +union pcie_ecpu_test_out_txdata_u { + struct pcie_ecpu_test_out_txdata { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_TXDATA_DWLEN]; +} __packed; +#define NBL_PCIE_ECPU_TEST_OUT_TXDATA_REG(r) (NBL_PCIE_ECPU_TEST_OUT_TXDATA_ADDR + \ + (NBL_PCIE_ECPU_TEST_OUT_TXDATA_DWLEN * 4) * (r)) + +#define NBL_PCIE_ECPU_TEST_OUT_TXDATAK_ADDR (0x1415228) +#define NBL_PCIE_ECPU_TEST_OUT_TXDATAK_DEPTH (1) +#define NBL_PCIE_ECPU_TEST_OUT_TXDATAK_WIDTH (32) +#define NBL_PCIE_ECPU_TEST_OUT_TXDATAK_DWLEN (1) +union pcie_ecpu_test_out_txdatak_u { + struct pcie_ecpu_test_out_txdatak { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_TEST_OUT_TXDATAK_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_DTEST_O_ADDR (0x1415230) +#define NBL_PCIE_ECPU_DTEST_O_DEPTH (1) +#define NBL_PCIE_ECPU_DTEST_O_WIDTH (32) +#define NBL_PCIE_ECPU_DTEST_O_DWLEN (1) +union pcie_ecpu_dtest_o_u { + struct pcie_ecpu_dtest_o { + u32 dbg:12; /* [11:00] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_DTEST_O_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_DTEST_ADDR (0x1415234) +#define NBL_PCIE_ECPU_PMA0_DTEST_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_DTEST_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_DTEST_DWLEN (1) +union pcie_ecpu_pma0_dtest_u { + struct pcie_ecpu_pma0_dtest { + u32 dbg:12; /* [11:00] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_DTEST_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_ERR_ADDR (0x1415238) +#define NBL_PCIE_ECPU_PMA0_ERR_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_ERR_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_ERR_DWLEN (1) +union pcie_ecpu_pma0_err_u { + struct pcie_ecpu_pma0_err { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ERR_ADDR (0x141523c) +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ERR_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ERR_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ERR_DWLEN (1) +union pcie_ecpu_pma0_prgm_parity_err_u { + struct pcie_ecpu_pma0_prgm_parity_err { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_PRGM_PARITY_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_ECPU_PMA0_PMLD_ADDR (0x141c000) +#define NBL_PCIE_ECPU_PMA0_PMLD_DEPTH (1) +#define NBL_PCIE_ECPU_PMA0_PMLD_WIDTH (32) +#define NBL_PCIE_ECPU_PMA0_PMLD_DWLEN (1) +union pcie_ecpu_pma0_pmld_u { + struct pcie_ecpu_pma0_pmld { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_ECPU_PMA0_PMLD_DWLEN]; +} __packed; + +#endif
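Each register in the generated header above follows one pattern: an _ADDR/_DEPTH/_WIDTH/_DWLEN macro quadruple plus a union that overlays a named bitfield view (info) on the raw dword array (data); registers with _DEPTH greater than 1 additionally get a _REG(r) macro that steps the base address by _DWLEN * 4 bytes per entry. A minimal usage sketch follows; the hw_addr argument (assumed to be an ioremap()ed BAR pointer) and the function names are illustrative assumptions, and only the unions and macros come from the generated header itself:

	/* Usage sketch, not part of the generated file: hw_addr is assumed
	 * to be the ioremap()ed BAR that backs these register offsets.
	 */
	#include <linux/io.h>

	static bool nbl_ltssm_is_enabled(void __iomem *hw_addr)
	{
		union pcie_ecpu_pl_ltssm_enable_u reg;

		/* Read the raw dword, then decode it through the bitfield view. */
		reg.data[0] = ioread32(hw_addr + NBL_PCIE_ECPU_PL_LTSSM_ENABLE_ADDR);
		return reg.info.dbg;
	}

	static u32 nbl_read_test_out_tl(void __iomem *hw_addr, unsigned int r)
	{
		union pcie_ecpu_test_out_tl_u reg;

		/* _DEPTH > 1 registers are arrays: _REG(r) computes
		 * base + (_DWLEN * 4) * r, with r below the _DEPTH of 8.
		 */
		reg.data[0] = ioread32(hw_addr + NBL_PCIE_ECPU_TEST_OUT_TL_REG(r));
		return reg.info.dbg;
	}

The same two-view pattern covers multi-dword registers as well (e.g. the 64-bit pcie_ecpu_test_in_u with _DWLEN 2), where each element of data[] is read separately.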
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcie_host.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcie_host.h new file mode 100644 index 0000000000000000000000000000000000000000..7b550010bf63a7919b051c610781b07ba227f4fd --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcie_host.h @@ -0,0 +1,2310 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PCIE_HOST_H +#define NBL_PCIE_HOST_H 1 + +#include <linux/types.h> + +#define NBL_PCIE_HOST_BASE (0x01504000) + +#define NBL_PCIE_HOST_INT_STATUS_ADDR (0x1504000) +#define NBL_PCIE_HOST_INT_STATUS_DEPTH (1) +#define NBL_PCIE_HOST_INT_STATUS_WIDTH (32) +#define NBL_PCIE_HOST_INT_STATUS_DWLEN (1) +union pcie_host_int_status_u { + struct pcie_host_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_INT_MASK_ADDR (0x1504004) +#define NBL_PCIE_HOST_INT_MASK_DEPTH (1) +#define NBL_PCIE_HOST_INT_MASK_WIDTH (32) +#define NBL_PCIE_HOST_INT_MASK_DWLEN (1) +union pcie_host_int_mask_u { + struct pcie_host_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RW */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_INT_SET_ADDR (0x1504008) +#define NBL_PCIE_HOST_INT_SET_DEPTH (1) +#define NBL_PCIE_HOST_INT_SET_WIDTH (32) +#define NBL_PCIE_HOST_INT_SET_DWLEN (1) +union pcie_host_int_set_u { + struct pcie_host_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 data_cor_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_INT_SET_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_INIT_DONE_ADDR (0x150400c) +#define NBL_PCIE_HOST_INIT_DONE_DEPTH (1) +#define NBL_PCIE_HOST_INIT_DONE_WIDTH (32) +#define NBL_PCIE_HOST_INIT_DONE_DWLEN (1) +union pcie_host_init_done_u { + struct pcie_host_init_done { + u32 done:1; /* [00:00] Default:0x1 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_CIF_ERR_INFO_ADDR (0x1504040) +#define NBL_PCIE_HOST_CIF_ERR_INFO_DEPTH (1) +#define
NBL_PCIE_HOST_CIF_ERR_INFO_WIDTH (32) +#define NBL_PCIE_HOST_CIF_ERR_INFO_DWLEN (1) +union pcie_host_cif_err_info_u { + struct pcie_host_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_CAR_CTRL_ADDR (0x1504100) +#define NBL_PCIE_HOST_CAR_CTRL_DEPTH (1) +#define NBL_PCIE_HOST_CAR_CTRL_WIDTH (32) +#define NBL_PCIE_HOST_CAR_CTRL_DWLEN (1) +union pcie_host_car_ctrl_u { + struct pcie_host_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_CLOCK_FREQ_ADDR (0x1504200) +#define NBL_PCIE_HOST_TL_CLOCK_FREQ_DEPTH (1) +#define NBL_PCIE_HOST_TL_CLOCK_FREQ_WIDTH (32) +#define NBL_PCIE_HOST_TL_CLOCK_FREQ_DWLEN (1) +union pcie_host_tl_clock_freq_u { + struct pcie_host_tl_clock_freq { + u32 dbg:32; /* [31:00] Default:0x258 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_CLOCK_FREQ_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_OBS_SEL_EN_ADDR (0x1504304) +#define NBL_PCIE_HOST_OBS_SEL_EN_DEPTH (1) +#define NBL_PCIE_HOST_OBS_SEL_EN_WIDTH (32) +#define NBL_PCIE_HOST_OBS_SEL_EN_DWLEN (1) +union pcie_host_obs_sel_en_u { + struct pcie_host_obs_sel_en { + u32 dbg:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_OBS_SEL_EN_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF_MASK_ADDR (0x1505004) +#define NBL_PCIE_HOST_K_PF_MASK_DEPTH (1) +#define NBL_PCIE_HOST_K_PF_MASK_WIDTH (32) +#define NBL_PCIE_HOST_K_PF_MASK_DWLEN (1) +union pcie_host_k_pf_mask_u { + struct pcie_host_k_pf_mask { + u32 dbg:8; /* [07:00] Default:0x00 RW */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF_MASK_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF_VPD_ADDR (0x1505008) +#define NBL_PCIE_HOST_K_PF_VPD_DEPTH (1) +#define NBL_PCIE_HOST_K_PF_VPD_WIDTH (32) +#define NBL_PCIE_HOST_K_PF_VPD_DWLEN (1) +union pcie_host_k_pf_vpd_u { + struct pcie_host_k_pf_vpd { + u32 dbg:8; /* [07:00] Default:0xff RW */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF0_VENDOR_DEVICE_ADDR (0x150500c) +#define NBL_PCIE_HOST_K_PF0_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF0_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF0_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf0_vendor_device_u { + struct pcie_host_k_pf0_vendor_device { + u32 id:32; /* [31:00] Default:0x34001F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF0_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF0_REV_CLASS_CODE_ADDR (0x1505010) +#define NBL_PCIE_HOST_K_PF0_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF0_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF0_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf0_rev_class_code_u { + struct pcie_host_k_pf0_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF0_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF0_SUB_VENDOR_DEVICE_ADDR (0x1505014) +#define NBL_PCIE_HOST_K_PF0_SUB_VENDOR_DEVICE_DEPTH (1) +#define 
NBL_PCIE_HOST_K_PF0_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF0_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf0_sub_vendor_device_u { + struct pcie_host_k_pf0_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF0_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF1_VENDOR_DEVICE_ADDR (0x1505018) +#define NBL_PCIE_HOST_K_PF1_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF1_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF1_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf1_vendor_device_u { + struct pcie_host_k_pf1_vendor_device { + u32 id:32; /* [31:00] Default:0x34011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF1_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF1_REV_CLASS_CODE_ADDR (0x150501c) +#define NBL_PCIE_HOST_K_PF1_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF1_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF1_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf1_rev_class_code_u { + struct pcie_host_k_pf1_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF1_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF1_SUB_VENDOR_DEVICE_ADDR (0x1505020) +#define NBL_PCIE_HOST_K_PF1_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF1_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF1_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf1_sub_vendor_device_u { + struct pcie_host_k_pf1_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF1_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF2_VENDOR_DEVICE_ADDR (0x1505024) +#define NBL_PCIE_HOST_K_PF2_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF2_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF2_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf2_vendor_device_u { + struct pcie_host_k_pf2_vendor_device { + u32 id:32; /* [31:00] Default:0x34011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF2_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF2_REV_CLASS_CODE_ADDR (0x1505028) +#define NBL_PCIE_HOST_K_PF2_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF2_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF2_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf2_rev_class_code_u { + struct pcie_host_k_pf2_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF2_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF2_SUB_VENDOR_DEVICE_ADDR (0x150502c) +#define NBL_PCIE_HOST_K_PF2_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF2_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF2_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf2_sub_vendor_device_u { + struct pcie_host_k_pf2_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF2_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF3_VENDOR_DEVICE_ADDR (0x1505030) +#define NBL_PCIE_HOST_K_PF3_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF3_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF3_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf3_vendor_device_u { + struct pcie_host_k_pf3_vendor_device { + u32 id:32; /* [31:00] Default:0x34011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF3_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF3_REV_CLASS_CODE_ADDR (0x1505034) +#define 
NBL_PCIE_HOST_K_PF3_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF3_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF3_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf3_rev_class_code_u { + struct pcie_host_k_pf3_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF3_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF3_SUB_VENDOR_DEVICE_ADDR (0x1505038) +#define NBL_PCIE_HOST_K_PF3_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF3_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF3_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf3_sub_vendor_device_u { + struct pcie_host_k_pf3_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF3_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF4_VENDOR_DEVICE_ADDR (0x150503c) +#define NBL_PCIE_HOST_K_PF4_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF4_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF4_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf4_vendor_device_u { + struct pcie_host_k_pf4_vendor_device { + u32 id:32; /* [31:00] Default:0x34011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF4_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF4_REV_CLASS_CODE_ADDR (0x1505040) +#define NBL_PCIE_HOST_K_PF4_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF4_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF4_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf4_rev_class_code_u { + struct pcie_host_k_pf4_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF4_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF4_SUB_VENDOR_DEVICE_ADDR (0x1505044) +#define NBL_PCIE_HOST_K_PF4_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF4_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF4_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf4_sub_vendor_device_u { + struct pcie_host_k_pf4_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF4_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF5_VENDOR_DEVICE_ADDR (0x1505048) +#define NBL_PCIE_HOST_K_PF5_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF5_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF5_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf5_vendor_device_u { + struct pcie_host_k_pf5_vendor_device { + u32 id:32; /* [31:00] Default:0x34011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF5_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF5_REV_CLASS_CODE_ADDR (0x150504c) +#define NBL_PCIE_HOST_K_PF5_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF5_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF5_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf5_rev_class_code_u { + struct pcie_host_k_pf5_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF5_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF5_SUB_VENDOR_DEVICE_ADDR (0x1505050) +#define NBL_PCIE_HOST_K_PF5_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF5_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF5_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf5_sub_vendor_device_u { + struct pcie_host_k_pf5_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF5_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define 
NBL_PCIE_HOST_K_PF6_VENDOR_DEVICE_ADDR (0x1505054) +#define NBL_PCIE_HOST_K_PF6_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF6_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF6_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf6_vendor_device_u { + struct pcie_host_k_pf6_vendor_device { + u32 id:32; /* [31:00] Default:0x34011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF6_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF6_REV_CLASS_CODE_ADDR (0x1505058) +#define NBL_PCIE_HOST_K_PF6_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF6_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF6_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf6_rev_class_code_u { + struct pcie_host_k_pf6_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF6_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF6_SUB_VENDOR_DEVICE_ADDR (0x150505c) +#define NBL_PCIE_HOST_K_PF6_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF6_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF6_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf6_sub_vendor_device_u { + struct pcie_host_k_pf6_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF6_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF7_VENDOR_DEVICE_ADDR (0x1505060) +#define NBL_PCIE_HOST_K_PF7_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF7_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF7_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf7_vendor_device_u { + struct pcie_host_k_pf7_vendor_device { + u32 id:32; /* [31:00] Default:0x34011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF7_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF7_REV_CLASS_CODE_ADDR (0x1505064) +#define NBL_PCIE_HOST_K_PF7_REV_CLASS_CODE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF7_REV_CLASS_CODE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF7_REV_CLASS_CODE_DWLEN (1) +union pcie_host_k_pf7_rev_class_code_u { + struct pcie_host_k_pf7_rev_class_code { + u32 id:32; /* [31:00] Default:0x02000000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF7_REV_CLASS_CODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF7_SUB_VENDOR_DEVICE_ADDR (0x1505068) +#define NBL_PCIE_HOST_K_PF7_SUB_VENDOR_DEVICE_DEPTH (1) +#define NBL_PCIE_HOST_K_PF7_SUB_VENDOR_DEVICE_WIDTH (32) +#define NBL_PCIE_HOST_K_PF7_SUB_VENDOR_DEVICE_DWLEN (1) +union pcie_host_k_pf7_sub_vendor_device_u { + struct pcie_host_k_pf7_sub_vendor_device { + u32 id:32; /* [31:00] Default:0x00011F0F RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF7_SUB_VENDOR_DEVICE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF0_FID_ADDR (0x150506c) +#define NBL_PCIE_HOST_K_PF0_FID_DEPTH (1) +#define NBL_PCIE_HOST_K_PF0_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF0_FID_DWLEN (1) +union pcie_host_k_pf0_fid_u { + struct pcie_host_k_pf0_fid { + u32 base_top:32; /* [31:00] Default:0x00400000 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF0_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF1_FID_ADDR (0x1505070) +#define NBL_PCIE_HOST_K_PF1_FID_DEPTH (1) +#define NBL_PCIE_HOST_K_PF1_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF1_FID_DWLEN (1) +union pcie_host_k_pf1_fid_u { + struct pcie_host_k_pf1_fid { + u32 base_top:32; /* [31:00] Default:0x00800040 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF1_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF2_FID_ADDR (0x1505074) +#define NBL_PCIE_HOST_K_PF2_FID_DEPTH (1) +#define 
NBL_PCIE_HOST_K_PF2_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF2_FID_DWLEN (1) +union pcie_host_k_pf2_fid_u { + struct pcie_host_k_pf2_fid { + u32 base_top:32; /* [31:00] Default:0x00C00080 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF2_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF3_FID_ADDR (0x1505078) +#define NBL_PCIE_HOST_K_PF3_FID_DEPTH (1) +#define NBL_PCIE_HOST_K_PF3_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF3_FID_DWLEN (1) +union pcie_host_k_pf3_fid_u { + struct pcie_host_k_pf3_fid { + u32 base_top:32; /* [31:00] Default:0x010000C0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF3_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF4_FID_ADDR (0x150507c) +#define NBL_PCIE_HOST_K_PF4_FID_DEPTH (1) +#define NBL_PCIE_HOST_K_PF4_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF4_FID_DWLEN (1) +union pcie_host_k_pf4_fid_u { + struct pcie_host_k_pf4_fid { + u32 base_top:32; /* [31:00] Default:0x01400100 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF4_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF5_FID_ADDR (0x1505080) +#define NBL_PCIE_HOST_K_PF5_FID_DEPTH (1) +#define NBL_PCIE_HOST_K_PF5_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF5_FID_DWLEN (1) +union pcie_host_k_pf5_fid_u { + struct pcie_host_k_pf5_fid { + u32 base_top:32; /* [31:00] Default:0x01800140 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF5_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF6_FID_ADDR (0x1505084) +#define NBL_PCIE_HOST_K_PF6_FID_DEPTH (1) +#define NBL_PCIE_HOST_K_PF6_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF6_FID_DWLEN (1) +union pcie_host_k_pf6_fid_u { + struct pcie_host_k_pf6_fid { + u32 base_top:32; /* [31:00] Default:0x01C00180 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF6_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PF7_FID_ADDR (0x1505088) +#define NBL_PCIE_HOST_K_PF7_FID_DEPTH (1) +#define NBL_PCIE_HOST_K_PF7_FID_WIDTH (32) +#define NBL_PCIE_HOST_K_PF7_FID_DWLEN (1) +union pcie_host_k_pf7_fid_u { + struct pcie_host_k_pf7_fid { + u32 base_top:32; /* [31:00] Default:0x020001C0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PF7_FID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF0_ID_ADDR (0x150508c) +#define NBL_PCIE_HOST_K_VF0_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF0_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF0_ID_DWLEN (1) +union pcie_host_k_vf0_id_u { + struct pcie_host_k_vf0_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF0_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF1_ID_ADDR (0x1505090) +#define NBL_PCIE_HOST_K_VF1_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF1_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF1_ID_DWLEN (1) +union pcie_host_k_vf1_id_u { + struct pcie_host_k_vf1_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF1_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF2_ID_ADDR (0x1505094) +#define NBL_PCIE_HOST_K_VF2_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF2_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF2_ID_DWLEN (1) +union pcie_host_k_vf2_id_u { + struct pcie_host_k_vf2_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF2_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF3_ID_ADDR (0x1505098) +#define NBL_PCIE_HOST_K_VF3_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF3_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF3_ID_DWLEN (1) +union pcie_host_k_vf3_id_u { + struct pcie_host_k_vf3_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed 
info; + u32 data[NBL_PCIE_HOST_K_VF3_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF4_ID_ADDR (0x150509c) +#define NBL_PCIE_HOST_K_VF4_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF4_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF4_ID_DWLEN (1) +union pcie_host_k_vf4_id_u { + struct pcie_host_k_vf4_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF4_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF5_ID_ADDR (0x15050a0) +#define NBL_PCIE_HOST_K_VF5_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF5_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF5_ID_DWLEN (1) +union pcie_host_k_vf5_id_u { + struct pcie_host_k_vf5_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF5_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF6_ID_ADDR (0x15050a4) +#define NBL_PCIE_HOST_K_VF6_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF6_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF6_ID_DWLEN (1) +union pcie_host_k_vf6_id_u { + struct pcie_host_k_vf6_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF6_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF7_ID_ADDR (0x15050a8) +#define NBL_PCIE_HOST_K_VF7_ID_DEPTH (1) +#define NBL_PCIE_HOST_K_VF7_ID_WIDTH (32) +#define NBL_PCIE_HOST_K_VF7_ID_DWLEN (1) +union pcie_host_k_vf7_id_u { + struct pcie_host_k_vf7_id { + u32 device_sub:32; /* [31:00] Default:0x00013402 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF7_ID_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_MEM_DEEPSLEEP_ADDR (0x15050b0) +#define NBL_PCIE_HOST_PMA0_MEM_DEEPSLEEP_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_MEM_DEEPSLEEP_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_MEM_DEEPSLEEP_DWLEN (1) +union pcie_host_pma0_mem_deepsleep_u { + struct pcie_host_pma0_mem_deepsleep { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_MEM_DEEPSLEEP_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ENABLE_ADDR (0x15050b4) +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ENABLE_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ENABLE_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ENABLE_DWLEN (1) +union pcie_host_pma0_prgm_parity_enable_u { + struct pcie_host_pma0_prgm_parity_enable { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_PRGM_PARITY_ENABLE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_MEM_DEEPSLEEP_ADDR (0x15050b8) +#define NBL_PCIE_HOST_PMA1_MEM_DEEPSLEEP_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_MEM_DEEPSLEEP_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_MEM_DEEPSLEEP_DWLEN (1) +union pcie_host_pma1_mem_deepsleep_u { + struct pcie_host_pma1_mem_deepsleep { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_MEM_DEEPSLEEP_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ENABLE_ADDR (0x15050bc) +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ENABLE_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ENABLE_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ENABLE_DWLEN (1) +union pcie_host_pma1_prgm_parity_enable_u { + struct pcie_host_pma1_prgm_parity_enable { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_PRGM_PARITY_ENABLE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA_PMLD_LOCK_REG_ADDR (0x15050c0) +#define 
NBL_PCIE_HOST_PMA_PMLD_LOCK_REG_DEPTH (1) +#define NBL_PCIE_HOST_PMA_PMLD_LOCK_REG_WIDTH (32) +#define NBL_PCIE_HOST_PMA_PMLD_LOCK_REG_DWLEN (1) +union pcie_host_pma_pmld_lock_reg_u { + struct pcie_host_pma_pmld_lock_reg { + u32 dbg:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA_PMLD_LOCK_REG_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMLD_COPY_ADDR (0x15050c4) +#define NBL_PCIE_HOST_PMLD_COPY_DEPTH (1) +#define NBL_PCIE_HOST_PMLD_COPY_WIDTH (32) +#define NBL_PCIE_HOST_PMLD_COPY_DWLEN (1) +union pcie_host_pmld_copy_u { + struct pcie_host_pmld_copy { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMLD_COPY_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_REFCLK_ADDR (0x15050c8) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_REFCLK_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_REFCLK_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_REFCLK_DWLEN (1) +union pcie_host_pma0_phy_ctrl_refclk_u { + struct pcie_host_pma0_phy_ctrl_refclk { + u32 dbg:5; /* [04:00] Default:0x14 RW */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_PHY_CTRL_REFCLK_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDA_SEL_ADDR (0x15050cc) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDA_SEL_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDA_SEL_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDA_SEL_DWLEN (1) +union pcie_host_pma0_phy_ctrl_vdda_sel_u { + struct pcie_host_pma0_phy_ctrl_vdda_sel { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDA_SEL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDHA_SEL_ADDR (0x15050d0) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDHA_SEL_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDHA_SEL_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDHA_SEL_DWLEN (1) +union pcie_host_pma0_phy_ctrl_vddha_sel_u { + struct pcie_host_pma0_phy_ctrl_vddha_sel { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_PHY_CTRL_VDDHA_SEL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_REFCLK_ADDR (0x15050d4) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_REFCLK_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_REFCLK_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_REFCLK_DWLEN (1) +union pcie_host_pma1_phy_ctrl_refclk_u { + struct pcie_host_pma1_phy_ctrl_refclk { + u32 dbg:5; /* [04:00] Default:0x14 RW */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_PHY_CTRL_REFCLK_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDA_SEL_ADDR (0x15050d8) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDA_SEL_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDA_SEL_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDA_SEL_DWLEN (1) +union pcie_host_pma1_phy_ctrl_vdda_sel_u { + struct pcie_host_pma1_phy_ctrl_vdda_sel { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDA_SEL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDHA_SEL_ADDR (0x15050dc) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDHA_SEL_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDHA_SEL_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDHA_SEL_DWLEN (1) +union pcie_host_pma1_phy_ctrl_vddha_sel_u { + struct pcie_host_pma1_phy_ctrl_vddha_sel { + u32 dbg:1; /* 
[00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_PHY_CTRL_VDDHA_SEL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_CFG_VFREGS_ADDR (0x1505100) +#define NBL_PCIE_HOST_TL_CFG_VFREGS_DEPTH (1024) +#define NBL_PCIE_HOST_TL_CFG_VFREGS_WIDTH (32) +#define NBL_PCIE_HOST_TL_CFG_VFREGS_DWLEN (1) +union pcie_host_tl_cfg_vfregs_u { + struct pcie_host_tl_cfg_vfregs { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_CFG_VFREGS_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TL_CFG_VFREGS_REG(r) (NBL_PCIE_HOST_TL_CFG_VFREGS_ADDR + \ + (NBL_PCIE_HOST_TL_CFG_VFREGS_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TL_PM_EVENT_ADDR (0x1506100) +#define NBL_PCIE_HOST_TL_PM_EVENT_DEPTH (1) +#define NBL_PCIE_HOST_TL_PM_EVENT_WIDTH (32) +#define NBL_PCIE_HOST_TL_PM_EVENT_DWLEN (1) +union pcie_host_tl_pm_event_u { + struct pcie_host_tl_pm_event { + u32 dbg:8; /* [07:00] Default:0x0 RW */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PM_EVENT_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_CLK_GATE_ADDR (0x1506104) +#define NBL_PCIE_HOST_TL_CLK_GATE_DEPTH (1) +#define NBL_PCIE_HOST_TL_CLK_GATE_WIDTH (32) +#define NBL_PCIE_HOST_TL_CLK_GATE_DWLEN (1) +union pcie_host_tl_clk_gate_u { + struct pcie_host_tl_clk_gate { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_CLK_GATE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_LINK_UP_ADDR (0x1506108) +#define NBL_PCIE_HOST_LINK_UP_DEPTH (1) +#define NBL_PCIE_HOST_LINK_UP_WIDTH (32) +#define NBL_PCIE_HOST_LINK_UP_DWLEN (1) +union pcie_host_link_up_u { + struct pcie_host_link_up { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_LINK_UP_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_MPERST_N_ADDR (0x150610c) +#define NBL_PCIE_HOST_MPERST_N_DEPTH (1) +#define NBL_PCIE_HOST_MPERST_N_WIDTH (32) +#define NBL_PCIE_HOST_MPERST_N_DWLEN (1) +union pcie_host_mperst_n_u { + struct pcie_host_mperst_n { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_MPERST_N_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PM_L2_STATUS_ADDR (0x1506110) +#define NBL_PCIE_HOST_TL_PM_L2_STATUS_DEPTH (1) +#define NBL_PCIE_HOST_TL_PM_L2_STATUS_WIDTH (32) +#define NBL_PCIE_HOST_TL_PM_L2_STATUS_DWLEN (1) +union pcie_host_tl_pm_l2_status_u { + struct pcie_host_tl_pm_l2_status { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PM_L2_STATUS_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PL_LTSSM_ADDR (0x1506114) +#define NBL_PCIE_HOST_PL_LTSSM_DEPTH (1) +#define NBL_PCIE_HOST_PL_LTSSM_WIDTH (32) +#define NBL_PCIE_HOST_PL_LTSSM_DWLEN (1) +union pcie_host_pl_ltssm_u { + struct pcie_host_pl_ltssm { + u32 dbg:5; /* [04:00] Default:0x0 RO */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PL_LTSSM_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_LTSSM_HISTORY0_ADDR (0x1506118) +#define NBL_PCIE_HOST_LTSSM_HISTORY0_DEPTH (1) +#define NBL_PCIE_HOST_LTSSM_HISTORY0_WIDTH (32) +#define NBL_PCIE_HOST_LTSSM_HISTORY0_DWLEN (1) +union pcie_host_ltssm_history0_u { + struct pcie_host_ltssm_history0 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_LTSSM_HISTORY0_DWLEN]; +} __packed; + +#define 
NBL_PCIE_HOST_LTSSM_HISTORY1_ADDR (0x150611c) +#define NBL_PCIE_HOST_LTSSM_HISTORY1_DEPTH (1) +#define NBL_PCIE_HOST_LTSSM_HISTORY1_WIDTH (32) +#define NBL_PCIE_HOST_LTSSM_HISTORY1_DWLEN (1) +union pcie_host_ltssm_history1_u { + struct pcie_host_ltssm_history1 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_LTSSM_HISTORY1_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_LTSSM_HISTORY2_ADDR (0x1506120) +#define NBL_PCIE_HOST_LTSSM_HISTORY2_DEPTH (1) +#define NBL_PCIE_HOST_LTSSM_HISTORY2_WIDTH (32) +#define NBL_PCIE_HOST_LTSSM_HISTORY2_DWLEN (1) +union pcie_host_ltssm_history2_u { + struct pcie_host_ltssm_history2 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_LTSSM_HISTORY2_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_LTSSM_HISTORY3_ADDR (0x1506124) +#define NBL_PCIE_HOST_LTSSM_HISTORY3_DEPTH (1) +#define NBL_PCIE_HOST_LTSSM_HISTORY3_WIDTH (32) +#define NBL_PCIE_HOST_LTSSM_HISTORY3_DWLEN (1) +union pcie_host_ltssm_history3_u { + struct pcie_host_ltssm_history3 { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_LTSSM_HISTORY3_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_CFG_REGS0_ADDR (0x1506130) +#define NBL_PCIE_HOST_TL_CFG_REGS0_DEPTH (1) +#define NBL_PCIE_HOST_TL_CFG_REGS0_WIDTH (32) +#define NBL_PCIE_HOST_TL_CFG_REGS0_DWLEN (1) +union pcie_host_tl_cfg_regs0_u { + struct pcie_host_tl_cfg_regs0 { + u32 dbg:28; /* [27:00] Default:0x0 RO */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_CFG_REGS0_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_CFG_REGS1_ADDR (0x1506134) +#define NBL_PCIE_HOST_TL_CFG_REGS1_DEPTH (1) +#define NBL_PCIE_HOST_TL_CFG_REGS1_WIDTH (32) +#define NBL_PCIE_HOST_TL_CFG_REGS1_DWLEN (1) +union pcie_host_tl_cfg_regs1_u { + struct pcie_host_tl_cfg_regs1 { + u32 dbg:12; /* [11:00] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_CFG_REGS1_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_CFG_REGS2_ADDR (0x1506138) +#define NBL_PCIE_HOST_TL_CFG_REGS2_DEPTH (1) +#define NBL_PCIE_HOST_TL_CFG_REGS2_WIDTH (32) +#define NBL_PCIE_HOST_TL_CFG_REGS2_DWLEN (1) +union pcie_host_tl_cfg_regs2_u { + struct pcie_host_tl_cfg_regs2 { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_CFG_REGS2_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF0_VPD_ADDR (0x1506200) +#define NBL_PCIE_HOST_TL_PF0_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF0_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF0_VPD_DWLEN (1) +union pcie_host_tl_pf0_vpd_u { + struct pcie_host_tl_pf0_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF0_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF0_VPD_WR_ADDR (0x1506204) +#define NBL_PCIE_HOST_TL_PF0_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF0_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF0_VPD_WR_DWLEN (1) +union pcie_host_tl_pf0_vpd_wr_u { + struct pcie_host_tl_pf0_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF0_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF0_VPD_RD_ADDR (0x1506208) +#define NBL_PCIE_HOST_TL_PF0_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF0_VPD_RD_WIDTH (32) +#define 
NBL_PCIE_HOST_TL_PF0_VPD_RD_DWLEN (1) +union pcie_host_tl_pf0_vpd_rd_u { + struct pcie_host_tl_pf0_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF0_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF0_VPD_R_ADDR (0x150620c) +#define NBL_PCIE_HOST_TL_PF0_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF0_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF0_VPD_R_DWLEN (1) +union pcie_host_tl_pf0_vpd_r_u { + struct pcie_host_tl_pf0_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF0_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF1_VPD_ADDR (0x1506210) +#define NBL_PCIE_HOST_TL_PF1_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF1_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF1_VPD_DWLEN (1) +union pcie_host_tl_pf1_vpd_u { + struct pcie_host_tl_pf1_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF1_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF1_VPD_WR_ADDR (0x1506214) +#define NBL_PCIE_HOST_TL_PF1_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF1_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF1_VPD_WR_DWLEN (1) +union pcie_host_tl_pf1_vpd_wr_u { + struct pcie_host_tl_pf1_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF1_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF1_VPD_RD_ADDR (0x1506218) +#define NBL_PCIE_HOST_TL_PF1_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF1_VPD_RD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF1_VPD_RD_DWLEN (1) +union pcie_host_tl_pf1_vpd_rd_u { + struct pcie_host_tl_pf1_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF1_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF1_VPD_R_ADDR (0x150621c) +#define NBL_PCIE_HOST_TL_PF1_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF1_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF1_VPD_R_DWLEN (1) +union pcie_host_tl_pf1_vpd_r_u { + struct pcie_host_tl_pf1_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF1_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF2_VPD_ADDR (0x1506220) +#define NBL_PCIE_HOST_TL_PF2_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF2_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF2_VPD_DWLEN (1) +union pcie_host_tl_pf2_vpd_u { + struct pcie_host_tl_pf2_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF2_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF2_VPD_WR_ADDR (0x1506224) +#define NBL_PCIE_HOST_TL_PF2_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF2_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF2_VPD_WR_DWLEN (1) +union pcie_host_tl_pf2_vpd_wr_u { + struct pcie_host_tl_pf2_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF2_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF2_VPD_RD_ADDR (0x1506228) +#define NBL_PCIE_HOST_TL_PF2_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF2_VPD_RD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF2_VPD_RD_DWLEN (1) +union 
pcie_host_tl_pf2_vpd_rd_u { + struct pcie_host_tl_pf2_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF2_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF2_VPD_R_ADDR (0x150622c) +#define NBL_PCIE_HOST_TL_PF2_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF2_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF2_VPD_R_DWLEN (1) +union pcie_host_tl_pf2_vpd_r_u { + struct pcie_host_tl_pf2_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF2_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF3_VPD_ADDR (0x1506230) +#define NBL_PCIE_HOST_TL_PF3_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF3_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF3_VPD_DWLEN (1) +union pcie_host_tl_pf3_vpd_u { + struct pcie_host_tl_pf3_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF3_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF3_VPD_WR_ADDR (0x1506234) +#define NBL_PCIE_HOST_TL_PF3_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF3_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF3_VPD_WR_DWLEN (1) +union pcie_host_tl_pf3_vpd_wr_u { + struct pcie_host_tl_pf3_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF3_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF3_VPD_RD_ADDR (0x1506238) +#define NBL_PCIE_HOST_TL_PF3_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF3_VPD_RD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF3_VPD_RD_DWLEN (1) +union pcie_host_tl_pf3_vpd_rd_u { + struct pcie_host_tl_pf3_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF3_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF3_VPD_R_ADDR (0x150623c) +#define NBL_PCIE_HOST_TL_PF3_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF3_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF3_VPD_R_DWLEN (1) +union pcie_host_tl_pf3_vpd_r_u { + struct pcie_host_tl_pf3_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF3_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF4_VPD_ADDR (0x1506240) +#define NBL_PCIE_HOST_TL_PF4_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF4_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF4_VPD_DWLEN (1) +union pcie_host_tl_pf4_vpd_u { + struct pcie_host_tl_pf4_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF4_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF4_VPD_WR_ADDR (0x1506244) +#define NBL_PCIE_HOST_TL_PF4_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF4_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF4_VPD_WR_DWLEN (1) +union pcie_host_tl_pf4_vpd_wr_u { + struct pcie_host_tl_pf4_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF4_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF4_VPD_RD_ADDR (0x1506248) +#define NBL_PCIE_HOST_TL_PF4_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF4_VPD_RD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF4_VPD_RD_DWLEN (1) +union pcie_host_tl_pf4_vpd_rd_u { + struct pcie_host_tl_pf4_vpd_rd 
{ + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF4_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF4_VPD_R_ADDR (0x150624c) +#define NBL_PCIE_HOST_TL_PF4_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF4_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF4_VPD_R_DWLEN (1) +union pcie_host_tl_pf4_vpd_r_u { + struct pcie_host_tl_pf4_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF4_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF5_VPD_ADDR (0x1506250) +#define NBL_PCIE_HOST_TL_PF5_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF5_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF5_VPD_DWLEN (1) +union pcie_host_tl_pf5_vpd_u { + struct pcie_host_tl_pf5_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF5_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF5_VPD_WR_ADDR (0x1506254) +#define NBL_PCIE_HOST_TL_PF5_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF5_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF5_VPD_WR_DWLEN (1) +union pcie_host_tl_pf5_vpd_wr_u { + struct pcie_host_tl_pf5_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF5_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF5_VPD_RD_ADDR (0x1506258) +#define NBL_PCIE_HOST_TL_PF5_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF5_VPD_RD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF5_VPD_RD_DWLEN (1) +union pcie_host_tl_pf5_vpd_rd_u { + struct pcie_host_tl_pf5_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF5_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF5_VPD_R_ADDR (0x150625c) +#define NBL_PCIE_HOST_TL_PF5_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF5_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF5_VPD_R_DWLEN (1) +union pcie_host_tl_pf5_vpd_r_u { + struct pcie_host_tl_pf5_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF5_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF6_VPD_ADDR (0x1506260) +#define NBL_PCIE_HOST_TL_PF6_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF6_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF6_VPD_DWLEN (1) +union pcie_host_tl_pf6_vpd_u { + struct pcie_host_tl_pf6_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF6_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF6_VPD_WR_ADDR (0x1506264) +#define NBL_PCIE_HOST_TL_PF6_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF6_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF6_VPD_WR_DWLEN (1) +union pcie_host_tl_pf6_vpd_wr_u { + struct pcie_host_tl_pf6_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF6_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF6_VPD_RD_ADDR (0x1506268) +#define NBL_PCIE_HOST_TL_PF6_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF6_VPD_RD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF6_VPD_RD_DWLEN (1) +union pcie_host_tl_pf6_vpd_rd_u { + struct pcie_host_tl_pf6_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed 
info; + u32 data[NBL_PCIE_HOST_TL_PF6_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF6_VPD_R_ADDR (0x150626c) +#define NBL_PCIE_HOST_TL_PF6_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF6_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF6_VPD_R_DWLEN (1) +union pcie_host_tl_pf6_vpd_r_u { + struct pcie_host_tl_pf6_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF6_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF7_VPD_ADDR (0x1506270) +#define NBL_PCIE_HOST_TL_PF7_VPD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF7_VPD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF7_VPD_DWLEN (1) +union pcie_host_tl_pf7_vpd_u { + struct pcie_host_tl_pf7_vpd { + u32 addr:15; /* [14:00] Default:0x0 RO */ + u32 flag:1; /* [15] Default:0x0 RO */ + u32 enable:1; /* [16] Default:0x0 RO */ + u32 status:4; /* [20:17] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF7_VPD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF7_VPD_WR_ADDR (0x1506274) +#define NBL_PCIE_HOST_TL_PF7_VPD_WR_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF7_VPD_WR_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF7_VPD_WR_DWLEN (1) +union pcie_host_tl_pf7_vpd_wr_u { + struct pcie_host_tl_pf7_vpd_wr { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF7_VPD_WR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF7_VPD_RD_ADDR (0x1506278) +#define NBL_PCIE_HOST_TL_PF7_VPD_RD_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF7_VPD_RD_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF7_VPD_RD_DWLEN (1) +union pcie_host_tl_pf7_vpd_rd_u { + struct pcie_host_tl_pf7_vpd_rd { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF7_VPD_RD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_PF7_VPD_R_ADDR (0x150627c) +#define NBL_PCIE_HOST_TL_PF7_VPD_R_DEPTH (1) +#define NBL_PCIE_HOST_TL_PF7_VPD_R_WIDTH (32) +#define NBL_PCIE_HOST_TL_PF7_VPD_R_DWLEN (1) +union pcie_host_tl_pf7_vpd_r_u { + struct pcie_host_tl_pf7_vpd_r { + u32 ready:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_PF7_VPD_R_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_GEN_SPEED_ADDR (0x1507000) +#define NBL_PCIE_HOST_K_GEN_SPEED_DEPTH (1) +#define NBL_PCIE_HOST_K_GEN_SPEED_WIDTH (32) +#define NBL_PCIE_HOST_K_GEN_SPEED_DWLEN (1) +union pcie_host_k_gen_speed_u { + struct pcie_host_k_gen_speed { + u32 dbg:4; /* [03:00] Default:0xF RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_GEN_SPEED_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_GEN_LANE_REVERSAL_ADDR (0x1507004) +#define NBL_PCIE_HOST_K_GEN_LANE_REVERSAL_DEPTH (1) +#define NBL_PCIE_HOST_K_GEN_LANE_REVERSAL_WIDTH (32) +#define NBL_PCIE_HOST_K_GEN_LANE_REVERSAL_DWLEN (1) +union pcie_host_k_gen_lane_reversal_u { + struct pcie_host_k_gen_lane_reversal { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_GEN_LANE_REVERSAL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_GEN_EQU_PHA23_ADDR (0x1507008) +#define NBL_PCIE_HOST_K_GEN_EQU_PHA23_DEPTH (1) +#define NBL_PCIE_HOST_K_GEN_EQU_PHA23_WIDTH (32) +#define NBL_PCIE_HOST_K_GEN_EQU_PHA23_DWLEN (1) +union pcie_host_k_gen_equ_pha23_u { + struct pcie_host_k_gen_equ_pha23 { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_PCIE_HOST_K_GEN_EQU_PHA23_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_GEN_LANE_DISABLE_ADDR (0x150700c) +#define NBL_PCIE_HOST_K_GEN_LANE_DISABLE_DEPTH (1) +#define NBL_PCIE_HOST_K_GEN_LANE_DISABLE_WIDTH (32) +#define NBL_PCIE_HOST_K_GEN_LANE_DISABLE_DWLEN (1) +union pcie_host_k_gen_lane_disable_u { + struct pcie_host_k_gen_lane_disable { + u32 dbg:4; /* [03:00] Default:0x0 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_GEN_LANE_DISABLE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_GEN_USE_RXELECILDE_ADDR (0x1507010) +#define NBL_PCIE_HOST_K_GEN_USE_RXELECILDE_DEPTH (1) +#define NBL_PCIE_HOST_K_GEN_USE_RXELECILDE_WIDTH (32) +#define NBL_PCIE_HOST_K_GEN_USE_RXELECILDE_DWLEN (1) +union pcie_host_k_gen_use_rxelecilde_u { + struct pcie_host_k_gen_use_rxelecilde { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_GEN_USE_RXELECILDE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_GEN_ERR_MODE_ADDR (0x1507014) +#define NBL_PCIE_HOST_K_GEN_ERR_MODE_DEPTH (1) +#define NBL_PCIE_HOST_K_GEN_ERR_MODE_WIDTH (32) +#define NBL_PCIE_HOST_K_GEN_ERR_MODE_DWLEN (1) +union pcie_host_k_gen_err_mode_u { + struct pcie_host_k_gen_err_mode { + u32 dbg:2; /* [01:00] Default:0x1 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_GEN_ERR_MODE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_GEN_RX_VLD_FILTER_EN_ADDR (0x1507018) +#define NBL_PCIE_HOST_K_GEN_RX_VLD_FILTER_EN_DEPTH (1) +#define NBL_PCIE_HOST_K_GEN_RX_VLD_FILTER_EN_WIDTH (32) +#define NBL_PCIE_HOST_K_GEN_RX_VLD_FILTER_EN_DWLEN (1) +union pcie_host_k_gen_rx_vld_filter_en_u { + struct pcie_host_k_gen_rx_vld_filter_en { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_GEN_RX_VLD_FILTER_EN_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_10B_TAG_REQUESTER_SUPPORT_ADDR (0x150701c) +#define NBL_PCIE_HOST_K_10B_TAG_REQUESTER_SUPPORT_DEPTH (1) +#define NBL_PCIE_HOST_K_10B_TAG_REQUESTER_SUPPORT_WIDTH (32) +#define NBL_PCIE_HOST_K_10B_TAG_REQUESTER_SUPPORT_DWLEN (1) +union pcie_host_k_10b_tag_requester_support_u { + struct pcie_host_k_10b_tag_requester_support { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_10B_TAG_REQUESTER_SUPPORT_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_10B_TAG_COMPLETER_SUPPORT_ADDR (0x1507020) +#define NBL_PCIE_HOST_K_10B_TAG_COMPLETER_SUPPORT_DEPTH (1) +#define NBL_PCIE_HOST_K_10B_TAG_COMPLETER_SUPPORT_WIDTH (32) +#define NBL_PCIE_HOST_K_10B_TAG_COMPLETER_SUPPORT_DWLEN (1) +union pcie_host_k_10b_tag_completer_support_u { + struct pcie_host_k_10b_tag_completer_support { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_10B_TAG_COMPLETER_SUPPORT_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_VF_10B_TAG_REQ_SUPPORT_ADDR (0x1507024) +#define NBL_PCIE_HOST_K_VF_10B_TAG_REQ_SUPPORT_DEPTH (1) +#define NBL_PCIE_HOST_K_VF_10B_TAG_REQ_SUPPORT_WIDTH (32) +#define NBL_PCIE_HOST_K_VF_10B_TAG_REQ_SUPPORT_DWLEN (1) +union pcie_host_k_vf_10b_tag_req_support_u { + struct pcie_host_k_vf_10b_tag_req_support { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_VF_10B_TAG_REQ_SUPPORT_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PCI_PM_ADDR 
(0x1507028) +#define NBL_PCIE_HOST_K_PCI_PM_DEPTH (1) +#define NBL_PCIE_HOST_K_PCI_PM_WIDTH (32) +#define NBL_PCIE_HOST_K_PCI_PM_DWLEN (1) +union pcie_host_k_pci_pm_u { + struct pcie_host_k_pci_pm { + u32 dbg:10; /* [09:00] Default:0x320 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PCI_PM_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PEX_EP_ACCEPT_LATENCY_ADDR (0x150702c) +#define NBL_PCIE_HOST_K_PEX_EP_ACCEPT_LATENCY_DEPTH (1) +#define NBL_PCIE_HOST_K_PEX_EP_ACCEPT_LATENCY_WIDTH (32) +#define NBL_PCIE_HOST_K_PEX_EP_ACCEPT_LATENCY_DWLEN (1) +union pcie_host_k_pex_ep_accept_latency_u { + struct pcie_host_k_pex_ep_accept_latency { + u32 dbg:6; /* [05:00] Default:0x33 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PEX_EP_ACCEPT_LATENCY_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PEX_COMPTO_ADDR (0x1507030) +#define NBL_PCIE_HOST_K_PEX_COMPTO_DEPTH (1) +#define NBL_PCIE_HOST_K_PEX_COMPTO_WIDTH (32) +#define NBL_PCIE_HOST_K_PEX_COMPTO_DWLEN (1) +union pcie_host_k_pex_compto_u { + struct pcie_host_k_pex_compto { + u32 dbg:5; /* [04:00] Default:0x13 RW */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PEX_COMPTO_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PEX_LINK_CAP_ADDR (0x1507034) +#define NBL_PCIE_HOST_K_PEX_LINK_CAP_DEPTH (1) +#define NBL_PCIE_HOST_K_PEX_LINK_CAP_WIDTH (32) +#define NBL_PCIE_HOST_K_PEX_LINK_CAP_DWLEN (1) +union pcie_host_k_pex_link_cap_u { + struct pcie_host_k_pex_link_cap { + u32 dbg:11; /* [10:00] Default:0x57 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PEX_LINK_CAP_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PEX_EC_VECTOR_SIZE_ADDR (0x1507038) +#define NBL_PCIE_HOST_K_PEX_EC_VECTOR_SIZE_DEPTH (1) +#define NBL_PCIE_HOST_K_PEX_EC_VECTOR_SIZE_WIDTH (32) +#define NBL_PCIE_HOST_K_PEX_EC_VECTOR_SIZE_DWLEN (1) +union pcie_host_k_pex_ec_vector_size_u { + struct pcie_host_k_pex_ec_vector_size { + u32 dbg:7; /* [06:00] Default:0x3f RW */ + u32 rsv:25; /* [31:07] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PEX_EC_VECTOR_SIZE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PEX_ASPM_ENTRY_DELAY_ADDR (0x150703c) +#define NBL_PCIE_HOST_K_PEX_ASPM_ENTRY_DELAY_DEPTH (1) +#define NBL_PCIE_HOST_K_PEX_ASPM_ENTRY_DELAY_WIDTH (32) +#define NBL_PCIE_HOST_K_PEX_ASPM_ENTRY_DELAY_DWLEN (1) +union pcie_host_k_pex_aspm_entry_delay_u { + struct pcie_host_k_pex_aspm_entry_delay { + u32 dbg:15; /* [14:00] Default:0x190c RW */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PEX_ASPM_ENTRY_DELAY_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PEX_FTS_NUM_ADDR (0x1507040) +#define NBL_PCIE_HOST_K_PEX_FTS_NUM_DEPTH (1) +#define NBL_PCIE_HOST_K_PEX_FTS_NUM_WIDTH (32) +#define NBL_PCIE_HOST_K_PEX_FTS_NUM_DWLEN (1) +union pcie_host_k_pex_fts_num_u { + struct pcie_host_k_pex_fts_num { + u32 dbg:32; /* [31:0] Default:0x3c3cd2d2 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PEX_FTS_NUM_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_PEX_L1_PM_ADDR (0x1507044) +#define NBL_PCIE_HOST_K_PEX_L1_PM_DEPTH (1) +#define NBL_PCIE_HOST_K_PEX_L1_PM_WIDTH (32) +#define NBL_PCIE_HOST_K_PEX_L1_PM_DWLEN (1) +union pcie_host_k_pex_l1_pm_u { + struct pcie_host_k_pex_l1_pm { + u32 dbg:28; /* [27:00] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_PEX_L1_PM_DWLEN]; +} __packed; + +#define 
NBL_PCIE_HOST_K_EQUPRESET_USER1_ADDR (0x1507048) +#define NBL_PCIE_HOST_K_EQUPRESET_USER1_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER1_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER1_DWLEN (1) +union pcie_host_k_equpreset_user1_u { + struct pcie_host_k_equpreset_user1 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER1_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET_USER2_ADDR (0x150704c) +#define NBL_PCIE_HOST_K_EQUPRESET_USER2_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER2_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER2_DWLEN (1) +union pcie_host_k_equpreset_user2_u { + struct pcie_host_k_equpreset_user2 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER2_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET_USER3_ADDR (0x1507050) +#define NBL_PCIE_HOST_K_EQUPRESET_USER3_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER3_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER3_DWLEN (1) +union pcie_host_k_equpreset_user3_u { + struct pcie_host_k_equpreset_user3 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER3_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET_USER4_ADDR (0x1507054) +#define NBL_PCIE_HOST_K_EQUPRESET_USER4_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER4_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER4_DWLEN (1) +union pcie_host_k_equpreset_user4_u { + struct pcie_host_k_equpreset_user4 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER4_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET_USER5_ADDR (0x1507058) +#define NBL_PCIE_HOST_K_EQUPRESET_USER5_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER5_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER5_DWLEN (1) +union pcie_host_k_equpreset_user5_u { + struct pcie_host_k_equpreset_user5 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER5_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET_USER6_ADDR (0x150705c) +#define NBL_PCIE_HOST_K_EQUPRESET_USER6_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER6_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER6_DWLEN (1) +union pcie_host_k_equpreset_user6_u { + struct pcie_host_k_equpreset_user6 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER6_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET_USER7_ADDR (0x1507060) +#define NBL_PCIE_HOST_K_EQUPRESET_USER7_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER7_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER7_DWLEN (1) +union pcie_host_k_equpreset_user7_u { + struct pcie_host_k_equpreset_user7 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER7_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET_USER8_ADDR (0x1507064) +#define NBL_PCIE_HOST_K_EQUPRESET_USER8_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET_USER8_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET_USER8_DWLEN (1) +union pcie_host_k_equpreset_user8_u { + struct pcie_host_k_equpreset_user8 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET_USER8_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET16_USER1_ADDR (0x1507068) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER1_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER1_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER1_DWLEN (1) +union 
pcie_host_k_equpreset16_user1_u { + struct pcie_host_k_equpreset16_user1 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET16_USER1_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET16_USER2_ADDR (0x150706c) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER2_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER2_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER2_DWLEN (1) +union pcie_host_k_equpreset16_user2_u { + struct pcie_host_k_equpreset16_user2 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET16_USER2_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET16_USER3_ADDR (0x1507070) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER3_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER3_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER3_DWLEN (1) +union pcie_host_k_equpreset16_user3_u { + struct pcie_host_k_equpreset16_user3 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET16_USER3_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_EQUPRESET16_USER4_ADDR (0x1507074) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER4_DEPTH (1) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER4_WIDTH (32) +#define NBL_PCIE_HOST_K_EQUPRESET16_USER4_DWLEN (1) +union pcie_host_k_equpreset16_user4_u { + struct pcie_host_k_equpreset16_user4 { + u32 dbg:32; /* [31:0] Default:0xf4f4f4f4 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_EQUPRESET16_USER4_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER1_ADDR (0x1507078) +#define NBL_PCIE_HOST_K_TIMER_USER1_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER1_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER1_DWLEN (1) +union pcie_host_k_timer_user1_u { + struct pcie_host_k_timer_user1 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER1_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER2_ADDR (0x150707c) +#define NBL_PCIE_HOST_K_TIMER_USER2_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER2_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER2_DWLEN (1) +union pcie_host_k_timer_user2_u { + struct pcie_host_k_timer_user2 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER2_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER3_ADDR (0x1507080) +#define NBL_PCIE_HOST_K_TIMER_USER3_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER3_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER3_DWLEN (1) +union pcie_host_k_timer_user3_u { + struct pcie_host_k_timer_user3 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER3_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER4_ADDR (0x1507084) +#define NBL_PCIE_HOST_K_TIMER_USER4_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER4_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER4_DWLEN (1) +union pcie_host_k_timer_user4_u { + struct pcie_host_k_timer_user4 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER4_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER5_ADDR (0x1507088) +#define NBL_PCIE_HOST_K_TIMER_USER5_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER5_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER5_DWLEN (1) +union pcie_host_k_timer_user5_u { + struct pcie_host_k_timer_user5 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER5_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER6_ADDR (0x150708c) +#define 
NBL_PCIE_HOST_K_TIMER_USER6_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER6_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER6_DWLEN (1) +union pcie_host_k_timer_user6_u { + struct pcie_host_k_timer_user6 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER6_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER7_ADDR (0x1507090) +#define NBL_PCIE_HOST_K_TIMER_USER7_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER7_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER7_DWLEN (1) +union pcie_host_k_timer_user7_u { + struct pcie_host_k_timer_user7 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER7_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER8_ADDR (0x1507094) +#define NBL_PCIE_HOST_K_TIMER_USER8_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER8_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER8_DWLEN (1) +union pcie_host_k_timer_user8_u { + struct pcie_host_k_timer_user8 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER8_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER9_ADDR (0x1507098) +#define NBL_PCIE_HOST_K_TIMER_USER9_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER9_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER9_DWLEN (1) +union pcie_host_k_timer_user9_u { + struct pcie_host_k_timer_user9 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER9_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER10_ADDR (0x150709c) +#define NBL_PCIE_HOST_K_TIMER_USER10_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER10_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER10_DWLEN (1) +union pcie_host_k_timer_user10_u { + struct pcie_host_k_timer_user10 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER10_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER11_ADDR (0x15070a0) +#define NBL_PCIE_HOST_K_TIMER_USER11_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER11_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER11_DWLEN (1) +union pcie_host_k_timer_user11_u { + struct pcie_host_k_timer_user11 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER11_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER12_ADDR (0x15070a4) +#define NBL_PCIE_HOST_K_TIMER_USER12_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER12_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER12_DWLEN (1) +union pcie_host_k_timer_user12_u { + struct pcie_host_k_timer_user12 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER12_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER13_ADDR (0x15070a8) +#define NBL_PCIE_HOST_K_TIMER_USER13_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER13_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER13_DWLEN (1) +union pcie_host_k_timer_user13_u { + struct pcie_host_k_timer_user13 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER13_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER14_ADDR (0x15070ac) +#define NBL_PCIE_HOST_K_TIMER_USER14_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER14_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER14_DWLEN (1) +union pcie_host_k_timer_user14_u { + struct pcie_host_k_timer_user14 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER14_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER15_ADDR (0x15070b0) +#define 
NBL_PCIE_HOST_K_TIMER_USER15_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER15_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER15_DWLEN (1) +union pcie_host_k_timer_user15_u { + struct pcie_host_k_timer_user15 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER15_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER16_ADDR (0x15070b4) +#define NBL_PCIE_HOST_K_TIMER_USER16_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER16_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER16_DWLEN (1) +union pcie_host_k_timer_user16_u { + struct pcie_host_k_timer_user16 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER16_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER17_ADDR (0x15070b8) +#define NBL_PCIE_HOST_K_TIMER_USER17_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER17_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER17_DWLEN (1) +union pcie_host_k_timer_user17_u { + struct pcie_host_k_timer_user17 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER17_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER18_ADDR (0x15070bc) +#define NBL_PCIE_HOST_K_TIMER_USER18_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER18_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER18_DWLEN (1) +union pcie_host_k_timer_user18_u { + struct pcie_host_k_timer_user18 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER18_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER19_ADDR (0x15070c0) +#define NBL_PCIE_HOST_K_TIMER_USER19_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER19_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER19_DWLEN (1) +union pcie_host_k_timer_user19_u { + struct pcie_host_k_timer_user19 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER19_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER20_ADDR (0x15070c4) +#define NBL_PCIE_HOST_K_TIMER_USER20_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER20_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER20_DWLEN (1) +union pcie_host_k_timer_user20_u { + struct pcie_host_k_timer_user20 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER20_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER21_ADDR (0x15070c8) +#define NBL_PCIE_HOST_K_TIMER_USER21_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER21_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER21_DWLEN (1) +union pcie_host_k_timer_user21_u { + struct pcie_host_k_timer_user21 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER21_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER22_ADDR (0x15070cc) +#define NBL_PCIE_HOST_K_TIMER_USER22_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER22_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER22_DWLEN (1) +union pcie_host_k_timer_user22_u { + struct pcie_host_k_timer_user22 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER22_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER23_ADDR (0x15070d0) +#define NBL_PCIE_HOST_K_TIMER_USER23_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER23_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER23_DWLEN (1) +union pcie_host_k_timer_user23_u { + struct pcie_host_k_timer_user23 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER23_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_K_TIMER_USER24_ADDR (0x15070d4) 
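+/*
+ * Layout convention for the generated blocks in this file: each register
+ * is described by an _ADDR/_DEPTH/_WIDTH/_DWLEN macro quartet plus a
+ * union that overlays a named bitfield view (info) on the raw dword
+ * array (data). Registers with _DEPTH greater than 1 also get a _REG(r)
+ * helper returning the address of entry r, stepping _DWLEN * 4 bytes per
+ * entry (see NBL_PCIE_HOST_TEST_OUT_TL_REG() below).
+ */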
+#define NBL_PCIE_HOST_K_TIMER_USER24_DEPTH (1) +#define NBL_PCIE_HOST_K_TIMER_USER24_WIDTH (32) +#define NBL_PCIE_HOST_K_TIMER_USER24_DWLEN (1) +union pcie_host_k_timer_user24_u { + struct pcie_host_k_timer_user24 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_K_TIMER_USER24_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TEST_OUT_CLR_ADDR (0x1508000) +#define NBL_PCIE_HOST_TEST_OUT_CLR_DEPTH (1) +#define NBL_PCIE_HOST_TEST_OUT_CLR_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_CLR_DWLEN (1) +union pcie_host_test_out_clr_u { + struct pcie_host_test_out_clr { + u32 dbg:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_CLR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_ERROR_ADDR (0x1514000) +#define NBL_PCIE_HOST_ERROR_DEPTH (1) +#define NBL_PCIE_HOST_ERROR_WIDTH (32) +#define NBL_PCIE_HOST_ERROR_DWLEN (1) +union pcie_host_error_u { + struct pcie_host_error { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_ERROR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_WARNING_ADDR (0x1514004) +#define NBL_PCIE_HOST_WARNING_DEPTH (1) +#define NBL_PCIE_HOST_WARNING_WIDTH (32) +#define NBL_PCIE_HOST_WARNING_DWLEN (1) +union pcie_host_warning_u { + struct pcie_host_warning { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_WARNING_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_LAST_ERR_ADDR (0x1514090) +#define NBL_PCIE_HOST_LAST_ERR_DEPTH (1) +#define NBL_PCIE_HOST_LAST_ERR_WIDTH (32) +#define NBL_PCIE_HOST_LAST_ERR_DWLEN (1) +union pcie_host_last_err_u { + struct pcie_host_last_err { + u32 index:18; /* [17:0] Default:0x0 RO */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_LAST_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_BRSW_IN_REG_ADDR (0x1515000) +#define NBL_PCIE_HOST_TL_BRSW_IN_REG_DEPTH (1) +#define NBL_PCIE_HOST_TL_BRSW_IN_REG_WIDTH (32) +#define NBL_PCIE_HOST_TL_BRSW_IN_REG_DWLEN (1) +union pcie_host_tl_brsw_in_reg_u { + struct pcie_host_tl_brsw_in_reg { + u32 dbg:8; /* [07:00] Default:0x0 RW */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_BRSW_IN_REG_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_BRSW_OUT_REG_ADDR (0x1515004) +#define NBL_PCIE_HOST_TL_BRSW_OUT_REG_DEPTH (1) +#define NBL_PCIE_HOST_TL_BRSW_OUT_REG_WIDTH (32) +#define NBL_PCIE_HOST_TL_BRSW_OUT_REG_DWLEN (1) +union pcie_host_tl_brsw_out_reg_u { + struct pcie_host_tl_brsw_out_reg { + u32 dbg:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_BRSW_OUT_REG_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_REPORT_EVENT_REG_ADDR (0x1515008) +#define NBL_PCIE_HOST_TL_REPORT_EVENT_REG_DEPTH (1) +#define NBL_PCIE_HOST_TL_REPORT_EVENT_REG_WIDTH (32) +#define NBL_PCIE_HOST_TL_REPORT_EVENT_REG_DWLEN (1) +union pcie_host_tl_report_event_reg_u { + struct pcie_host_tl_report_event_reg { + u32 dbg:8; /* [07:00] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_REPORT_EVENT_REG_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_INT_PINSTATE_REG_ADDR (0x151500c) +#define NBL_PCIE_HOST_TL_INT_PINSTATE_REG_DEPTH (1) +#define NBL_PCIE_HOST_TL_INT_PINSTATE_REG_WIDTH (32) +#define NBL_PCIE_HOST_TL_INT_PINSTATE_REG_DWLEN (1) +union pcie_host_tl_int_pinstate_reg_u { + struct pcie_host_tl_int_pinstate_reg { + u32 dbg:4; /* [03:00] Default:0x0 RO */ + u32 
rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_INT_PINSTATE_REG_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TEST_IN_ADDR (0x1515028) +#define NBL_PCIE_HOST_TEST_IN_DEPTH (1) +#define NBL_PCIE_HOST_TEST_IN_WIDTH (64) +#define NBL_PCIE_HOST_TEST_IN_DWLEN (2) +union pcie_host_test_in_u { + struct pcie_host_test_in { + u32 dbg_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_IN_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TEST_IN_ERRINJ_ADDR (0x1515030) +#define NBL_PCIE_HOST_TEST_IN_ERRINJ_DEPTH (1) +#define NBL_PCIE_HOST_TEST_IN_ERRINJ_WIDTH (32) +#define NBL_PCIE_HOST_TEST_IN_ERRINJ_DWLEN (1) +union pcie_host_test_in_errinj_u { + struct pcie_host_test_in_errinj { + u32 dbg:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_IN_ERRINJ_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PL_LTSSM_ENABLE_ADDR (0x1515034) +#define NBL_PCIE_HOST_PL_LTSSM_ENABLE_DEPTH (1) +#define NBL_PCIE_HOST_PL_LTSSM_ENABLE_WIDTH (32) +#define NBL_PCIE_HOST_PL_LTSSM_ENABLE_DWLEN (1) +union pcie_host_pl_ltssm_enable_u { + struct pcie_host_pl_ltssm_enable { + u32 dbg:1; /* [00:00] Default:0x1 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PL_LTSSM_ENABLE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PL_EQU_PHASE_ADDR (0x151503c) +#define NBL_PCIE_HOST_PL_EQU_PHASE_DEPTH (1) +#define NBL_PCIE_HOST_PL_EQU_PHASE_WIDTH (32) +#define NBL_PCIE_HOST_PL_EQU_PHASE_DWLEN (1) +union pcie_host_pl_equ_phase_u { + struct pcie_host_pl_equ_phase { + u32 dbg:2; /* [01:00] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PL_EQU_PHASE_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_CFG_BUSDEV_ADDR (0x1515040) +#define NBL_PCIE_HOST_TL_CFG_BUSDEV_DEPTH (1) +#define NBL_PCIE_HOST_TL_CFG_BUSDEV_WIDTH (32) +#define NBL_PCIE_HOST_TL_CFG_BUSDEV_DWLEN (1) +union pcie_host_tl_cfg_busdev_u { + struct pcie_host_tl_cfg_busdev { + u32 dbg:13; /* [12:00] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_CFG_BUSDEV_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TL_REPORT_TIMER_ADDR (0x1515044) +#define NBL_PCIE_HOST_TL_REPORT_TIMER_DEPTH (1) +#define NBL_PCIE_HOST_TL_REPORT_TIMER_WIDTH (32) +#define NBL_PCIE_HOST_TL_REPORT_TIMER_DWLEN (1) +union pcie_host_tl_report_timer_u { + struct pcie_host_tl_report_timer { + u32 dbg:4; /* [03:00] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TL_REPORT_TIMER_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TEST_OUT_TL_ADDR (0x1515048) +#define NBL_PCIE_HOST_TEST_OUT_TL_DEPTH (8) +#define NBL_PCIE_HOST_TEST_OUT_TL_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_TL_DWLEN (1) +union pcie_host_test_out_tl_u { + struct pcie_host_test_out_tl { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_TL_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_TL_REG(r) (NBL_PCIE_HOST_TEST_OUT_TL_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_TL_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TEST_OUT_PL_ADDR (0x1515068) +#define NBL_PCIE_HOST_TEST_OUT_PL_DEPTH (8) +#define NBL_PCIE_HOST_TEST_OUT_PL_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_PL_DWLEN (1) +union pcie_host_test_out_pl_u { + struct pcie_host_test_out_pl { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_PL_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_PL_REG(r) 
(NBL_PCIE_HOST_TEST_OUT_PL_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_PL_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TEST_OUT_EQU_ADDR (0x1515088) +#define NBL_PCIE_HOST_TEST_OUT_EQU_DEPTH (64) +#define NBL_PCIE_HOST_TEST_OUT_EQU_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_EQU_DWLEN (1) +union pcie_host_test_out_equ_u { + struct pcie_host_test_out_equ { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_EQU_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_EQU_REG(r) (NBL_PCIE_HOST_TEST_OUT_EQU_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_EQU_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TEST_OUT_PERF_ADDR (0x1515188) +#define NBL_PCIE_HOST_TEST_OUT_PERF_DEPTH (4) +#define NBL_PCIE_HOST_TEST_OUT_PERF_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_PERF_DWLEN (1) +union pcie_host_test_out_perf_u { + struct pcie_host_test_out_perf { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_PERF_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_PERF_REG(r) (NBL_PCIE_HOST_TEST_OUT_PERF_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_PERF_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TEST_OUT_RXVAL_ADDR (0x1515198) +#define NBL_PCIE_HOST_TEST_OUT_RXVAL_DEPTH (1) +#define NBL_PCIE_HOST_TEST_OUT_RXVAL_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_RXVAL_DWLEN (1) +union pcie_host_test_out_rxval_u { + struct pcie_host_test_out_rxval { + u32 dbg:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_RXVAL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TEST_OUT_RXDATA_ADDR (0x151519c) +#define NBL_PCIE_HOST_TEST_OUT_RXDATA_DEPTH (16) +#define NBL_PCIE_HOST_TEST_OUT_RXDATA_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_RXDATA_DWLEN (1) +union pcie_host_test_out_rxdata_u { + struct pcie_host_test_out_rxdata { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_RXDATA_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_RXDATA_REG(r) (NBL_PCIE_HOST_TEST_OUT_RXDATA_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_RXDATA_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TEST_OUT_RXDATAK_ADDR (0x15151dc) +#define NBL_PCIE_HOST_TEST_OUT_RXDATAK_DEPTH (2) +#define NBL_PCIE_HOST_TEST_OUT_RXDATAK_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_RXDATAK_DWLEN (1) +union pcie_host_test_out_rxdatak_u { + struct pcie_host_test_out_rxdatak { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_RXDATAK_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_RXDATAK_REG(r) (NBL_PCIE_HOST_TEST_OUT_RXDATAK_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_RXDATAK_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TEST_OUT_TXVAL_ADDR (0x15151e4) +#define NBL_PCIE_HOST_TEST_OUT_TXVAL_DEPTH (1) +#define NBL_PCIE_HOST_TEST_OUT_TXVAL_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_TXVAL_DWLEN (1) +union pcie_host_test_out_txval_u { + struct pcie_host_test_out_txval { + u32 dbg:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_TXVAL_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_TEST_OUT_TXDATA_ADDR (0x15151e8) +#define NBL_PCIE_HOST_TEST_OUT_TXDATA_DEPTH (16) +#define NBL_PCIE_HOST_TEST_OUT_TXDATA_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_TXDATA_DWLEN (1) +union pcie_host_test_out_txdata_u { + struct pcie_host_test_out_txdata { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_TXDATA_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_TXDATA_REG(r) 
(NBL_PCIE_HOST_TEST_OUT_TXDATA_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_TXDATA_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_TEST_OUT_TXDATAK_ADDR (0x1515228) +#define NBL_PCIE_HOST_TEST_OUT_TXDATAK_DEPTH (2) +#define NBL_PCIE_HOST_TEST_OUT_TXDATAK_WIDTH (32) +#define NBL_PCIE_HOST_TEST_OUT_TXDATAK_DWLEN (1) +union pcie_host_test_out_txdatak_u { + struct pcie_host_test_out_txdatak { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_TEST_OUT_TXDATAK_DWLEN]; +} __packed; +#define NBL_PCIE_HOST_TEST_OUT_TXDATAK_REG(r) (NBL_PCIE_HOST_TEST_OUT_TXDATAK_ADDR + \ + (NBL_PCIE_HOST_TEST_OUT_TXDATAK_DWLEN * 4) * (r)) + +#define NBL_PCIE_HOST_DTEST_O_ADDR (0x1515230) +#define NBL_PCIE_HOST_DTEST_O_DEPTH (1) +#define NBL_PCIE_HOST_DTEST_O_WIDTH (32) +#define NBL_PCIE_HOST_DTEST_O_DWLEN (1) +union pcie_host_dtest_o_u { + struct pcie_host_dtest_o { + u32 dbg:12; /* [11:00] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_DTEST_O_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_DTEST_ADDR (0x1515234) +#define NBL_PCIE_HOST_PMA0_DTEST_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_DTEST_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_DTEST_DWLEN (1) +union pcie_host_pma0_dtest_u { + struct pcie_host_pma0_dtest { + u32 dbg:12; /* [11:00] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_DTEST_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_ERR_ADDR (0x1515238) +#define NBL_PCIE_HOST_PMA0_ERR_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_ERR_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_ERR_DWLEN (1) +union pcie_host_pma0_err_u { + struct pcie_host_pma0_err { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ERR_ADDR (0x151523c) +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ERR_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ERR_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_PRGM_PARITY_ERR_DWLEN (1) +union pcie_host_pma0_prgm_parity_err_u { + struct pcie_host_pma0_prgm_parity_err { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_PRGM_PARITY_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_DTEST_ADDR (0x1515244) +#define NBL_PCIE_HOST_PMA1_DTEST_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_DTEST_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_DTEST_DWLEN (1) +union pcie_host_pma1_dtest_u { + struct pcie_host_pma1_dtest { + u32 dbg:12; /* [11:00] Default:0x0 RO */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_DTEST_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_ERR_ADDR (0x1515248) +#define NBL_PCIE_HOST_PMA1_ERR_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_ERR_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_ERR_DWLEN (1) +union pcie_host_pma1_err_u { + struct pcie_host_pma1_err { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ERR_ADDR (0x151524c) +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ERR_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ERR_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_PRGM_PARITY_ERR_DWLEN (1) +union pcie_host_pma1_prgm_parity_err_u { + struct pcie_host_pma1_prgm_parity_err { + u32 dbg:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_PCIE_HOST_PMA1_PRGM_PARITY_ERR_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA0_PMLD_ADDR (0x151c000) +#define NBL_PCIE_HOST_PMA0_PMLD_DEPTH (1) +#define NBL_PCIE_HOST_PMA0_PMLD_WIDTH (32) +#define NBL_PCIE_HOST_PMA0_PMLD_DWLEN (1) +union pcie_host_pma0_pmld_u { + struct pcie_host_pma0_pmld { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA0_PMLD_DWLEN]; +} __packed; + +#define NBL_PCIE_HOST_PMA1_PMLD_ADDR (0x151c004) +#define NBL_PCIE_HOST_PMA1_PMLD_DEPTH (1) +#define NBL_PCIE_HOST_PMA1_PMLD_WIDTH (32) +#define NBL_PCIE_HOST_PMA1_PMLD_DWLEN (1) +union pcie_host_pma1_pmld_u { + struct pcie_host_pma1_pmld { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCIE_HOST_PMA1_PMLD_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h new file mode 100644 index 0000000000000000000000000000000000000000..ec0e7a309df033204498dc8b19e61cd74593d1e6 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_pcompleter_host.h @@ -0,0 +1,851 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PCOMPLETER_HOST_H +#define NBL_PCOMPLETER_HOST_H 1 + +#include <linux/types.h> + +#define NBL_PCOMPLETER_HOST_BASE (0x00F08000) + +#define NBL_PCOMPLETER_HOST_INT_STATUS_ADDR (0xf08000) +#define NBL_PCOMPLETER_HOST_INT_STATUS_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INT_STATUS_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INT_STATUS_DWLEN (1) +union pcompleter_host_int_status_u { + struct pcompleter_host_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_INT_MASK_ADDR (0xf08004) +#define NBL_PCOMPLETER_HOST_INT_MASK_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INT_MASK_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INT_MASK_DWLEN (1) +union pcompleter_host_int_mask_u { + struct pcompleter_host_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RW */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_INT_SET_ADDR (0xf08008) +#define NBL_PCOMPLETER_HOST_INT_SET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INT_SET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INT_SET_DWLEN (1) +union pcompleter_host_int_set_u { + struct pcompleter_host_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /*
[01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 data_cor_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INT_SET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_INIT_DONE_ADDR (0xf0800c) +#define NBL_PCOMPLETER_HOST_INIT_DONE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_INIT_DONE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_INIT_DONE_DWLEN (1) +union pcompleter_host_init_done_u { + struct pcompleter_host_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_ADDR (0xf08040) +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_ERR_INFO_DWLEN (1) +union pcompleter_host_cif_err_info_u { + struct pcompleter_host_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CAR_CTRL_ADDR (0xf08100) +#define NBL_PCOMPLETER_HOST_CAR_CTRL_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CAR_CTRL_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CAR_CTRL_DWLEN (1) +union pcompleter_host_car_ctrl_u { + struct pcompleter_host_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_ECPU_READY_ADDR (0xf090a0) +#define NBL_PCOMPLETER_HOST_ECPU_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_ECPU_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_ECPU_READY_DWLEN (1) +union pcompleter_host_ecpu_ready_u { + struct pcompleter_host_ecpu_ready { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_ECPU_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_ADDR (0xf090a4) +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_DEPTH (1) +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_WIDTH (32) +#define NBL_PCOMPLETER_HOST_ECPU_STATUS_DWLEN (1) +union pcompleter_host_ecpu_status_u { + struct pcompleter_host_ecpu_status { + u32 dbg:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_ECPU_STATUS_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_ADDR (0xf090a8) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_DWLEN (1) +union pcompleter_host_cfg_heartbeat_times_u { + struct pcompleter_host_cfg_heartbeat_times { + u32 cnt:31; /* [30:0] Default:0x0 RW */ + u32 vld:1; /* [31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_ADDR (0xf090ac) 
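+/*
+ * Illustrative sketch only (not part of the generated map): a register is
+ * typically programmed by filling the bitfield view and writing the raw
+ * dword, e.g. arming the heartbeat counter defined above; "base" here
+ * stands for an assumed ioremapped BAR pointer, not a name from this
+ * driver:
+ *
+ *	union pcompleter_host_cfg_heartbeat_times_u t = { 0 };
+ *
+ *	t.info.cnt = 1000;
+ *	t.info.vld = 1;
+ *	writel(t.data[0], base + NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_TIMES_ADDR);
+ */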
+#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_DWLEN (1) +union pcompleter_host_cfg_heartbeat_enable_u { + struct pcompleter_host_cfg_heartbeat_enable { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_HEARTBEAT_ENABLE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_ADDR (0xf090b0) +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_DEPTH (1) +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_WIDTH (32) +#define NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_DWLEN (1) +union pcompleter_host_heartbeat_times_u { + struct pcompleter_host_heartbeat_times { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_HEARTBEAT_TIMES_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_ADDR (0xf090b4) +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_DEPTH (1) +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_WIDTH (32) +#define NBL_PCOMPLETER_HOST_DATA_BIG_END_DWLEN (1) +union pcompleter_host_data_big_end_u { + struct pcompleter_host_data_big_end { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_DATA_BIG_END_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_ADDR (0xf090b8) +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_DWLEN (1) +union pcompleter_host_msg_addr_size_u { + struct pcompleter_host_msg_addr_size { + u32 dbg:32; /* [31:0] Default:0x100000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_ADDR_SIZE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_ADDR (0xf090bc) +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_DWLEN (1) +union pcompleter_host_cif_axi_base_addr_u { + struct pcompleter_host_cif_axi_base_addr { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_AXI_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_ADDR (0xf090c0) +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_DWLEN (1) +union pcompleter_host_cif_up_base_addr_u { + struct pcompleter_host_cif_up_base_addr { + u32 dbg:32; /* [31:0] Default:0xfac000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_UP_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_ADDR (0xf090c4) +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_DWLEN (1) +union pcompleter_host_msg_axi_base_addr_u { + struct pcompleter_host_msg_axi_base_addr { + u32 dbg:32; /* [31:0] Default:0x60000000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_AXI_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_ADDR (0xf090c8) +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_SWITCH_DWLEN (1) +union pcompleter_host_msg_switch_u { + struct pcompleter_host_msg_switch { + u32 dbg:4; /* [3:0] Default:0x9 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; 
+ u32 data[NBL_PCOMPLETER_HOST_MSG_SWITCH_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_ADDR (0xf090cc) +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_DWLEN (1) +union pcompleter_host_mbx_soft_mode_u { + struct pcompleter_host_mbx_soft_mode { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MBX_SOFT_MODE_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_ADDR (0xf09104) +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_CLR_INT_DWLEN (1) +union pcompleter_host_msg_clr_int_u { + struct pcompleter_host_msg_clr_int { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_CLR_INT_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_AXI_READY_ADDR (0xf09108) +#define NBL_PCOMPLETER_HOST_AXI_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_AXI_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_AXI_READY_DWLEN (1) +union pcompleter_host_axi_ready_u { + struct pcompleter_host_axi_ready { + u32 overtime:32; /* [31:0] Default:0x5fffff RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_AXI_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_ADDR (0xf0910c) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_DWLEN (1) +union pcompleter_host_virtio_table_ready_u { + struct pcompleter_host_virtio_table_ready { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_VIRTIO_TABLE_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_ADDR (0xf09110) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_DEPTH (1) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_WIDTH (32) +#define NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_DWLEN (1) +union pcompleter_host_virtio_table_select_u { + struct pcompleter_host_virtio_table_select { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_VIRTIO_TABLE_SELECT_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_ADDR (0xf09114) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_DWLEN (1) +union pcompleter_host_rdma_table_ready_u { + struct pcompleter_host_rdma_table_ready { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_TABLE_READY_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_ADDR (0xf09118) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_DWLEN (1) +union pcompleter_host_rdma_table_select_u { + struct pcompleter_host_rdma_table_select { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_TABLE_SELECT_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_ADDR (0xf0911c) +#define NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_DEPTH (1) +#define 
NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_DWLEN (1) +union pcompleter_host_cfg_oprom_offset_u { + struct pcompleter_host_cfg_oprom_offset { + u32 addr:32; /* [31:00] Default:0x4000000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_OPROM_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_ADDR (0xf09120) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr0_u { + struct pcompleter_host_cfg_rdma_base_addr0 { + u32 dbg:32; /* [31:00] Default:0x01110000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR0_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_ADDR (0xf09124) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr1_u { + struct pcompleter_host_cfg_rdma_base_addr1 { + u32 dbg:32; /* [31:00] Default:0x00400000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR1_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_ADDR (0xf09128) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr2_u { + struct pcompleter_host_cfg_rdma_base_addr2 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR2_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_ADDR (0xf0912c) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr3_u { + struct pcompleter_host_cfg_rdma_base_addr3 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR3_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_ADDR (0xf09130) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr4_u { + struct pcompleter_host_cfg_rdma_base_addr4 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR4_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_ADDR (0xf09134) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr5_u { + struct pcompleter_host_cfg_rdma_base_addr5 { + u32 dbg:32; /* [31:00] Default:0x011A0000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR5_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_ADDR (0xf09138) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr6_u { + struct pcompleter_host_cfg_rdma_base_addr6 { + u32 
dbg:32; /* [31:00] Default:0x01110000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR6_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_ADDR (0xf0913c) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_DWLEN (1) +union pcompleter_host_cfg_rdma_base_addr7_u { + struct pcompleter_host_cfg_rdma_base_addr7 { + u32 dbg:32; /* [31:00] Default:0x01110000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_RDMA_BASE_ADDR7_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_ADDR (0xf09140) +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_DWLEN (1) +union pcompleter_host_cfg_mailbox_offset_u { + struct pcompleter_host_cfg_mailbox_offset { + u32 addr:32; /* [31:00] Default:0xFB2000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_MAILBOX_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_ADDR (0xf09144) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_DWLEN (1) +union pcompleter_host_cfg_msix_offset_u { + struct pcompleter_host_cfg_msix_offset { + u32 addr:32; /* [31:00] Default:0xF6C000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_MSIX_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_ADDR (0xf09148) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_DWLEN (1) +union pcompleter_host_cfg_msix_invld_offset_u { + struct pcompleter_host_cfg_msix_invld_offset { + u32 addr:32; /* [31:00] Default:0xF4C300 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_MSIX_INVLD_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_ADDR (0xf0914c) +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_DWLEN (1) +union pcompleter_host_cfg_pba_offset_u { + struct pcompleter_host_cfg_pba_offset { + u32 addr:32; /* [31:00] Default:0xF4D000 RW */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_PBA_OFFSET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MSG_PTR_ADDR (0xf09154) +#define NBL_PCOMPLETER_HOST_MSG_PTR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MSG_PTR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MSG_PTR_DWLEN (1) +union pcompleter_host_msg_ptr_u { + struct pcompleter_host_msg_ptr { + u32 dbg:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MSG_PTR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_ADDR (0xf09160) +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_OPROM_DEBUG_DWLEN (1) +union pcompleter_host_oprom_debug_u { + struct pcompleter_host_oprom_debug { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_OPROM_DEBUG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CMDQ_ADDR (0xf09170) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CMDQ_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CMDQ_WIDTH (32) +#define 
NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CMDQ_DWLEN (1) +union pcompleter_host_cfg_function_id_cmdq_u { + struct pcompleter_host_cfg_function_id_cmdq { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CMDQ_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CTRLQ_ADDR (0xf09174) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CTRLQ_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CTRLQ_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CTRLQ_DWLEN (1) +union pcompleter_host_cfg_function_id_ctrlq_u { + struct pcompleter_host_cfg_function_id_ctrlq { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CTRLQ_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_AGED_ADDR (0xf09178) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_AGED_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_AGED_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_AGED_DWLEN (1) +union pcompleter_host_cfg_function_id_msgq_aged_u { + struct pcompleter_host_cfg_function_id_msgq_aged { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_AGED_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_ADDR (0xf0917c) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_DWLEN (1) +union pcompleter_host_cfg_function_id_msgq_u { + struct pcompleter_host_cfg_function_id_msgq { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_MSGQ_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_NET_ADDR (0xf09180) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_NET_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_NET_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_NET_DWLEN (1) +union pcompleter_host_cfg_function_id_vdpa_net_u { + struct pcompleter_host_cfg_function_id_vdpa_net { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_NET_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_BLK_ADDR (0xf09184) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_BLK_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_BLK_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_BLK_DWLEN (1) +union pcompleter_host_cfg_function_id_vdpa_blk_u { + struct pcompleter_host_cfg_function_id_vdpa_blk { + u32 dbg:10; /* [9:0] Default:0x0 RW */ + u32 vld:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_BLK_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_ERROR_ADDR (0xf0a000) +#define NBL_PCOMPLETER_HOST_ERROR_DEPTH (1) +#define NBL_PCOMPLETER_HOST_ERROR_WIDTH (32) +#define NBL_PCOMPLETER_HOST_ERROR_DWLEN (1) +union pcompleter_host_error_u { + struct pcompleter_host_error { + u32 dbg:32; /* [31:0] Default:0x0 RO 
*/ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_ERROR_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_WARNING_ADDR (0xf0a004) +#define NBL_PCOMPLETER_HOST_WARNING_DEPTH (1) +#define NBL_PCOMPLETER_HOST_WARNING_WIDTH (32) +#define NBL_PCOMPLETER_HOST_WARNING_DWLEN (1) +union pcompleter_host_warning_u { + struct pcompleter_host_warning { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_WARNING_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CFG_ADDR (0xf0a048) +#define NBL_PCOMPLETER_HOST_CFG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CFG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CFG_DWLEN (1) +union pcompleter_host_cfg_u { + struct pcompleter_host_cfg { + u32 dmux_fifo_drop_err:1; /* [0] Default:0x0 RC */ + u32 tlp_out_drop_err:1; /* [1] Default:0x0 RC */ + u32 cif_axi_werr:1; /* [2] Default:0x0 RC */ + u32 cif_axi_rerr:1; /* [3] Default:0x0 RC */ + u32 cif_axi_ready_err:1; /* [4] Default:0x0 RC */ + u32 msg_axi_werr:1; /* [5] Default:0x0 RC */ + u32 msg_axi_ready_err:1; /* [6] Default:0x0 RC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CFG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_ADDR (0xf0a204) +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_DWLEN (1) +union pcompleter_host_dmux_debug_info_u { + struct pcompleter_host_dmux_debug_info { + u32 dbg:32; /* [31:0] Default:0x1 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_DMUX_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a208) +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_l1_map_ram_err_info_reg_u { + struct pcompleter_host_l1_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_L1_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a20c) +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_l2_map_ram_err_info_reg_u { + struct pcompleter_host_l2_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_L2_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a210) +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_l3_map_ram_err_info_reg_u { + struct pcompleter_host_l3_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_L3_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a214) +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_tlp_info_ram_err_info_reg_u { + struct pcompleter_host_tlp_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed 
info; + u32 data[NBL_PCOMPLETER_HOST_TLP_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a218) +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_notify_info_ram_err_info_reg_u { + struct pcompleter_host_notify_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_ADDR (0xf0a304) +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_DWLEN (1) +union pcompleter_host_rdma_debug_info_u { + struct pcompleter_host_rdma_debug_info { + u32 dbg:32; /* [31:0] Default:0x1 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a308) +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_l1_map_ram_err_info_reg_u { + struct pcompleter_host_rdma_l1_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_L1_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a30c) +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_l2_map_ram_err_info_reg_u { + struct pcompleter_host_rdma_l2_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_L2_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_ADDR (0xf0a310) +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_l3_map_ram_err_info_reg_u { + struct pcompleter_host_rdma_l3_map_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_L3_MAP_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a314) +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union pcompleter_host_rdma_tlp_info_ram_err_info_reg_u { + struct pcompleter_host_rdma_tlp_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_TLP_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_ADDR (0xf0a318) +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN (1) +union 
pcompleter_host_rdma_notify_info_ram_err_info_reg_u { + struct pcompleter_host_rdma_notify_info_ram_err_info_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_RDMA_NOTIFY_INFO_RAM_ERR_INFO_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_ADDR (0xf0a434) +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_DWLEN (1) +union pcompleter_host_judge_msix_fid_ram_err_info_u { + struct pcompleter_host_judge_msix_fid_ram_err_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_JUDGE_MSIX_FID_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_ADDR (0xf0a500) +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_DWLEN (1) +union pcompleter_host_pmsix_debug_info_u { + struct pcompleter_host_pmsix_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_PMSIX_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_ADDR (0xf0a51c) +#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_DWLEN (1) +union pcompleter_host_pmsix_map_ram_err_info_u { + struct pcompleter_host_pmsix_map_ram_err_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_PMSIX_MAP_RAM_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_ADDR (0xf0a700) +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_DWLEN (1) +union pcompleter_host_mux_debug_info_u { + struct pcompleter_host_mux_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_ADDR (0xf0a718) +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_DWLEN (1) +union pcompleter_host_mux_port_0_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_0_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_0_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_ADDR (0xf0a71c) +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_DWLEN (1) +union pcompleter_host_mux_port_1_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_1_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_1_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_ADDR (0xf0a720) +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_DWLEN (1) +union 
pcompleter_host_mux_port_2_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_2_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_2_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_ADDR (0xf0a724) +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_DEPTH (1) +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_WIDTH (32) +#define NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_DWLEN (1) +union pcompleter_host_mux_port_3_pre_read_debug_reg_u { + struct pcompleter_host_mux_port_3_pre_read_debug_reg { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_MUX_PORT_3_PRE_READ_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_ADDR (0xf0a820) +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_DEPTH (1) +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_WIDTH (32) +#define NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_DWLEN (1) +union pcompleter_host_cif_axir_pre_debug_info_u { + struct pcompleter_host_cif_axir_pre_debug_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_CIF_AXIR_PRE_DEBUG_INFO_DWLEN]; +} __packed; + +#define NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_ADDR (0xf0a918) +#define NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_DEPTH (1) +#define NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_WIDTH (32) +#define NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_DWLEN (1) +union pcompleter_host_emp2pcie_rdy_u { + struct pcompleter_host_emp2pcie_rdy { + u32 rdy:1; /* [0:0] Default:0x1 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PCOMPLETER_HOST_EMP2PCIE_RDY_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ptlp.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ptlp.h new file mode 100644 index 0000000000000000000000000000000000000000..71730dd46bd3a7a3a7c3f65d61211c79f1fa39be --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_ptlp.h @@ -0,0 +1,1104 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PTLP_H +#define NBL_PTLP_H 1 + +#include <linux/types.h> + +#define NBL_PTLP_BASE (0x00F48000) + +#define NBL_PTLP_INT_STATUS_ADDR (0xf48000) +#define NBL_PTLP_INT_STATUS_DEPTH (1) +#define NBL_PTLP_INT_STATUS_WIDTH (32) +#define NBL_PTLP_INT_STATUS_DWLEN (1) +union ptlp_int_status_u { + struct ptlp_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RWC */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PTLP_INT_MASK_ADDR (0xf48004) +#define NBL_PTLP_INT_MASK_DEPTH (1) +#define NBL_PTLP_INT_MASK_WIDTH (32) +#define NBL_PTLP_INT_MASK_DWLEN (1) +union ptlp_int_mask_u { + struct ptlp_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 data_cor_err:1; /* [08:08] Default:0x0 RW */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PTLP_INT_SET_ADDR (0xf48008) +#define NBL_PTLP_INT_SET_DEPTH (1) +#define NBL_PTLP_INT_SET_WIDTH (32) +#define NBL_PTLP_INT_SET_DWLEN (1) +union ptlp_int_set_u { + struct ptlp_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv3:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 data_cor_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_INT_SET_DWLEN]; +} __packed; + +#define NBL_PTLP_INIT_DONE_ADDR (0xf4800c) +#define NBL_PTLP_INIT_DONE_DEPTH (1) +#define NBL_PTLP_INIT_DONE_WIDTH (32) +#define NBL_PTLP_INIT_DONE_DWLEN (1) +union ptlp_init_done_u { + struct ptlp_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PTLP_CIF_ERR_INFO_ADDR (0xf48040) +#define NBL_PTLP_CIF_ERR_INFO_DEPTH (1) +#define NBL_PTLP_CIF_ERR_INFO_WIDTH (32) +#define NBL_PTLP_CIF_ERR_INFO_DWLEN (1) +union ptlp_cif_err_info_u { + struct ptlp_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PTLP_CAR_CTRL_ADDR (0xf48100) +#define NBL_PTLP_CAR_CTRL_DEPTH (1) +#define NBL_PTLP_CAR_CTRL_WIDTH (32) +#define NBL_PTLP_CAR_CTRL_DWLEN (1) +union ptlp_car_ctrl_u { + struct ptlp_car_ctrl { + u32
sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PTLP_WDIF_BDF_ADDR (0xf490e4) +#define NBL_PTLP_WDIF_BDF_DEPTH (1) +#define NBL_PTLP_WDIF_BDF_WIDTH (32) +#define NBL_PTLP_WDIF_BDF_DWLEN (1) +union ptlp_wdif_bdf_u { + struct ptlp_wdif_bdf { + u32 dbg:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_WDIF_BDF_DWLEN]; +} __packed; + +#define NBL_PTLP_AXI_READY_ADDR (0xf490e8) +#define NBL_PTLP_AXI_READY_DEPTH (1) +#define NBL_PTLP_AXI_READY_WIDTH (32) +#define NBL_PTLP_AXI_READY_DWLEN (1) +union ptlp_axi_ready_u { + struct ptlp_axi_ready { + u32 overtime:32; /* [31:0] Default:0x5fffff RW */ + } __packed info; + u32 data[NBL_PTLP_AXI_READY_DWLEN]; +} __packed; + +#define NBL_PTLP_TLP_ECPU_ADDR_SIZE_ADDR (0xf490ec) +#define NBL_PTLP_TLP_ECPU_ADDR_SIZE_DEPTH (1) +#define NBL_PTLP_TLP_ECPU_ADDR_SIZE_WIDTH (32) +#define NBL_PTLP_TLP_ECPU_ADDR_SIZE_DWLEN (1) +union ptlp_tlp_ecpu_addr_size_u { + struct ptlp_tlp_ecpu_addr_size { + u32 dbg:32; /* [31:0] Default:0x100000 RW */ + } __packed info; + u32 data[NBL_PTLP_TLP_ECPU_ADDR_SIZE_DWLEN]; +} __packed; + +#define NBL_PTLP_TLP_EMP_ADDR_SIZE_ADDR (0xf490f0) +#define NBL_PTLP_TLP_EMP_ADDR_SIZE_DEPTH (1) +#define NBL_PTLP_TLP_EMP_ADDR_SIZE_WIDTH (32) +#define NBL_PTLP_TLP_EMP_ADDR_SIZE_DWLEN (1) +union ptlp_tlp_emp_addr_size_u { + struct ptlp_tlp_emp_addr_size { + u32 dbg:32; /* [31:0] Default:0x100000 RW */ + } __packed info; + u32 data[NBL_PTLP_TLP_EMP_ADDR_SIZE_DWLEN]; +} __packed; + +#define NBL_PTLP_AXI_BASE_ADDR_ADDR (0xf490f4) +#define NBL_PTLP_AXI_BASE_ADDR_DEPTH (1) +#define NBL_PTLP_AXI_BASE_ADDR_WIDTH (32) +#define NBL_PTLP_AXI_BASE_ADDR_DWLEN (1) +union ptlp_axi_base_addr_u { + struct ptlp_axi_base_addr { + u32 dbg:32; /* [31:0] Default:0x60000000 RW */ + } __packed info; + u32 data[NBL_PTLP_AXI_BASE_ADDR_DWLEN]; +} __packed; + +#define NBL_PTLP_CLR_INT_ADDR (0xf490fc) +#define NBL_PTLP_CLR_INT_DEPTH (1) +#define NBL_PTLP_CLR_INT_WIDTH (32) +#define NBL_PTLP_CLR_INT_DWLEN (1) +union ptlp_clr_int_u { + struct ptlp_clr_int { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CLR_INT_DWLEN]; +} __packed; + +#define NBL_PTLP_E2H_READY_ADDR (0xf49100) +#define NBL_PTLP_E2H_READY_DEPTH (1) +#define NBL_PTLP_E2H_READY_WIDTH (32) +#define NBL_PTLP_E2H_READY_DWLEN (1) +union ptlp_e2h_ready_u { + struct ptlp_e2h_ready { + u32 dbg:1; /* [0] Default:0x1 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_E2H_READY_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_EN_ADDR (0xf49104) +#define NBL_PTLP_CFG_E_TLP_EN_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_EN_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_EN_DWLEN (1) +union ptlp_cfg_e_tlp_en_u { + struct ptlp_cfg_e_tlp_en { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_EN_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_SEQID_ADDR (0xf49108) +#define NBL_PTLP_CFG_E_TLP_SEQID_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_SEQID_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_SEQID_DWLEN (1) +union ptlp_cfg_e_tlp_seqid_u { + struct ptlp_cfg_e_tlp_seqid { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 
data[NBL_PTLP_CFG_E_TLP_SEQID_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_ADDR_LOW_ADDR (0xf4910c) +#define NBL_PTLP_CFG_E_TLP_ADDR_LOW_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_ADDR_LOW_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_ADDR_LOW_DWLEN (1) +union ptlp_cfg_e_tlp_addr_low_u { + struct ptlp_cfg_e_tlp_addr_low { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_ADDR_LOW_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_ADDR_HIGH_ADDR (0xf49110) +#define NBL_PTLP_CFG_E_TLP_ADDR_HIGH_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_ADDR_HIGH_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_ADDR_HIGH_DWLEN (1) +union ptlp_cfg_e_tlp_addr_high_u { + struct ptlp_cfg_e_tlp_addr_high { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_ADDR_HIGH_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_ID_ADDR_LOW_ADDR (0xf49114) +#define NBL_PTLP_CFG_E_ID_ADDR_LOW_DEPTH (1) +#define NBL_PTLP_CFG_E_ID_ADDR_LOW_WIDTH (32) +#define NBL_PTLP_CFG_E_ID_ADDR_LOW_DWLEN (1) +union ptlp_cfg_e_id_addr_low_u { + struct ptlp_cfg_e_id_addr_low { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_ID_ADDR_LOW_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_ID_ADDR_HIGH_ADDR (0xf49118) +#define NBL_PTLP_CFG_E_ID_ADDR_HIGH_DEPTH (1) +#define NBL_PTLP_CFG_E_ID_ADDR_HIGH_WIDTH (32) +#define NBL_PTLP_CFG_E_ID_ADDR_HIGH_DWLEN (1) +union ptlp_cfg_e_id_addr_high_u { + struct ptlp_cfg_e_id_addr_high { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_ID_ADDR_HIGH_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_NOTIFY_ADDR (0xf4911c) +#define NBL_PTLP_CFG_E_TLP_NOTIFY_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_NOTIFY_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_NOTIFY_DWLEN (1) +union ptlp_cfg_e_tlp_notify_u { + struct ptlp_cfg_e_tlp_notify { + u32 keep:16; /* [15:0] Default:0x0 RW */ + u32 eop:1; /* [16] Default:0x0 RW */ + u32 sop:1; /* [17] Default:0x0 RW */ + u32 len:8; /* [25:18] Default:0x0 RW */ + u32 vld:1; /* [26] Default:0x0 WO */ + u32 rsv:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_NOTIFY_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW0_ADDR (0xf49120) +#define NBL_PTLP_CFG_E_TLP_DATA_DW0_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW0_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW0_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw0_u { + struct ptlp_cfg_e_tlp_data_dw0 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW0_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW1_ADDR (0xf49124) +#define NBL_PTLP_CFG_E_TLP_DATA_DW1_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW1_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW1_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw1_u { + struct ptlp_cfg_e_tlp_data_dw1 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW1_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW2_ADDR (0xf49128) +#define NBL_PTLP_CFG_E_TLP_DATA_DW2_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW2_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW2_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw2_u { + struct ptlp_cfg_e_tlp_data_dw2 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW2_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW3_ADDR (0xf4912c) +#define NBL_PTLP_CFG_E_TLP_DATA_DW3_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW3_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW3_DWLEN (1) +union 
ptlp_cfg_e_tlp_data_dw3_u { + struct ptlp_cfg_e_tlp_data_dw3 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW3_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW4_ADDR (0xf49130) +#define NBL_PTLP_CFG_E_TLP_DATA_DW4_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW4_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW4_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw4_u { + struct ptlp_cfg_e_tlp_data_dw4 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW4_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW5_ADDR (0xf49134) +#define NBL_PTLP_CFG_E_TLP_DATA_DW5_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW5_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW5_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw5_u { + struct ptlp_cfg_e_tlp_data_dw5 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW5_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW6_ADDR (0xf49138) +#define NBL_PTLP_CFG_E_TLP_DATA_DW6_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW6_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW6_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw6_u { + struct ptlp_cfg_e_tlp_data_dw6 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW6_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW7_ADDR (0xf4913c) +#define NBL_PTLP_CFG_E_TLP_DATA_DW7_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW7_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW7_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw7_u { + struct ptlp_cfg_e_tlp_data_dw7 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW7_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW8_ADDR (0xf49140) +#define NBL_PTLP_CFG_E_TLP_DATA_DW8_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW8_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW8_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw8_u { + struct ptlp_cfg_e_tlp_data_dw8 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW8_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW9_ADDR (0xf49144) +#define NBL_PTLP_CFG_E_TLP_DATA_DW9_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW9_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW9_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw9_u { + struct ptlp_cfg_e_tlp_data_dw9 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW9_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW10_ADDR (0xf49148) +#define NBL_PTLP_CFG_E_TLP_DATA_DW10_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW10_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW10_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw10_u { + struct ptlp_cfg_e_tlp_data_dw10 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW10_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW11_ADDR (0xf4914c) +#define NBL_PTLP_CFG_E_TLP_DATA_DW11_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW11_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW11_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw11_u { + struct ptlp_cfg_e_tlp_data_dw11 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW11_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW12_ADDR (0xf49150) +#define NBL_PTLP_CFG_E_TLP_DATA_DW12_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW12_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW12_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw12_u { + struct 
ptlp_cfg_e_tlp_data_dw12 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW12_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW13_ADDR (0xf49154) +#define NBL_PTLP_CFG_E_TLP_DATA_DW13_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW13_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW13_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw13_u { + struct ptlp_cfg_e_tlp_data_dw13 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW13_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW14_ADDR (0xf49158) +#define NBL_PTLP_CFG_E_TLP_DATA_DW14_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW14_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW14_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw14_u { + struct ptlp_cfg_e_tlp_data_dw14 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW14_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP_DATA_DW15_ADDR (0xf4915c) +#define NBL_PTLP_CFG_E_TLP_DATA_DW15_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP_DATA_DW15_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP_DATA_DW15_DWLEN (1) +union ptlp_cfg_e_tlp_data_dw15_u { + struct ptlp_cfg_e_tlp_data_dw15 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP_DATA_DW15_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP2_EN_ADDR (0xf491a4) +#define NBL_PTLP_CFG_E_TLP2_EN_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP2_EN_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP2_EN_DWLEN (1) +union ptlp_cfg_e_tlp2_en_u { + struct ptlp_cfg_e_tlp2_en { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP2_EN_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP2_ADDR_LOW_ADDR (0xf491ac) +#define NBL_PTLP_CFG_E_TLP2_ADDR_LOW_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP2_ADDR_LOW_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP2_ADDR_LOW_DWLEN (1) +union ptlp_cfg_e_tlp2_addr_low_u { + struct ptlp_cfg_e_tlp2_addr_low { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP2_ADDR_LOW_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_TLP2_ADDR_HIGH_ADDR (0xf491b0) +#define NBL_PTLP_CFG_E_TLP2_ADDR_HIGH_DEPTH (1) +#define NBL_PTLP_CFG_E_TLP2_ADDR_HIGH_WIDTH (32) +#define NBL_PTLP_CFG_E_TLP2_ADDR_HIGH_DWLEN (1) +union ptlp_cfg_e_tlp2_addr_high_u { + struct ptlp_cfg_e_tlp2_addr_high { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_TLP2_ADDR_HIGH_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_ID2_ADDR_LOW_ADDR (0xf491b4) +#define NBL_PTLP_CFG_E_ID2_ADDR_LOW_DEPTH (1) +#define NBL_PTLP_CFG_E_ID2_ADDR_LOW_WIDTH (32) +#define NBL_PTLP_CFG_E_ID2_ADDR_LOW_DWLEN (1) +union ptlp_cfg_e_id2_addr_low_u { + struct ptlp_cfg_e_id2_addr_low { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_ID2_ADDR_LOW_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_E_ID2_ADDR_HIGH_ADDR (0xf491b8) +#define NBL_PTLP_CFG_E_ID2_ADDR_HIGH_DEPTH (1) +#define NBL_PTLP_CFG_E_ID2_ADDR_HIGH_WIDTH (32) +#define NBL_PTLP_CFG_E_ID2_ADDR_HIGH_DWLEN (1) +union ptlp_cfg_e_id2_addr_high_u { + struct ptlp_cfg_e_id2_addr_high { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_E_ID2_ADDR_HIGH_DWLEN]; +} __packed; + +#define NBL_PTLP_I2H_READY_ADDR (0xf49200) +#define NBL_PTLP_I2H_READY_DEPTH (1) +#define NBL_PTLP_I2H_READY_WIDTH (32) +#define NBL_PTLP_I2H_READY_DWLEN (1) +union ptlp_i2h_ready_u { + struct ptlp_i2h_ready { + u32 dbg:1; /* [0] Default:0x1 RO 
*/ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_I2H_READY_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_EN_ADDR (0xf49204) +#define NBL_PTLP_CFG_I_TLP_EN_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_EN_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_EN_DWLEN (1) +union ptlp_cfg_i_tlp_en_u { + struct ptlp_cfg_i_tlp_en { + u32 dbg:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_EN_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_SEQID_ADDR (0xf49208) +#define NBL_PTLP_CFG_I_TLP_SEQID_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_SEQID_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_SEQID_DWLEN (1) +union ptlp_cfg_i_tlp_seqid_u { + struct ptlp_cfg_i_tlp_seqid { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_SEQID_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_ADDR_LOW_ADDR (0xf4920c) +#define NBL_PTLP_CFG_I_TLP_ADDR_LOW_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_ADDR_LOW_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_ADDR_LOW_DWLEN (1) +union ptlp_cfg_i_tlp_addr_low_u { + struct ptlp_cfg_i_tlp_addr_low { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_ADDR_LOW_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_ADDR_HIGH_ADDR (0xf49210) +#define NBL_PTLP_CFG_I_TLP_ADDR_HIGH_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_ADDR_HIGH_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_ADDR_HIGH_DWLEN (1) +union ptlp_cfg_i_tlp_addr_high_u { + struct ptlp_cfg_i_tlp_addr_high { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_ADDR_HIGH_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_ID_ADDR_LOW_ADDR (0xf49214) +#define NBL_PTLP_CFG_I_ID_ADDR_LOW_DEPTH (1) +#define NBL_PTLP_CFG_I_ID_ADDR_LOW_WIDTH (32) +#define NBL_PTLP_CFG_I_ID_ADDR_LOW_DWLEN (1) +union ptlp_cfg_i_id_addr_low_u { + struct ptlp_cfg_i_id_addr_low { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_ID_ADDR_LOW_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_ID_ADDR_HIGH_ADDR (0xf49218) +#define NBL_PTLP_CFG_I_ID_ADDR_HIGH_DEPTH (1) +#define NBL_PTLP_CFG_I_ID_ADDR_HIGH_WIDTH (32) +#define NBL_PTLP_CFG_I_ID_ADDR_HIGH_DWLEN (1) +union ptlp_cfg_i_id_addr_high_u { + struct ptlp_cfg_i_id_addr_high { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_ID_ADDR_HIGH_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_NOTIFY_ADDR (0xf4921c) +#define NBL_PTLP_CFG_I_TLP_NOTIFY_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_NOTIFY_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_NOTIFY_DWLEN (1) +union ptlp_cfg_i_tlp_notify_u { + struct ptlp_cfg_i_tlp_notify { + u32 keep:16; /* [15:0] Default:0x0 RW */ + u32 eop:1; /* [16] Default:0x0 RW */ + u32 sop:1; /* [17] Default:0x0 RW */ + u32 len:8; /* [25:18] Default:0x0 RW */ + u32 vld:1; /* [26] Default:0x0 WO */ + u32 rsv:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_NOTIFY_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW0_ADDR (0xf49220) +#define NBL_PTLP_CFG_I_TLP_DATA_DW0_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW0_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW0_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw0_u { + struct ptlp_cfg_i_tlp_data_dw0 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW0_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW1_ADDR (0xf49224) +#define NBL_PTLP_CFG_I_TLP_DATA_DW1_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW1_WIDTH (32) +#define 
NBL_PTLP_CFG_I_TLP_DATA_DW1_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw1_u { + struct ptlp_cfg_i_tlp_data_dw1 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW1_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW2_ADDR (0xf49228) +#define NBL_PTLP_CFG_I_TLP_DATA_DW2_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW2_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW2_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw2_u { + struct ptlp_cfg_i_tlp_data_dw2 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW2_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW3_ADDR (0xf4922c) +#define NBL_PTLP_CFG_I_TLP_DATA_DW3_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW3_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW3_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw3_u { + struct ptlp_cfg_i_tlp_data_dw3 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW3_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW4_ADDR (0xf49230) +#define NBL_PTLP_CFG_I_TLP_DATA_DW4_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW4_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW4_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw4_u { + struct ptlp_cfg_i_tlp_data_dw4 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW4_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW5_ADDR (0xf49234) +#define NBL_PTLP_CFG_I_TLP_DATA_DW5_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW5_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW5_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw5_u { + struct ptlp_cfg_i_tlp_data_dw5 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW5_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW6_ADDR (0xf49238) +#define NBL_PTLP_CFG_I_TLP_DATA_DW6_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW6_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW6_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw6_u { + struct ptlp_cfg_i_tlp_data_dw6 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW6_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW7_ADDR (0xf4923c) +#define NBL_PTLP_CFG_I_TLP_DATA_DW7_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW7_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW7_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw7_u { + struct ptlp_cfg_i_tlp_data_dw7 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW7_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW8_ADDR (0xf49240) +#define NBL_PTLP_CFG_I_TLP_DATA_DW8_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW8_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW8_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw8_u { + struct ptlp_cfg_i_tlp_data_dw8 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW8_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW9_ADDR (0xf49244) +#define NBL_PTLP_CFG_I_TLP_DATA_DW9_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW9_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW9_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw9_u { + struct ptlp_cfg_i_tlp_data_dw9 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW9_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW10_ADDR (0xf49248) +#define NBL_PTLP_CFG_I_TLP_DATA_DW10_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW10_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW10_DWLEN (1) +union 
ptlp_cfg_i_tlp_data_dw10_u { + struct ptlp_cfg_i_tlp_data_dw10 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW10_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW11_ADDR (0xf4924c) +#define NBL_PTLP_CFG_I_TLP_DATA_DW11_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW11_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW11_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw11_u { + struct ptlp_cfg_i_tlp_data_dw11 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW11_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW12_ADDR (0xf49250) +#define NBL_PTLP_CFG_I_TLP_DATA_DW12_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW12_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW12_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw12_u { + struct ptlp_cfg_i_tlp_data_dw12 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW12_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW13_ADDR (0xf49254) +#define NBL_PTLP_CFG_I_TLP_DATA_DW13_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW13_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW13_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw13_u { + struct ptlp_cfg_i_tlp_data_dw13 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW13_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW14_ADDR (0xf49258) +#define NBL_PTLP_CFG_I_TLP_DATA_DW14_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW14_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW14_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw14_u { + struct ptlp_cfg_i_tlp_data_dw14 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW14_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TLP_DATA_DW15_ADDR (0xf4925c) +#define NBL_PTLP_CFG_I_TLP_DATA_DW15_DEPTH (1) +#define NBL_PTLP_CFG_I_TLP_DATA_DW15_WIDTH (32) +#define NBL_PTLP_CFG_I_TLP_DATA_DW15_DWLEN (1) +union ptlp_cfg_i_tlp_data_dw15_u { + struct ptlp_cfg_i_tlp_data_dw15 { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TLP_DATA_DW15_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_PF_FLR_ACK_ADDR (0xf49300) +#define NBL_PTLP_CFG_I_TL_PF_FLR_ACK_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_PF_FLR_ACK_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_PF_FLR_ACK_DWLEN (1) +union ptlp_cfg_i_tl_pf_flr_ack_u { + struct ptlp_cfg_i_tl_pf_flr_ack { + u32 dbg:8; /* [7:0] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_PF_FLR_ACK_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_ACK_ADDR (0xf49304) +#define NBL_PTLP_CFG_I_TL_VF_FLR_ACK_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_ACK_WIDTH (512) +#define NBL_PTLP_CFG_I_TL_VF_FLR_ACK_DWLEN (16) +union ptlp_cfg_i_tl_vf_flr_ack_u { + struct ptlp_cfg_i_tl_vf_flr_ack { + u32 dbg_arr[16]; /* [511:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_ACK_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_PF_FLR_REQ_ADDR (0xf49400) +#define NBL_PTLP_CFG_I_TL_PF_FLR_REQ_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_PF_FLR_REQ_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_PF_FLR_REQ_DWLEN (1) +union ptlp_cfg_i_tl_pf_flr_req_u { + struct ptlp_cfg_i_tl_pf_flr_req { + u32 dbg:8; /* [7:0] Default:0x0 RO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_PF_FLR_REQ_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ0_ADDR (0xf49404) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ0_DEPTH (1) +#define 
NBL_PTLP_CFG_I_TL_VF_FLR_REQ0_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ0_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req0_u { + struct ptlp_cfg_i_tl_vf_flr_req0 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ0_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ1_ADDR (0xf49408) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ1_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ1_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ1_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req1_u { + struct ptlp_cfg_i_tl_vf_flr_req1 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ1_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ2_ADDR (0xf4940c) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ2_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ2_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ2_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req2_u { + struct ptlp_cfg_i_tl_vf_flr_req2 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ2_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ3_ADDR (0xf49410) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ3_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ3_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ3_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req3_u { + struct ptlp_cfg_i_tl_vf_flr_req3 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ3_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ4_ADDR (0xf49414) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ4_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ4_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ4_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req4_u { + struct ptlp_cfg_i_tl_vf_flr_req4 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ4_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ5_ADDR (0xf49418) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ5_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ5_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ5_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req5_u { + struct ptlp_cfg_i_tl_vf_flr_req5 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ5_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ6_ADDR (0xf4941c) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ6_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ6_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ6_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req6_u { + struct ptlp_cfg_i_tl_vf_flr_req6 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ6_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ7_ADDR (0xf49420) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ7_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ7_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ7_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req7_u { + struct ptlp_cfg_i_tl_vf_flr_req7 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ7_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ8_ADDR (0xf49424) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ8_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ8_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ8_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req8_u { + struct ptlp_cfg_i_tl_vf_flr_req8 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ8_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ9_ADDR 
(0xf49428) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ9_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ9_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ9_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req9_u { + struct ptlp_cfg_i_tl_vf_flr_req9 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ9_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ10_ADDR (0xf4942c) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ10_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ10_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ10_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req10_u { + struct ptlp_cfg_i_tl_vf_flr_req10 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ10_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ11_ADDR (0xf49430) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ11_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ11_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ11_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req11_u { + struct ptlp_cfg_i_tl_vf_flr_req11 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ11_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ12_ADDR (0xf49434) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ12_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ12_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ12_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req12_u { + struct ptlp_cfg_i_tl_vf_flr_req12 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ12_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ13_ADDR (0xf49438) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ13_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ13_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ13_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req13_u { + struct ptlp_cfg_i_tl_vf_flr_req13 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ13_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ14_ADDR (0xf4943c) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ14_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ14_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ14_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req14_u { + struct ptlp_cfg_i_tl_vf_flr_req14 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ14_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ15_ADDR (0xf49440) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ15_DEPTH (1) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ15_WIDTH (32) +#define NBL_PTLP_CFG_I_TL_VF_FLR_REQ15_DWLEN (1) +union ptlp_cfg_i_tl_vf_flr_req15_u { + struct ptlp_cfg_i_tl_vf_flr_req15 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_I_TL_VF_FLR_REQ15_DWLEN]; +} __packed; + +#define NBL_PTLP_ERROR_ADDR (0xf4a000) +#define NBL_PTLP_ERROR_DEPTH (1) +#define NBL_PTLP_ERROR_WIDTH (32) +#define NBL_PTLP_ERROR_DWLEN (1) +union ptlp_error_u { + struct ptlp_error { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_ERROR_DWLEN]; +} __packed; + +#define NBL_PTLP_WARNING_ADDR (0xf4a004) +#define NBL_PTLP_WARNING_DEPTH (1) +#define NBL_PTLP_WARNING_WIDTH (32) +#define NBL_PTLP_WARNING_DWLEN (1) +union ptlp_warning_u { + struct ptlp_warning { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_WARNING_DWLEN]; +} __packed; + +#define NBL_PTLP_CFG_ADDR (0xf4a040) +#define NBL_PTLP_CFG_DEPTH (1) +#define NBL_PTLP_CFG_WIDTH (32) +#define NBL_PTLP_CFG_DWLEN (1) 
+union ptlp_cfg_u { + struct ptlp_cfg { + u32 ptlp_axi_werr:1; /* [0] Default:0x0 RC */ + u32 ptlp_axi_rerr:1; /* [1] Default:0x0 RC */ + u32 ptlp_axi_ready_err:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_CFG_DWLEN]; +} __packed; + +#define NBL_PTLP_E_NEXT_SEQ_ID_ADDR (0xf4a12c) +#define NBL_PTLP_E_NEXT_SEQ_ID_DEPTH (1) +#define NBL_PTLP_E_NEXT_SEQ_ID_WIDTH (32) +#define NBL_PTLP_E_NEXT_SEQ_ID_DWLEN (1) +union ptlp_e_next_seq_id_u { + struct ptlp_e_next_seq_id { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_E_NEXT_SEQ_ID_DWLEN]; +} __packed; + +#define NBL_PTLP_E_TLP_ADDR_ADDR (0xf4a130) +#define NBL_PTLP_E_TLP_ADDR_DEPTH (1) +#define NBL_PTLP_E_TLP_ADDR_WIDTH (32) +#define NBL_PTLP_E_TLP_ADDR_DWLEN (1) +union ptlp_e_tlp_addr_u { + struct ptlp_e_tlp_addr { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_E_TLP_ADDR_DWLEN]; +} __packed; + +#define NBL_PTLP_E_STATUS_ADDR (0xf4a134) +#define NBL_PTLP_E_STATUS_DEPTH (1) +#define NBL_PTLP_E_STATUS_WIDTH (32) +#define NBL_PTLP_E_STATUS_DWLEN (1) +union ptlp_e_status_u { + struct ptlp_e_status { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_E_STATUS_DWLEN]; +} __packed; + +#define NBL_PTLP_I_NEXT_SEQ_ID_ADDR (0xf4a22c) +#define NBL_PTLP_I_NEXT_SEQ_ID_DEPTH (1) +#define NBL_PTLP_I_NEXT_SEQ_ID_WIDTH (32) +#define NBL_PTLP_I_NEXT_SEQ_ID_DWLEN (1) +union ptlp_i_next_seq_id_u { + struct ptlp_i_next_seq_id { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_I_NEXT_SEQ_ID_DWLEN]; +} __packed; + +#define NBL_PTLP_I_TLP_ADDR_ADDR (0xf4a230) +#define NBL_PTLP_I_TLP_ADDR_DEPTH (1) +#define NBL_PTLP_I_TLP_ADDR_WIDTH (32) +#define NBL_PTLP_I_TLP_ADDR_DWLEN (1) +union ptlp_i_tlp_addr_u { + struct ptlp_i_tlp_addr { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_I_TLP_ADDR_DWLEN]; +} __packed; + +#define NBL_PTLP_I_STATUS_ADDR (0xf4a234) +#define NBL_PTLP_I_STATUS_DEPTH (1) +#define NBL_PTLP_I_STATUS_WIDTH (32) +#define NBL_PTLP_I_STATUS_DWLEN (1) +union ptlp_i_status_u { + struct ptlp_i_status { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_I_STATUS_DWLEN]; +} __packed; + +#define NBL_PTLP_H2I_I_PTLP_RAM_ERR_INFO_ADDR (0xf4a250) +#define NBL_PTLP_H2I_I_PTLP_RAM_ERR_INFO_DEPTH (1) +#define NBL_PTLP_H2I_I_PTLP_RAM_ERR_INFO_WIDTH (32) +#define NBL_PTLP_H2I_I_PTLP_RAM_ERR_INFO_DWLEN (1) +union ptlp_h2i_i_ptlp_ram_err_info_u { + struct ptlp_h2i_i_ptlp_ram_err_info { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PTLP_H2I_I_PTLP_RAM_ERR_INFO_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vblk.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vblk.h new file mode 100644 index 0000000000000000000000000000000000000000..d85d0608f8ca3a4a47a08710cf3457174fd5245b --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vblk.h @@ -0,0 +1,1820 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_VBLK_H +#define NBL_VBLK_H 1 + +#include <linux/types.h> + +#define NBL_VBLK_BASE (0x00F9C000) + +#define NBL_VBLK_INT_STATUS_ADDR (0xf9c000) +#define NBL_VBLK_INT_STATUS_DEPTH (1) +#define NBL_VBLK_INT_STATUS_WIDTH (32) +#define NBL_VBLK_INT_STATUS_DWLEN (1) +union vblk_int_status_u { + struct vblk_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */ + u32 cif_err:1; /* [05:05] Default:0x0 RWC */ + u32 rsv4:1; /* [06:06] Default:0x0 RO */ + u32 rsv3:1; /* [07:07] Default:0x0 RO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_VBLK_INT_MASK_ADDR (0xf9c004) +#define NBL_VBLK_INT_MASK_DEPTH (1) +#define NBL_VBLK_INT_MASK_WIDTH (32) +#define NBL_VBLK_INT_MASK_DWLEN (1) +union vblk_int_mask_u { + struct vblk_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RW */ + u32 cif_err:1; /* [05:05] Default:0x0 RW */ + u32 rsv4:1; /* [06:06] Default:0x0 RO */ + u32 rsv3:1; /* [07:07] Default:0x0 RO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_INT_MASK_DWLEN]; +} __packed; + +#define NBL_VBLK_INT_SET_ADDR (0xf9c008) +#define NBL_VBLK_INT_SET_DEPTH (1) +#define NBL_VBLK_INT_SET_WIDTH (32) +#define NBL_VBLK_INT_SET_DWLEN (1) +union vblk_int_set_u { + struct vblk_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */ + u32 data_cor_err:1; /* [04:04] Default:0x0 WO */ + u32 cif_err:1; /* [05:05] Default:0x0 WO */ + u32 rsv4:1; /* [06:06] Default:0x0 RO */ + u32 rsv3:1; /* [07:07] Default:0x0 RO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_INT_SET_DWLEN]; +} __packed; + +#define NBL_VBLK_INIT_DONE_ADDR (0xf9c00c) +#define NBL_VBLK_INIT_DONE_DEPTH (1) +#define NBL_VBLK_INIT_DONE_WIDTH (32) +#define NBL_VBLK_INIT_DONE_DWLEN (1) +union vblk_init_done_u { + struct vblk_init_done { + u32 init_done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_VBLK_COR_ERR_0_ADDR (0xf9c018) +#define NBL_VBLK_COR_ERR_0_DEPTH (1) +#define NBL_VBLK_COR_ERR_0_WIDTH (32) +#define NBL_VBLK_COR_ERR_0_DWLEN (1) +union vblk_cor_err_0_u { + struct vblk_cor_err_0 { + u32 header_ram:1; /* [0] Default:0x0 RC */ + u32 dmaresp_ring_data_fifo:1; /* [1] Default:0x0 RC */ + u32 dmareq_ring_info_fifo:1; /* [2] Default:0x0 RC */ + u32 rsv5:1; /* [3] Default:0x0 RO */ + u32 cq_avail_info_ram:1; /* [4] Default:0x0 RC */ + u32 dmaresp_idx_data_fifo:1; /* [5] Default:0x0 RC */ + u32 dmareq_idx_info_fifo:1; /* [6] Default:0x0 RC */ + u32 rsv4:1; /* [7] Default:0x0 RO */ + u32 hq_inf_ram:1; /* [8] Default:0x0 RC */
+ u32 done_fifo_ram:1; /* [9] Default:0x0 RC */ + u32 wrap_cnt_ram:1; /* [10] Default:0x0 RC */ + u32 io_done_hd_ram:1; /* [11] Default:0x0 RC */ + u32 iodesc_qid_fifo:1; /* [12] Default:0x0 RC */ + u32 iodesc_fifo:1; /* [13] Default:0x0 RC */ + u32 iodesc_temp_fifo:1; /* [14] Default:0x0 RC */ + u32 iodesc_infifo:1; /* [15] Default:0x0 RC */ + u32 get_iodesc_hd_ram:1; /* [16] Default:0x0 RC */ + u32 get_iodesc_done_ram_cor_err:1; /* [17] Default:0x0 RC */ + u32 get_iodesc_invld_ram_cor_err:1; /* [18] Default:0x0 RC */ + u32 get_iodesc_sch_s_ram_cor_err:1; /* [19] Default:0x0 RC */ + u32 get_iodesc_sch_ram_cor_err:1; /* [20] Default:0x0 RC */ + u32 iohd_hd_wr_hesc_ram:1; /* [21] Default:0x0 RC */ + u32 iohd_fifo_rm_ram:1; /* [22] Default:0x0 RC */ + u32 iohd_infifo_ram:1; /* [23] Default:0x0 RC */ + u32 rsv3:1; /* [24] Default:0x0 RO */ + u32 hddesc_info_fifo_hd_ram:1; /* [25] Default:0x0 RC */ + u32 hddesc_fifo_hd_ram:1; /* [26] Default:0x0 RC */ + u32 rsv2:1; /* [27] Default:0x0 RO */ + u32 rsv1:1; /* [28] Default:0x0 RO */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_COR_ERR_0_DWLEN]; +} __packed; + +#define NBL_VBLK_COR_ERR_1_ADDR (0xf9c01c) +#define NBL_VBLK_COR_ERR_1_DEPTH (1) +#define NBL_VBLK_COR_ERR_1_WIDTH (32) +#define NBL_VBLK_COR_ERR_1_DWLEN (1) +union vblk_cor_err_1_u { + struct vblk_cor_err_1 { + u32 wr_hdesc:1; /* [0] Default:0x0 RC */ + u32 eq_inf_ram:1; /* [1] Default:0x0 RC */ + u32 edesc_desc_ram:1; /* [2] Default:0x0 RC */ + u32 edesc_hd_ram:1; /* [3] Default:0x0 RC */ + u32 edesc_infifo_ram:1; /* [4] Default:0x0 RC */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 rmdesc_dat_infifo_ram:1; /* [6] Default:0x0 RC */ + u32 rmdesc_datfifo_ram:1; /* [7] Default:0x0 RC */ + u32 rmdesc_dir_infifo_ram:1; /* [8] Default:0x0 RC */ + u32 rmdesc_fifo_ram:1; /* [9] Default:0x0 RC */ + u32 rmdesc_infifo_ram:1; /* [10] Default:0x0 RC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_COR_ERR_1_DWLEN]; +} __packed; + +#define NBL_VBLK_COR_ERR_2_ADDR (0xf9c020) +#define NBL_VBLK_COR_ERR_2_DEPTH (1) +#define NBL_VBLK_COR_ERR_2_WIDTH (32) +#define NBL_VBLK_COR_ERR_2_DWLEN (1) +union vblk_cor_err_2_u { + struct vblk_cor_err_2 { + u32 dq_que_chain_ram_err:1; /* [0] Default:0x0 RC */ + u32 dq_dev_chain_ram_err:1; /* [1] Default:0x0 RC */ + u32 dq_sw_notify_fifo_ram:1; /* [2] Default:0x0 RC */ + u32 dq_notify_ram:1; /* [3] Default:0x0 RC */ + u32 dq_qstat_ram:1; /* [4] Default:0x0 RC */ + u32 dq_qstate_cif_copy_ram:1; /* [5] Default:0x0 RC */ + u32 dq_notify_fifo_ram:1; /* [6] Default:0x0 RC */ + u32 dq_sch_fifo_ram:1; /* [7] Default:0x0 RC */ + u32 rsv2:1; /* [8] Default:0x0 RO */ + u32 cq_que_chain_ram_err:1; /* [9] Default:0x0 RC */ + u32 cq_dev_chain_ram_err:1; /* [10] Default:0x0 RC */ + u32 cq_sw_notify_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 cq_notify_ram:1; /* [12] Default:0x0 RC */ + u32 cq_qstat_ram:1; /* [13] Default:0x0 RC */ + u32 cq_qstate_cif_copy_ram:1; /* [14] Default:0x0 RC */ + u32 cq_notify_fifo_ram:1; /* [15] Default:0x0 RC */ + u32 cq_sch_fifo_ram:1; /* [16] Default:0x0 RC */ + u32 rsv1:1; /* [17] Default:0x0 RO */ + u32 cq_chain_info_ram:1; /* [18] Default:0x0 RC */ + u32 dq_chain_info_ram:1; /* [19] Default:0x0 RC */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_COR_ERR_2_DWLEN]; +} __packed; + +#define NBL_VBLK_COR_ERR_3_ADDR (0xf9c024) +#define NBL_VBLK_COR_ERR_3_DEPTH (1) +#define NBL_VBLK_COR_ERR_3_WIDTH (32) +#define NBL_VBLK_COR_ERR_3_DWLEN (1) +union 
vblk_cor_err_3_u { + struct vblk_cor_err_3 { + u32 iodone_num_ram:1; /* [0] Default:0x0 RC */ + u32 edesc_offset_ram:1; /* [1] Default:0x0 RC */ + u32 hdesc_offset_ram:1; /* [2] Default:0x0 RC */ + u32 io_done_fifo_ram:1; /* [3] Default:0x0 RC */ + u32 nxt_desc_req_fifo_ram:1; /* [4] Default:0x0 RC */ + u32 e2h_dmacmd_fifo_ram:1; /* [5] Default:0x0 RC */ + u32 h2e_dmacmd_fifo_ram:1; /* [6] Default:0x0 RC */ + u32 rsv3:1; /* [7] Default:0x0 RO */ + u32 dmareq_info_fifo_ram:1; /* [8] Default:0x0 RC */ + u32 pro_order_fifo_ram:1; /* [9] Default:0x0 RC */ + u32 dqdesc_context_ram:1; /* [10] Default:0x0 RC */ + u32 hdesc_context_ram:1; /* [11] Default:0x0 RC */ + u32 edesc_context_ram:1; /* [12] Default:0x0 RC */ + u32 rsv2:1; /* [13] Default:0x0 RO */ + u32 qsize_mask_ram_rdata:1; /* [14] Default:0x0 RC */ + u32 baddr_ram_rdata:1; /* [15] Default:0x0 RC */ + u32 dmareq_rsp_fifo_ram:1; /* [16] Default:0x0 RC */ + u32 hw_idx_ram:1; /* [17] Default:0x0 RC */ + u32 dqdesc_info_ram:1; /* [18] Default:0x0 RC */ + u32 rsv1:1; /* [19] Default:0x0 RO */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_COR_ERR_3_DWLEN]; +} __packed; + +#define NBL_VBLK_COR_ERR_4_ADDR (0xf9c028) +#define NBL_VBLK_COR_ERR_4_DEPTH (1) +#define NBL_VBLK_COR_ERR_4_WIDTH (32) +#define NBL_VBLK_COR_ERR_4_DWLEN (1) +union vblk_cor_err_4_u { + struct vblk_cor_err_4 { + u32 e2h_dqinfo_ram_rdata:1; /* [0] Default:0x0 RC */ + u32 e2h_dma_used_info_ram:1; /* [1] Default:0x0 RC */ + u32 e2h_hqinfo_ram_rdata:1; /* [2] Default:0x0 RC */ + u32 e2h_header_ram:1; /* [3] Default:0x0 RC */ + u32 e2h_datram:1; /* [4] Default:0x0 RC */ + u32 e2h_dma_wcmd_fifo_ram:1; /* [5] Default:0x0 RC */ + u32 rsv1:1; /* [6] Default:0x0 RO */ + u32 h2e_dqinfo_ram_rdata:1; /* [7] Default:0x0 RC */ + u32 h2e_dma_used_info_ram:1; /* [8] Default:0x0 RC */ + u32 h2e_hqinfo_ram_rdata:1; /* [9] Default:0x0 RC */ + u32 h2e_header_ram:1; /* [10] Default:0x0 RC */ + u32 h2e_datram:1; /* [11] Default:0x0 RC */ + u32 h2e_dma_wcmd_fifo_ram:1; /* [12] Default:0x0 RC */ + u32 notify_cfg_hpntr_ram:1; /* [13] Default:0x0 RC */ + u32 notify_cfg_notify_ram:1; /* [14] Default:0x0 RC */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_COR_ERR_4_DWLEN]; +} __packed; + +#define NBL_VBLK_UCOR_ERR_0_ADDR (0xf9c02c) +#define NBL_VBLK_UCOR_ERR_0_DEPTH (1) +#define NBL_VBLK_UCOR_ERR_0_WIDTH (32) +#define NBL_VBLK_UCOR_ERR_0_DWLEN (1) +union vblk_ucor_err_0_u { + struct vblk_ucor_err_0 { + u32 header_ram:1; /* [0] Default:0x0 RC */ + u32 dmaresp_ring_data_fifo:1; /* [1] Default:0x0 RC */ + u32 dmareq_ring_info_fifo:1; /* [2] Default:0x0 RC */ + u32 rsv5:1; /* [3] Default:0x0 RO */ + u32 cq_avail_info_ram:1; /* [4] Default:0x0 RC */ + u32 dmaresp_idx_data_fifo:1; /* [5] Default:0x0 RC */ + u32 dmareq_idx_info_fifo:1; /* [6] Default:0x0 RC */ + u32 rsv4:1; /* [7] Default:0x0 RO */ + u32 hq_inf_ram:1; /* [8] Default:0x0 RC */ + u32 done_fifo_ram:1; /* [9] Default:0x0 RC */ + u32 wrap_cnt_ram:1; /* [10] Default:0x0 RC */ + u32 io_done_hd_ram:1; /* [11] Default:0x0 RC */ + u32 iodesc_qid_fifo:1; /* [12] Default:0x0 RC */ + u32 iodesc_fifo:1; /* [13] Default:0x0 RC */ + u32 iodesc_temp_fifo:1; /* [14] Default:0x0 RC */ + u32 iodesc_infifo:1; /* [15] Default:0x0 RC */ + u32 get_iodesc_hd_ram:1; /* [16] Default:0x0 RC */ + u32 get_iodesc_done_ram_cor_err:1; /* [17] Default:0x0 RC */ + u32 get_iodesc_invld_ram_cor_err:1; /* [18] Default:0x0 RC */ + u32 get_iodesc_sch_s_ram_cor_err:1; /* [19] Default:0x0 RC */ + u32 
get_iodesc_sch_ram_cor_err:1; /* [20] Default:0x0 RC */ + u32 iohd_hd_wr_hesc_ram:1; /* [21] Default:0x0 RC */ + u32 iohd_fifo_rm_ram:1; /* [22] Default:0x0 RC */ + u32 iohd_infifo_ram:1; /* [23] Default:0x0 RC */ + u32 rsv3:1; /* [24] Default:0x0 RO */ + u32 hddesc_info_fifo_hd_ram:1; /* [25] Default:0x0 RC */ + u32 hddesc_fifo_hd_ram:1; /* [26] Default:0x0 RC */ + u32 rsv2:1; /* [27] Default:0x0 RO */ + u32 rsv1:1; /* [28] Default:0x0 RO */ + u32 rsv:3; /* [31:29] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_UCOR_ERR_0_DWLEN]; +} __packed; + +#define NBL_VBLK_UCOR_ERR_1_ADDR (0xf9c030) +#define NBL_VBLK_UCOR_ERR_1_DEPTH (1) +#define NBL_VBLK_UCOR_ERR_1_WIDTH (32) +#define NBL_VBLK_UCOR_ERR_1_DWLEN (1) +union vblk_ucor_err_1_u { + struct vblk_ucor_err_1 { + u32 wr_hdesc:1; /* [0] Default:0x0 RC */ + u32 eq_inf_ram:1; /* [1] Default:0x0 RC */ + u32 edesc_desc_ram:1; /* [2] Default:0x0 RC */ + u32 edesc_hd_ram:1; /* [3] Default:0x0 RC */ + u32 edesc_infifo_ram:1; /* [4] Default:0x0 RC */ + u32 rsv1:1; /* [5] Default:0x0 RO */ + u32 rmdesc_dat_infifo_ram:1; /* [6] Default:0x0 RC */ + u32 rmdesc_datfifo_ram:1; /* [7] Default:0x0 RC */ + u32 rmdesc_dir_infifo_ram:1; /* [8] Default:0x0 RC */ + u32 rmdesc_fifo_ram:1; /* [9] Default:0x0 RC */ + u32 rmdesc_infifo_ram:1; /* [10] Default:0x0 RC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_UCOR_ERR_1_DWLEN]; +} __packed; + +#define NBL_VBLK_UCOR_ERR_2_ADDR (0xf9c034) +#define NBL_VBLK_UCOR_ERR_2_DEPTH (1) +#define NBL_VBLK_UCOR_ERR_2_WIDTH (32) +#define NBL_VBLK_UCOR_ERR_2_DWLEN (1) +union vblk_ucor_err_2_u { + struct vblk_ucor_err_2 { + u32 cq_que_chain_ram_err:1; /* [0] Default:0x0 RC */ + u32 cq_dev_chain_ram_err:1; /* [1] Default:0x0 RC */ + u32 cq_sw_notify_fifo_ram:1; /* [2] Default:0x0 RC */ + u32 cq_notify_ram:1; /* [3] Default:0x0 RC */ + u32 cq_qstat_ram:1; /* [4] Default:0x0 RC */ + u32 cq_qstate_cif_copy_ram:1; /* [5] Default:0x0 RC */ + u32 cq_notify_fifo_ram:1; /* [6] Default:0x0 RC */ + u32 cq_sch_fifo_ram:1; /* [7] Default:0x0 RC */ + u32 rsv2:1; /* [8] Default:0x0 RO */ + u32 dq_que_chain_ram_err:1; /* [9] Default:0x0 RC */ + u32 dq_dev_chain_ram_err:1; /* [10] Default:0x0 RC */ + u32 dq_sw_notify_fifo_ram:1; /* [11] Default:0x0 RC */ + u32 dq_notify_ram:1; /* [12] Default:0x0 RC */ + u32 dq_qstat_ram:1; /* [13] Default:0x0 RC */ + u32 dq_qstate_cif_copy_ram:1; /* [14] Default:0x0 RC */ + u32 dq_notify_fifo_ram:1; /* [15] Default:0x0 RC */ + u32 dq_sch_fifo_ram:1; /* [16] Default:0x0 RC */ + u32 rsv1:1; /* [17] Default:0x0 RO */ + u32 cq_chain_info_ram:1; /* [18] Default:0x0 RC */ + u32 dq_chain_info_ram:1; /* [19] Default:0x0 RC */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_UCOR_ERR_2_DWLEN]; +} __packed; + +#define NBL_VBLK_UCOR_ERR_3_ADDR (0xf9c038) +#define NBL_VBLK_UCOR_ERR_3_DEPTH (1) +#define NBL_VBLK_UCOR_ERR_3_WIDTH (32) +#define NBL_VBLK_UCOR_ERR_3_DWLEN (1) +union vblk_ucor_err_3_u { + struct vblk_ucor_err_3 { + u32 iodone_num_ram:1; /* [0] Default:0x0 RC */ + u32 edesc_offset_ram:1; /* [1] Default:0x0 RC */ + u32 hdesc_offset_ram:1; /* [2] Default:0x0 RC */ + u32 io_done_fifo_ram:1; /* [3] Default:0x0 RC */ + u32 nxt_desc_req_fifo_ram:1; /* [4] Default:0x0 RC */ + u32 e2h_dmacmd_fifo_ram:1; /* [5] Default:0x0 RC */ + u32 h2e_dmacmd_fifo_ram:1; /* [6] Default:0x0 RC */ + u32 rsv5:1; /* [7] Default:0x0 RO */ + u32 dmareq_info_fifo_ram:1; /* [8] Default:0x0 RC */ + u32 pro_order_fifo_ram:1; /* [9] Default:0x0 RC */ + u32 dqdesc_context_ram:1; /* [10] Default:0x0 RC */ + u32 
hdesc_context_ram:1; /* [11] Default:0x0 RC */ + u32 edesc_context_ram:1; /* [12] Default:0x0 RC */ + u32 rsv4:1; /* [13] Default:0x0 RO */ + u32 qsize_mask_ram_rdata:1; /* [14] Default:0x0 RC */ + u32 baddr_ram_rdata:1; /* [15] Default:0x0 RC */ + u32 dmareq_rsp_fifo_ram:1; /* [16] Default:0x0 RC */ + u32 hw_idx_ram:1; /* [17] Default:0x0 RC */ + u32 dqdesc_info_ram:1; /* [18] Default:0x0 RC */ + u32 rsv3:1; /* [19] Default:0x0 RO */ + u32 rsv2:1; /* [20] Default:0x0 RO */ + u32 rsv1:1; /* [21] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_UCOR_ERR_3_DWLEN]; +} __packed; + +#define NBL_VBLK_UCOR_ERR_4_ADDR (0xf9c03c) +#define NBL_VBLK_UCOR_ERR_4_DEPTH (1) +#define NBL_VBLK_UCOR_ERR_4_WIDTH (32) +#define NBL_VBLK_UCOR_ERR_4_DWLEN (1) +union vblk_ucor_err_4_u { + struct vblk_ucor_err_4 { + u32 e2h_dqinfo_ram_rdata:1; /* [0] Default:0x0 RC */ + u32 e2h_dma_used_info_ram:1; /* [1] Default:0x0 RC */ + u32 e2h_hqinfo_ram_rdata:1; /* [2] Default:0x0 RC */ + u32 e2h_header_ram:1; /* [3] Default:0x0 RC */ + u32 e2h_datram:1; /* [4] Default:0x0 RC */ + u32 e2h_dma_wcmd_fifo_ram:1; /* [5] Default:0x0 RC */ + u32 rsv2:1; /* [6] Default:0x0 RO */ + u32 h2e_dqinfo_ram_rdata:1; /* [7] Default:0x0 RC */ + u32 h2e_dma_used_info_ram:1; /* [8] Default:0x0 RC */ + u32 h2e_hqinfo_ram_rdata:1; /* [9] Default:0x0 RC */ + u32 h2e_header_ram:1; /* [10] Default:0x0 RC */ + u32 h2e_datram:1; /* [11] Default:0x0 RC */ + u32 h2e_dma_wcmd_fifo_ram:1; /* [12] Default:0x0 RC */ + u32 notify_cfg_hpntr_ram:1; /* [13] Default:0x0 RC */ + u32 notify_cfg_notify_ram:1; /* [14] Default:0x0 RC */ + u32 rsv1:1; /* [15] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_UCOR_ERR_4_DWLEN]; +} __packed; + +#define NBL_VBLK_CIF_ERR_INFO_ADDR (0xf9c040) +#define NBL_VBLK_CIF_ERR_INFO_DEPTH (1) +#define NBL_VBLK_CIF_ERR_INFO_WIDTH (32) +#define NBL_VBLK_CIF_ERR_INFO_DWLEN (1) +union vblk_cif_err_info_u { + struct vblk_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_VBLK_INIT_DONE_G_ADDR (0xf9c048) +#define NBL_VBLK_INIT_DONE_G_DEPTH (1) +#define NBL_VBLK_INIT_DONE_G_WIDTH (32) +#define NBL_VBLK_INIT_DONE_G_DWLEN (1) +union vblk_init_done_g_u { + struct vblk_init_done_g { + u32 g:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_INIT_DONE_G_DWLEN]; +} __packed; + +#define NBL_VBLK_INTERRUPT_G_ADDR (0xf9c04c) +#define NBL_VBLK_INTERRUPT_G_DEPTH (1) +#define NBL_VBLK_INTERRUPT_G_WIDTH (32) +#define NBL_VBLK_INTERRUPT_G_DWLEN (1) +union vblk_interrupt_g_u { + struct vblk_interrupt_g { + u32 rsv:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_INTERRUPT_G_DWLEN]; +} __packed; + +#define NBL_VBLK_RAM_ERROR_G0_ADDR (0xf9c050) +#define NBL_VBLK_RAM_ERROR_G0_DEPTH (1) +#define NBL_VBLK_RAM_ERROR_G0_WIDTH (32) +#define NBL_VBLK_RAM_ERROR_G0_DWLEN (1) +union vblk_ram_error_g0_u { + struct vblk_ram_error_g0 { + u32 dqdesc_dmareq_rsp_fifo:1; /* [0] Default:0x0 RC */ + u32 dqdesc_dmareq_info_fifo:1; /* [1] Default:0x0 RC */ + u32 dqdesc_hw_wrap_cnt:1; /* [2] Default:0x0 RC */ + u32 descpro_dmareq_info_fifo:1; /* [3] Default:0x0 RC */ + u32 descpro_pro_order_fifo:1; /* [4] Default:0x0 RC */ + u32 h2e_dma_wcmd_fifo:1; /* [5] Default:0x0 RC */ + u32 h2e_datfifo_fifo:1; /* [6] Default:0x0 RC */ + u32 h2e:1; /* [7] 
Default:0x0 RC */ + u32 e2hdma_wcmd_fifo:1; /* [8] Default:0x0 RC */ + u32 e2hdatfifo_fifo:1; /* [9] Default:0x0 RC */ + u32 e2hram:1; /* [10] Default:0x0 RC */ + u32 io_done_fifo:1; /* [11] Default:0x0 RC */ + u32 nxt_desc_req_fifo:1; /* [12] Default:0x0 RC */ + u32 e2h_dmacmd_fifo:1; /* [13] Default:0x0 RC */ + u32 h2e_dmacmd_fifo:1; /* [14] Default:0x0 RC */ + u32 dqsch_fifo:1; /* [15] Default:0x0 RC */ + u32 dqnotify_fifo:1; /* [16] Default:0x0 RC */ + u32 dqsw_notify_fifo:1; /* [17] Default:0x0 RC */ + u32 cqsch_fifo:1; /* [18] Default:0x0 RC */ + u32 cqnotify_fifo:1; /* [19] Default:0x0 RC */ + u32 cqsw_notify_fifo:1; /* [20] Default:0x0 RC */ + u32 cq_notify_hpntr:1; /* [21] Default:0x0 RC */ + u32 cq_notify_ram:1; /* [22] Default:0x0 RC */ + u32 hqinfo:1; /* [23] Default:0x0 RC */ + u32 hqinfo_ram1:1; /* [24] Default:0x0 RC */ + u32 cqinfo:1; /* [25] Default:0x0 RC */ + u32 dqinfo_baddr:1; /* [26] Default:0x0 RC */ + u32 dqinfo_qsize:1; /* [27] Default:0x0 RC */ + u32 dqinfo_ram1:1; /* [28] Default:0x0 RC */ + u32 dqinfo_ram2:1; /* [29] Default:0x0 RC */ + u32 dq_used_info:1; /* [30] Default:0x0 RC */ + u32 cq_avail_info:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_VBLK_RAM_ERROR_G0_DWLEN]; +} __packed; + +#define NBL_VBLK_RAM_ERROR_G1_ADDR (0xf9c074) +#define NBL_VBLK_RAM_ERROR_G1_DEPTH (1) +#define NBL_VBLK_RAM_ERROR_G1_WIDTH (32) +#define NBL_VBLK_RAM_ERROR_G1_DWLEN (1) +union vblk_ram_error_g1_u { + struct vblk_ram_error_g1 { + u32 get_iodesc_infifo:1; /* [0] Default:0x0 RC */ + u32 get_iodesc_fifo:1; /* [1] Default:0x0 RC */ + u32 get_iodesc_temp_fifo:1; /* [2] Default:0x0 RC */ + u32 get_iodesc_qid_fifo:1; /* [3] Default:0x0 RC */ + u32 get_iodesc_done_fifo:1; /* [4] Default:0x0 RC */ + u32 get_iodesc_ram:1; /* [5] Default:0x0 RC */ + u32 get_iodesc_wrap_cnt_ram:1; /* [6] Default:0x0 RC */ + u32 get_iodesc_io_invld_ram:1; /* [7] Default:0x0 RC */ + u32 get_iodesc_io_done_hd_ram:1; /* [8] Default:0x0 RC */ + u32 get_iodesc_io_done_ram:1; /* [9] Default:0x0 RC */ + u32 get_hddesc_infifo:1; /* [10] Default:0x0 RC */ + u32 get_hddesc_fifo_hd:1; /* [11] Default:0x0 RC */ + u32 get_iohd_infifo:1; /* [12] Default:0x0 RC */ + u32 get_iohd_fifo_rm:1; /* [13] Default:0x0 RC */ + u32 get_iohd_fifo_hd_wr_hesc_ram:1; /* [14] Default:0x0 RC */ + u32 get_rmdesc_dat_fifo:1; /* [15] Default:0x0 RC */ + u32 get_rmdesc_dat_infifo:1; /* [16] Default:0x0 RC */ + u32 get_rmdesc_rmdesc_fifo:1; /* [17] Default:0x0 RC */ + u32 get_rmdesc_dir_infifo:1; /* [18] Default:0x0 RC */ + u32 get_rmdesc_infifo:1; /* [19] Default:0x0 RC */ + u32 get_edesc_hd_ram:1; /* [20] Default:0x0 RC */ + u32 get_edesc_desc_ram_rd:1; /* [21] Default:0x0 RC */ + u32 wr_hdesc_cfg_ram:1; /* [22] Default:0x0 RC */ + u32 judge_split_fifo:1; /* [23] Default:0x0 RC */ + u32 judge_packed_fifo:1; /* [24] Default:0x0 RC */ + u32 get_idx_dmaresp_idx_fifo:1; /* [25] Default:0x0 RC */ + u32 get_idx_dmareq_idx_info_fifo:1; /* [26] Default:0x0 RC */ + u32 get_ring_dmaresp_ring_fifo:1; /* [27] Default:0x0 RC */ + u32 get_ring_dmareq_ring_info_fifo:1; /* [28] Default:0x0 RC */ + u32 cap_cfg_ram:1; /* [29] Default:0x0 RC */ + u32 cq_feature:1; /* [30] Default:0x0 RC */ + u32 edesc_infifo:1; /* [31] Default:0x0 RC */ + } __packed info; + u32 data[NBL_VBLK_RAM_ERROR_G1_DWLEN]; +} __packed; + +#define NBL_VBLK_DQ_HOST_DMA_ERR_ADDR (0xf9c09c) +#define NBL_VBLK_DQ_HOST_DMA_ERR_DEPTH (1) +#define NBL_VBLK_DQ_HOST_DMA_ERR_WIDTH (32) +#define NBL_VBLK_DQ_HOST_DMA_ERR_DWLEN (1) +union vblk_dq_host_dma_err_u { + struct 
vblk_dq_host_dma_err { + u32 qid:9; /* [8:0] Default:0x1ff RO */ + u32 rsv:7; /* [15:9] Default:0x0 RO */ + u32 cnt:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQ_HOST_DMA_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_CAR_CTRL_ADDR (0xf9c100) +#define NBL_VBLK_CAR_CTRL_DEPTH (1) +#define NBL_VBLK_CAR_CTRL_WIDTH (32) +#define NBL_VBLK_CAR_CTRL_DWLEN (1) +union vblk_car_ctrl_u { + struct vblk_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_VBLK_HQ_RESET_ADDR (0xf9c110) +#define NBL_VBLK_HQ_RESET_DEPTH (1) +#define NBL_VBLK_HQ_RESET_WIDTH (32) +#define NBL_VBLK_HQ_RESET_DWLEN (1) +union vblk_hq_reset_u { + struct vblk_hq_reset { + u32 qid:9; /* [8:0] Default:0x0 RW */ + u32 vld:1; /* [9:9] Default:0x0 WO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_HQ_RESET_DWLEN]; +} __packed; + +#define NBL_VBLK_CQ_RESET_ADDR (0xf9c114) +#define NBL_VBLK_CQ_RESET_DEPTH (1) +#define NBL_VBLK_CQ_RESET_WIDTH (32) +#define NBL_VBLK_CQ_RESET_DWLEN (1) +union vblk_cq_reset_u { + struct vblk_cq_reset { + u32 qid:9; /* [8:0] Default:0x0 RW */ + u32 vld:1; /* [9:9] Default:0x0 WO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CQ_RESET_DWLEN]; +} __packed; + +#define NBL_VBLK_DQ_RESET_ADDR (0xf9c118) +#define NBL_VBLK_DQ_RESET_DEPTH (1) +#define NBL_VBLK_DQ_RESET_WIDTH (32) +#define NBL_VBLK_DQ_RESET_DWLEN (1) +union vblk_dq_reset_u { + struct vblk_dq_reset { + u32 qid:9; /* [8:0] Default:0x0 RW */ + u32 vld:1; /* [9:9] Default:0x0 WO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQ_RESET_DWLEN]; +} __packed; + +#define NBL_VBLK_QUEUE_NOTIFY_ADDR (0xf9c11c) +#define NBL_VBLK_QUEUE_NOTIFY_DEPTH (1) +#define NBL_VBLK_QUEUE_NOTIFY_WIDTH (32) +#define NBL_VBLK_QUEUE_NOTIFY_DWLEN (1) +union vblk_queue_notify_u { + struct vblk_queue_notify { + u32 cfg_notify_qid:9; /* [8:0] Default:0x0 RW */ + u32 cfg_notify_vld:1; /* [9:9] Default:0x0 WO */ + u32 cfg_notify_pfid:5; /* [14:10] Default:0x0 RW */ + u32 rsv:1; /* [15] Default:0x0 RO */ + u32 cfg_notify_cnt:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_QUEUE_NOTIFY_DWLEN]; +} __packed; + +#define NBL_VBLK_MAX_TRANS_SIZE_ADDR (0xf9c120) +#define NBL_VBLK_MAX_TRANS_SIZE_DEPTH (1) +#define NBL_VBLK_MAX_TRANS_SIZE_WIDTH (32) +#define NBL_VBLK_MAX_TRANS_SIZE_DWLEN (1) +union vblk_max_trans_size_u { + struct vblk_max_trans_size { + u32 dbg:32; /* [31:0] Default:0x10000 RW */ + } __packed info; + u32 data[NBL_VBLK_MAX_TRANS_SIZE_DWLEN]; +} __packed; + +#define NBL_VBLK_BLK_VERSION_INFO_ADDR (0xf9c124) +#define NBL_VBLK_BLK_VERSION_INFO_DEPTH (1) +#define NBL_VBLK_BLK_VERSION_INFO_WIDTH (32) +#define NBL_VBLK_BLK_VERSION_INFO_DWLEN (1) +union vblk_blk_version_info_u { + struct vblk_blk_version_info { + u32 feature_flag:1; /* [0:0] Default:0x1 RO */ + u32 rsv:7; /* [7:1] Default:0x0 RO */ + u32 date:8; /* [15:8] Default:0x07 RO */ + u32 month:8; /* [23:16] Default:0x04 RO */ + u32 year:8; /* [31:24] Default:0x22 RO */ + } __packed info; + u32 data[NBL_VBLK_BLK_VERSION_INFO_DWLEN]; +} __packed; + +#define NBL_VBLK_CQ_SCH_CTRL_ADDR (0xf9c128) +#define NBL_VBLK_CQ_SCH_CTRL_DEPTH (1) +#define NBL_VBLK_CQ_SCH_CTRL_WIDTH (32) +#define 
NBL_VBLK_CQ_SCH_CTRL_DWLEN (1) +union vblk_cq_sch_ctrl_u { + struct vblk_cq_sch_ctrl { + u32 q_en_ctrl_mask:1; /* [0:0] Default:0x0 RW */ + u32 notify_bitmap_mask:1; /* [1:1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CQ_SCH_CTRL_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IDX_CFG_DEBUG_DATA_ADDR (0xf9c13c) +#define NBL_VBLK_GET_IDX_CFG_DEBUG_DATA_DEPTH (1) +#define NBL_VBLK_GET_IDX_CFG_DEBUG_DATA_WIDTH (32) +#define NBL_VBLK_GET_IDX_CFG_DEBUG_DATA_DWLEN (1) +union vblk_get_idx_cfg_debug_data_u { + struct vblk_get_idx_cfg_debug_data { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IDX_CFG_DEBUG_DATA_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RING_CFG_DEBUG_DATA_ADDR (0xf9c14c) +#define NBL_VBLK_GET_RING_CFG_DEBUG_DATA_DEPTH (1) +#define NBL_VBLK_GET_RING_CFG_DEBUG_DATA_WIDTH (32) +#define NBL_VBLK_GET_RING_CFG_DEBUG_DATA_DWLEN (1) +union vblk_get_ring_cfg_debug_data_u { + struct vblk_get_ring_cfg_debug_data { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RING_CFG_DEBUG_DATA_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_COPY_ADDR (0xf9c154) +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_COPY_DEPTH (1) +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_COPY_WIDTH (32) +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_COPY_DWLEN (1) +union vblk_get_iodesc_cif_invld_qid_copy_u { + struct vblk_get_iodesc_cif_invld_qid_copy { + u32 cnt:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_CIF_INVLD_QID_COPY_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_0_ADDR (0xf9c180) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_0_DEPTH (1) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_0_WIDTH (32) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_0_DWLEN (1) +union vblk_get_iodesc_cfg_dma_rdata_0_u { + struct vblk_get_iodesc_cfg_dma_rdata_0 { + u32 rdata0:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_0_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_1_ADDR (0xf9c184) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_1_DEPTH (1) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_1_WIDTH (32) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_1_DWLEN (1) +union vblk_get_iodesc_cfg_dma_rdata_1_u { + struct vblk_get_iodesc_cfg_dma_rdata_1 { + u32 rdata1:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_1_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_2_ADDR (0xf9c188) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_2_DEPTH (1) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_2_WIDTH (32) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_2_DWLEN (1) +union vblk_get_iodesc_cfg_dma_rdata_2_u { + struct vblk_get_iodesc_cfg_dma_rdata_2 { + u32 rdata2:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_2_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_3_ADDR (0xf9c18c) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_3_DEPTH (1) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_3_WIDTH (32) +#define NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_3_DWLEN (1) +union vblk_get_iodesc_cfg_dma_rdata_3_u { + struct vblk_get_iodesc_cfg_dma_rdata_3 { + u32 rdata3:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_CFG_DMA_RDATA_3_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_CFG_QID_HD_DEBUG_REG_ADDR (0xf9c190) +#define NBL_VBLK_GET_IODESC_CFG_QID_HD_DEBUG_REG_DEPTH (1) +#define 
NBL_VBLK_GET_IODESC_CFG_QID_HD_DEBUG_REG_WIDTH (32) +#define NBL_VBLK_GET_IODESC_CFG_QID_HD_DEBUG_REG_DWLEN (1) +union vblk_get_iodesc_cfg_qid_hd_debug_reg_u { + struct vblk_get_iodesc_cfg_qid_hd_debug_reg { + u32 qid_hd:32; /* [31:0] Default:0x10000 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_CFG_QID_HD_DEBUG_REG_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_ADDR (0xf9c198) +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_DEPTH (1) +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_WIDTH (32) +#define NBL_VBLK_GET_IODESC_CIF_INVLD_QID_DWLEN (1) +union vblk_get_iodesc_cif_invld_qid_u { + struct vblk_get_iodesc_cif_invld_qid { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_CIF_INVLD_QID_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_INDIR_ADDR (0xf9c1a4) +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_INDIR_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_INDIR_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_INDIR_DWLEN (1) +union vblk_get_hddesc_cfg_io_debug_indir_u { + struct vblk_get_hddesc_cfg_io_debug_indir { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_INDIR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_DIR_ADDR (0xf9c1a8) +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_DIR_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_DIR_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_DIR_DWLEN (1) +union vblk_get_hddesc_cfg_io_debug_dir_u { + struct vblk_get_hddesc_cfg_io_debug_dir { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_IO_DEBUG_DIR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR0_ADDR (0xf9c1b0) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR0_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR0_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR0_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir0_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir0 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR0_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR1_ADDR (0xf9c1b4) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR1_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR1_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR1_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir1_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir1 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR1_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR2_ADDR (0xf9c1b8) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR2_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR2_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR2_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir2_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir2 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR2_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR3_ADDR (0xf9c1bc) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR3_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR3_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR3_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir3_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir3 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR3_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR4_ADDR (0xf9c1c0) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR4_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR4_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR4_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir4_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir4 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR4_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR5_ADDR (0xf9c1c4) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR5_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR5_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR5_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir5_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir5 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR5_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR6_ADDR (0xf9c1c8) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR6_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR6_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR6_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir6_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir6 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR6_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR7_ADDR (0xf9c1cc) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR7_DEPTH (1) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR7_WIDTH (32) +#define NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR7_DWLEN (1) +union vblk_get_hddesc_cfg_hdesc_debug_dir7_u { + struct vblk_get_hddesc_cfg_hdesc_debug_dir7 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_HDDESC_CFG_HDESC_DEBUG_DIR7_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG0_ADDR (0xf9c1e0) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG0_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG0_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG0_DWLEN (1) +union vblk_get_iohd_cfg_iohd_debug0_u { + struct vblk_get_iohd_cfg_iohd_debug0 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG0_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG1_ADDR (0xf9c1e4) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG1_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG1_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG1_DWLEN (1) +union vblk_get_iohd_cfg_iohd_debug1_u { + struct vblk_get_iohd_cfg_iohd_debug1 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG1_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG2_ADDR (0xf9c1e8) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG2_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG2_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG2_DWLEN (1) +union vblk_get_iohd_cfg_iohd_debug2_u { + struct vblk_get_iohd_cfg_iohd_debug2 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG2_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG3_ADDR (0xf9c1ec) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG3_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG3_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG3_DWLEN (1) +union vblk_get_iohd_cfg_iohd_debug3_u { + struct 
vblk_get_iohd_cfg_iohd_debug3 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_IOHD_DEBUG3_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_L_ADDR (0xf9c1f0) +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_L_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_L_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_L_DWLEN (1) +union vblk_get_iohd_cfg_err_info_l_u { + struct vblk_get_iohd_cfg_err_info_l { + u32 err:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_ERR_INFO_L_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_C_ADDR (0xf9c1f4) +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_C_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_C_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_ERR_INFO_C_DWLEN (1) +union vblk_get_iohd_cfg_err_info_c_u { + struct vblk_get_iohd_cfg_err_info_c { + u32 err:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_ERR_INFO_C_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG0_ADDR (0xf9c200) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG0_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG0_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG0_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug0_u { + struct vblk_get_rmdesc_cfg_dma_info_debug0 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG0_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG1_ADDR (0xf9c204) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG1_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG1_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG1_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug1_u { + struct vblk_get_rmdesc_cfg_dma_info_debug1 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG1_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG2_ADDR (0xf9c208) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG2_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG2_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG2_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug2_u { + struct vblk_get_rmdesc_cfg_dma_info_debug2 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG2_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG3_ADDR (0xf9c20c) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG3_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG3_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG3_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug3_u { + struct vblk_get_rmdesc_cfg_dma_info_debug3 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG3_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG4_ADDR (0xf9c210) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG4_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG4_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG4_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug4_u { + struct vblk_get_rmdesc_cfg_dma_info_debug4 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG4_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG5_ADDR (0xf9c214) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG5_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG5_WIDTH (32) 
+#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG5_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug5_u { + struct vblk_get_rmdesc_cfg_dma_info_debug5 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG5_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG6_ADDR (0xf9c218) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG6_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG6_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG6_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug6_u { + struct vblk_get_rmdesc_cfg_dma_info_debug6 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG6_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG7_ADDR (0xf9c21c) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG7_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG7_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG7_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug7_u { + struct vblk_get_rmdesc_cfg_dma_info_debug7 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG7_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG8_ADDR (0xf9c220) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG8_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG8_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG8_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug8_u { + struct vblk_get_rmdesc_cfg_dma_info_debug8 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG8_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG9_ADDR (0xf9c224) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG9_DEPTH (1) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG9_WIDTH (32) +#define NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG9_DWLEN (1) +union vblk_get_rmdesc_cfg_dma_info_debug9_u { + struct vblk_get_rmdesc_cfg_dma_info_debug9 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDESC_CFG_DMA_INFO_DEBUG9_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG0_ADDR (0xf9c238) +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG0_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG0_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG0_DWLEN (1) +union vblk_get_edesc_cfg_dma_info_debug0_u { + struct vblk_get_edesc_cfg_dma_info_debug0 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG0_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG1_ADDR (0xf9c23c) +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG1_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG1_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG1_DWLEN (1) +union vblk_get_edesc_cfg_dma_info_debug1_u { + struct vblk_get_edesc_cfg_dma_info_debug1 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_DMA_INFO_DEBUG1_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG0_ADDR (0xf9c240) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG0_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG0_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG0_DWLEN (1) +union vblk_get_edesc_cfg_edesc_debug_reg0_u { + struct vblk_get_edesc_cfg_edesc_debug_reg0 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG0_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG1_ADDR (0xf9c244) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG1_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG1_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG1_DWLEN (1) +union vblk_get_edesc_cfg_edesc_debug_reg1_u { + struct vblk_get_edesc_cfg_edesc_debug_reg1 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG1_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG2_ADDR (0xf9c248) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG2_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG2_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG2_DWLEN (1) +union vblk_get_edesc_cfg_edesc_debug_reg2_u { + struct vblk_get_edesc_cfg_edesc_debug_reg2 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG2_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG3_ADDR (0xf9c24c) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG3_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG3_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG3_DWLEN (1) +union vblk_get_edesc_cfg_edesc_debug_reg3_u { + struct vblk_get_edesc_cfg_edesc_debug_reg3 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_EDESC_DEBUG_REG3_DWLEN]; +} __packed; + +#define NBL_VBLK_WR_HDESC_CFG_DMA_DEBUG_ADDR (0xf9c254) +#define NBL_VBLK_WR_HDESC_CFG_DMA_DEBUG_DEPTH (1) +#define NBL_VBLK_WR_HDESC_CFG_DMA_DEBUG_WIDTH (32) +#define NBL_VBLK_WR_HDESC_CFG_DMA_DEBUG_DWLEN (1) +union vblk_wr_hdesc_cfg_dma_debug_u { + struct vblk_wr_hdesc_cfg_dma_debug { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_WR_HDESC_CFG_DMA_DEBUG_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RMDSEC_IO_TOTAL_DESC_NUM_LG_254_ADDR (0xf9c25c) +#define NBL_VBLK_GET_RMDSEC_IO_TOTAL_DESC_NUM_LG_254_DEPTH (1) +#define NBL_VBLK_GET_RMDSEC_IO_TOTAL_DESC_NUM_LG_254_WIDTH (32) +#define NBL_VBLK_GET_RMDSEC_IO_TOTAL_DESC_NUM_LG_254_DWLEN (1) +union vblk_get_rmdsec_io_total_desc_num_lg_254_u { + struct vblk_get_rmdsec_io_total_desc_num_lg_254 { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RMDSEC_IO_TOTAL_DESC_NUM_LG_254_DWLEN]; +} __packed; + +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_L_ADDR (0xf9c260) +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_L_DEPTH (1) +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_L_WIDTH (32) +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_L_DWLEN (1) +union vblk_wr_hdesc_cfg_dma_addr_l_u { + struct vblk_wr_hdesc_cfg_dma_addr_l { + u32 waddr:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_L_DWLEN]; +} __packed; + +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_H_ADDR (0xf9c264) +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_H_DEPTH (1) +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_H_WIDTH (32) +#define NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_H_DWLEN (1) +union vblk_wr_hdesc_cfg_dma_addr_h_u { + struct vblk_wr_hdesc_cfg_dma_addr_h { + u32 waddr:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_WR_HDESC_CFG_DMA_ADDR_H_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MAX_ADDR (0xf9c2a0) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MAX_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MAX_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MAX_DWLEN (1) +union vblk_get_iohd_cfg_dma_time_dly_max_u { + struct 
vblk_get_iohd_cfg_dma_time_dly_max { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MAX_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MIN_ADDR (0xf9c2a4) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MIN_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MIN_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MIN_DWLEN (1) +union vblk_get_iohd_cfg_dma_time_dly_min_u { + struct vblk_get_iohd_cfg_dma_time_dly_min { + u32 dbg:32; /* [31:0] Default:0xffff_ffff RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_MIN_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_L_ADDR (0xf9c2ac) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_L_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_L_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_L_DWLEN (1) +union vblk_get_iohd_cfg_dma_time_dly_acc_l_u { + struct vblk_get_iohd_cfg_dma_time_dly_acc_l { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_L_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_H_ADDR (0xf9c2b0) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_H_DEPTH (1) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_H_WIDTH (32) +#define NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_H_DWLEN (1) +union vblk_get_iohd_cfg_dma_time_dly_acc_h_u { + struct vblk_get_iohd_cfg_dma_time_dly_acc_h { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IOHD_CFG_DMA_TIME_DLY_ACC_H_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MAX_ADDR (0xf9c2b4) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MAX_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MAX_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MAX_DWLEN (1) +union vblk_get_edesc_cfg_dma_time_dly_max_u { + struct vblk_get_edesc_cfg_dma_time_dly_max { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MAX_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MIN_ADDR (0xf9c2b8) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MIN_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MIN_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MIN_DWLEN (1) +union vblk_get_edesc_cfg_dma_time_dly_min_u { + struct vblk_get_edesc_cfg_dma_time_dly_min { + u32 dbg:32; /* [31:0] Default:0xffff_ffff RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_MIN_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_L_ADDR (0xf9c2c0) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_L_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_L_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_L_DWLEN (1) +union vblk_get_edesc_cfg_dma_time_dly_acc_l_u { + struct vblk_get_edesc_cfg_dma_time_dly_acc_l { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_L_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_H_ADDR (0xf9c2c4) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_H_DEPTH (1) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_H_WIDTH (32) +#define NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_H_DWLEN (1) +union vblk_get_edesc_cfg_dma_time_dly_acc_h_u { + struct vblk_get_edesc_cfg_dma_time_dly_acc_h { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_CFG_DMA_TIME_DLY_ACC_H_DWLEN]; +} __packed; + +#define 
NBL_VBLK_H2EDMA_TIME_DLY_MAX_ADDR (0xf9c2c8) +#define NBL_VBLK_H2EDMA_TIME_DLY_MAX_DEPTH (1) +#define NBL_VBLK_H2EDMA_TIME_DLY_MAX_WIDTH (32) +#define NBL_VBLK_H2EDMA_TIME_DLY_MAX_DWLEN (1) +union vblk_h2edma_time_dly_max_u { + struct vblk_h2edma_time_dly_max { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_H2EDMA_TIME_DLY_MAX_DWLEN]; +} __packed; + +#define NBL_VBLK_H2EDMA_TIME_DLY_MIN_ADDR (0xf9c2cc) +#define NBL_VBLK_H2EDMA_TIME_DLY_MIN_DEPTH (1) +#define NBL_VBLK_H2EDMA_TIME_DLY_MIN_WIDTH (32) +#define NBL_VBLK_H2EDMA_TIME_DLY_MIN_DWLEN (1) +union vblk_h2edma_time_dly_min_u { + struct vblk_h2edma_time_dly_min { + u32 dbg:32; /* [31:0] Default:0xffff_ffff RO */ + } __packed info; + u32 data[NBL_VBLK_H2EDMA_TIME_DLY_MIN_DWLEN]; +} __packed; + +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_L_ADDR (0xf9c2d4) +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_L_DEPTH (1) +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_L_WIDTH (32) +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_L_DWLEN (1) +union vblk_h2edma_time_dly_acc_l_u { + struct vblk_h2edma_time_dly_acc_l { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_H2EDMA_TIME_DLY_ACC_L_DWLEN]; +} __packed; + +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_H_ADDR (0xf9c2d8) +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_H_DEPTH (1) +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_H_WIDTH (32) +#define NBL_VBLK_H2EDMA_TIME_DLY_ACC_H_DWLEN (1) +union vblk_h2edma_time_dly_acc_h_u { + struct vblk_h2edma_time_dly_acc_h { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_H2EDMA_TIME_DLY_ACC_H_DWLEN]; +} __packed; + +#define NBL_VBLK_E2HDMA_TIME_DLY_MAX_ADDR (0xf9c2dc) +#define NBL_VBLK_E2HDMA_TIME_DLY_MAX_DEPTH (1) +#define NBL_VBLK_E2HDMA_TIME_DLY_MAX_WIDTH (32) +#define NBL_VBLK_E2HDMA_TIME_DLY_MAX_DWLEN (1) +union vblk_e2hdma_time_dly_max_u { + struct vblk_e2hdma_time_dly_max { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_E2HDMA_TIME_DLY_MAX_DWLEN]; +} __packed; + +#define NBL_VBLK_E2HDMA_TIME_DLY_MIN_ADDR (0xf9c2e0) +#define NBL_VBLK_E2HDMA_TIME_DLY_MIN_DEPTH (1) +#define NBL_VBLK_E2HDMA_TIME_DLY_MIN_WIDTH (32) +#define NBL_VBLK_E2HDMA_TIME_DLY_MIN_DWLEN (1) +union vblk_e2hdma_time_dly_min_u { + struct vblk_e2hdma_time_dly_min { + u32 dbg:32; /* [31:0] Default:0xffff_ffff RO */ + } __packed info; + u32 data[NBL_VBLK_E2HDMA_TIME_DLY_MIN_DWLEN]; +} __packed; + +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_L_ADDR (0xf9c2e8) +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_L_DEPTH (1) +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_L_WIDTH (32) +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_L_DWLEN (1) +union vblk_e2hdma_time_dly_acc_l_u { + struct vblk_e2hdma_time_dly_acc_l { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_E2HDMA_TIME_DLY_ACC_L_DWLEN]; +} __packed; + +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_H_ADDR (0xf9c2ec) +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_H_DEPTH (1) +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_H_WIDTH (32) +#define NBL_VBLK_E2HDMA_TIME_DLY_ACC_H_DWLEN (1) +union vblk_e2hdma_time_dly_acc_h_u { + struct vblk_e2hdma_time_dly_acc_h { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_E2HDMA_TIME_DLY_ACC_H_DWLEN]; +} __packed; + +#define NBL_VBLK_CQRRSCH_IOPS_STAT_ADDR (0xf9c2f0) +#define NBL_VBLK_CQRRSCH_IOPS_STAT_DEPTH (1) +#define NBL_VBLK_CQRRSCH_IOPS_STAT_WIDTH (32) +#define NBL_VBLK_CQRRSCH_IOPS_STAT_DWLEN (1) +union vblk_cqrrsch_iops_stat_u { + struct vblk_cqrrsch_iops_stat { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_VBLK_CQRRSCH_IOPS_STAT_DWLEN]; +} __packed; + +#define NBL_VBLK_DQRRSCH_IOPS_STAT_ADDR (0xf9c2f4) +#define NBL_VBLK_DQRRSCH_IOPS_STAT_DEPTH (1) +#define NBL_VBLK_DQRRSCH_IOPS_STAT_WIDTH (32) +#define NBL_VBLK_DQRRSCH_IOPS_STAT_DWLEN (1) +union vblk_dqrrsch_iops_stat_u { + struct vblk_dqrrsch_iops_stat { + u32 dbg:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQRRSCH_IOPS_STAT_DWLEN]; +} __packed; + +#define NBL_VBLK_DMA_TIME_DLY_ACC_LOCK_CLR_ADDR (0xf9c2f8) +#define NBL_VBLK_DMA_TIME_DLY_ACC_LOCK_CLR_DEPTH (1) +#define NBL_VBLK_DMA_TIME_DLY_ACC_LOCK_CLR_WIDTH (32) +#define NBL_VBLK_DMA_TIME_DLY_ACC_LOCK_CLR_DWLEN (1) +union vblk_dma_time_dly_acc_lock_clr_u { + struct vblk_dma_time_dly_acc_lock_clr { + u32 clr:1; /* [0:0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DMA_TIME_DLY_ACC_LOCK_CLR_DWLEN]; +} __packed; + +#define NBL_VBLK_HW_IDX_QSEL_REG_ADDR (0xf9c318) +#define NBL_VBLK_HW_IDX_QSEL_REG_DEPTH (1) +#define NBL_VBLK_HW_IDX_QSEL_REG_WIDTH (32) +#define NBL_VBLK_HW_IDX_QSEL_REG_DWLEN (1) +union vblk_hw_idx_qsel_reg_u { + struct vblk_hw_idx_qsel_reg { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_VBLK_HW_IDX_QSEL_REG_DWLEN]; +} __packed; + +#define NBL_VBLK_CFG_CAP_SEL_ADDR (0xf9c3a0) +#define NBL_VBLK_CFG_CAP_SEL_DEPTH (1) +#define NBL_VBLK_CFG_CAP_SEL_WIDTH (32) +#define NBL_VBLK_CFG_CAP_SEL_DWLEN (1) +union vblk_cfg_cap_sel_u { + struct vblk_cfg_cap_sel { + u32 dbg:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_VBLK_CFG_CAP_SEL_DWLEN]; +} __packed; + +#define NBL_VBLK_E2HDMA_ERR_FLAG_INFO_ADDR (0xf9c3ac) +#define NBL_VBLK_E2HDMA_ERR_FLAG_INFO_DEPTH (1) +#define NBL_VBLK_E2HDMA_ERR_FLAG_INFO_WIDTH (32) +#define NBL_VBLK_E2HDMA_ERR_FLAG_INFO_DWLEN (1) +union vblk_e2hdma_err_flag_info_u { + struct vblk_e2hdma_err_flag_info { + u32 dbg:32; /* [31:0] Default:0x1ff RO */ + } __packed info; + u32 data[NBL_VBLK_E2HDMA_ERR_FLAG_INFO_DWLEN]; +} __packed; + +#define NBL_VBLK_H2EDMA_ERR_FLAG_INFO_ADDR (0xf9c3b0) +#define NBL_VBLK_H2EDMA_ERR_FLAG_INFO_DEPTH (1) +#define NBL_VBLK_H2EDMA_ERR_FLAG_INFO_WIDTH (32) +#define NBL_VBLK_H2EDMA_ERR_FLAG_INFO_DWLEN (1) +union vblk_h2edma_err_flag_info_u { + struct vblk_h2edma_err_flag_info { + u32 dbg:32; /* [31:0] Default:0x1ff RO */ + } __packed info; + u32 data[NBL_VBLK_H2EDMA_ERR_FLAG_INFO_DWLEN]; +} __packed; + +#define NBL_VBLK_DQDMA_IO_DONE_ADDR (0xf9c580) +#define NBL_VBLK_DQDMA_IO_DONE_DEPTH (1) +#define NBL_VBLK_DQDMA_IO_DONE_WIDTH (32) +#define NBL_VBLK_DQDMA_IO_DONE_DWLEN (1) +union vblk_dqdma_io_done_u { + struct vblk_dqdma_io_done { + u32 dn_th:16; /* [15:0] Default:0x3 RW */ + u32 up_th:16; /* [31:16] Default:0x3 RW */ + } __packed info; + u32 data[NBL_VBLK_DQDMA_IO_DONE_DWLEN]; +} __packed; + +#define NBL_VBLK_HQINFO_RAM_ERR_ADDR (0xf9d000) +#define NBL_VBLK_HQINFO_RAM_ERR_DEPTH (1) +#define NBL_VBLK_HQINFO_RAM_ERR_WIDTH (32) +#define NBL_VBLK_HQINFO_RAM_ERR_DWLEN (1) +union vblk_hqinfo_ram_err_u { + struct vblk_hqinfo_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_HQINFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_HQINFO_RAM1_ERR_ADDR (0xf9d004) +#define NBL_VBLK_HQINFO_RAM1_ERR_DEPTH (1) +#define NBL_VBLK_HQINFO_RAM1_ERR_WIDTH (32) +#define NBL_VBLK_HQINFO_RAM1_ERR_DWLEN (1) +union vblk_hqinfo_ram1_err_u { + struct vblk_hqinfo_ram1_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_HQINFO_RAM1_ERR_DWLEN]; +} 
__packed; + +#define NBL_VBLK_DQ_USED_INFO_RAM_ERR_ADDR (0xf9d008) +#define NBL_VBLK_DQ_USED_INFO_RAM_ERR_DEPTH (1) +#define NBL_VBLK_DQ_USED_INFO_RAM_ERR_WIDTH (32) +#define NBL_VBLK_DQ_USED_INFO_RAM_ERR_DWLEN (1) +union vblk_dq_used_info_ram_err_u { + struct vblk_dq_used_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQ_USED_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_CQ_AVAIL_INFO_RAM_ERR_ADDR (0xf9d00c) +#define NBL_VBLK_CQ_AVAIL_INFO_RAM_ERR_DEPTH (1) +#define NBL_VBLK_CQ_AVAIL_INFO_RAM_ERR_WIDTH (32) +#define NBL_VBLK_CQ_AVAIL_INFO_RAM_ERR_DWLEN (1) +union vblk_cq_avail_info_ram_err_u { + struct vblk_cq_avail_info_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CQ_AVAIL_INFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_CQINFO_RAM_ERR_ADDR (0xf9d010) +#define NBL_VBLK_CQINFO_RAM_ERR_DEPTH (1) +#define NBL_VBLK_CQINFO_RAM_ERR_WIDTH (32) +#define NBL_VBLK_CQINFO_RAM_ERR_DWLEN (1) +union vblk_cqinfo_ram_err_u { + struct vblk_cqinfo_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CQINFO_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_DQINFO_BADDR_RAM_ERR_ADDR (0xf9d014) +#define NBL_VBLK_DQINFO_BADDR_RAM_ERR_DEPTH (1) +#define NBL_VBLK_DQINFO_BADDR_RAM_ERR_WIDTH (32) +#define NBL_VBLK_DQINFO_BADDR_RAM_ERR_DWLEN (1) +union vblk_dqinfo_baddr_ram_err_u { + struct vblk_dqinfo_baddr_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQINFO_BADDR_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_DQINFO_QSIZE_RAM_ERR_ADDR (0xf9d018) +#define NBL_VBLK_DQINFO_QSIZE_RAM_ERR_DEPTH (1) +#define NBL_VBLK_DQINFO_QSIZE_RAM_ERR_WIDTH (32) +#define NBL_VBLK_DQINFO_QSIZE_RAM_ERR_DWLEN (1) +union vblk_dqinfo_qsize_ram_err_u { + struct vblk_dqinfo_qsize_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQINFO_QSIZE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_DQINFO_RAM1_ERR_ADDR (0xf9d01c) +#define NBL_VBLK_DQINFO_RAM1_ERR_DEPTH (1) +#define NBL_VBLK_DQINFO_RAM1_ERR_WIDTH (32) +#define NBL_VBLK_DQINFO_RAM1_ERR_DWLEN (1) +union vblk_dqinfo_ram1_err_u { + struct vblk_dqinfo_ram1_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQINFO_RAM1_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_DQINFO_RAM2_ERR_ADDR (0xf9d020) +#define NBL_VBLK_DQINFO_RAM2_ERR_DEPTH (1) +#define NBL_VBLK_DQINFO_RAM2_ERR_WIDTH (32) +#define NBL_VBLK_DQINFO_RAM2_ERR_DWLEN (1) +union vblk_dqinfo_ram2_err_u { + struct vblk_dqinfo_ram2_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQINFO_RAM2_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_RING_HD_PNR_RAM_ERR_ADDR (0xf9d024) +#define NBL_VBLK_GET_RING_HD_PNR_RAM_ERR_DEPTH (1) +#define NBL_VBLK_GET_RING_HD_PNR_RAM_ERR_WIDTH (32) +#define NBL_VBLK_GET_RING_HD_PNR_RAM_ERR_DWLEN (1) +union vblk_get_ring_hd_pnr_ram_err_u { + struct vblk_get_ring_hd_pnr_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_RING_HD_PNR_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_E2H_HEADER_RAM_ERR_ADDR (0xf9d028) +#define NBL_VBLK_E2H_HEADER_RAM_ERR_DEPTH (1) +#define NBL_VBLK_E2H_HEADER_RAM_ERR_WIDTH (32) +#define NBL_VBLK_E2H_HEADER_RAM_ERR_DWLEN (1) +union vblk_e2h_header_ram_err_u { + struct vblk_e2h_header_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_E2H_HEADER_RAM_ERR_DWLEN]; +} __packed; + 
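+/*
+ * Usage sketch (illustrative only, not generated output): every register
+ * in this file follows the same pattern, a union whose .info member
+ * names the bit-fields and whose .data member holds the raw dword view,
+ * plus *_ADDR/_DEPTH/_WIDTH/_DWLEN constants and, for multi-entry
+ * tables, a *_REG(r) macro stepping through DEPTH entries of DWLEN
+ * dwords each. The helper below shows one plausible access to the
+ * DQDMA_IO_DONE thresholds defined above; it assumes hw_addr maps the
+ * start of the device register space and that writel()/<linux/io.h>
+ * are visible here, neither of which this header provides.
+ */
+static inline void
+nbl_vblk_set_dqdma_io_done_th(void __iomem *hw_addr, u16 dn_th, u16 up_th)
+{
+	union vblk_dqdma_io_done_u io_done = { .data = { 0 } };
+
+	/* Fill the named bit-fields, then write back the raw dword view. */
+	io_done.info.dn_th = dn_th;
+	io_done.info.up_th = up_th;
+	writel(io_done.data[0], hw_addr + NBL_VBLK_DQDMA_IO_DONE_ADDR);
+}
+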
+#define NBL_VBLK_GET_IODESC_HD_RAM_ERR_ADDR (0xf9d02c) +#define NBL_VBLK_GET_IODESC_HD_RAM_ERR_DEPTH (1) +#define NBL_VBLK_GET_IODESC_HD_RAM_ERR_WIDTH (32) +#define NBL_VBLK_GET_IODESC_HD_RAM_ERR_DWLEN (1) +union vblk_get_iodesc_hd_ram_err_u { + struct vblk_get_iodesc_hd_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_HD_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_EDESC_HD_RAM_ERR_ADDR (0xf9d030) +#define NBL_VBLK_GET_EDESC_HD_RAM_ERR_DEPTH (1) +#define NBL_VBLK_GET_EDESC_HD_RAM_ERR_WIDTH (32) +#define NBL_VBLK_GET_EDESC_HD_RAM_ERR_DWLEN (1) +union vblk_get_edesc_hd_ram_err_u { + struct vblk_get_edesc_hd_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_EDESC_HD_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_IO_SCH_RAM_ERR_ADDR (0xf9d034) +#define NBL_VBLK_GET_IODESC_IO_SCH_RAM_ERR_DEPTH (1) +#define NBL_VBLK_GET_IODESC_IO_SCH_RAM_ERR_WIDTH (32) +#define NBL_VBLK_GET_IODESC_IO_SCH_RAM_ERR_DWLEN (1) +union vblk_get_iodesc_io_sch_ram_err_u { + struct vblk_get_iodesc_io_sch_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_IO_SCH_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_IO_SCH_S_RAM_ERR_ADDR (0xf9d038) +#define NBL_VBLK_GET_IODESC_IO_SCH_S_RAM_ERR_DEPTH (1) +#define NBL_VBLK_GET_IODESC_IO_SCH_S_RAM_ERR_WIDTH (32) +#define NBL_VBLK_GET_IODESC_IO_SCH_S_RAM_ERR_DWLEN (1) +union vblk_get_iodesc_io_sch_s_ram_err_u { + struct vblk_get_iodesc_io_sch_s_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_IO_SCH_S_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_IO_DONE_RAM_ERR_ADDR (0xf9d03c) +#define NBL_VBLK_GET_IODESC_IO_DONE_RAM_ERR_DEPTH (1) +#define NBL_VBLK_GET_IODESC_IO_DONE_RAM_ERR_WIDTH (32) +#define NBL_VBLK_GET_IODESC_IO_DONE_RAM_ERR_DWLEN (1) +union vblk_get_iodesc_io_done_ram_err_u { + struct vblk_get_iodesc_io_done_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_IO_DONE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_GET_IODESC_IO_INVLD_RAM_ERR_ADDR (0xf9d040) +#define NBL_VBLK_GET_IODESC_IO_INVLD_RAM_ERR_DEPTH (1) +#define NBL_VBLK_GET_IODESC_IO_INVLD_RAM_ERR_WIDTH (32) +#define NBL_VBLK_GET_IODESC_IO_INVLD_RAM_ERR_DWLEN (1) +union vblk_get_iodesc_io_invld_ram_err_u { + struct vblk_get_iodesc_io_invld_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_GET_IODESC_IO_INVLD_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_CAP_CFG_RAM_ERR_ADDR (0xf9d044) +#define NBL_VBLK_CAP_CFG_RAM_ERR_DEPTH (1) +#define NBL_VBLK_CAP_CFG_RAM_ERR_WIDTH (32) +#define NBL_VBLK_CAP_CFG_RAM_ERR_DWLEN (1) +union vblk_cap_cfg_ram_err_u { + struct vblk_cap_cfg_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CAP_CFG_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_DQDESC_HW_IDX_RAM_ERR_ADDR (0xf9d048) +#define NBL_VBLK_DQDESC_HW_IDX_RAM_ERR_DEPTH (1) +#define NBL_VBLK_DQDESC_HW_IDX_RAM_ERR_WIDTH (32) +#define NBL_VBLK_DQDESC_HW_IDX_RAM_ERR_DWLEN (1) +union vblk_dqdesc_hw_idx_ram_err_u { + struct vblk_dqdesc_hw_idx_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQDESC_HW_IDX_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_DQDMA_IODONE_NUM_RAM_ERR_ADDR (0xf9d04c) +#define NBL_VBLK_DQDMA_IODONE_NUM_RAM_ERR_DEPTH (1) +#define NBL_VBLK_DQDMA_IODONE_NUM_RAM_ERR_WIDTH (32) 
+#define NBL_VBLK_DQDMA_IODONE_NUM_RAM_ERR_DWLEN (1) +union vblk_dqdma_iodone_num_ram_err_u { + struct vblk_dqdma_iodone_num_ram_err { + u32 info:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQDMA_IODONE_NUM_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_VBLK_HQ_CFG_ADDR (0xfa0000) +#define NBL_VBLK_HQ_CFG_DEPTH (512) +#define NBL_VBLK_HQ_CFG_WIDTH (256) +#define NBL_VBLK_HQ_CFG_DWLEN (8) +union vblk_hq_cfg_u { + struct vblk_hq_cfg { + u32 queue_size_mask:16; /* [15:0] Default:0x0 RW */ + u32 queue_enable:1; /* [16:16] Default:0x0 RW */ + u32 RESERVE:15; /* [31:17] Default:0x0 RO */ + u32 desc_base_addr_l:32; /* [63:32] Default:0x0 RW */ + u32 desc_base_addr_h:32; /* [95:64] Default:0x0 RW */ + u32 RESERVE_96_127:32; /* [127:96] Default:0x0 RO */ + u32 RESERVE_128:1; /* [128:128] Default:0x0 RO */ + u32 rsv3:31; /* [159:129] Default:0x0 RO */ + u32 rsv2:32; /* [191:160] Default:0x0 RO */ + u32 rsv1:32; /* [223:192] Default:0x0 RO */ + u32 rsv:32; /* [255:224] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_HQ_CFG_DWLEN]; +} __packed; +#define NBL_VBLK_HQ_CFG_REG(r) (NBL_VBLK_HQ_CFG_ADDR + \ + (NBL_VBLK_HQ_CFG_DWLEN * 4) * (r)) + +#define NBL_VBLK_SPLIT_CFG_ADDR (0xfa4000) +#define NBL_VBLK_SPLIT_CFG_DEPTH (512) +#define NBL_VBLK_SPLIT_CFG_WIDTH (256) +#define NBL_VBLK_SPLIT_CFG_DWLEN (8) +union vblk_split_cfg_u { + struct vblk_split_cfg { + u32 queue_size_mask:16; /* [15:0] Default:0x0 RW */ + u32 RESERVE_0:1; /* [16:16] Default:0x0 RO */ + u32 RESERVE_1:15; /* [31:17] Default:0x0 RO */ + u32 used_base_addr_l:32; /* [63:32] Default:0x0 RW */ + u32 used_base_addr_h:32; /* [95:64] Default:0x0 RW */ + u32 avail_base_addr_l:32; /* [127:96] Default:0x0 RW */ + u32 avail_base_addr_h:32; /* [159:128] Default:0x0 RW */ + u32 packed_ring_feature:1; /* [160:160] Default:0x1 RW */ + u32 rsv:31; /* [255:161] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:161] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_SPLIT_CFG_DWLEN]; +} __packed; +#define NBL_VBLK_SPLIT_CFG_REG(r) (NBL_VBLK_SPLIT_CFG_ADDR + \ + (NBL_VBLK_SPLIT_CFG_DWLEN * 4) * (r)) + +#define NBL_VBLK_CQ_CFG_ADDR (0xfac000) +#define NBL_VBLK_CQ_CFG_DEPTH (512) +#define NBL_VBLK_CQ_CFG_WIDTH (128) +#define NBL_VBLK_CQ_CFG_DWLEN (4) +union vblk_cq_cfg_u { + struct vblk_cq_cfg { + u32 queue_size_mask:16; /* [15:0] Default:0x0 RW */ + u32 queue_enable:1; /* [16:16] Default:0x0 RW */ + u32 RESERVE:15; /* [31:17] Default:0x0 RO */ + u32 desc_base_addr_l:32; /* [63:32] Default:0x0 RW */ + u32 desc_base_addr_h:32; /* [95:64] Default:0x0 RW */ + u32 rsv:32; /* [127:96] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CQ_CFG_DWLEN]; +} __packed; +#define NBL_VBLK_CQ_CFG_REG(r) (NBL_VBLK_CQ_CFG_ADDR + \ + (NBL_VBLK_CQ_CFG_DWLEN * 4) * (r)) + +#define NBL_VBLK_DQ_CFG_ADDR (0xfb0000) +#define NBL_VBLK_DQ_CFG_DEPTH (512) +#define NBL_VBLK_DQ_CFG_WIDTH (128) +#define NBL_VBLK_DQ_CFG_DWLEN (4) +union vblk_dq_cfg_u { + struct vblk_dq_cfg { + u32 queue_size_mask:16; /* [15:0] Default:0x0 RW */ + u32 queue_enable:1; /* [16:16] Default:0x0 RW */ + u32 RESERVE:15; /* [31:17] Default:0x0 RO */ + u32 desc_base_addr_l:32; /* [63:32] Default:0x0 RW */ + u32 desc_base_addr_h:32; /* [95:64] Default:0x0 RW */ + u32 rsv:32; /* [127:96] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQ_CFG_DWLEN]; +} __packed; +#define NBL_VBLK_DQ_CFG_REG(r) (NBL_VBLK_DQ_CFG_ADDR + \ + (NBL_VBLK_DQ_CFG_DWLEN * 4) * (r)) + +#define NBL_VBLK_CQ_STATE_ADDR (0xfbc000) +#define NBL_VBLK_CQ_STATE_DEPTH (512) +#define NBL_VBLK_CQ_STATE_WIDTH 
(32) +#define NBL_VBLK_CQ_STATE_DWLEN (1) +union vblk_cq_state_u { + struct vblk_cq_state { + u32 is_running:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CQ_STATE_DWLEN]; +} __packed; +#define NBL_VBLK_CQ_STATE_REG(r) (NBL_VBLK_CQ_STATE_ADDR + \ + (NBL_VBLK_CQ_STATE_DWLEN * 4) * (r)) + +#define NBL_VBLK_DQ_STATE_ADDR (0xfbd000) +#define NBL_VBLK_DQ_STATE_DEPTH (512) +#define NBL_VBLK_DQ_STATE_WIDTH (32) +#define NBL_VBLK_DQ_STATE_DWLEN (1) +union vblk_dq_state_u { + struct vblk_dq_state { + u32 is_running:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQ_STATE_DWLEN]; +} __packed; +#define NBL_VBLK_DQ_STATE_REG(r) (NBL_VBLK_DQ_STATE_ADDR + \ + (NBL_VBLK_DQ_STATE_DWLEN * 4) * (r)) + +#define NBL_VBLK_SPLIT_AVAIL_HD_RAM_ADDR (0xfbe000) +#define NBL_VBLK_SPLIT_AVAIL_HD_RAM_DEPTH (512) +#define NBL_VBLK_SPLIT_AVAIL_HD_RAM_WIDTH (32) +#define NBL_VBLK_SPLIT_AVAIL_HD_RAM_DWLEN (1) +union vblk_split_avail_hd_ram_u { + struct vblk_split_avail_hd_ram { + u32 split_avail_header_pntr:17; /* [16:0] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_SPLIT_AVAIL_HD_RAM_DWLEN]; +} __packed; +#define NBL_VBLK_SPLIT_AVAIL_HD_RAM_REG(r) (NBL_VBLK_SPLIT_AVAIL_HD_RAM_ADDR + \ + (NBL_VBLK_SPLIT_AVAIL_HD_RAM_DWLEN * 4) * (r)) + +#define NBL_VBLK_SPLIT_USED_HD_RAM_ADDR (0xfbf000) +#define NBL_VBLK_SPLIT_USED_HD_RAM_DEPTH (512) +#define NBL_VBLK_SPLIT_USED_HD_RAM_WIDTH (32) +#define NBL_VBLK_SPLIT_USED_HD_RAM_DWLEN (1) +union vblk_split_used_hd_ram_u { + struct vblk_split_used_hd_ram { + u32 split_used_header_pntr:16; /* [15:0] Default:0x0 RW */ + u32 RESERVE:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_SPLIT_USED_HD_RAM_DWLEN]; +} __packed; +#define NBL_VBLK_SPLIT_USED_HD_RAM_REG(r) (NBL_VBLK_SPLIT_USED_HD_RAM_ADDR + \ + (NBL_VBLK_SPLIT_USED_HD_RAM_DWLEN * 4) * (r)) + +#define NBL_VBLK_HOST_PACKED_HD_RAM_ADDR (0xfc0000) +#define NBL_VBLK_HOST_PACKED_HD_RAM_DEPTH (512) +#define NBL_VBLK_HOST_PACKED_HD_RAM_WIDTH (32) +#define NBL_VBLK_HOST_PACKED_HD_RAM_DWLEN (1) +union vblk_host_packed_hd_ram_u { + struct vblk_host_packed_hd_ram { + u32 packed_header_pntr:16; /* [15:0] Default:0x0 RW */ + u32 wrap_cnt:1; /* [16:16] Default:0x1 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_HOST_PACKED_HD_RAM_DWLEN]; +} __packed; +#define NBL_VBLK_HOST_PACKED_HD_RAM_REG(r) (NBL_VBLK_HOST_PACKED_HD_RAM_ADDR + \ + (NBL_VBLK_HOST_PACKED_HD_RAM_DWLEN * 4) * (r)) + +#define NBL_VBLK_CQ_HD_RAM_ADDR (0xfc1000) +#define NBL_VBLK_CQ_HD_RAM_DEPTH (512) +#define NBL_VBLK_CQ_HD_RAM_WIDTH (32) +#define NBL_VBLK_CQ_HD_RAM_DWLEN (1) +union vblk_cq_hd_ram_u { + struct vblk_cq_hd_ram { + u32 packed_header_pntr:16; /* [15:0] Default:0x0 RW */ + u32 wrap_cnt:1; /* [16:16] Default:0x1 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CQ_HD_RAM_DWLEN]; +} __packed; +#define NBL_VBLK_CQ_HD_RAM_REG(r) (NBL_VBLK_CQ_HD_RAM_ADDR + \ + (NBL_VBLK_CQ_HD_RAM_DWLEN * 4) * (r)) + +#define NBL_VBLK_CQ_IO_STAT_RAM_ADDR (0xfc2000) +#define NBL_VBLK_CQ_IO_STAT_RAM_DEPTH (512) +#define NBL_VBLK_CQ_IO_STAT_RAM_WIDTH (128) +#define NBL_VBLK_CQ_IO_STAT_RAM_DWLEN (4) +union vblk_cq_io_stat_ram_u { + struct vblk_cq_io_stat_ram { + u32 cq_sch_cnt:32; /* [31:0] Default:0x0 RW */ + u32 cq_sch_split_cnt:32; /* [63:32] Default:0x0 RW */ + u32 cq_invld_scn_cnt:32; /* [95:64] Default:0x0 RW */ + u32 
cq_iodone_cnt:32; /* [127:96] Default:0x0 RW */ + } __packed info; + u32 data[NBL_VBLK_CQ_IO_STAT_RAM_DWLEN]; +} __packed; +#define NBL_VBLK_CQ_IO_STAT_RAM_REG(r) (NBL_VBLK_CQ_IO_STAT_RAM_ADDR + \ + (NBL_VBLK_CQ_IO_STAT_RAM_DWLEN * 4) * (r)) + +#define NBL_VBLK_CAP_RAM_ADDR (0xfc5000) +#define NBL_VBLK_CAP_RAM_DEPTH (512) +#define NBL_VBLK_CAP_RAM_WIDTH (128) +#define NBL_VBLK_CAP_RAM_DWLEN (4) +union vblk_cap_ram_u { + struct vblk_cap_ram { + u32 cap_data_arr[4]; /* [127:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_CAP_RAM_DWLEN]; +} __packed; +#define NBL_VBLK_CAP_RAM_REG(r) (NBL_VBLK_CAP_RAM_ADDR + \ + (NBL_VBLK_CAP_RAM_DWLEN * 4) * (r)) + +#define NBL_VBLK_DQ_HD_RAM_ADDR (0xfc7000) +#define NBL_VBLK_DQ_HD_RAM_DEPTH (512) +#define NBL_VBLK_DQ_HD_RAM_WIDTH (32) +#define NBL_VBLK_DQ_HD_RAM_DWLEN (1) +union vblk_dq_hd_ram_u { + struct vblk_dq_hd_ram { + u32 packed_header_pntr:16; /* [15:0] Default:0x0 RW */ + u32 wrap_cnt:1; /* [16:16] Default:0x1 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VBLK_DQ_HD_RAM_DWLEN]; +} __packed; +#define NBL_VBLK_DQ_HD_RAM_REG(r) (NBL_VBLK_DQ_HD_RAM_ADDR + \ + (NBL_VBLK_DQ_HD_RAM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h new file mode 100644 index 0000000000000000000000000000000000000000..5e7892880130e46256e98a121284467e1cfe63ec --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_intf_vdpa.h @@ -0,0 +1,293 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_VDPA_H +#define NBL_VDPA_H 1 + +#include + +#define NBL_VDPA_BASE (0x00F98000) + +#define NBL_VDPA_INT_STATUS_ADDR (0xf98000) +#define NBL_VDPA_INT_STATUS_DEPTH (1) +#define NBL_VDPA_INT_STATUS_WIDTH (32) +#define NBL_VDPA_INT_STATUS_DWLEN (1) +union vdpa_int_status_u { + struct vdpa_int_status { + u32 fatal_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RWC */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RWC */ + u32 cif_err:1; /* [05:05] Default:0x0 RWC */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_VDPA_INT_MASK_ADDR (0xf98004) +#define NBL_VDPA_INT_MASK_DEPTH (1) +#define NBL_VDPA_INT_MASK_WIDTH (32) +#define NBL_VDPA_INT_MASK_DWLEN (1) +union vdpa_int_mask_u { + struct vdpa_int_mask { + u32 fatal_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 RW */ + u32 data_cor_err:1; /* [04:04] Default:0x0 RW */ + u32 cif_err:1; /* [05:05] Default:0x0 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INT_MASK_DWLEN]; +} __packed; + +#define NBL_VDPA_INT_SET_ADDR (0xf98008) +#define NBL_VDPA_INT_SET_DEPTH (1) +#define NBL_VDPA_INT_SET_WIDTH (32) +#define NBL_VDPA_INT_SET_DWLEN (1) +union vdpa_int_set_u { + struct vdpa_int_set { + u32 fatal_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 data_ucor_err:1; /* [03:03] Default:0x0 WO */ + u32 data_cor_err:1; /* [04:04] Default:0x0 WO */ + u32 cif_err:1; /* [05:05] Default:0x0 
WO */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INT_SET_DWLEN]; +} __packed; + +#define NBL_VDPA_INIT_DONE_ADDR (0xf9800c) +#define NBL_VDPA_INIT_DONE_DEPTH (1) +#define NBL_VDPA_INIT_DONE_WIDTH (32) +#define NBL_VDPA_INIT_DONE_DWLEN (1) +union vdpa_init_done_u { + struct vdpa_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_VDPA_CAR_CTRL_ADDR (0xf98058) +#define NBL_VDPA_CAR_CTRL_DEPTH (1) +#define NBL_VDPA_CAR_CTRL_WIDTH (32) +#define NBL_VDPA_CAR_CTRL_DWLEN (1) +union vdpa_car_ctrl_u { + struct vdpa_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_VDPA_FLOW_EN_ADDR (0xf9805c) +#define NBL_VDPA_FLOW_EN_DEPTH (1) +#define NBL_VDPA_FLOW_EN_WIDTH (32) +#define NBL_VDPA_FLOW_EN_DWLEN (1) +union vdpa_flow_en_u { + struct vdpa_flow_en { + u32 ivdpa_cnt_en:1; /* [00:00] Default:0x1 RW */ + u32 ovdpa_cnt_en:1; /* [01:01] Default:0x1 RW */ + u32 vdpa_drop_cnt_en:1; /* [02:02] Default:0x1 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_FLOW_EN_DWLEN]; +} __packed; + +#define NBL_VDPA_CIF_ERR_INFO_ADDR (0xf98060) +#define NBL_VDPA_CIF_ERR_INFO_DEPTH (1) +#define NBL_VDPA_CIF_ERR_INFO_WIDTH (32) +#define NBL_VDPA_CIF_ERR_INFO_DWLEN (1) +union vdpa_cif_err_info_u { + struct vdpa_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_VDPA_EN_ADDR (0xf98100) +#define NBL_VDPA_EN_DEPTH (1) +#define NBL_VDPA_EN_WIDTH (32) +#define NBL_VDPA_EN_DWLEN (1) +union vdpa_en_u { + struct vdpa_en { + u32 vdpa_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_EN_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_BASE_ADDR_L_ADDR (0xf98104) +#define NBL_VDPA_RING_BASE_ADDR_L_DEPTH (1) +#define NBL_VDPA_RING_BASE_ADDR_L_WIDTH (32) +#define NBL_VDPA_RING_BASE_ADDR_L_DWLEN (1) +union vdpa_ring_base_addr_l_u { + struct vdpa_ring_base_addr_l { + u32 base_addr_l:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_VDPA_RING_BASE_ADDR_L_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_BASE_ADDR_H_ADDR (0xf98108) +#define NBL_VDPA_RING_BASE_ADDR_H_DEPTH (1) +#define NBL_VDPA_RING_BASE_ADDR_H_WIDTH (32) +#define NBL_VDPA_RING_BASE_ADDR_H_DWLEN (1) +union vdpa_ring_base_addr_h_u { + struct vdpa_ring_base_addr_h { + u32 base_addr_h:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_VDPA_RING_BASE_ADDR_H_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_SIZE_MASK_ADDR (0xf9810c) +#define NBL_VDPA_RING_SIZE_MASK_DEPTH (1) +#define NBL_VDPA_RING_SIZE_MASK_WIDTH (32) +#define NBL_VDPA_RING_SIZE_MASK_DWLEN (1) +union vdpa_ring_size_mask_u { + struct vdpa_ring_size_mask { + u32 size_mask:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_SIZE_MASK_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_TPNTR_ADDR (0xf98110) +#define NBL_VDPA_RING_TPNTR_DEPTH (1) +#define NBL_VDPA_RING_TPNTR_WIDTH (32) +#define 
NBL_VDPA_RING_TPNTR_DWLEN (1) +union vdpa_ring_tpntr_u { + struct vdpa_ring_tpntr { + u32 tpntr:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_TPNTR_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_HPNTR_ADDR (0xf98114) +#define NBL_VDPA_RING_HPNTR_DEPTH (1) +#define NBL_VDPA_RING_HPNTR_WIDTH (32) +#define NBL_VDPA_RING_HPNTR_DWLEN (1) +union vdpa_ring_hpntr_u { + struct vdpa_ring_hpntr { + u32 hpntr:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_HPNTR_DWLEN]; +} __packed; + +#define NBL_VDPA_RING_HPNTR_RST_ADDR (0xf98118) +#define NBL_VDPA_RING_HPNTR_RST_DEPTH (1) +#define NBL_VDPA_RING_HPNTR_RST_WIDTH (32) +#define NBL_VDPA_RING_HPNTR_RST_DWLEN (1) +union vdpa_ring_hpntr_rst_u { + struct vdpa_ring_hpntr_rst { + u32 hpntr_rst:1; /* [00:00] Default:0x0 WO */ + u32 rdy:1; /* [01:01] Default:0x1 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_RING_HPNTR_RST_DWLEN]; +} __packed; + +#define NBL_VDPA_BURST_LEN_ADDR (0xf9811c) +#define NBL_VDPA_BURST_LEN_DEPTH (1) +#define NBL_VDPA_BURST_LEN_WIDTH (32) +#define NBL_VDPA_BURST_LEN_DWLEN (1) +union vdpa_burst_len_u { + struct vdpa_burst_len { + u32 burst_len:6; /* [05:00] Default:0x10 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_BURST_LEN_DWLEN]; +} __packed; + +#define NBL_VDPA_TIMEOUT_VALUE_ADDR (0xf98120) +#define NBL_VDPA_TIMEOUT_VALUE_DEPTH (1) +#define NBL_VDPA_TIMEOUT_VALUE_WIDTH (32) +#define NBL_VDPA_TIMEOUT_VALUE_DWLEN (1) +union vdpa_timeout_value_u { + struct vdpa_timeout_value { + u32 timeout_value:32; /* [31:00] Default:0x190 RW */ + } __packed info; + u32 data[NBL_VDPA_TIMEOUT_VALUE_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_MODE_ADDR (0xf98124) +#define NBL_VDPA_DIF_MODE_DEPTH (1) +#define NBL_VDPA_DIF_MODE_WIDTH (32) +#define NBL_VDPA_DIF_MODE_DWLEN (1) +union vdpa_dif_mode_u { + struct vdpa_dif_mode { + u32 dif_mode:3; /* [02:00] Default:0x2 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_MODE_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_INFO_ADDR (0xf98128) +#define NBL_VDPA_DIF_INFO_DEPTH (1) +#define NBL_VDPA_DIF_INFO_WIDTH (32) +#define NBL_VDPA_DIF_INFO_DWLEN (1) +union vdpa_dif_info_u { + struct vdpa_dif_info { + u32 dif_info:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_INFO_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_BDF_ADDR (0xf9812c) +#define NBL_VDPA_DIF_BDF_DEPTH (1) +#define NBL_VDPA_DIF_BDF_WIDTH (32) +#define NBL_VDPA_DIF_BDF_DWLEN (1) +union vdpa_dif_bdf_u { + struct vdpa_dif_bdf { + u32 dif_bdf:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_BDF_DWLEN]; +} __packed; + +#define NBL_VDPA_DIF_INT_ADDR (0xf98130) +#define NBL_VDPA_DIF_INT_DEPTH (1) +#define NBL_VDPA_DIF_INT_WIDTH (32) +#define NBL_VDPA_DIF_INT_DWLEN (1) +union vdpa_dif_int_u { + struct vdpa_dif_int { + u32 dif_int:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_VDPA_DIF_INT_DWLEN]; +} __packed; + +#define NBL_VDPA_MDL_INFO_ADDR (0xf98134) +#define NBL_VDPA_MDL_INFO_DEPTH (1) +#define NBL_VDPA_MDL_INFO_WIDTH (32) +#define NBL_VDPA_MDL_INFO_DWLEN (1) +union vdpa_mdl_info_u { + struct vdpa_mdl_info { + u32 version_id:16; /* [15:00] Default:0x0001 RO */ + u32 prj_id:16; /* [31:16] 
Default:0x0020 RO */ + } __packed info; + u32 data[NBL_VDPA_MDL_INFO_DWLEN]; +} __packed; + +#define NBL_VDPA_VERSION_ADDR (0xf98138) +#define NBL_VDPA_VERSION_DEPTH (1) +#define NBL_VDPA_VERSION_WIDTH (32) +#define NBL_VDPA_VERSION_DWLEN (1) +union vdpa_version_u { + struct vdpa_version { + u32 date:32; /* [31:00] Default:0x20220615 RO */ + } __packed info; + u32 data[NBL_VDPA_VERSION_DWLEN]; +} __packed; + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h new file mode 100644 index 0000000000000000000000000000000000000000..ae68b8efe15aedaf3e3928d0a97952d86ab5ed57 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe.h @@ -0,0 +1,14 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#include "nbl_ppe_ipro.h" +#include "nbl_ppe_epro.h" +#include "nbl_ppe_pp0.h" +#include "nbl_ppe_pp1.h" +#include "nbl_ppe_pp2.h" +#include "nbl_ppe_fem.h" +#include "nbl_ppe_mcc.h" +#include "nbl_ppe_acl.h" +#include "nbl_ppe_cap.h" +#include "nbl_ppe_uprbac.h" +#include "nbl_ppe_dprbac.h" diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h new file mode 100644 index 0000000000000000000000000000000000000000..8cb5158e94975bcccd3fa963599beddffa55aa5e --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_acl.h @@ -0,0 +1,2412 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_ACL_H +#define NBL_ACL_H 1 + +#include + +#define NBL_ACL_BASE (0x00B64000) + +#define NBL_ACL_INT_STATUS_ADDR (0xb64000) +#define NBL_ACL_INT_STATUS_DEPTH (1) +#define NBL_ACL_INT_STATUS_WIDTH (32) +#define NBL_ACL_INT_STATUS_DWLEN (1) +union acl_int_status_u { + struct acl_int_status { + u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [02:02] Default:0x0 RWC */ + u32 data_cor_err:1; /* [03:03] Default:0x0 RWC */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 set_dport_encode_cfg_err:1; /* [05:05] Default:0x0 RWC */ + u32 tcam_cor_err:1; /* [06:06] Default:0x0 RWC */ + u32 tcam_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 flow_id_err:1; /* [08:08] Default:0x0 RWC */ + u32 stat_id_conflict_int:1; /* [09:09] Default:0x0 RWC */ + u32 flow_id_conflict_int:1; /* [10:10] Default:0x0 RWC */ + u32 fsm_err:1; /* [11:11] Default:0x0 RWC */ + u32 nxt_stage_lp_cfg_err:1; /* [12:12] Default:0x0 RWC */ + u32 input_err:1; /* [13:13] Default:0x0 RWC */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_ACL_INT_MASK_ADDR (0xb64004) +#define NBL_ACL_INT_MASK_DEPTH (1) +#define NBL_ACL_INT_MASK_WIDTH (32) +#define NBL_ACL_INT_MASK_DWLEN (1) +union acl_int_mask_u { + struct acl_int_mask { + u32 fifo_uflw_err:1; /* [00:00] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [01:01] Default:0x0 RW */ + u32 data_ucor_err:1; /* [02:02] Default:0x0 RW */ + u32 data_cor_err:1; /* [03:03] Default:0x0 RW */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 set_dport_encode_cfg_err:1; /* [05:05] Default:0x0 RW */ + u32 tcam_cor_err:1; /* [06:06] Default:0x0 RW */ + u32 tcam_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 flow_id_err:1; /* [08:08] Default:0x0 RW */ + u32 stat_id_conflict_int:1; 
/* [09:09] Default:0x0 RW */ + u32 flow_id_conflict_int:1; /* [10:10] Default:0x0 RW */ + u32 fsm_err:1; /* [11:11] Default:0x0 RW */ + u32 nxt_stage_lp_cfg_err:1; /* [12:12] Default:0x0 RW */ + u32 input_err:1; /* [13:13] Default:0x0 RW */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INT_MASK_DWLEN]; +} __packed; + +#define NBL_ACL_INT_SET_ADDR (0xb64008) +#define NBL_ACL_INT_SET_DEPTH (1) +#define NBL_ACL_INT_SET_WIDTH (32) +#define NBL_ACL_INT_SET_DWLEN (1) +union acl_int_set_u { + struct acl_int_set { + u32 fifo_uflw_err:1; /* [00:00] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [01:01] Default:0x0 WO */ + u32 data_ucor_err:1; /* [02:02] Default:0x0 WO */ + u32 data_cor_err:1; /* [03:03] Default:0x0 WO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 set_dport_encode_cfg_err:1; /* [05:05] Default:0x0 WO */ + u32 tcam_cor_err:1; /* [06:06] Default:0x0 WO */ + u32 tcam_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 flow_id_err:1; /* [08:08] Default:0x0 WO */ + u32 stat_id_conflict_int:1; /* [09:09] Default:0x0 WO */ + u32 flow_id_conflict_int:1; /* [10:10] Default:0x0 WO */ + u32 fsm_err:1; /* [11:11] Default:0x0 WO */ + u32 nxt_stage_lp_cfg_err:1; /* [12:12] Default:0x0 WO */ + u32 input_err:1; /* [13:13] Default:0x0 WO */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INT_SET_DWLEN]; +} __packed; + +#define NBL_ACL_INIT_DONE_ADDR (0xb6400c) +#define NBL_ACL_INIT_DONE_DEPTH (1) +#define NBL_ACL_INIT_DONE_WIDTH (32) +#define NBL_ACL_INIT_DONE_DWLEN (1) +union acl_init_done_u { + struct acl_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_ACL_CIF_ERR_INFO_ADDR (0xb64084) +#define NBL_ACL_CIF_ERR_INFO_DEPTH (1) +#define NBL_ACL_CIF_ERR_INFO_WIDTH (32) +#define NBL_ACL_CIF_ERR_INFO_DWLEN (1) +union acl_cif_err_info_u { + struct acl_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_ACL_INIT_START_ADDR (0xb6409c) +#define NBL_ACL_INIT_START_DEPTH (1) +#define NBL_ACL_INIT_START_WIDTH (32) +#define NBL_ACL_INIT_START_DWLEN (1) +union acl_init_start_u { + struct acl_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INIT_START_DWLEN]; +} __packed; + +#define NBL_ACL_BYPASS_REG_ADDR (0xb64100) +#define NBL_ACL_BYPASS_REG_DEPTH (1) +#define NBL_ACL_BYPASS_REG_WIDTH (32) +#define NBL_ACL_BYPASS_REG_DWLEN (1) +union acl_bypass_reg_u { + struct acl_bypass_reg { + u32 acl_bypass:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_BYPASS_REG_DWLEN]; +} __packed; + +#define NBL_ACL_LOOP_BACK_EN_ADDR (0xb64108) +#define NBL_ACL_LOOP_BACK_EN_DEPTH (1) +#define NBL_ACL_LOOP_BACK_EN_WIDTH (32) +#define NBL_ACL_LOOP_BACK_EN_DWLEN (1) +union acl_loop_back_en_u { + struct acl_loop_back_en { + u32 loop_back_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_LOOP_BACK_EN_DWLEN]; +} __packed; + +#define NBL_ACL_LOOP_FLAG_EN_ADDR (0xb6410c) +#define NBL_ACL_LOOP_FLAG_EN_DEPTH (1) +#define NBL_ACL_LOOP_FLAG_EN_WIDTH (32) +#define NBL_ACL_LOOP_FLAG_EN_DWLEN (1) +union acl_loop_flag_en_u { + struct acl_loop_flag_en { + 
u32 flag_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_LOOP_FLAG_EN_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION0_ADDR (0xb64160) +#define NBL_ACL_DEFAULT_ACTION0_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION0_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION0_DWLEN (1) +union acl_default_action0_u { + struct acl_default_action0 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION0_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION1_ADDR (0xb64164) +#define NBL_ACL_DEFAULT_ACTION1_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION1_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION1_DWLEN (1) +union acl_default_action1_u { + struct acl_default_action1 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION1_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION2_ADDR (0xb64168) +#define NBL_ACL_DEFAULT_ACTION2_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION2_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION2_DWLEN (1) +union acl_default_action2_u { + struct acl_default_action2 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION2_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION3_ADDR (0xb6416c) +#define NBL_ACL_DEFAULT_ACTION3_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION3_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION3_DWLEN (1) +union acl_default_action3_u { + struct acl_default_action3 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION3_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION4_ADDR (0xb64170) +#define NBL_ACL_DEFAULT_ACTION4_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION4_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION4_DWLEN (1) +union acl_default_action4_u { + struct acl_default_action4 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION4_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION5_ADDR (0xb64174) +#define NBL_ACL_DEFAULT_ACTION5_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION5_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION5_DWLEN (1) +union acl_default_action5_u { + struct acl_default_action5 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION5_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION6_ADDR (0xb64178) +#define NBL_ACL_DEFAULT_ACTION6_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION6_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION6_DWLEN (1) +union acl_default_action6_u { + struct acl_default_action6 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION6_DWLEN]; +} __packed; + +#define NBL_ACL_DEFAULT_ACTION7_ADDR (0xb6417c) +#define NBL_ACL_DEFAULT_ACTION7_DEPTH (1) +#define NBL_ACL_DEFAULT_ACTION7_WIDTH (32) +#define NBL_ACL_DEFAULT_ACTION7_DWLEN (1) +union acl_default_action7_u { + struct acl_default_action7 { + u32 data:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION7_DWLEN]; +} __packed; + +#define NBL_ACL_SET_FLAG_ADDR (0xb64200) +#define NBL_ACL_SET_FLAG_DEPTH (1) +#define NBL_ACL_SET_FLAG_WIDTH (32) +#define NBL_ACL_SET_FLAG_DWLEN (1) +union 
acl_set_flag_u { + struct acl_set_flag { + u32 set_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_SET_FLAG_DWLEN]; +} __packed; + +#define NBL_ACL_CLEAR_FLAG_ADDR (0xb64204) +#define NBL_ACL_CLEAR_FLAG_DEPTH (1) +#define NBL_ACL_CLEAR_FLAG_WIDTH (32) +#define NBL_ACL_CLEAR_FLAG_DWLEN (1) +union acl_clear_flag_u { + struct acl_clear_flag { + u32 clear_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_CLEAR_FLAG_DWLEN]; +} __packed; + +#define NBL_ACL_SET_FLAG0_ADDR (0xb64208) +#define NBL_ACL_SET_FLAG0_DEPTH (1) +#define NBL_ACL_SET_FLAG0_WIDTH (32) +#define NBL_ACL_SET_FLAG0_DWLEN (1) +union acl_set_flag0_u { + struct acl_set_flag0 { + u32 set_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_SET_FLAG0_DWLEN]; +} __packed; + +#define NBL_ACL_CLEAR_FLAG0_ADDR (0xb6420c) +#define NBL_ACL_CLEAR_FLAG0_DEPTH (1) +#define NBL_ACL_CLEAR_FLAG0_WIDTH (32) +#define NBL_ACL_CLEAR_FLAG0_DWLEN (1) +union acl_clear_flag0_u { + struct acl_clear_flag0 { + u32 clear_flag0:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_CLEAR_FLAG0_DWLEN]; +} __packed; + +#define NBL_ACL_DPORT_CFG_ADDR (0xb64220) +#define NBL_ACL_DPORT_CFG_DEPTH (1) +#define NBL_ACL_DPORT_CFG_WIDTH (32) +#define NBL_ACL_DPORT_CFG_DWLEN (1) +union acl_dport_cfg_u { + struct acl_dport_cfg { + u32 act_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DPORT_CFG_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY0_ADDR (0xb64230) +#define NBL_ACL_ACTION_PRIORITY0_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY0_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY0_DWLEN (1) +union acl_action_priority0_u { + struct acl_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY1_ADDR (0xb64234) +#define NBL_ACL_ACTION_PRIORITY1_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY1_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY1_DWLEN (1) +union acl_action_priority1_u { + struct acl_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 
action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY2_ADDR (0xb64238) +#define NBL_ACL_ACTION_PRIORITY2_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY2_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY2_DWLEN (1) +union acl_action_priority2_u { + struct acl_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY2_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY3_ADDR (0xb6423c) +#define NBL_ACL_ACTION_PRIORITY3_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY3_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY3_DWLEN (1) +union acl_action_priority3_u { + struct acl_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY4_ADDR (0xb64240) +#define NBL_ACL_ACTION_PRIORITY4_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY4_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY4_DWLEN (1) +union acl_action_priority4_u { + struct acl_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 
action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY5_ADDR (0xb64244) +#define NBL_ACL_ACTION_PRIORITY5_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY5_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY5_DWLEN (1) +union acl_action_priority5_u { + struct acl_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY6_ADDR (0xb64248) +#define NBL_ACL_ACTION_PRIORITY6_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY6_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY6_DWLEN (1) +union acl_action_priority6_u { + struct acl_action_priority6 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_ACL_ACTION_PRIORITY7_ADDR (0xb6424c) +#define NBL_ACL_ACTION_PRIORITY7_DEPTH (1) +#define NBL_ACL_ACTION_PRIORITY7_WIDTH (32) +#define NBL_ACL_ACTION_PRIORITY7_DWLEN (1) +union acl_action_priority7_u { + struct acl_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_ACL_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_MASK_ADDR_ADDR (0xb64280) +#define NBL_ACL_TCAM_MASK_ADDR_DEPTH (1) +#define NBL_ACL_TCAM_MASK_ADDR_WIDTH (32) +#define NBL_ACL_TCAM_MASK_ADDR_DWLEN (1) +union acl_tcam_mask_addr_u { + struct acl_tcam_mask_addr { + u32 addr0:9; /* [08:00] Default:0x0 RW */ + u32 addr0_en:1; /* [09:09] Default:0x0 RW */ + u32 addr1:9; /* [18:10] Default:0x0 RW */ + u32 addr1_en:1; /* [19:19] Default:0x0 RW */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_MASK_ADDR_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_MASK_BTM_ADDR (0xb64284) +#define NBL_ACL_TCAM_MASK_BTM_DEPTH (1) +#define NBL_ACL_TCAM_MASK_BTM_WIDTH (32) +#define NBL_ACL_TCAM_MASK_BTM_DWLEN (1) +union acl_tcam_mask_btm_u { + struct acl_tcam_mask_btm { + u32 btm:16; /* [15:00] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_MASK_BTM_DWLEN]; +} __packed; + +#define NBL_ACL_CAP_ADDR (0xb64288) +#define NBL_ACL_CAP_DEPTH (1) +#define NBL_ACL_CAP_WIDTH (32) +#define NBL_ACL_CAP_DWLEN (1) +union acl_cap_u { + struct acl_cap { + u32 onloop_cap_mode:1; /* [00:00] Default:0x0 RW */ + u32 noloop_cap_start:1; /* [01:01] Default:0x0 WO */ + u32 loop_cap_mode:1; /* [02:02] Default:0x0 RW */ + u32 loop_cap_start:1; /* [03:03] Default:0x0 WO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CAP_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_ACT_ADDR (0xb64300) +#define NBL_ACL_FLOW_ID_STAT_ACT_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_ACT_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_ACT_DWLEN (1) +union acl_flow_id_stat_act_u { + struct acl_flow_id_stat_act { + u32 flow_id_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_ACT_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_ADDR (0xb64304) +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_GLB_CLR_DWLEN (1) +union acl_flow_id_stat_glb_clr_u { + struct acl_flow_id_stat_glb_clr { + u32 glb_clr:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_GLB_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_ADDR (0xb64308) +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_RD_CLR_DWLEN (1) +union acl_flow_id_stat_rd_clr_u { + struct acl_flow_id_stat_rd_clr { + u32 cpu_rd_clr:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_RD_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_FLOW_ID_STAT_DONE_ADDR (0xb64310) +#define NBL_ACL_FLOW_ID_STAT_DONE_DEPTH (1) +#define NBL_ACL_FLOW_ID_STAT_DONE_WIDTH (32) +#define NBL_ACL_FLOW_ID_STAT_DONE_DWLEN (1) +union acl_flow_id_stat_done_u { + struct acl_flow_id_stat_done { + u32 glb_clr_done:1; /* [00:00] Default:0x0 RO */ + u32 stat_init_done:1; /* [01:01] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_DONE_DWLEN]; +} __packed; + +#define NBL_ACL_SCAN_TH_ADDR (0xb64318) +#define NBL_ACL_SCAN_TH_DEPTH (1) +#define NBL_ACL_SCAN_TH_WIDTH (32) +#define NBL_ACL_SCAN_TH_DWLEN (1) +union acl_scan_th_u { + struct acl_scan_th { + u32 scan_th:10; /* [09:00] Default:0xff RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_ACL_SCAN_TH_DWLEN]; +} __packed; + +#define NBL_ACL_SCAN_EN_ADDR (0xb6431c) +#define NBL_ACL_SCAN_EN_DEPTH (1) +#define NBL_ACL_SCAN_EN_WIDTH (32) +#define NBL_ACL_SCAN_EN_DWLEN (1) +union acl_scan_en_u { + struct acl_scan_en { + u32 scan_en:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_SCAN_EN_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_ADDR (0xb64320) +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_DEPTH (1) +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_WIDTH (32) +#define NBL_ACL_STAT_ID_STAT_GLB_CLR_DWLEN (1) +union acl_stat_id_stat_glb_clr_u { + struct acl_stat_id_stat_glb_clr { + u32 glb_clr:1; /* [00:00] Default:0x0 WO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_GLB_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_STAT_RD_CLR_ADDR (0xb64324) +#define NBL_ACL_STAT_ID_STAT_RD_CLR_DEPTH (1) +#define NBL_ACL_STAT_ID_STAT_RD_CLR_WIDTH (32) +#define NBL_ACL_STAT_ID_STAT_RD_CLR_DWLEN (1) +union acl_stat_id_stat_rd_clr_u { + struct acl_stat_id_stat_rd_clr { + u32 cpu_rd_clr:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_RD_CLR_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_STAT_DONE_ADDR (0xb64328) +#define NBL_ACL_STAT_ID_STAT_DONE_DEPTH (1) +#define NBL_ACL_STAT_ID_STAT_DONE_WIDTH (32) +#define NBL_ACL_STAT_ID_STAT_DONE_DWLEN (1) +union acl_stat_id_stat_done_u { + struct acl_stat_id_stat_done { + u32 glb_clr_done:1; /* [00:00] Default:0x0 RO */ + u32 stat_init_done:1; /* [01:01] Default:0x0 RO */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_DONE_DWLEN]; +} __packed; + +#define NBL_ACL_STAT_ID_ACT_ADDR (0xb6432c) +#define NBL_ACL_STAT_ID_ACT_DEPTH (1) +#define NBL_ACL_STAT_ID_ACT_WIDTH (32) +#define NBL_ACL_STAT_ID_ACT_DWLEN (1) +union acl_stat_id_act_u { + struct acl_stat_id_act { + u32 act_id:6; /* [05:00] Default:0x10 RW */ + u32 act_en:1; /* [06:06] Default:0x0 RW */ + u32 rsv:25; /* [31:07] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_ACT_DWLEN]; +} __packed; + +#define NBL_ACL_CAR_CTRL_ADDR (0xb64410) +#define NBL_ACL_CAR_CTRL_DEPTH (1) +#define NBL_ACL_CAR_CTRL_WIDTH (32) +#define NBL_ACL_CAR_CTRL_DWLEN (1) +union acl_car_ctrl_u { + struct acl_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_ACL_IN_ADDR (0xb64600) +#define NBL_ACL_IN_DEPTH (1) +#define NBL_ACL_IN_WIDTH (32) +#define NBL_ACL_IN_DWLEN (1) +union acl_in_u { + struct acl_in { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_IN_DWLEN]; +} __packed; + +#define NBL_ACL_OUT_ADDR (0xb64608) +#define NBL_ACL_OUT_DEPTH (1) +#define NBL_ACL_OUT_WIDTH (32) +#define NBL_ACL_OUT_DWLEN (1) +union acl_out_u { + struct acl_out { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_OUT_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_SE_ADDR (0xb6461c) +#define NBL_ACL_TCAM_SE_DEPTH (1) +#define NBL_ACL_TCAM_SE_WIDTH (32) +#define NBL_ACL_TCAM_SE_DWLEN (1) +union acl_tcam_se_u { + struct acl_tcam_se { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_TCAM_SE_DWLEN]; +} __packed; + +#define 
NBL_ACL_TCAM_HIT_ADDR (0xb64624) +#define NBL_ACL_TCAM_HIT_DEPTH (1) +#define NBL_ACL_TCAM_HIT_WIDTH (32) +#define NBL_ACL_TCAM_HIT_DWLEN (1) +union acl_tcam_hit_u { + struct acl_tcam_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR0_ADDR (0xb6462c) +#define NBL_ACL_TCAM_HIT_ADDR0_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR0_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR0_DWLEN (1) +union acl_tcam_hit_addr0_u { + struct acl_tcam_hit_addr0 { + u32 addr0:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id0:4; /* [12:09] Default:0x0 RO */ + u32 addr1:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id1:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR0_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR1_ADDR (0xb64630) +#define NBL_ACL_TCAM_HIT_ADDR1_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR1_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR1_DWLEN (1) +union acl_tcam_hit_addr1_u { + struct acl_tcam_hit_addr1 { + u32 addr2:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id2:4; /* [12:09] Default:0x0 RO */ + u32 addr3:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id3:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR1_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR2_ADDR (0xb64634) +#define NBL_ACL_TCAM_HIT_ADDR2_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR2_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR2_DWLEN (1) +union acl_tcam_hit_addr2_u { + struct acl_tcam_hit_addr2 { + u32 addr4:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id4:4; /* [12:09] Default:0x0 RO */ + u32 addr5:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id5:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR2_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR3_ADDR (0xb64638) +#define NBL_ACL_TCAM_HIT_ADDR3_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR3_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR3_DWLEN (1) +union acl_tcam_hit_addr3_u { + struct acl_tcam_hit_addr3 { + u32 addr6:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id6:4; /* [12:09] Default:0x0 RO */ + u32 addr7:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id7:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR3_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR4_ADDR (0xb6463c) +#define NBL_ACL_TCAM_HIT_ADDR4_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR4_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR4_DWLEN (1) +union acl_tcam_hit_addr4_u { + struct acl_tcam_hit_addr4 { + u32 addr8:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id8:4; /* [12:09] Default:0x0 RO */ + u32 addr9:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id9:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR4_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR5_ADDR (0xb64640) +#define NBL_ACL_TCAM_HIT_ADDR5_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR5_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR5_DWLEN (1) +union acl_tcam_hit_addr5_u { + struct acl_tcam_hit_addr5 { + u32 addr10:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id10:4; /* [12:09] Default:0x0 RO */ + u32 addr11:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id11:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR5_DWLEN]; +} __packed; + +#define 
NBL_ACL_TCAM_HIT_ADDR6_ADDR (0xb64644) +#define NBL_ACL_TCAM_HIT_ADDR6_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR6_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR6_DWLEN (1) +union acl_tcam_hit_addr6_u { + struct acl_tcam_hit_addr6 { + u32 addr12:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id12:4; /* [12:09] Default:0x0 RO */ + u32 addr13:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id13:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR6_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_ADDR7_ADDR (0xb64648) +#define NBL_ACL_TCAM_HIT_ADDR7_DEPTH (1) +#define NBL_ACL_TCAM_HIT_ADDR7_WIDTH (32) +#define NBL_ACL_TCAM_HIT_ADDR7_DWLEN (1) +union acl_tcam_hit_addr7_u { + struct acl_tcam_hit_addr7 { + u32 addr14:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id14:4; /* [12:09] Default:0x0 RO */ + u32 addr15:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id15:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_ADDR7_DWLEN]; +} __packed; + +#define NBL_ACL_CMP_SET_VEC_ADDR (0xb64650) +#define NBL_ACL_CMP_SET_VEC_DEPTH (1) +#define NBL_ACL_CMP_SET_VEC_WIDTH (32) +#define NBL_ACL_CMP_SET_VEC_DWLEN (1) +union acl_cmp_set_vec_u { + struct acl_cmp_set_vec { + u32 vec:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_CMP_SET_VEC_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_ADDR (0xb64670) +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_DEPTH (1) +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_WIDTH (32) +#define NBL_ACL_TCAM_NOLOOP_HIT_VLD_DWLEN (1) +union acl_tcam_noloop_hit_vld_u { + struct acl_tcam_noloop_hit_vld { + u32 hit_vld:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_NOLOOP_HIT_VLD_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_LOOP_HIT_VLD_ADDR (0xb64674) +#define NBL_ACL_TCAM_LOOP_HIT_VLD_DEPTH (1) +#define NBL_ACL_TCAM_LOOP_HIT_VLD_WIDTH (32) +#define NBL_ACL_TCAM_LOOP_HIT_VLD_DWLEN (1) +union acl_tcam_loop_hit_vld_u { + struct acl_tcam_loop_hit_vld { + u32 hit_vld:16; /* [15:00] Default:0x0 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_LOOP_HIT_VLD_DWLEN]; +} __packed; + +#define NBL_ACL_ISE_TCAM_HIT_ADDR (0xb64680) +#define NBL_ACL_ISE_TCAM_HIT_DEPTH (1) +#define NBL_ACL_ISE_TCAM_HIT_WIDTH (32) +#define NBL_ACL_ISE_TCAM_HIT_DWLEN (1) +union acl_ise_tcam_hit_u { + struct acl_ise_tcam_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_ISE_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_ISE_TCAM_NOHIT_ADDR (0xb64684) +#define NBL_ACL_ISE_TCAM_NOHIT_DEPTH (1) +#define NBL_ACL_ISE_TCAM_NOHIT_WIDTH (32) +#define NBL_ACL_ISE_TCAM_NOHIT_DWLEN (1) +union acl_ise_tcam_nohit_u { + struct acl_ise_tcam_nohit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_ISE_TCAM_NOHIT_DWLEN]; +} __packed; + +#define NBL_ACL_LOOP_TCAM_HIT_ADDR (0xb64688) +#define NBL_ACL_LOOP_TCAM_HIT_DEPTH (1) +#define NBL_ACL_LOOP_TCAM_HIT_WIDTH (32) +#define NBL_ACL_LOOP_TCAM_HIT_DWLEN (1) +union acl_loop_tcam_hit_u { + struct acl_loop_tcam_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_LOOP_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_NOLOOP_TCAM_HIT_ADDR (0xb6468c) +#define NBL_ACL_NOLOOP_TCAM_HIT_DEPTH (1) +#define NBL_ACL_NOLOOP_TCAM_HIT_WIDTH (32) +#define NBL_ACL_NOLOOP_TCAM_HIT_DWLEN (1) +union acl_noloop_tcam_hit_u { + struct acl_noloop_tcam_hit { + u32 
cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_NOLOOP_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_ADDR (0xb64690) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR0_DWLEN (1) +union acl_tcam_hit_loop_addr0_u { + struct acl_tcam_hit_loop_addr0 { + u32 addr0:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id0:4; /* [12:09] Default:0x0 RO */ + u32 addr1:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id1:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR0_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_ADDR (0xb64694) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR1_DWLEN (1) +union acl_tcam_hit_loop_addr1_u { + struct acl_tcam_hit_loop_addr1 { + u32 addr2:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id2:4; /* [12:09] Default:0x0 RO */ + u32 addr3:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id3:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR1_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_ADDR (0xb64698) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR2_DWLEN (1) +union acl_tcam_hit_loop_addr2_u { + struct acl_tcam_hit_loop_addr2 { + u32 addr4:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id4:4; /* [12:09] Default:0x0 RO */ + u32 addr5:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id5:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR2_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_ADDR (0xb6469c) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR3_DWLEN (1) +union acl_tcam_hit_loop_addr3_u { + struct acl_tcam_hit_loop_addr3 { + u32 addr6:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id6:4; /* [12:09] Default:0x0 RO */ + u32 addr7:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id7:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR3_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_ADDR (0xb646a0) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR4_DWLEN (1) +union acl_tcam_hit_loop_addr4_u { + struct acl_tcam_hit_loop_addr4 { + u32 addr8:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id8:4; /* [12:09] Default:0x0 RO */ + u32 addr9:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id9:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR4_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_ADDR (0xb646a4) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR5_DWLEN (1) +union acl_tcam_hit_loop_addr5_u { + struct acl_tcam_hit_loop_addr5 { + u32 addr10:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id10:4; /* [12:09] Default:0x0 RO */ + u32 addr11:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id11:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR5_DWLEN]; +} __packed; + +#define 
NBL_ACL_TCAM_HIT_LOOP_ADDR6_ADDR (0xb646a8) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR6_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR6_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR6_DWLEN (1) +union acl_tcam_hit_loop_addr6_u { + struct acl_tcam_hit_loop_addr6 { + u32 addr12:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id12:4; /* [12:09] Default:0x0 RO */ + u32 addr13:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id13:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR6_DWLEN]; +} __packed; + +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_ADDR (0xb646ac) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_DEPTH (1) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_WIDTH (32) +#define NBL_ACL_TCAM_HIT_LOOP_ADDR7_DWLEN (1) +union acl_tcam_hit_loop_addr7_u { + struct acl_tcam_hit_loop_addr7 { + u32 addr14:9; /* [08:00] Default:0x0 RO */ + u32 tcam_id14:4; /* [12:09] Default:0x0 RO */ + u32 addr15:9; /* [21:13] Default:0x0 RO */ + u32 tcam_id15:4; /* [25:22] Default:0x0 RO */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_HIT_LOOP_ADDR7_DWLEN]; +} __packed; + +#define NBL_ACL_OUT_DROP_ADDR (0xb646c8) +#define NBL_ACL_OUT_DROP_DEPTH (1) +#define NBL_ACL_OUT_DROP_WIDTH (32) +#define NBL_ACL_OUT_DROP_DWLEN (1) +union acl_out_drop_u { + struct acl_out_drop { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_OUT_DROP_DWLEN]; +} __packed; + +#define NBL_ACL_NXT_STAGE_ADDR (0xb646d0) +#define NBL_ACL_NXT_STAGE_DEPTH (1) +#define NBL_ACL_NXT_STAGE_WIDTH (32) +#define NBL_ACL_NXT_STAGE_DWLEN (1) +union acl_nxt_stage_u { + struct acl_nxt_stage { + u32 in_nxt_stage:4; /* [03:00] Default:0x0 RO */ + u32 out_nxt_stage:4; /* [07:04] Default:0x0 RO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_NXT_STAGE_DWLEN]; +} __packed; + +#define NBL_ACL_BP_STATE_ADDR (0xb64700) +#define NBL_ACL_BP_STATE_DEPTH (1) +#define NBL_ACL_BP_STATE_WIDTH (32) +#define NBL_ACL_BP_STATE_DWLEN (1) +union acl_bp_state_u { + struct acl_bp_state { + u32 in_bp:1; /* [00:00] Default:0x0 RO */ + u32 out_bp:1; /* [01:01] Default:0x0 RO */ + u32 inter_bp:1; /* [02:02] Default:0x0 RO */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_BP_STATE_DWLEN]; +} __packed; + +#define NBL_ACL_CMDQ_REQ_HIT_ADDR (0xb647a0) +#define NBL_ACL_CMDQ_REQ_HIT_DEPTH (1) +#define NBL_ACL_CMDQ_REQ_HIT_WIDTH (32) +#define NBL_ACL_CMDQ_REQ_HIT_DWLEN (1) +union acl_cmdq_req_hit_u { + struct acl_cmdq_req_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_CMDQ_REQ_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_CMDQ_REQ_NO_HIT_ADDR (0xb647a8) +#define NBL_ACL_CMDQ_REQ_NO_HIT_DEPTH (1) +#define NBL_ACL_CMDQ_REQ_NO_HIT_WIDTH (32) +#define NBL_ACL_CMDQ_REQ_NO_HIT_DWLEN (1) +union acl_cmdq_req_no_hit_u { + struct acl_cmdq_req_no_hit { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_ACL_CMDQ_REQ_NO_HIT_DWLEN]; +} __packed; + +#define NBL_ACL_INSERT_SEARCH_CTRL_ADDR (0xb64880) +#define NBL_ACL_INSERT_SEARCH_CTRL_DEPTH (1) +#define NBL_ACL_INSERT_SEARCH_CTRL_WIDTH (32) +#define NBL_ACL_INSERT_SEARCH_CTRL_DWLEN (1) +union acl_insert_search_ctrl_u { + struct acl_insert_search_ctrl { + u32 profile_idx:4; /* [03:00] Default:0x0 RW */ + u32 start:1; /* [04:04] Default:0x0 WO */ + u32 rsv:27; /* [31:05] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INSERT_SEARCH_CTRL_DWLEN]; +} __packed; + +#define NBL_ACL_INSERT_SEARCH_ACK_ADDR (0xb64884)
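+
+/*
+ * Hand-added usage sketch, not generated code: the insert/search handshake
+ * is assumed to work by staging the lookup key in the
+ * NBL_ACL_INSERT_SEARCH_DATA registers below, kicking 'start' in the ctrl
+ * register above, then polling bit 0 ('ack', read-clear) of the ack
+ * register. This sequence is inferred from the field comments only; 'base'
+ * is an assumed ioremapped view of the register BAR, and the helper
+ * requires <linux/io.h> and <linux/iopoll.h>.
+ */
+static inline int nbl_acl_insert_search_kick(void __iomem *base, u32 profile_idx)
+{
+	union acl_insert_search_ctrl_u ctrl = {};
+	u32 ack;
+
+	ctrl.info.profile_idx = profile_idx;	/* 4-bit profile select */
+	ctrl.info.start = 1;			/* WO self-kick bit */
+	writel(ctrl.data[0], base + NBL_ACL_INSERT_SEARCH_CTRL_ADDR);
+
+	/* Wait for 'ack' (bit 0, clears on read) or time out after 10 ms. */
+	return readl_poll_timeout(base + NBL_ACL_INSERT_SEARCH_ACK_ADDR, ack,
+				  ack & 0x1, 10, 10000);
+}
+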
+#define NBL_ACL_INSERT_SEARCH_ACK_DEPTH (1) +#define NBL_ACL_INSERT_SEARCH_ACK_WIDTH (32) +#define NBL_ACL_INSERT_SEARCH_ACK_DWLEN (1) +union acl_insert_search_ack_u { + struct acl_insert_search_ack { + u32 ack:1; /* [00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INSERT_SEARCH_ACK_DWLEN]; +} __packed; + +#define NBL_ACL_INSERT_SEARCH_DATA_ADDR (0xb64890) +#define NBL_ACL_INSERT_SEARCH_DATA_DEPTH (20) +#define NBL_ACL_INSERT_SEARCH_DATA_WIDTH (32) +#define NBL_ACL_INSERT_SEARCH_DATA_DWLEN (1) +union acl_insert_search_data_u { + struct acl_insert_search_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INSERT_SEARCH_DATA_DWLEN]; +} __packed; +#define NBL_ACL_INSERT_SEARCH_DATA_REG(r) (NBL_ACL_INSERT_SEARCH_DATA_ADDR + \ + (NBL_ACL_INSERT_SEARCH_DATA_DWLEN * 4) * (r)) + +#define NBL_ACL_INDIRECT_ACCESS_ACK_ADDR (0xb648f0) +#define NBL_ACL_INDIRECT_ACCESS_ACK_DEPTH (1) +#define NBL_ACL_INDIRECT_ACCESS_ACK_WIDTH (32) +#define NBL_ACL_INDIRECT_ACCESS_ACK_DWLEN (1) +union acl_indirect_access_ack_u { + struct acl_indirect_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:16; /* [16:01] Default:0x0 RWW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_CTRL_ADDR (0xb648f4) +#define NBL_ACL_INDIRECT_CTRL_DEPTH (1) +#define NBL_ACL_INDIRECT_CTRL_WIDTH (32) +#define NBL_ACL_INDIRECT_CTRL_DWLEN (1) +union acl_indirect_ctrl_u { + struct acl_indirect_ctrl { + u32 tcam_addr:9; /* [08:00] Default:0x0 RW */ + u32 cpu_acl_cfg_start:1; /* [09:09] Default:0x0 WO */ + u32 cpu_acl_cfg_rw:1; /* [10:10] Default:0x0 RW */ + u32 rsv:5; /* [15:11] Default:0x0 WO */ + u32 acc_btm:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_CTRL_DWLEN]; +} __packed; + +#define NBL_ACL_VALID_BIT_ADDR (0xb64900) +#define NBL_ACL_VALID_BIT_DEPTH (1) +#define NBL_ACL_VALID_BIT_WIDTH (32) +#define NBL_ACL_VALID_BIT_DWLEN (1) +union acl_valid_bit_u { + struct acl_valid_bit { + u32 valid_bit:16; /* [15:00] Default:0x0 RWW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_VALID_BIT_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_XL_ADDR (0xb64904) +#define NBL_ACL_INDIRECT_TCAM0_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_XL_DWLEN (1) +union acl_indirect_tcam0_xl_u { + struct acl_indirect_tcam0_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM0_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_XH_ADDR (0xb64908) +#define NBL_ACL_INDIRECT_TCAM0_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_XH_DWLEN (1) +union acl_indirect_tcam0_xh_u { + struct acl_indirect_tcam0_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM0_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_XL_ADDR (0xb6490c) +#define NBL_ACL_INDIRECT_TCAM1_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_XL_DWLEN (1) +union acl_indirect_tcam1_xl_u { + struct acl_indirect_tcam1_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_XH_ADDR (0xb64910) +#define 
NBL_ACL_INDIRECT_TCAM1_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_XH_DWLEN (1) +union acl_indirect_tcam1_xh_u { + struct acl_indirect_tcam1_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_XL_ADDR (0xb64914) +#define NBL_ACL_INDIRECT_TCAM2_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_XL_DWLEN (1) +union acl_indirect_tcam2_xl_u { + struct acl_indirect_tcam2_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_XH_ADDR (0xb64918) +#define NBL_ACL_INDIRECT_TCAM2_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_XH_DWLEN (1) +union acl_indirect_tcam2_xh_u { + struct acl_indirect_tcam2_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_XL_ADDR (0xb6491c) +#define NBL_ACL_INDIRECT_TCAM3_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM3_XL_DWLEN (1) +union acl_indirect_tcam3_xl_u { + struct acl_indirect_tcam3_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_XH_ADDR (0xb64920) +#define NBL_ACL_INDIRECT_TCAM3_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM3_XH_DWLEN (1) +union acl_indirect_tcam3_xh_u { + struct acl_indirect_tcam3_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_XL_ADDR (0xb64924) +#define NBL_ACL_INDIRECT_TCAM4_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_XL_DWLEN (1) +union acl_indirect_tcam4_xl_u { + struct acl_indirect_tcam4_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_XH_ADDR (0xb64928) +#define NBL_ACL_INDIRECT_TCAM4_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_XH_DWLEN (1) +union acl_indirect_tcam4_xh_u { + struct acl_indirect_tcam4_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_XL_ADDR (0xb6492c) +#define NBL_ACL_INDIRECT_TCAM5_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_XL_DWLEN (1) +union acl_indirect_tcam5_xl_u { + struct acl_indirect_tcam5_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM5_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_XH_ADDR (0xb64930) +#define NBL_ACL_INDIRECT_TCAM5_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_XH_DWLEN (1) +union acl_indirect_tcam5_xh_u { + struct acl_indirect_tcam5_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM5_XH_DWLEN]; +} __packed; + +#define 
NBL_ACL_INDIRECT_TCAM6_XL_ADDR (0xb64934) +#define NBL_ACL_INDIRECT_TCAM6_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_XL_DWLEN (1) +union acl_indirect_tcam6_xl_u { + struct acl_indirect_tcam6_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM6_XH_ADDR (0xb64938) +#define NBL_ACL_INDIRECT_TCAM6_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_XH_DWLEN (1) +union acl_indirect_tcam6_xh_u { + struct acl_indirect_tcam6_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_XL_ADDR (0xb6493c) +#define NBL_ACL_INDIRECT_TCAM7_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_XL_DWLEN (1) +union acl_indirect_tcam7_xl_u { + struct acl_indirect_tcam7_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_XH_ADDR (0xb64940) +#define NBL_ACL_INDIRECT_TCAM7_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_XH_DWLEN (1) +union acl_indirect_tcam7_xh_u { + struct acl_indirect_tcam7_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_XL_ADDR (0xb64944) +#define NBL_ACL_INDIRECT_TCAM8_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM8_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_XL_DWLEN (1) +union acl_indirect_tcam8_xl_u { + struct acl_indirect_tcam8_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_XH_ADDR (0xb64948) +#define NBL_ACL_INDIRECT_TCAM8_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM8_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_XH_DWLEN (1) +union acl_indirect_tcam8_xh_u { + struct acl_indirect_tcam8_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_XL_ADDR (0xb6494c) +#define NBL_ACL_INDIRECT_TCAM9_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_XL_DWLEN (1) +union acl_indirect_tcam9_xl_u { + struct acl_indirect_tcam9_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_XH_ADDR (0xb64950) +#define NBL_ACL_INDIRECT_TCAM9_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_XH_DWLEN (1) +union acl_indirect_tcam9_xh_u { + struct acl_indirect_tcam9_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM10_XL_ADDR (0xb64954) +#define NBL_ACL_INDIRECT_TCAM10_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_XL_DWLEN (1) +union acl_indirect_tcam10_xl_u { + struct acl_indirect_tcam10_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_XL_DWLEN]; +} __packed; + +#define 
NBL_ACL_INDIRECT_TCAM10_XH_ADDR (0xb64958) +#define NBL_ACL_INDIRECT_TCAM10_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_XH_DWLEN (1) +union acl_indirect_tcam10_xh_u { + struct acl_indirect_tcam10_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_XL_ADDR (0xb6495c) +#define NBL_ACL_INDIRECT_TCAM11_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_XL_DWLEN (1) +union acl_indirect_tcam11_xl_u { + struct acl_indirect_tcam11_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_XH_ADDR (0xb64960) +#define NBL_ACL_INDIRECT_TCAM11_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_XH_DWLEN (1) +union acl_indirect_tcam11_xh_u { + struct acl_indirect_tcam11_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_XL_ADDR (0xb64964) +#define NBL_ACL_INDIRECT_TCAM12_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_XL_DWLEN (1) +union acl_indirect_tcam12_xl_u { + struct acl_indirect_tcam12_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_XH_ADDR (0xb64968) +#define NBL_ACL_INDIRECT_TCAM12_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_XH_DWLEN (1) +union acl_indirect_tcam12_xh_u { + struct acl_indirect_tcam12_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_XL_ADDR (0xb6496c) +#define NBL_ACL_INDIRECT_TCAM13_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_XL_DWLEN (1) +union acl_indirect_tcam13_xl_u { + struct acl_indirect_tcam13_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_XH_ADDR (0xb64970) +#define NBL_ACL_INDIRECT_TCAM13_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_XH_DWLEN (1) +union acl_indirect_tcam13_xh_u { + struct acl_indirect_tcam13_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_XL_ADDR (0xb64974) +#define NBL_ACL_INDIRECT_TCAM14_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_XL_DWLEN (1) +union acl_indirect_tcam14_xl_u { + struct acl_indirect_tcam14_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_XH_ADDR (0xb64978) +#define NBL_ACL_INDIRECT_TCAM14_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_XH_DWLEN (1) +union acl_indirect_tcam14_xh_u { + struct acl_indirect_tcam14_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] 
Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_XL_ADDR (0xb6497c) +#define NBL_ACL_INDIRECT_TCAM15_XL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_XL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_XL_DWLEN (1) +union acl_indirect_tcam15_xl_u { + struct acl_indirect_tcam15_xl { + u32 xl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_XL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_XH_ADDR (0xb64980) +#define NBL_ACL_INDIRECT_TCAM15_XH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_XH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_XH_DWLEN (1) +union acl_indirect_tcam15_xh_u { + struct acl_indirect_tcam15_xh { + u32 xh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_XH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_YL_ADDR (0xb64990) +#define NBL_ACL_INDIRECT_TCAM0_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_YL_DWLEN (1) +union acl_indirect_tcam0_yl_u { + struct acl_indirect_tcam0_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM0_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM0_YH_ADDR (0xb64994) +#define NBL_ACL_INDIRECT_TCAM0_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM0_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM0_YH_DWLEN (1) +union acl_indirect_tcam0_yh_u { + struct acl_indirect_tcam0_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM0_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_YL_ADDR (0xb64998) +#define NBL_ACL_INDIRECT_TCAM1_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_YL_DWLEN (1) +union acl_indirect_tcam1_yl_u { + struct acl_indirect_tcam1_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM1_YH_ADDR (0xb6499c) +#define NBL_ACL_INDIRECT_TCAM1_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM1_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM1_YH_DWLEN (1) +union acl_indirect_tcam1_yh_u { + struct acl_indirect_tcam1_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM1_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_YL_ADDR (0xb649a0) +#define NBL_ACL_INDIRECT_TCAM2_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_YL_DWLEN (1) +union acl_indirect_tcam2_yl_u { + struct acl_indirect_tcam2_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM2_YH_ADDR (0xb649a4) +#define NBL_ACL_INDIRECT_TCAM2_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM2_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM2_YH_DWLEN (1) +union acl_indirect_tcam2_yh_u { + struct acl_indirect_tcam2_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM2_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_YL_ADDR (0xb649a8) +#define NBL_ACL_INDIRECT_TCAM3_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM3_YL_DWLEN (1) +union acl_indirect_tcam3_yl_u { + struct acl_indirect_tcam3_yl { + u32 yl:32; /* 
[31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM3_YH_ADDR (0xb649ac) +#define NBL_ACL_INDIRECT_TCAM3_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM3_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM3_YH_DWLEN (1) +union acl_indirect_tcam3_yh_u { + struct acl_indirect_tcam3_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM3_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_YL_ADDR (0xb649b0) +#define NBL_ACL_INDIRECT_TCAM4_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_YL_DWLEN (1) +union acl_indirect_tcam4_yl_u { + struct acl_indirect_tcam4_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM4_YH_ADDR (0xb649b4) +#define NBL_ACL_INDIRECT_TCAM4_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM4_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM4_YH_DWLEN (1) +union acl_indirect_tcam4_yh_u { + struct acl_indirect_tcam4_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM4_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_YL_ADDR (0xb649b8) +#define NBL_ACL_INDIRECT_TCAM5_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_YL_DWLEN (1) +union acl_indirect_tcam5_yl_u { + struct acl_indirect_tcam5_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM5_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM5_YH_ADDR (0xb649bc) +#define NBL_ACL_INDIRECT_TCAM5_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM5_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM5_YH_DWLEN (1) +union acl_indirect_tcam5_yh_u { + struct acl_indirect_tcam5_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM5_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM6_YL_ADDR (0xb649c0) +#define NBL_ACL_INDIRECT_TCAM6_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_YL_DWLEN (1) +union acl_indirect_tcam6_yl_u { + struct acl_indirect_tcam6_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM6_YH_ADDR (0xb649c4) +#define NBL_ACL_INDIRECT_TCAM6_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM6_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM6_YH_DWLEN (1) +union acl_indirect_tcam6_yh_u { + struct acl_indirect_tcam6_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM6_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_YL_ADDR (0xb649c8) +#define NBL_ACL_INDIRECT_TCAM7_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_YL_DWLEN (1) +union acl_indirect_tcam7_yl_u { + struct acl_indirect_tcam7_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM7_YH_ADDR (0xb649cc) +#define NBL_ACL_INDIRECT_TCAM7_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM7_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM7_YH_DWLEN (1) +union acl_indirect_tcam7_yh_u { + struct acl_indirect_tcam7_yh { + u32 yh:8; /* [07:00] 
Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM7_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_YL_ADDR (0xb649d0) +#define NBL_ACL_INDIRECT_TCAM8_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM8_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_YL_DWLEN (1) +union acl_indirect_tcam8_yl_u { + struct acl_indirect_tcam8_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM8_YH_ADDR (0xb649d4) +#define NBL_ACL_INDIRECT_TCAM8_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM8_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM8_YH_DWLEN (1) +union acl_indirect_tcam8_yh_u { + struct acl_indirect_tcam8_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM8_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_YL_ADDR (0xb649d8) +#define NBL_ACL_INDIRECT_TCAM9_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_YL_DWLEN (1) +union acl_indirect_tcam9_yl_u { + struct acl_indirect_tcam9_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM9_YH_ADDR (0xb649dc) +#define NBL_ACL_INDIRECT_TCAM9_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM9_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM9_YH_DWLEN (1) +union acl_indirect_tcam9_yh_u { + struct acl_indirect_tcam9_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM9_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM10_YL_ADDR (0xb649e0) +#define NBL_ACL_INDIRECT_TCAM10_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_YL_DWLEN (1) +union acl_indirect_tcam10_yl_u { + struct acl_indirect_tcam10_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM10_YH_ADDR (0xb649e4) +#define NBL_ACL_INDIRECT_TCAM10_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM10_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM10_YH_DWLEN (1) +union acl_indirect_tcam10_yh_u { + struct acl_indirect_tcam10_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM10_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_YL_ADDR (0xb649e8) +#define NBL_ACL_INDIRECT_TCAM11_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_YL_DWLEN (1) +union acl_indirect_tcam11_yl_u { + struct acl_indirect_tcam11_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM11_YH_ADDR (0xb649ec) +#define NBL_ACL_INDIRECT_TCAM11_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM11_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM11_YH_DWLEN (1) +union acl_indirect_tcam11_yh_u { + struct acl_indirect_tcam11_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM11_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_YL_ADDR (0xb649f0) +#define NBL_ACL_INDIRECT_TCAM12_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_YL_DWLEN (1) +union 
acl_indirect_tcam12_yl_u { + struct acl_indirect_tcam12_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM12_YH_ADDR (0xb649f4) +#define NBL_ACL_INDIRECT_TCAM12_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM12_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM12_YH_DWLEN (1) +union acl_indirect_tcam12_yh_u { + struct acl_indirect_tcam12_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM12_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_YL_ADDR (0xb649f8) +#define NBL_ACL_INDIRECT_TCAM13_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_YL_DWLEN (1) +union acl_indirect_tcam13_yl_u { + struct acl_indirect_tcam13_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM13_YH_ADDR (0xb649fc) +#define NBL_ACL_INDIRECT_TCAM13_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM13_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM13_YH_DWLEN (1) +union acl_indirect_tcam13_yh_u { + struct acl_indirect_tcam13_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM13_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_YL_ADDR (0xb64a00) +#define NBL_ACL_INDIRECT_TCAM14_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_YL_DWLEN (1) +union acl_indirect_tcam14_yl_u { + struct acl_indirect_tcam14_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM14_YH_ADDR (0xb64a04) +#define NBL_ACL_INDIRECT_TCAM14_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM14_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM14_YH_DWLEN (1) +union acl_indirect_tcam14_yh_u { + struct acl_indirect_tcam14_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM14_YH_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_YL_ADDR (0xb64a08) +#define NBL_ACL_INDIRECT_TCAM15_YL_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_YL_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_YL_DWLEN (1) +union acl_indirect_tcam15_yl_u { + struct acl_indirect_tcam15_yl { + u32 yl:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_YL_DWLEN]; +} __packed; + +#define NBL_ACL_INDIRECT_TCAM15_YH_ADDR (0xb64a0c) +#define NBL_ACL_INDIRECT_TCAM15_YH_DEPTH (1) +#define NBL_ACL_INDIRECT_TCAM15_YH_WIDTH (32) +#define NBL_ACL_INDIRECT_TCAM15_YH_DWLEN (1) +union acl_indirect_tcam15_yh_u { + struct acl_indirect_tcam15_yh { + u32 yh:8; /* [07:00] Default:0x0 RWW */ + u32 rsv:24; /* [31:08] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_ACL_INDIRECT_TCAM15_YH_DWLEN]; +} __packed; + +#define NBL_ACL_KGEN_TCAM_ADDR (0xb65800) +#define NBL_ACL_KGEN_TCAM_DEPTH (16) +#define NBL_ACL_KGEN_TCAM_WIDTH (64) +#define NBL_ACL_KGEN_TCAM_DWLEN (2) +union acl_kgen_tcam_u { + struct acl_kgen_tcam { + u32 mask:16; + u32 data:16; + u32 valid_bit:1; + u32 rsv:31; + } __packed info; + u32 data[NBL_ACL_KGEN_TCAM_DWLEN]; +} __packed; +#define NBL_ACL_KGEN_TCAM_REG(r) (NBL_ACL_KGEN_TCAM_ADDR + \ + (NBL_ACL_KGEN_TCAM_DWLEN * 4) * (r)) + +#define NBL_ACL_TCAM_CFG_ADDR (0xb65a00) +#define NBL_ACL_TCAM_CFG_DEPTH (16) 
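+
+/*
+ * Hand-added usage sketch, not generated code: one assumed indirect write
+ * of an ACL TCAM 0 entry using the X/Y data, valid-bit, control and ack
+ * registers defined above. Whether cpu_acl_cfg_rw == 1 selects a write,
+ * and whether acc_btm is a per-TCAM select bitmap, are inferences from the
+ * field names rather than documented behaviour; 'base' is an assumed
+ * ioremapped BAR and the helper requires <linux/io.h> and <linux/iopoll.h>.
+ */
+static inline int nbl_acl_indirect_tcam0_write(void __iomem *base, u32 row,
+					       u32 xl, u32 xh, u32 yl, u32 yh)
+{
+	union acl_indirect_ctrl_u ctrl = {};
+	u32 ack;
+
+	/* Stage the X/Y halves of the entry (32 low bits + 8 high bits). */
+	writel(xl, base + NBL_ACL_INDIRECT_TCAM0_XL_ADDR);
+	writel(xh, base + NBL_ACL_INDIRECT_TCAM0_XH_ADDR);
+	writel(yl, base + NBL_ACL_INDIRECT_TCAM0_YL_ADDR);
+	writel(yh, base + NBL_ACL_INDIRECT_TCAM0_YH_ADDR);
+	/* Mark slot 0 of the 16-bit valid bitmap. */
+	writel(0x1, base + NBL_ACL_VALID_BIT_ADDR);
+
+	ctrl.info.tcam_addr = row;		/* 9-bit row inside the TCAM */
+	ctrl.info.cpu_acl_cfg_rw = 1;		/* assumption: 1 == write */
+	ctrl.info.acc_btm = 0x1;		/* assumption: bit per TCAM, TCAM 0 */
+	ctrl.info.cpu_acl_cfg_start = 1;
+	writel(ctrl.data[0], base + NBL_ACL_INDIRECT_CTRL_ADDR);
+
+	/* 'done' is bit 0 of the ack register and clears on read (RC). */
+	return readl_poll_timeout(base + NBL_ACL_INDIRECT_ACCESS_ACK_ADDR, ack,
+				  ack & 0x1, 10, 10000);
+}
+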
+#define NBL_ACL_TCAM_CFG_WIDTH (128) +#define NBL_ACL_TCAM_CFG_DWLEN (4) +union acl_tcam_cfg_u { + struct acl_tcam_cfg { + u32 startcompare0:1; /* [00:00] Default:0x1 RW */ + u32 startset0:1; /* [01:01] Default:0x1 RW */ + u32 tcam0_enable:1; /* [02:02] Default:0x0 RW */ + u32 startcompare1:1; /* [03:03] Default:0x1 RW */ + u32 startset1:1; /* [04:04] Default:0x1 RW */ + u32 tcam1_enable:1; /* [05:05] Default:0x0 RW */ + u32 startcompare2:1; /* [06:06] Default:0x1 RW */ + u32 startset2:1; /* [07:07] Default:0x1 RW */ + u32 tcam2_enable:1; /* [08:08] Default:0x0 RW */ + u32 startcompare3:1; /* [09:09] Default:0x1 RW */ + u32 startset3:1; /* [10:10] Default:0x1 RW */ + u32 tcam3_enable:1; /* [11:11] Default:0x0 RW */ + u32 startcompare4:1; /* [12:12] Default:0x1 RW */ + u32 startset4:1; /* [13:13] Default:0x1 RW */ + u32 tcam4_enable:1; /* [14:14] Default:0x0 RW */ + u32 startcompare5:1; /* [15:15] Default:0x1 RW */ + u32 startset5:1; /* [16:16] Default:0x1 RW */ + u32 tcam5_enable:1; /* [17:17] Default:0x0 RW */ + u32 startcompare6:1; /* [18:18] Default:0x1 RW */ + u32 startset6:1; /* [19:19] Default:0x1 RW */ + u32 tcam6_enable:1; /* [20:20] Default:0x0 RW */ + u32 startcompare7:1; /* [21:21] Default:0x1 RW */ + u32 startset7:1; /* [22:22] Default:0x1 RW */ + u32 tcam7_enable:1; /* [23:23] Default:0x0 RW */ + u32 startcompare8:1; /* [24:24] Default:0x1 RW */ + u32 startset8:1; /* [25:25] Default:0x1 RW */ + u32 tcam8_enable:1; /* [26:26] Default:0x0 RW */ + u32 startcompare9:1; /* [27:27] Default:0x1 RW */ + u32 startset9:1; /* [28:28] Default:0x1 RW */ + u32 tcam9_enable:1; /* [29:29] Default:0x0 RW */ + u32 startcompare10:1; /* [30:30] Default:0x1 RW */ + u32 startset10:1; /* [31:31] Default:0x1 RW */ + u32 tcam10_enable:1; /* [32:32] Default:0x0 RW */ + u32 startcompare11:1; /* [33:33] Default:0x1 RW */ + u32 startset11:1; /* [34:34] Default:0x1 RW */ + u32 tcam11_enable:1; /* [35:35] Default:0x0 RW */ + u32 startcompare12:1; /* [36:36] Default:0x1 RW */ + u32 startset12:1; /* [37:37] Default:0x1 RW */ + u32 tcam12_enable:1; /* [38:38] Default:0x0 RW */ + u32 startcompare13:1; /* [39:39] Default:0x1 RW */ + u32 startset13:1; /* [40:40] Default:0x1 RW */ + u32 tcam13_enable:1; /* [41:41] Default:0x0 RW */ + u32 startcompare14:1; /* [42:42] Default:0x1 RW */ + u32 startset14:1; /* [43:43] Default:0x1 RW */ + u32 tcam14_enable:1; /* [44:44] Default:0x0 RW */ + u32 startcompare15:1; /* [45:45] Default:0x1 RW */ + u32 startset15:1; /* [46:46] Default:0x1 RW */ + u32 tcam15_enable:1; /* [47:47] Default:0x0 RW */ + u32 key_id0:4; /* [51:48] Default:0x0 RW */ + u32 key_id1:4; /* [55:52] Default:0x0 RW */ + u32 key_id2:4; /* [59:56] Default:0x0 RW */ + u32 key_id3:4; /* [63:60] Default:0x0 RW */ + u32 key_id4:4; /* [67:64] Default:0x0 RW */ + u32 key_id5:4; /* [71:68] Default:0x0 RW */ + u32 key_id6:4; /* [75:72] Default:0x0 RW */ + u32 key_id7:4; /* [79:76] Default:0x0 RW */ + u32 key_id8:4; /* [83:80] Default:0x0 RW */ + u32 key_id9:4; /* [87:84] Default:0x0 RW */ + u32 key_id10:4; /* [91:88] Default:0x0 RW */ + u32 key_id11:4; /* [95:92] Default:0x0 RW */ + u32 key_id12:4; /* [99:96] Default:0x0 RW */ + u32 key_id13:4; /* [103:100] Default:0x0 RW */ + u32 key_id14:4; /* [107:104] Default:0x0 RW */ + u32 key_id15:4; /* [111:108] Default:0x0 RW */ + u32 rsv:16; /* [127:112] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_TCAM_CFG_DWLEN]; +} __packed; +#define NBL_ACL_TCAM_CFG_REG(r) (NBL_ACL_TCAM_CFG_ADDR + \ + (NBL_ACL_TCAM_CFG_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM_CFG_ADDR 
(0xb65c00) +#define NBL_ACL_ACTION_RAM_CFG_DEPTH (16) +#define NBL_ACL_ACTION_RAM_CFG_WIDTH (128) +#define NBL_ACL_ACTION_RAM_CFG_DWLEN (4) +union acl_action_ram_cfg_u { + struct acl_action_ram_cfg { + u32 action_ram0_alloc_id:4; /* [03:00] Default:0x0 RW */ + u32 action_ram0_enable:1; /* [04:04] Default:0x0 RW */ + u32 action_ram1_alloc_id:4; /* [08:05] Default:0x0 RW */ + u32 action_ram1_enable:1; /* [09:09] Default:0x0 RW */ + u32 action_ram2_alloc_id:4; /* [13:10] Default:0x0 RW */ + u32 action_ram2_enable:1; /* [14:14] Default:0x0 RW */ + u32 action_ram3_alloc_id:4; /* [18:15] Default:0x0 RW */ + u32 action_ram3_enable:1; /* [19:19] Default:0x0 RW */ + u32 action_ram4_alloc_id:4; /* [23:20] Default:0x0 RW */ + u32 action_ram4_enable:1; /* [24:24] Default:0x0 RW */ + u32 action_ram5_alloc_id:4; /* [28:25] Default:0x0 RW */ + u32 action_ram5_enable:1; /* [29:29] Default:0x0 RW */ + u32 action_ram6_alloc_id:4; /* [33:30] Default:0x0 RW */ + u32 action_ram6_enable:1; /* [34:34] Default:0x0 RW */ + u32 action_ram7_alloc_id:4; /* [38:35] Default:0x0 RW */ + u32 action_ram7_enable:1; /* [39:39] Default:0x0 RW */ + u32 action_ram8_alloc_id:4; /* [43:40] Default:0x0 RW */ + u32 action_ram8_enable:1; /* [44:44] Default:0x0 RW */ + u32 action_ram9_alloc_id:4; /* [48:45] Default:0x0 RW */ + u32 action_ram9_enable:1; /* [49:49] Default:0x0 RW */ + u32 action_ram10_alloc_id:4; /* [53:50] Default:0x0 RW */ + u32 action_ram10_enable:1; /* [54:54] Default:0x0 RW */ + u32 action_ram11_alloc_id:4; /* [58:55] Default:0x0 RW */ + u32 action_ram11_enable:1; /* [59:59] Default:0x0 RW */ + u32 action_ram12_alloc_id:4; /* [63:60] Default:0x0 RW */ + u32 action_ram12_enable:1; /* [64:64] Default:0x0 RW */ + u32 action_ram13_alloc_id:4; /* [68:65] Default:0x0 RW */ + u32 action_ram13_enable:1; /* [69:69] Default:0x0 RW */ + u32 action_ram14_alloc_id:4; /* [73:70] Default:0x0 RW */ + u32 action_ram14_enable:1; /* [74:74] Default:0x0 RW */ + u32 action_ram15_alloc_id:4; /* [78:75] Default:0x0 RW */ + u32 action_ram15_enable:1; /* [79:79] Default:0x0 RW */ + u32 rsv_l:32; /* [127:80] Default:0x0 RO */ + u32 rsv_h:16; /* [127:80] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM_CFG_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM_CFG_REG(r) (NBL_ACL_ACTION_RAM_CFG_ADDR + \ + (NBL_ACL_ACTION_RAM_CFG_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM0_ADDR (0xb66000) +#define NBL_ACL_ACTION_RAM0_DEPTH (512) +#define NBL_ACL_ACTION_RAM0_WIDTH (128) +#define NBL_ACL_ACTION_RAM0_DWLEN (4) +union acl_action_ram0_u { + struct acl_action_ram0 { + u32 action0:22; /* [21:00] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM0_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM0_REG(r) (NBL_ACL_ACTION_RAM0_ADDR + \ + (NBL_ACL_ACTION_RAM0_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM1_ADDR (0xb68000) +#define NBL_ACL_ACTION_RAM1_DEPTH (512) +#define NBL_ACL_ACTION_RAM1_WIDTH (128) +#define NBL_ACL_ACTION_RAM1_DWLEN (4) +union acl_action_ram1_u { + struct acl_action_ram1 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_ACL_ACTION_RAM1_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM1_REG(r) (NBL_ACL_ACTION_RAM1_ADDR + \ + (NBL_ACL_ACTION_RAM1_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM2_ADDR (0xb6a000) +#define NBL_ACL_ACTION_RAM2_DEPTH (512) +#define NBL_ACL_ACTION_RAM2_WIDTH (128) +#define NBL_ACL_ACTION_RAM2_DWLEN (4) +union acl_action_ram2_u { + struct acl_action_ram2 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM2_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM2_REG(r) (NBL_ACL_ACTION_RAM2_ADDR + \ + (NBL_ACL_ACTION_RAM2_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM3_ADDR (0xb6c000) +#define NBL_ACL_ACTION_RAM3_DEPTH (512) +#define NBL_ACL_ACTION_RAM3_WIDTH (128) +#define NBL_ACL_ACTION_RAM3_DWLEN (4) +union acl_action_ram3_u { + struct acl_action_ram3 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM3_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM3_REG(r) (NBL_ACL_ACTION_RAM3_ADDR + \ + (NBL_ACL_ACTION_RAM3_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM4_ADDR (0xb6e000) +#define NBL_ACL_ACTION_RAM4_DEPTH (512) +#define NBL_ACL_ACTION_RAM4_WIDTH (128) +#define NBL_ACL_ACTION_RAM4_DWLEN (4) +union acl_action_ram4_u { + struct acl_action_ram4 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM4_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM4_REG(r) (NBL_ACL_ACTION_RAM4_ADDR + \ + (NBL_ACL_ACTION_RAM4_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM5_ADDR (0xb70000) +#define NBL_ACL_ACTION_RAM5_DEPTH (512) +#define NBL_ACL_ACTION_RAM5_WIDTH (128) +#define NBL_ACL_ACTION_RAM5_DWLEN (4) +union acl_action_ram5_u { + struct acl_action_ram5 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM5_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM5_REG(r) (NBL_ACL_ACTION_RAM5_ADDR + \ + (NBL_ACL_ACTION_RAM5_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM6_ADDR (0xb72000) +#define NBL_ACL_ACTION_RAM6_DEPTH (512) +#define NBL_ACL_ACTION_RAM6_WIDTH (128) +#define NBL_ACL_ACTION_RAM6_DWLEN (4) +union acl_action_ram6_u { + struct acl_action_ram6 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM6_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM6_REG(r) (NBL_ACL_ACTION_RAM6_ADDR + \ + (NBL_ACL_ACTION_RAM6_DWLEN * 4) * (r)) + +#define 
NBL_ACL_ACTION_RAM7_ADDR (0xb74000) +#define NBL_ACL_ACTION_RAM7_DEPTH (512) +#define NBL_ACL_ACTION_RAM7_WIDTH (128) +#define NBL_ACL_ACTION_RAM7_DWLEN (4) +union acl_action_ram7_u { + struct acl_action_ram7 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM7_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM7_REG(r) (NBL_ACL_ACTION_RAM7_ADDR + \ + (NBL_ACL_ACTION_RAM7_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM8_ADDR (0xb76000) +#define NBL_ACL_ACTION_RAM8_DEPTH (512) +#define NBL_ACL_ACTION_RAM8_WIDTH (128) +#define NBL_ACL_ACTION_RAM8_DWLEN (4) +union acl_action_ram8_u { + struct acl_action_ram8 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM8_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM8_REG(r) (NBL_ACL_ACTION_RAM8_ADDR + \ + (NBL_ACL_ACTION_RAM8_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM9_ADDR (0xb78000) +#define NBL_ACL_ACTION_RAM9_DEPTH (512) +#define NBL_ACL_ACTION_RAM9_WIDTH (128) +#define NBL_ACL_ACTION_RAM9_DWLEN (4) +union acl_action_ram9_u { + struct acl_action_ram9 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM9_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM9_REG(r) (NBL_ACL_ACTION_RAM9_ADDR + \ + (NBL_ACL_ACTION_RAM9_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM10_ADDR (0xb7a000) +#define NBL_ACL_ACTION_RAM10_DEPTH (512) +#define NBL_ACL_ACTION_RAM10_WIDTH (128) +#define NBL_ACL_ACTION_RAM10_DWLEN (4) +union acl_action_ram10_u { + struct acl_action_ram10 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM10_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM10_REG(r) (NBL_ACL_ACTION_RAM10_ADDR + \ + (NBL_ACL_ACTION_RAM10_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM11_ADDR (0xb7c000) +#define NBL_ACL_ACTION_RAM11_DEPTH (512) +#define NBL_ACL_ACTION_RAM11_WIDTH (128) +#define NBL_ACL_ACTION_RAM11_DWLEN (4) +union acl_action_ram11_u { + struct acl_action_ram11 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM11_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM11_REG(r) (NBL_ACL_ACTION_RAM11_ADDR + \ + (NBL_ACL_ACTION_RAM11_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM12_ADDR (0xb7e000) +#define NBL_ACL_ACTION_RAM12_DEPTH (512) +#define NBL_ACL_ACTION_RAM12_WIDTH (128) +#define 
NBL_ACL_ACTION_RAM12_DWLEN (4) +union acl_action_ram12_u { + struct acl_action_ram12 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM12_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM12_REG(r) (NBL_ACL_ACTION_RAM12_ADDR + \ + (NBL_ACL_ACTION_RAM12_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM13_ADDR (0xb80000) +#define NBL_ACL_ACTION_RAM13_DEPTH (512) +#define NBL_ACL_ACTION_RAM13_WIDTH (128) +#define NBL_ACL_ACTION_RAM13_DWLEN (4) +union acl_action_ram13_u { + struct acl_action_ram13 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM13_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM13_REG(r) (NBL_ACL_ACTION_RAM13_ADDR + \ + (NBL_ACL_ACTION_RAM13_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM14_ADDR (0xb82000) +#define NBL_ACL_ACTION_RAM14_DEPTH (512) +#define NBL_ACL_ACTION_RAM14_WIDTH (128) +#define NBL_ACL_ACTION_RAM14_DWLEN (4) +union acl_action_ram14_u { + struct acl_action_ram14 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM14_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM14_REG(r) (NBL_ACL_ACTION_RAM14_ADDR + \ + (NBL_ACL_ACTION_RAM14_DWLEN * 4) * (r)) + +#define NBL_ACL_ACTION_RAM15_ADDR (0xb84000) +#define NBL_ACL_ACTION_RAM15_DEPTH (512) +#define NBL_ACL_ACTION_RAM15_WIDTH (128) +#define NBL_ACL_ACTION_RAM15_DWLEN (4) +union acl_action_ram15_u { + struct acl_action_ram15 { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_ACTION_RAM15_DWLEN]; +} __packed; +#define NBL_ACL_ACTION_RAM15_REG(r) (NBL_ACL_ACTION_RAM15_ADDR + \ + (NBL_ACL_ACTION_RAM15_DWLEN * 4) * (r)) + +#define NBL_ACL_DEFAULT_ACTION_RAM_ADDR (0xb86000) +#define NBL_ACL_DEFAULT_ACTION_RAM_DEPTH (16) +#define NBL_ACL_DEFAULT_ACTION_RAM_WIDTH (256) +#define NBL_ACL_DEFAULT_ACTION_RAM_DWLEN (8) +union acl_default_action_ram_u { + struct acl_default_action_ram { + u32 action0:22; /* [21:00] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 rsv:16; /* [255:176] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:176] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_DEFAULT_ACTION_RAM_DWLEN]; +} __packed; +#define NBL_ACL_DEFAULT_ACTION_RAM_REG(r) (NBL_ACL_DEFAULT_ACTION_RAM_ADDR + \ + (NBL_ACL_DEFAULT_ACTION_RAM_DWLEN * 4) *
(r)) + +#define NBL_ACL_FLOW_ID_STAT_RAM_ADDR (0xb94000) +#define NBL_ACL_FLOW_ID_STAT_RAM_DEPTH (131072) +#define NBL_ACL_FLOW_ID_STAT_RAM_WIDTH (128) +#define NBL_ACL_FLOW_ID_STAT_RAM_DWLEN (4) +union acl_flow_id_stat_ram_u { + struct acl_flow_id_stat_ram { + u32 pkt_byte_l:32; /* [47:00] Default:0x0 RO */ + u32 pkt_byte_h:16; /* [47:00] Default:0x0 RO */ + u32 pkt_cnt_l:32; /* [87:48] Default:0x0 RO */ + u32 pkt_cnt_h:8; /* [87:48] Default:0x0 RO */ + u32 rsv_l:32; /* [127:88] Default:0x0 RO */ + u32 rsv_h:8; /* [127:88] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_FLOW_ID_STAT_RAM_DWLEN]; +} __packed; +#define NBL_ACL_FLOW_ID_STAT_RAM_REG(r) (NBL_ACL_FLOW_ID_STAT_RAM_ADDR + \ + (NBL_ACL_FLOW_ID_STAT_RAM_DWLEN * 4) * (r)) + +#define NBL_ACL_STAT_ID_STAT_RAM_ADDR (0xd94000) +#define NBL_ACL_STAT_ID_STAT_RAM_DEPTH (2048) +#define NBL_ACL_STAT_ID_STAT_RAM_WIDTH (128) +#define NBL_ACL_STAT_ID_STAT_RAM_DWLEN (4) +union acl_stat_id_stat_ram_u { + struct acl_stat_id_stat_ram { + u32 pkt_byte_arr[2]; /* [63:0] Default:0x0 RO */ + u32 pkt_cnt_arr[2]; /* [127:64] Default:0x0 RO */ + } __packed info; + u32 data[NBL_ACL_STAT_ID_STAT_RAM_DWLEN]; +} __packed; +#define NBL_ACL_STAT_ID_STAT_RAM_REG(r) (NBL_ACL_STAT_ID_STAT_RAM_ADDR + \ + (NBL_ACL_STAT_ID_STAT_RAM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_cap.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_cap.h new file mode 100644 index 0000000000000000000000000000000000000000..5eff108ab328b7012b532bbce016e0ee20979115 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_cap.h @@ -0,0 +1,374 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_CAP_H +#define NBL_CAP_H 1 + +#include <linux/types.h> + +#define NBL_CAP_BASE (0x00E64000) + +#define NBL_CAP_INT_STATUS_ADDR (0xe64000) +#define NBL_CAP_INT_STATUS_DEPTH (1) +#define NBL_CAP_INT_STATUS_WIDTH (32) +#define NBL_CAP_INT_STATUS_DWLEN (1) +union cap_int_status_u { + struct cap_int_status { + u32 cap_done:1; /* [0] Default:0x0 RWC */ + u32 cif_err:1; /* [1] Default:0x0 RWC */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_CAP_INT_MASK_ADDR (0xe64004) +#define NBL_CAP_INT_MASK_DEPTH (1) +#define NBL_CAP_INT_MASK_WIDTH (32) +#define NBL_CAP_INT_MASK_DWLEN (1) +union cap_int_mask_u { + struct cap_int_mask { + u32 cap_done:1; /* [0] Default:0x0 RW */ + u32 cif_err:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_INT_MASK_DWLEN]; +} __packed; + +#define NBL_CAP_INT_SET_ADDR (0xe64008) +#define NBL_CAP_INT_SET_DEPTH (1) +#define NBL_CAP_INT_SET_WIDTH (32) +#define NBL_CAP_INT_SET_DWLEN (1) +union cap_int_set_u { + struct cap_int_set { + u32 cap_done:1; /* [0] Default:0x0 WO */ + u32 cif_err:1; /* [1] Default:0x0 WO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_INT_SET_DWLEN]; +} __packed; + +#define NBL_CAP_INIT_DONE_ADDR (0xe6400c) +#define NBL_CAP_INIT_DONE_DEPTH (1) +#define NBL_CAP_INIT_DONE_WIDTH (32) +#define NBL_CAP_INIT_DONE_DWLEN (1) +union cap_init_done_u { + struct cap_init_done { + u32 done:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_CAP_CIF_ERR_INFO_ADDR (0xe64040) +#define NBL_CAP_CIF_ERR_INFO_DEPTH (1) +#define
NBL_CAP_CIF_ERR_INFO_WIDTH (32) +#define NBL_CAP_CIF_ERR_INFO_DWLEN (1) +union cap_cif_err_info_u { + struct cap_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 rsv:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_CAP_CAR_CTRL_ADDR (0xe64100) +#define NBL_CAP_CAR_CTRL_DEPTH (1) +#define NBL_CAP_CAR_CTRL_WIDTH (32) +#define NBL_CAP_CAR_CTRL_DWLEN (1) +union cap_car_ctrl_u { + struct cap_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_CAP_INIT_START_ADDR (0xe64108) +#define NBL_CAP_INIT_START_DEPTH (1) +#define NBL_CAP_INIT_START_WIDTH (32) +#define NBL_CAP_INIT_START_DWLEN (1) +union cap_init_start_u { + struct cap_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_INIT_START_DWLEN]; +} __packed; + +#define NBL_CAP_SPORT_ADDR (0xe64200) +#define NBL_CAP_SPORT_DEPTH (1) +#define NBL_CAP_SPORT_WIDTH (32) +#define NBL_CAP_SPORT_DWLEN (1) +union cap_sport_u { + struct cap_sport { + u32 sport_mask:16; /* [15:0] Default:0x0 RW */ + u32 sport_value:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_SPORT_DWLEN]; +} __packed; + +#define NBL_CAP_PTYPE_ADDR (0xe64208) +#define NBL_CAP_PTYPE_DEPTH (1) +#define NBL_CAP_PTYPE_WIDTH (32) +#define NBL_CAP_PTYPE_DWLEN (1) +union cap_ptype_u { + struct cap_ptype { + u32 ptype_mask:8; /* [7:0] Default:0x0 RW */ + u32 ptype_value:8; /* [15:8] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_PTYPE_DWLEN]; +} __packed; + +#define NBL_CAP_FLAG_MASK_ADDR (0xe64220) +#define NBL_CAP_FLAG_MASK_DEPTH (1) +#define NBL_CAP_FLAG_MASK_WIDTH (32) +#define NBL_CAP_FLAG_MASK_DWLEN (1) +union cap_flag_mask_u { + struct cap_flag_mask { + u32 flag_mask:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_FLAG_MASK_DWLEN]; +} __packed; + +#define NBL_CAP_FLAG_VALUE_ADDR (0xe64224) +#define NBL_CAP_FLAG_VALUE_DEPTH (1) +#define NBL_CAP_FLAG_VALUE_WIDTH (32) +#define NBL_CAP_FLAG_VALUE_DWLEN (1) +union cap_flag_value_u { + struct cap_flag_value { + u32 flag_value:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_FLAG_VALUE_DWLEN]; +} __packed; + +#define NBL_CAP_DPORT_ACT_ID_ADDR (0xe64228) +#define NBL_CAP_DPORT_ACT_ID_DEPTH (1) +#define NBL_CAP_DPORT_ACT_ID_WIDTH (32) +#define NBL_CAP_DPORT_ACT_ID_DWLEN (1) +union cap_dport_act_id_u { + struct cap_dport_act_id { + u32 dport_act_id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_DPORT_ACT_ID_DWLEN]; +} __packed; + +#define NBL_CAP_DPORT_ADDR (0xe64230) +#define NBL_CAP_DPORT_DEPTH (1) +#define NBL_CAP_DPORT_WIDTH (32) +#define NBL_CAP_DPORT_DWLEN (1) +union cap_dport_u { + struct cap_dport { + u32 dport_mask:16; /* [15:0] Default:0x0 RW */ + u32 dport_value:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_DPORT_DWLEN]; +} __packed; + +#define NBL_CAP_FLOWID_MASK_ADDR (0xe64238) +#define NBL_CAP_FLOWID_MASK_DEPTH (1) +#define NBL_CAP_FLOWID_MASK_WIDTH (32) +#define NBL_CAP_FLOWID_MASK_DWLEN (1) +union cap_flowid_mask_u { + struct cap_flowid_mask { + u32 flowid_mask:17; /* [16:0] Default:0x0 RW */ + u32 rsv:15; /* [31:17] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_FLOWID_MASK_DWLEN]; +} __packed; + +#define NBL_CAP_FLOWID_VALUE_ADDR (0xe6423c) +#define NBL_CAP_FLOWID_VALUE_DEPTH (1) +#define NBL_CAP_FLOWID_VALUE_WIDTH (32) +#define NBL_CAP_FLOWID_VALUE_DWLEN (1) +union cap_flowid_value_u { + struct cap_flowid_value { + u32 flowid_value:17; /* [16:0] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_FLOWID_VALUE_DWLEN]; +} __packed; + +#define NBL_CAP_FWDACT_RS_FIELD_ACT_ID_ADDR (0xe64240) +#define NBL_CAP_FWDACT_RS_FIELD_ACT_ID_DEPTH (1) +#define NBL_CAP_FWDACT_RS_FIELD_ACT_ID_WIDTH (32) +#define NBL_CAP_FWDACT_RS_FIELD_ACT_ID_DWLEN (1) +union cap_fwdact_rs_field_act_id_u { + struct cap_fwdact_rs_field_act_id { + u32 rs_field0_act_id:6; /* [5:0] Default:0x0 RW */ + u32 rsv1:2; /* [7:6] Default:0x0 RO */ + u32 rs_field1_act_id:6; /* [13:8] Default:0x0 RW */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_FWDACT_RS_FIELD_ACT_ID_DWLEN]; +} __packed; + +#define NBL_CAP_FWDACT_RS_FIELD_MASK_ADDR (0xe64244) +#define NBL_CAP_FWDACT_RS_FIELD_MASK_DEPTH (1) +#define NBL_CAP_FWDACT_RS_FIELD_MASK_WIDTH (32) +#define NBL_CAP_FWDACT_RS_FIELD_MASK_DWLEN (1) +union cap_fwdact_rs_field_mask_u { + struct cap_fwdact_rs_field_mask { + u32 rs_field0_mask:16; /* [15:0] Default:0x0 RW */ + u32 rs_field1_mask:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_FWDACT_RS_FIELD_MASK_DWLEN]; +} __packed; + +#define NBL_CAP_FWDACT_RS_FIELD_VALUE_ADDR (0xe64248) +#define NBL_CAP_FWDACT_RS_FIELD_VALUE_DEPTH (1) +#define NBL_CAP_FWDACT_RS_FIELD_VALUE_WIDTH (32) +#define NBL_CAP_FWDACT_RS_FIELD_VALUE_DWLEN (1) +union cap_fwdact_rs_field_value_u { + struct cap_fwdact_rs_field_value { + u32 rs_field0_value:16; /* [15:0] Default:0x0 RW */ + u32 rs_field1_value:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_FWDACT_RS_FIELD_VALUE_DWLEN]; +} __packed; + +#define NBL_CAP_EXT_RS_FIELD16_OFT_ADDR (0xe6424c) +#define NBL_CAP_EXT_RS_FIELD16_OFT_DEPTH (1) +#define NBL_CAP_EXT_RS_FIELD16_OFT_WIDTH (32) +#define NBL_CAP_EXT_RS_FIELD16_OFT_DWLEN (1) +union cap_ext_rs_field16_oft_u { + struct cap_ext_rs_field16_oft { + u32 rs_field0_oft:8; /* [7:0] Default:0x0 RW */ + u32 rs_field1_oft:8; /* [15:8] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_EXT_RS_FIELD16_OFT_DWLEN]; +} __packed; + +#define NBL_CAP_EXT_RS_FIELD16_MASK_ADDR (0xe64250) +#define NBL_CAP_EXT_RS_FIELD16_MASK_DEPTH (1) +#define NBL_CAP_EXT_RS_FIELD16_MASK_WIDTH (32) +#define NBL_CAP_EXT_RS_FIELD16_MASK_DWLEN (1) +union cap_ext_rs_field16_mask_u { + struct cap_ext_rs_field16_mask { + u32 rs_field0_mask:16; /* [15:0] Default:0x0 RW */ + u32 rs_field1_mask:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_EXT_RS_FIELD16_MASK_DWLEN]; +} __packed; + +#define NBL_CAP_EXT_RS_FIELD16_VALUE_ADDR (0xe64254) +#define NBL_CAP_EXT_RS_FIELD16_VALUE_DEPTH (1) +#define NBL_CAP_EXT_RS_FIELD16_VALUE_WIDTH (32) +#define NBL_CAP_EXT_RS_FIELD16_VALUE_DWLEN (1) +union cap_ext_rs_field16_value_u { + struct cap_ext_rs_field16_value { + u32 rs_field0_value:16; /* [15:0] Default:0x0 RW */ + u32 rs_field1_value:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_EXT_RS_FIELD16_VALUE_DWLEN]; +} __packed; + +#define NBL_CAP_EXT_RS_FIELD32_OFT_ADDR (0xe64258) +#define NBL_CAP_EXT_RS_FIELD32_OFT_DEPTH (1) +#define NBL_CAP_EXT_RS_FIELD32_OFT_WIDTH (32) +#define NBL_CAP_EXT_RS_FIELD32_OFT_DWLEN 
(1) +union cap_ext_rs_field32_oft_u { + struct cap_ext_rs_field32_oft { + u32 rs_field0_oft:8; /* [7:0] Default:0x0 RW */ + u32 rs_field1_oft:8; /* [15:8] Default:0x0 RW */ + u32 rs_field2_oft:8; /* [23:16] Default:0x0 RW */ + u32 rs_field3_oft:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_EXT_RS_FIELD32_OFT_DWLEN]; +} __packed; + +#define NBL_CAP_EXT_RS_FIELD32_MASK_ADDR (0xe6425c) +#define NBL_CAP_EXT_RS_FIELD32_MASK_DEPTH (4) +#define NBL_CAP_EXT_RS_FIELD32_MASK_WIDTH (32) +#define NBL_CAP_EXT_RS_FIELD32_MASK_DWLEN (1) +union cap_ext_rs_field32_mask_u { + struct cap_ext_rs_field32_mask { + u32 rs_field_mask:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_EXT_RS_FIELD32_MASK_DWLEN]; +} __packed; +#define NBL_CAP_EXT_RS_FIELD32_MASK_REG(r) (NBL_CAP_EXT_RS_FIELD32_MASK_ADDR + \ + (NBL_CAP_EXT_RS_FIELD32_MASK_DWLEN * 4) * (r)) + +#define NBL_CAP_EXT_RS_FIELD32_VALUE_ADDR (0xe6426c) +#define NBL_CAP_EXT_RS_FIELD32_VALUE_DEPTH (4) +#define NBL_CAP_EXT_RS_FIELD32_VALUE_WIDTH (32) +#define NBL_CAP_EXT_RS_FIELD32_VALUE_DWLEN (1) +union cap_ext_rs_field32_value_u { + struct cap_ext_rs_field32_value { + u32 rs_field_value:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_EXT_RS_FIELD32_VALUE_DWLEN]; +} __packed; +#define NBL_CAP_EXT_RS_FIELD32_VALUE_REG(r) (NBL_CAP_EXT_RS_FIELD32_VALUE_ADDR + \ + (NBL_CAP_EXT_RS_FIELD32_VALUE_DWLEN * 4) * (r)) + +#define NBL_CAP_MODE_ADDR (0xe64300) +#define NBL_CAP_MODE_DEPTH (1) +#define NBL_CAP_MODE_WIDTH (32) +#define NBL_CAP_MODE_DWLEN (1) +union cap_mode_u { + struct cap_mode { + u32 cont_cap_num:4; /* [3:0] Default:0x1 RW */ + u32 cap_mode:1; /* [4] Default:0x0 RW */ + u32 match_mode:1; /* [5] Default:0x0 RW */ + u32 match_cmp:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_MODE_DWLEN]; +} __packed; + +#define NBL_CAP_MD_START_ADDR (0xe64304) +#define NBL_CAP_MD_START_DEPTH (1) +#define NBL_CAP_MD_START_WIDTH (32) +#define NBL_CAP_MD_START_DWLEN (1) +union cap_md_start_u { + struct cap_md_start { + u32 cap_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_MD_START_DWLEN]; +} __packed; + +#define NBL_CAP_MD_DONE_ADDR (0xe6430c) +#define NBL_CAP_MD_DONE_DEPTH (1) +#define NBL_CAP_MD_DONE_WIDTH (32) +#define NBL_CAP_MD_DONE_DWLEN (1) +union cap_md_done_u { + struct cap_md_done { + u32 cap_done:1; /* [0] Default:0x0 RC */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_MD_DONE_DWLEN]; +} __packed; + +#define NBL_CAP_CFG_TEST_ADDR (0xe64310) +#define NBL_CAP_CFG_TEST_DEPTH (1) +#define NBL_CAP_CFG_TEST_WIDTH (32) +#define NBL_CAP_CFG_TEST_DWLEN (1) +union cap_cfg_test_u { + struct cap_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_CAP_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_CAP_MD_TABLE_ADDR (0xe65000) +#define NBL_CAP_MD_TABLE_DEPTH (32) +#define NBL_CAP_MD_TABLE_WIDTH (1024) +#define NBL_CAP_MD_TABLE_DWLEN (32) +union cap_md_table_u { + struct cap_md_table { + u32 capped_md_arr[32]; /* [1023:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_CAP_MD_TABLE_DWLEN]; +} __packed; +#define NBL_CAP_MD_TABLE_REG(r) (NBL_CAP_MD_TABLE_ADDR + \ + (NBL_CAP_MD_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_dprbac.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_dprbac.h new file mode 100644 index 
0000000000000000000000000000000000000000..fa8fd5ddf26bd40f6a40c4c8c4ee21919504991b --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_dprbac.h @@ -0,0 +1,585 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_DPRBAC_H +#define NBL_DPRBAC_H 1 + +#include <linux/types.h> + +#define NBL_DPRBAC_BASE (0x00904000) + +#define NBL_DPRBAC_INT_STATUS_ADDR (0x904000) +#define NBL_DPRBAC_INT_STATUS_DEPTH (1) +#define NBL_DPRBAC_INT_STATUS_WIDTH (32) +#define NBL_DPRBAC_INT_STATUS_DWLEN (1) +union dprbac_int_status_u { + struct dprbac_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_underflow:1; /* [1] Default:0x0 RWC */ + u32 fifo_overflow:1; /* [2] Default:0x0 RWC */ + u32 cif_err:1; /* [3] Default:0x0 RWC */ + u32 cfg_err:1; /* [4] Default:0x0 RWC */ + u32 ucor_err:1; /* [5] Default:0x0 RWC */ + u32 cor_err:1; /* [6] Default:0x0 RWC */ + u32 soft_lifetime:1; /* [7] Default:0x0 RWC */ + u32 hard_lifetime:1; /* [8] Default:0x0 RWC */ + u32 pkt_err:1; /* [9] Default:0x0 RWC */ + u32 esn_ovf:1; /* [10] Default:0x0 RWC */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_DPRBAC_INT_MASK_ADDR (0x904004) +#define NBL_DPRBAC_INT_MASK_DEPTH (1) +#define NBL_DPRBAC_INT_MASK_WIDTH (32) +#define NBL_DPRBAC_INT_MASK_DWLEN (1) +union dprbac_int_mask_u { + struct dprbac_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_underflow:1; /* [1] Default:0x0 RW */ + u32 fifo_overflow:1; /* [2] Default:0x0 RW */ + u32 cif_err:1; /* [3] Default:0x0 RW */ + u32 cfg_err:1; /* [4] Default:0x0 RW */ + u32 ucor_err:1; /* [5] Default:0x0 RW */ + u32 cor_err:1; /* [6] Default:0x0 RW */ + u32 soft_lifetime:1; /* [7] Default:0x0 RW */ + u32 hard_lifetime:1; /* [8] Default:0x0 RW */ + u32 pkt_err:1; /* [9] Default:0x0 RW */ + u32 esn_ovf:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_INT_MASK_DWLEN]; +} __packed; + +#define NBL_DPRBAC_INT_SET_ADDR (0x904008) +#define NBL_DPRBAC_INT_SET_DEPTH (1) +#define NBL_DPRBAC_INT_SET_WIDTH (32) +#define NBL_DPRBAC_INT_SET_DWLEN (1) +union dprbac_int_set_u { + struct dprbac_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_underflow:1; /* [1] Default:0x0 WO */ + u32 fifo_overflow:1; /* [2] Default:0x0 WO */ + u32 cif_err:1; /* [3] Default:0x0 WO */ + u32 cfg_err:1; /* [4] Default:0x0 WO */ + u32 ucor_err:1; /* [5] Default:0x0 WO */ + u32 cor_err:1; /* [6] Default:0x0 WO */ + u32 soft_lifetime:1; /* [7] Default:0x0 WO */ + u32 hard_lifetime:1; /* [8] Default:0x0 WO */ + u32 pkt_err:1; /* [9] Default:0x0 WO */ + u32 esn_ovf:1; /* [10] Default:0x0 WO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_INT_SET_DWLEN]; +} __packed; + +#define NBL_DPRBAC_ESN_OVF_INFO_ADDR (0x904010) +#define NBL_DPRBAC_ESN_OVF_INFO_DEPTH (1) +#define NBL_DPRBAC_ESN_OVF_INFO_WIDTH (32) +#define NBL_DPRBAC_ESN_OVF_INFO_DWLEN (1) +union dprbac_esn_ovf_info_u { + struct dprbac_esn_ovf_info { + u32 sad_index:11; /* [10:0] Default:0x0 RO */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_ESN_OVF_INFO_DWLEN]; +} __packed; + +#define NBL_DPRBAC_LIFETIME_INFO_ADDR (0x904014) +#define NBL_DPRBAC_LIFETIME_INFO_DEPTH (1) +#define NBL_DPRBAC_LIFETIME_INFO_WIDTH (32) +#define NBL_DPRBAC_LIFETIME_INFO_DWLEN (1) +union dprbac_lifetime_info_u { + struct dprbac_lifetime_info { + u32
soft_sad_index:11; /* [10:0] Default:0x0 RO */ + u32 rsv1:5; /* [15:11] Default:0x0 RO */ + u32 hard_sad_index:11; /* [26:16] Default:0x0 RO */ + u32 rsv:5; /* [31:27] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_LIFETIME_INFO_DWLEN]; +} __packed; + +#define NBL_DPRBAC_UCOR_ERR_INFO_ADDR (0x904024) +#define NBL_DPRBAC_UCOR_ERR_INFO_DEPTH (1) +#define NBL_DPRBAC_UCOR_ERR_INFO_WIDTH (32) +#define NBL_DPRBAC_UCOR_ERR_INFO_DWLEN (1) +union dprbac_ucor_err_info_u { + struct dprbac_ucor_err_info { + u32 addr:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_UCOR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPRBAC_COR_ERR_INFO_ADDR (0x90402c) +#define NBL_DPRBAC_COR_ERR_INFO_DEPTH (1) +#define NBL_DPRBAC_COR_ERR_INFO_WIDTH (32) +#define NBL_DPRBAC_COR_ERR_INFO_DWLEN (1) +union dprbac_cor_err_info_u { + struct dprbac_cor_err_info { + u32 addr:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPRBAC_CFG_ERR_INFO_ADDR (0x904034) +#define NBL_DPRBAC_CFG_ERR_INFO_DEPTH (1) +#define NBL_DPRBAC_CFG_ERR_INFO_WIDTH (32) +#define NBL_DPRBAC_CFG_ERR_INFO_DWLEN (1) +union dprbac_cfg_err_info_u { + struct dprbac_cfg_err_info { + u32 addr:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPRBAC_CIF_ERR_INFO_ADDR (0x904040) +#define NBL_DPRBAC_CIF_ERR_INFO_DEPTH (1) +#define NBL_DPRBAC_CIF_ERR_INFO_WIDTH (32) +#define NBL_DPRBAC_CIF_ERR_INFO_DWLEN (1) +union dprbac_cif_err_info_u { + struct dprbac_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPRBAC_PKT_ERR_INFO_ADDR (0x904048) +#define NBL_DPRBAC_PKT_ERR_INFO_DEPTH (1) +#define NBL_DPRBAC_PKT_ERR_INFO_WIDTH (32) +#define NBL_DPRBAC_PKT_ERR_INFO_DWLEN (1) +union dprbac_pkt_err_info_u { + struct dprbac_pkt_err_info { + u32 dport:16; /* [15:0] Default:0x0 RO */ + u32 pkt_len:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_PKT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_DPRBAC_CAR_CTRL_ADDR (0x904100) +#define NBL_DPRBAC_CAR_CTRL_DEPTH (1) +#define NBL_DPRBAC_CAR_CTRL_WIDTH (32) +#define NBL_DPRBAC_CAR_CTRL_DWLEN (1) +union dprbac_car_ctrl_u { + struct dprbac_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_DPRBAC_ENABLE_ADDR (0x904114) +#define NBL_DPRBAC_ENABLE_DEPTH (1) +#define NBL_DPRBAC_ENABLE_WIDTH (32) +#define NBL_DPRBAC_ENABLE_DWLEN (1) +union dprbac_enable_u { + struct dprbac_enable { + u32 prbac:1; /* [0] Default:0x0 RW */ + u32 mf_fwd:1; /* [1] Default:0x1 RW */ + u32 ipv4_nat_csm:1; /* [2] Default:0x0 RW */ + u32 ipv6_nat_csm:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_ENABLE_DWLEN]; +} __packed; + +#define NBL_DPRBAC_CLK_GATE_ADDR (0x904118) +#define NBL_DPRBAC_CLK_GATE_DEPTH (1) +#define 
NBL_DPRBAC_CLK_GATE_WIDTH (32) +#define NBL_DPRBAC_CLK_GATE_DWLEN (1) +union dprbac_clk_gate_u { + struct dprbac_clk_gate { + u32 clk_en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_CLK_GATE_DWLEN]; +} __packed; + +#define NBL_DPRBAC_INIT_START_ADDR (0x904124) +#define NBL_DPRBAC_INIT_START_DEPTH (1) +#define NBL_DPRBAC_INIT_START_WIDTH (32) +#define NBL_DPRBAC_INIT_START_DWLEN (1) +union dprbac_init_start_u { + struct dprbac_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_INIT_START_DWLEN]; +} __packed; + +#define NBL_DPRBAC_INIT_DONE_ADDR (0x904128) +#define NBL_DPRBAC_INIT_DONE_DEPTH (1) +#define NBL_DPRBAC_INIT_DONE_WIDTH (32) +#define NBL_DPRBAC_INIT_DONE_DWLEN (1) +union dprbac_init_done_u { + struct dprbac_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_DPRBAC_NAT_ADDR (0x90412c) +#define NBL_DPRBAC_NAT_DEPTH (1) +#define NBL_DPRBAC_NAT_WIDTH (32) +#define NBL_DPRBAC_NAT_DWLEN (1) +union dprbac_nat_u { + struct dprbac_nat { + u32 rsv:16; /* [15:0] Default:0x0 RO */ + u32 sport:16; /* [31:16] Default:4500 RW */ + } __packed info; + u32 data[NBL_DPRBAC_NAT_DWLEN]; +} __packed; + +#define NBL_DPRBAC_VLAN_TYPE0_ADDR (0x904130) +#define NBL_DPRBAC_VLAN_TYPE0_DEPTH (1) +#define NBL_DPRBAC_VLAN_TYPE0_WIDTH (32) +#define NBL_DPRBAC_VLAN_TYPE0_DWLEN (1) +union dprbac_vlan_type0_u { + struct dprbac_vlan_type0 { + u32 tpid0:16; /* [15:0] Default:0x88A8 RW */ + u32 tpid1:16; /* [31:16] Default:0x9100 RW */ + } __packed info; + u32 data[NBL_DPRBAC_VLAN_TYPE0_DWLEN]; +} __packed; + +#define NBL_DPRBAC_VLAN_TYPE1_ADDR (0x904134) +#define NBL_DPRBAC_VLAN_TYPE1_DEPTH (1) +#define NBL_DPRBAC_VLAN_TYPE1_WIDTH (32) +#define NBL_DPRBAC_VLAN_TYPE1_DWLEN (1) +union dprbac_vlan_type1_u { + struct dprbac_vlan_type1 { + u32 tpid2:16; /* [15:0] Default:0x0 RW */ + u32 tpid3:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPRBAC_VLAN_TYPE1_DWLEN]; +} __packed; + +#define NBL_DPRBAC_VLAN_ENABLE_ADDR (0x904140) +#define NBL_DPRBAC_VLAN_ENABLE_DEPTH (1) +#define NBL_DPRBAC_VLAN_ENABLE_WIDTH (32) +#define NBL_DPRBAC_VLAN_ENABLE_DWLEN (1) +union dprbac_vlan_enable_u { + struct dprbac_vlan_enable { + u32 tpid0:1; /* [0] Default:0x1 RW */ + u32 tpid1:1; /* [1] Default:0x1 RW */ + u32 tpid2:1; /* [2] Default:0x0 RW */ + u32 tpid3:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_VLAN_ENABLE_DWLEN]; +} __packed; + +#define NBL_DPRBAC_DROP_ADDR (0x904180) +#define NBL_DPRBAC_DROP_DEPTH (1) +#define NBL_DPRBAC_DROP_WIDTH (32) +#define NBL_DPRBAC_DROP_DWLEN (1) +union dprbac_drop_u { + struct dprbac_drop { + u32 prbac_bp:1; /* [0] Default:0x1 RW */ + u32 md_drop:1; /* [1] Default:0x1 RW */ + u32 md_errcode:1; /* [2] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_DROP_DWLEN]; +} __packed; + +#define NBL_DPRBAC_MTU_ADDR (0x904184) +#define NBL_DPRBAC_MTU_DEPTH (1) +#define NBL_DPRBAC_MTU_WIDTH (32) +#define NBL_DPRBAC_MTU_DWLEN (1) +union dprbac_mtu_u { + struct dprbac_mtu { + u32 len:14; /* [13:0] Default:9600 RW */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_MTU_DWLEN]; +} __packed; + +#define NBL_DPRBAC_WRR_WT_ADDR (0x904200) +#define NBL_DPRBAC_WRR_WT_DEPTH (1) 
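The start/done register pairs above follow the usual handshake in these generated blocks: software pulses the write-only start bit, then polls the read-only done bit. A minimal sketch of that flow for DPRBAC, using the unions' dual views (bitfield `info` for composing the value, dword `data` for the actual MMIO access); the helper name, `regs` base pointer, poll budget, and sleep interval are illustrative assumptions, not part of this patch:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/delay.h>

/* Hypothetical helper: kick the DPRBAC init state machine and wait for done. */
static int nbl_dprbac_init_poll(void __iomem *regs)
{
	union dprbac_init_start_u start = {};
	union dprbac_init_done_u done = {};
	int budget = 100;			/* illustrative poll budget */

	start.info.start = 1;			/* WO, self-clearing start pulse */
	writel(start.data[0], regs + NBL_DPRBAC_INIT_START_ADDR);

	while (budget--) {
		done.data[0] = readl(regs + NBL_DPRBAC_INIT_DONE_ADDR);
		if (done.info.done)		/* RO: block finished init */
			return 0;
		usleep_range(100, 200);
	}

	return -ETIMEDOUT;
}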
+#define NBL_DPRBAC_WRR_WT_WIDTH (32) +#define NBL_DPRBAC_WRR_WT_DWLEN (1) +union dprbac_wrr_wt_u { + struct dprbac_wrr_wt { + u32 prbac:4; /* [3:0] Default:0x1 RW */ + u32 rsv1:12; /* [15:4] Default:0x0 RO */ + u32 normal:8; /* [23:16] Default:0x3 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_WRR_WT_DWLEN]; +} __packed; + +#define NBL_DPRBAC_SAD_LIFETIME_ADDR (0x904204) +#define NBL_DPRBAC_SAD_LIFETIME_DEPTH (1) +#define NBL_DPRBAC_SAD_LIFETIME_WIDTH (32) +#define NBL_DPRBAC_SAD_LIFETIME_DWLEN (1) +union dprbac_sad_lifetime_u { + struct dprbac_sad_lifetime { + u32 sad_index:11; /* [10:0] Default:0x0 RW */ + u32 rsv2:5; /* [15:11] Default:0x0 RW */ + u32 msb_value:1; /* [16] Default:0x1 RW */ + u32 flag_value:1; /* [17] Default:0x1 RW */ + u32 rsv1:2; /* [19:18] Default:0x0 RO */ + u32 msb_wen:1; /* [20] Default:0x0 RWW */ + u32 flag_wen:1; /* [21] Default:0x0 RWW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_LIFETIME_DWLEN]; +} __packed; + +#define NBL_DPRBAC_LIFETIME_DIFF_ADDR (0x904208) +#define NBL_DPRBAC_LIFETIME_DIFF_DEPTH (1) +#define NBL_DPRBAC_LIFETIME_DIFF_WIDTH (32) +#define NBL_DPRBAC_LIFETIME_DIFF_DWLEN (1) +union dprbac_lifetime_diff_u { + struct dprbac_lifetime_diff { + u32 value:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPRBAC_LIFETIME_DIFF_DWLEN]; +} __packed; + +#define NBL_DPRBAC_PADDING_DAT0_ADDR (0x904300) +#define NBL_DPRBAC_PADDING_DAT0_DEPTH (1) +#define NBL_DPRBAC_PADDING_DAT0_WIDTH (32) +#define NBL_DPRBAC_PADDING_DAT0_DWLEN (1) +union dprbac_padding_dat0_u { + struct dprbac_padding_dat0 { + u32 data:32; /* [31:0] Default:0x01020304 RW */ + } __packed info; + u32 data[NBL_DPRBAC_PADDING_DAT0_DWLEN]; +} __packed; + +#define NBL_DPRBAC_PADDING_DAT1_ADDR (0x904304) +#define NBL_DPRBAC_PADDING_DAT1_DEPTH (1) +#define NBL_DPRBAC_PADDING_DAT1_WIDTH (32) +#define NBL_DPRBAC_PADDING_DAT1_DWLEN (1) +union dprbac_padding_dat1_u { + struct dprbac_padding_dat1 { + u32 data:32; /* [31:0] Default:0x05060708 RW */ + } __packed info; + u32 data[NBL_DPRBAC_PADDING_DAT1_DWLEN]; +} __packed; + +#define NBL_DPRBAC_PADDING_DAT2_ADDR (0x904308) +#define NBL_DPRBAC_PADDING_DAT2_DEPTH (1) +#define NBL_DPRBAC_PADDING_DAT2_WIDTH (32) +#define NBL_DPRBAC_PADDING_DAT2_DWLEN (1) +union dprbac_padding_dat2_u { + struct dprbac_padding_dat2 { + u32 data:32; /* [31:0] Default:0x090a0b0c RW */ + } __packed info; + u32 data[NBL_DPRBAC_PADDING_DAT2_DWLEN]; +} __packed; + +#define NBL_DPRBAC_PADDING_DAT3_ADDR (0x90430c) +#define NBL_DPRBAC_PADDING_DAT3_DEPTH (1) +#define NBL_DPRBAC_PADDING_DAT3_WIDTH (32) +#define NBL_DPRBAC_PADDING_DAT3_DWLEN (1) +union dprbac_padding_dat3_u { + struct dprbac_padding_dat3 { + u32 data:32; /* [31:0] Default:0x0d0e0f10 RW */ + } __packed info; + u32 data[NBL_DPRBAC_PADDING_DAT3_DWLEN]; +} __packed; + +#define NBL_DPRBAC_PADDING_ALIGN_LEN_ADDR (0x904310) +#define NBL_DPRBAC_PADDING_ALIGN_LEN_DEPTH (1) +#define NBL_DPRBAC_PADDING_ALIGN_LEN_WIDTH (32) +#define NBL_DPRBAC_PADDING_ALIGN_LEN_DWLEN (1) +union dprbac_padding_align_len_u { + struct dprbac_padding_align_len { + u32 info:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_PADDING_ALIGN_LEN_DWLEN]; +} __packed; + +#define NBL_DPRBAC_MF_VALID_EXTRACT_ADDR (0x904378) +#define NBL_DPRBAC_MF_VALID_EXTRACT_DEPTH (1) +#define NBL_DPRBAC_MF_VALID_EXTRACT_WIDTH (32) +#define NBL_DPRBAC_MF_VALID_EXTRACT_DWLEN (1) +union dprbac_mf_valid_extract_u { + 
struct dprbac_mf_valid_extract { + u32 offset:5; /* [4:0] Default:5 RW */ + u32 en:1; /* [5] Default:0x1 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_MF_VALID_EXTRACT_DWLEN]; +} __packed; + +#define NBL_DPRBAC_DPORT_EXTRACT_ADDR (0x90437c) +#define NBL_DPRBAC_DPORT_EXTRACT_DEPTH (1) +#define NBL_DPRBAC_DPORT_EXTRACT_WIDTH (32) +#define NBL_DPRBAC_DPORT_EXTRACT_DWLEN (1) +union dprbac_dport_extract_u { + struct dprbac_dport_extract { + u32 id:6; /* [5:0] Default:0x9 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_DPORT_EXTRACT_DWLEN]; +} __packed; + +#define NBL_DPRBAC_SAD_IDX_EXTRACT_ADDR (0x904380) +#define NBL_DPRBAC_SAD_IDX_EXTRACT_DEPTH (1) +#define NBL_DPRBAC_SAD_IDX_EXTRACT_WIDTH (32) +#define NBL_DPRBAC_SAD_IDX_EXTRACT_DWLEN (1) +union dprbac_sad_idx_extract_u { + struct dprbac_sad_idx_extract { + u32 id:6; /* [5:0] Default:17 RW */ + u32 en:1; /* [6] Default:0x1 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_IDX_EXTRACT_DWLEN]; +} __packed; + +#define NBL_DPRBAC_SAD_IDX_FILTER_ADDR (0x904384) +#define NBL_DPRBAC_SAD_IDX_FILTER_DEPTH (1) +#define NBL_DPRBAC_SAD_IDX_FILTER_WIDTH (32) +#define NBL_DPRBAC_SAD_IDX_FILTER_DWLEN (1) +union dprbac_sad_idx_filter_u { + struct dprbac_sad_idx_filter { + u32 btm:30; /* [29:0] Default:0x8000 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_IDX_FILTER_DWLEN]; +} __packed; + +#define NBL_DPRBAC_XOFF_TO_DPED_ADDR (0x904430) +#define NBL_DPRBAC_XOFF_TO_DPED_DEPTH (1) +#define NBL_DPRBAC_XOFF_TO_DPED_WIDTH (32) +#define NBL_DPRBAC_XOFF_TO_DPED_DWLEN (1) +union dprbac_xoff_to_dped_u { + struct dprbac_xoff_to_dped { + u32 bp_set:1; /* [0] Default:0x0 RW */ + u32 bp_mask:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_XOFF_TO_DPED_DWLEN]; +} __packed; + +#define NBL_DPRBAC_XOFF_FROM_DDMUX_ADDR (0x904434) +#define NBL_DPRBAC_XOFF_FROM_DDMUX_DEPTH (1) +#define NBL_DPRBAC_XOFF_FROM_DDMUX_WIDTH (32) +#define NBL_DPRBAC_XOFF_FROM_DDMUX_DWLEN (1) +union dprbac_xoff_from_ddmux_u { + struct dprbac_xoff_from_ddmux { + u32 bp_set:1; /* [0] Default:0x0 RW */ + u32 bp_mask:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_XOFF_FROM_DDMUX_DWLEN]; +} __packed; + +#define NBL_DPRBAC_WIDE_TABLE_TIME_ADDR (0x905000) +#define NBL_DPRBAC_WIDE_TABLE_TIME_DEPTH (1) +#define NBL_DPRBAC_WIDE_TABLE_TIME_WIDTH (32) +#define NBL_DPRBAC_WIDE_TABLE_TIME_DWLEN (1) +union dprbac_wide_table_time_u { + struct dprbac_wide_table_time { + u32 xoff_th:8; /* [7:0] Default:128 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_WIDE_TABLE_TIME_DWLEN]; +} __packed; + +#define NBL_DPRBAC_SAD_IV_TABLE_ADDR (0x914000) +#define NBL_DPRBAC_SAD_IV_TABLE_DEPTH (2048) +#define NBL_DPRBAC_SAD_IV_TABLE_WIDTH (64) +#define NBL_DPRBAC_SAD_IV_TABLE_DWLEN (2) +union dprbac_sad_iv_table_u { + struct dprbac_sad_iv_table { + u32 iv_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_IV_TABLE_DWLEN]; +} __packed; +#define NBL_DPRBAC_SAD_IV_TABLE_REG(r) (NBL_DPRBAC_SAD_IV_TABLE_ADDR + \ + (NBL_DPRBAC_SAD_IV_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPRBAC_SAD_ESN_TABLE_ADDR (0x924000) +#define NBL_DPRBAC_SAD_ESN_TABLE_DEPTH (2048) +#define NBL_DPRBAC_SAD_ESN_TABLE_WIDTH (128) +#define NBL_DPRBAC_SAD_ESN_TABLE_DWLEN (4) +union dprbac_sad_esn_table_u { + 
struct dprbac_sad_esn_table { + u32 sn:32; /* [31:0] Default:0x0 RW */ + u32 esn:32; /* [63:32] Default:0x0 RW */ + u32 wrap_en:1; /* [64] Default:0x0 RW */ + u32 enable:1; /* [65] Default:0x0 RW */ + u32 rsv_l:32; /* [127:66] Default:0x0 RO */ + u32 rsv_h:30; /* [127:66] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_ESN_TABLE_DWLEN]; +} __packed; +#define NBL_DPRBAC_SAD_ESN_TABLE_REG(r) (NBL_DPRBAC_SAD_ESN_TABLE_ADDR + \ + (NBL_DPRBAC_SAD_ESN_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPRBAC_SAD_LIFETIME_TABLE_ADDR (0x934000) +#define NBL_DPRBAC_SAD_LIFETIME_TABLE_DEPTH (2048) +#define NBL_DPRBAC_SAD_LIFETIME_TABLE_WIDTH (128) +#define NBL_DPRBAC_SAD_LIFETIME_TABLE_DWLEN (4) +union dprbac_sad_lifetime_table_u { + struct dprbac_sad_lifetime_table { + u32 diff:32; /* [31:0] Default:0x0 RW */ + u32 cnt:32; /* [63:32] Default:0x0 RW */ + u32 flag:1; /* [64] Default:0x0 RW */ + u32 unit:1; /* [65] Default:0x0 RW */ + u32 enable:1; /* [66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:67] Default:0x0 RO */ + u32 rsv_h:29; /* [127:67] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_LIFETIME_TABLE_DWLEN]; +} __packed; +#define NBL_DPRBAC_SAD_LIFETIME_TABLE_REG(r) (NBL_DPRBAC_SAD_LIFETIME_TABLE_ADDR + \ + (NBL_DPRBAC_SAD_LIFETIME_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_ADDR (0x944000) +#define NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_DEPTH (2048) +#define NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_WIDTH (512) +#define NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_DWLEN (16) +union dprbac_sad_crypto_info_table_u { + struct dprbac_sad_crypto_info_table { + u32 key_arr[8]; /* [255:0] Default:0x0 RW */ + u32 salt:32; /* [287:256] Default:0x0 RW */ + u32 crypto_type:3; /* [290:288] Default:0x0 RW */ + u32 tunnel_mode:1; /* [291] Default:0x0 RW */ + u32 icv_len:2; /* [293:292] Default:0x0 RW */ + u32 rsv:26; /* [511:294] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:294] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_DWLEN]; +} __packed; +#define NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_REG(r) (NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_ADDR + \ + (NBL_DPRBAC_SAD_CRYPTO_INFO_TABLE_DWLEN * 4) * (r)) + +#define NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_ADDR (0x964000) +#define NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_DEPTH (2048) +#define NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_WIDTH (512) +#define NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_DWLEN (16) +union dprbac_sad_encap_info_table_u { + struct dprbac_sad_encap_info_table { + u32 dip_addr_arr[4]; /* [127:0] Default:0x0 RW */ + u32 sip_addr_arr[4]; /* [255:128] Default:0x0 RW */ + u32 spi:32; /* [287:256] Default:0x0 RW */ + u32 dport:16; /* [303:288] Default:0x0 RW */ + u32 nat_flag:1; /* [304] Default:0x0 RW */ + u32 rsv:15; /* [511:305] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:305] Default:0x0 RO */ + } __packed info; + u32 data[NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_DWLEN]; +} __packed; +#define NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_REG(r) (NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_ADDR + \ + (NBL_DPRBAC_SAD_ENCAP_INFO_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h new file mode 100644 index 0000000000000000000000000000000000000000..454d1480be9bf34462d71b68d58c4bf76d9daaac --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_epro.h @@ -0,0 +1,659 @@ +// Code generated by interstellar. DO NOT EDIT.
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_EPRO_H +#define NBL_EPRO_H 1 + +#include <linux/types.h> + +#define NBL_EPRO_BASE (0x00E74000) + +#define NBL_EPRO_INT_STATUS_ADDR (0xe74000) +#define NBL_EPRO_INT_STATUS_DEPTH (1) +#define NBL_EPRO_INT_STATUS_WIDTH (32) +#define NBL_EPRO_INT_STATUS_DWLEN (1) +union epro_int_status_u { + struct epro_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 cif_err:1; /* [3] Default:0x0 RWC */ + u32 input_err:1; /* [4] Default:0x0 RWC */ + u32 cfg_err:1; /* [5] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 data_cor_err:1; /* [7] Default:0x0 RWC */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_EPRO_INT_MASK_ADDR (0xe74004) +#define NBL_EPRO_INT_MASK_DEPTH (1) +#define NBL_EPRO_INT_MASK_WIDTH (32) +#define NBL_EPRO_INT_MASK_DWLEN (1) +union epro_int_mask_u { + struct epro_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 cif_err:1; /* [3] Default:0x0 RW */ + u32 input_err:1; /* [4] Default:0x0 RW */ + u32 cfg_err:1; /* [5] Default:0x0 RW */ + u32 data_ucor_err:1; /* [6] Default:0x0 RW */ + u32 data_cor_err:1; /* [7] Default:0x0 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INT_MASK_DWLEN]; +} __packed; + +#define NBL_EPRO_INT_SET_ADDR (0xe74008) +#define NBL_EPRO_INT_SET_DEPTH (1) +#define NBL_EPRO_INT_SET_WIDTH (32) +#define NBL_EPRO_INT_SET_DWLEN (1) +union epro_int_set_u { + struct epro_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 cif_err:1; /* [3] Default:0x0 WO */ + u32 input_err:1; /* [4] Default:0x0 WO */ + u32 cfg_err:1; /* [5] Default:0x0 WO */ + u32 data_ucor_err:1; /* [6] Default:0x0 WO */ + u32 data_cor_err:1; /* [7] Default:0x0 WO */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INT_SET_DWLEN]; +} __packed; + +#define NBL_EPRO_INIT_DONE_ADDR (0xe7400c) +#define NBL_EPRO_INIT_DONE_DEPTH (1) +#define NBL_EPRO_INIT_DONE_WIDTH (32) +#define NBL_EPRO_INIT_DONE_DWLEN (1) +union epro_init_done_u { + struct epro_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_EPRO_CIF_ERR_INFO_ADDR (0xe74040) +#define NBL_EPRO_CIF_ERR_INFO_DEPTH (1) +#define NBL_EPRO_CIF_ERR_INFO_WIDTH (32) +#define NBL_EPRO_CIF_ERR_INFO_DWLEN (1) +union epro_cif_err_info_u { + struct epro_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_EPRO_CFG_ERR_INFO_ADDR (0xe74050) +#define NBL_EPRO_CFG_ERR_INFO_DEPTH (1) +#define NBL_EPRO_CFG_ERR_INFO_WIDTH (32) +#define NBL_EPRO_CFG_ERR_INFO_DWLEN (1) +union epro_cfg_err_info_u { + struct epro_cfg_err_info { + u32 addr:10; /* [9:0] Default:0x0 RO */ + u32 id:3; /* [12:10] Default:0x0 RO */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_EPRO_CAR_CTRL_ADDR (0xe74100) +#define NBL_EPRO_CAR_CTRL_DEPTH (1) +#define NBL_EPRO_CAR_CTRL_WIDTH (32) +#define
NBL_EPRO_CAR_CTRL_DWLEN (1) +union epro_car_ctrl_u { + struct epro_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_EPRO_INIT_START_ADDR (0xe74180) +#define NBL_EPRO_INIT_START_DEPTH (1) +#define NBL_EPRO_INIT_START_WIDTH (32) +#define NBL_EPRO_INIT_START_DWLEN (1) +union epro_init_start_u { + struct epro_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_INIT_START_DWLEN]; +} __packed; + +#define NBL_EPRO_FLAG_SEL_ADDR (0xe74200) +#define NBL_EPRO_FLAG_SEL_DEPTH (1) +#define NBL_EPRO_FLAG_SEL_WIDTH (32) +#define NBL_EPRO_FLAG_SEL_DWLEN (1) +union epro_flag_sel_u { + struct epro_flag_sel { + u32 dir_offset_en:1; /* [0] Default:0x1 RW */ + u32 dir_offset:5; /* [5:1] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_FLAG_SEL_DWLEN]; +} __packed; + +#define NBL_EPRO_ACT_SEL_EN_ADDR (0xe74214) +#define NBL_EPRO_ACT_SEL_EN_DEPTH (1) +#define NBL_EPRO_ACT_SEL_EN_WIDTH (32) +#define NBL_EPRO_ACT_SEL_EN_DWLEN (1) +union epro_act_sel_en_u { + struct epro_act_sel_en { + u32 rssidx_en:1; /* [0] Default:0x1 RW */ + u32 dport_en:1; /* [1] Default:0x1 RW */ + u32 mirroridx_en:1; /* [2] Default:0x1 RW */ + u32 dqueue_en:1; /* [3] Default:0x1 RW */ + u32 encap_en:1; /* [4] Default:0x1 RW */ + u32 pop_8021q_en:1; /* [5] Default:0x1 RW */ + u32 pop_qinq_en:1; /* [6] Default:0x1 RW */ + u32 push_cvlan_en:1; /* [7] Default:0x1 RW */ + u32 push_svlan_en:1; /* [8] Default:0x1 RW */ + u32 replace_cvlan_en:1; /* [9] Default:0x1 RW */ + u32 replace_svlan_en:1; /* [10] Default:0x1 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_ACT_SEL_EN_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID0_ADDR (0xe74218) +#define NBL_EPRO_AM_ACT_ID0_DEPTH (1) +#define NBL_EPRO_AM_ACT_ID0_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID0_DWLEN (1) +union epro_am_act_id0_u { + struct epro_am_act_id0 { + u32 replace_cvlan:6; /* [5:0] Default:0x2b RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 replace_svlan:6; /* [13:8] Default:0x2a RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 push_cvlan:6; /* [21:16] Default:0x2d RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 push_svlan:6; /* [29:24] Default:0x2c RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID0_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID1_ADDR (0xe7421c) +#define NBL_EPRO_AM_ACT_ID1_DEPTH (1) +#define NBL_EPRO_AM_ACT_ID1_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID1_DWLEN (1) +union epro_am_act_id1_u { + struct epro_am_act_id1 { + u32 pop_qinq:6; /* [5:0] Default:0x29 RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 pop_8021q:6; /* [13:08] Default:0x28 RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 dport:6; /* [21:16] Default:0x9 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 dqueue:6; /* [29:24] Default:0xa RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID1_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID2_ADDR (0xe74220) +#define NBL_EPRO_AM_ACT_ID2_DEPTH (1) +#define NBL_EPRO_AM_ACT_ID2_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID2_DWLEN (1) +union epro_am_act_id2_u { + struct epro_am_act_id2 { + u32 rssidx:6; /* [5:0] Default:0x4 
RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 mirroridx:6; /* [13:8] Default:0x8 RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 car:6; /* [21:16] Default:0x5 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 encap:6; /* [29:24] Default:0x2e RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID2_DWLEN]; +} __packed; + +#define NBL_EPRO_AM_ACT_ID3_ADDR (0xe74224) +#define NBL_EPRO_AM_ACT_ID3_DEPTH (1) +#define NBL_EPRO_AM_ACT_ID3_WIDTH (32) +#define NBL_EPRO_AM_ACT_ID3_DWLEN (1) +union epro_am_act_id3_u { + struct epro_am_act_id3 { + u32 outer_sport_mdf:6; /* [5:0] Default:0x30 RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 pri_mdf:6; /* [13:8] Default:0x15 RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 dp_hash0:6; /* [21:16] Default:0x13 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 dp_hash1:6; /* [29:24] Default:0x14 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_AM_ACT_ID3_DWLEN]; +} __packed; + +#define NBL_EPRO_ACTION_PRIORITY_ADDR (0xe74230) +#define NBL_EPRO_ACTION_PRIORITY_DEPTH (1) +#define NBL_EPRO_ACTION_PRIORITY_WIDTH (32) +#define NBL_EPRO_ACTION_PRIORITY_DWLEN (1) +union epro_action_priority_u { + struct epro_action_priority { + u32 mirroridx:2; /* [1:0] Default:0x0 RW */ + u32 car:2; /* [3:2] Default:0x0 RW */ + u32 dqueue:2; /* [5:4] Default:0x0 RW */ + u32 dport:2; /* [7:6] Default:0x0 RW */ + u32 pop_8021q:2; /* [9:8] Default:0x0 RW */ + u32 pop_qinq:2; /* [11:10] Default:0x0 RW */ + u32 replace_inner_vlan:2; /* [13:12] Default:0x0 RW */ + u32 replace_outer_vlan:2; /* [15:14] Default:0x0 RW */ + u32 push_inner_vlan:2; /* [17:16] Default:0x0 RW */ + u32 push_outer_vlan:2; /* [19:18] Default:0x0 RW */ + u32 outer_sport_mdf:2; /* [21:20] Default:0x0 RW */ + u32 pri_mdf:2; /* [23:22] Default:0x0 RW */ + u32 dp_hash0:2; /* [25:24] Default:0x0 RW */ + u32 dp_hash1:2; /* [27:26] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_ACTION_PRIORITY_DWLEN]; +} __packed; + +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_ADDR (0xe74234) +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_DEPTH (1) +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_WIDTH (32) +#define NBL_EPRO_MIRROR_ACTION_PRIORITY_DWLEN (1) +union epro_mirror_action_priority_u { + struct epro_mirror_action_priority { + u32 car:2; /* [1:0] Default:0x0 RW */ + u32 dqueue:2; /* [3:2] Default:0x0 RW */ + u32 dport:2; /* [5:4] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_MIRROR_ACTION_PRIORITY_DWLEN]; +} __packed; + +#define NBL_EPRO_SET_FLAGS_ADDR (0xe74238) +#define NBL_EPRO_SET_FLAGS_DEPTH (1) +#define NBL_EPRO_SET_FLAGS_WIDTH (32) +#define NBL_EPRO_SET_FLAGS_DWLEN (1) +union epro_set_flags_u { + struct epro_set_flags { + u32 set_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_SET_FLAGS_DWLEN]; +} __packed; + +#define NBL_EPRO_CLEAR_FLAGS_ADDR (0xe7423c) +#define NBL_EPRO_CLEAR_FLAGS_DEPTH (1) +#define NBL_EPRO_CLEAR_FLAGS_WIDTH (32) +#define NBL_EPRO_CLEAR_FLAGS_DWLEN (1) +union epro_clear_flags_u { + struct epro_clear_flags { + u32 clear_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_CLEAR_FLAGS_DWLEN]; +} __packed; + +#define NBL_EPRO_RSS_SK_ADDR (0xe74400) +#define NBL_EPRO_RSS_SK_DEPTH (1) +#define NBL_EPRO_RSS_SK_WIDTH (320) +#define NBL_EPRO_RSS_SK_DWLEN (10) +union epro_rss_sk_u { + struct epro_rss_sk { + u32 sk_arr[10]; /* [319:0] Default:0x0 RW */ + } 
__packed info; + u32 data[NBL_EPRO_RSS_SK_DWLEN]; +} __packed; + +#define NBL_EPRO_VXLAN_SP_ADDR (0xe74500) +#define NBL_EPRO_VXLAN_SP_DEPTH (1) +#define NBL_EPRO_VXLAN_SP_WIDTH (32) +#define NBL_EPRO_VXLAN_SP_DWLEN (1) +union epro_vxlan_sp_u { + struct epro_vxlan_sp { + u32 vxlan_tnl_sp_min:16; /* [15:0] Default:0x8000 RW */ + u32 vxlan_tnl_sp_max:16; /* [31:16] Default:0xee48 RW */ + } __packed info; + u32 data[NBL_EPRO_VXLAN_SP_DWLEN]; +} __packed; + +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_ADDR (0xe74600) +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_DEPTH (1) +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_WIDTH (32) +#define NBL_EPRO_LOOP_SCH_COS_DEFAULT_DWLEN (1) +union epro_loop_sch_cos_default_u { + struct epro_loop_sch_cos_default { + u32 sch_cos:3; /* [2:0] Default:0x0 RW */ + u32 pfc_mode:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_LOOP_SCH_COS_DEFAULT_DWLEN]; +} __packed; + +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_ADDR (0xe74604) +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_DEPTH (1) +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_WIDTH (32) +#define NBL_EPRO_MIRROR_PKT_COS_DEFAULT_DWLEN (1) +union epro_mirror_pkt_cos_default_u { + struct epro_mirror_pkt_cos_default { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_MIRROR_PKT_COS_DEFAULT_DWLEN]; +} __packed; + +#define NBL_EPRO_NO_DPORT_REDIRECT_ADDR (0xe7463c) +#define NBL_EPRO_NO_DPORT_REDIRECT_DEPTH (1) +#define NBL_EPRO_NO_DPORT_REDIRECT_WIDTH (32) +#define NBL_EPRO_NO_DPORT_REDIRECT_DWLEN (1) +union epro_no_dport_redirect_u { + struct epro_no_dport_redirect { + u32 dport:16; /* [15:0] Default:0x0 RW */ + u32 dqueue:11; /* [26:16] Default:0x0 RW */ + u32 dqueue_en:1; /* [27] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_NO_DPORT_REDIRECT_DWLEN]; +} __packed; + +#define NBL_EPRO_SCH_COS_MAP_ETH0_ADDR (0xe74640) +#define NBL_EPRO_SCH_COS_MAP_ETH0_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH0_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH0_DWLEN (1) +union epro_sch_cos_map_eth0_u { + struct epro_sch_cos_map_eth0 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH0_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_ETH0_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH0_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH0_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_ETH1_ADDR (0xe74660) +#define NBL_EPRO_SCH_COS_MAP_ETH1_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH1_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH1_DWLEN (1) +union epro_sch_cos_map_eth1_u { + struct epro_sch_cos_map_eth1 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH1_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_ETH1_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH1_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH1_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_ETH2_ADDR (0xe74680) +#define NBL_EPRO_SCH_COS_MAP_ETH2_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH2_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH2_DWLEN (1) +union epro_sch_cos_map_eth2_u { + struct epro_sch_cos_map_eth2 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH2_DWLEN]; +} __packed; 
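Every generated table here follows the same addressing convention: WIDTH bits per entry, DWLEN (= WIDTH/32) dwords per entry, DEPTH entries, and entry r starting at ADDR + 4 * DWLEN * r, which is exactly what the _REG(r) macros compute. A short sketch against the one-dword eth0 cos-map table above, assuming `regs` is the device's ioremapped register BAR; the helper name is a hypothetical example, not part of this patch:

#include <linux/types.h>
#include <linux/io.h>

/* Hypothetical helper: program one sch_cos -> {pkt_cos, dscp} mapping for
 * eth0. 'cos' indexes the 8-entry table; each entry is a single dword, so
 * one writel() covers the whole row.
 */
static void nbl_epro_set_eth0_cos_map(void __iomem *regs, u32 cos,
				      u32 pkt_cos, u32 dscp)
{
	union epro_sch_cos_map_eth0_u ent = {};

	ent.info.pkt_cos = pkt_cos;	/* [2:0] */
	ent.info.dscp = dscp;		/* [8:3] */
	writel(ent.data[0], regs + NBL_EPRO_SCH_COS_MAP_ETH0_REG(cos));
}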
+#define NBL_EPRO_SCH_COS_MAP_ETH2_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH2_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH2_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_ETH3_ADDR (0xe746a0) +#define NBL_EPRO_SCH_COS_MAP_ETH3_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_ETH3_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_ETH3_DWLEN (1) +union epro_sch_cos_map_eth3_u { + struct epro_sch_cos_map_eth3 { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_ETH3_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_ETH3_REG(r) (NBL_EPRO_SCH_COS_MAP_ETH3_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_ETH3_DWLEN * 4) * (r)) + +#define NBL_EPRO_SCH_COS_MAP_LOOP_ADDR (0xe746c0) +#define NBL_EPRO_SCH_COS_MAP_LOOP_DEPTH (8) +#define NBL_EPRO_SCH_COS_MAP_LOOP_WIDTH (32) +#define NBL_EPRO_SCH_COS_MAP_LOOP_DWLEN (1) +union epro_sch_cos_map_loop_u { + struct epro_sch_cos_map_loop { + u32 pkt_cos:3; /* [2:0] Default:0x0 RW */ + u32 dscp:6; /* [8:3] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_SCH_COS_MAP_LOOP_DWLEN]; +} __packed; +#define NBL_EPRO_SCH_COS_MAP_LOOP_REG(r) (NBL_EPRO_SCH_COS_MAP_LOOP_ADDR + \ + (NBL_EPRO_SCH_COS_MAP_LOOP_DWLEN * 4) * (r)) + +#define NBL_EPRO_PORT_PRI_MDF_EN_ADDR (0xe746e0) +#define NBL_EPRO_PORT_PRI_MDF_EN_DEPTH (1) +#define NBL_EPRO_PORT_PRI_MDF_EN_WIDTH (32) +#define NBL_EPRO_PORT_PRI_MDF_EN_DWLEN (1) +union epro_port_pri_mdf_en_u { + struct epro_port_pri_mdf_en { + u32 eth0:1; /* [0] Default:0x0 RW */ + u32 eth1:1; /* [1] Default:0x0 RW */ + u32 eth2:1; /* [2] Default:0x0 RW */ + u32 eth3:1; /* [3] Default:0x0 RW */ + u32 loop:1; /* [4] Default:0x0 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_PORT_PRI_MDF_EN_DWLEN]; +} __packed; + +#define NBL_EPRO_CFG_TEST_ADDR (0xe7480c) +#define NBL_EPRO_CFG_TEST_DEPTH (1) +#define NBL_EPRO_CFG_TEST_WIDTH (32) +#define NBL_EPRO_CFG_TEST_DWLEN (1) +union epro_cfg_test_u { + struct epro_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_EPRO_BP_STATE_ADDR (0xe74b00) +#define NBL_EPRO_BP_STATE_DEPTH (1) +#define NBL_EPRO_BP_STATE_WIDTH (32) +#define NBL_EPRO_BP_STATE_DWLEN (1) +union epro_bp_state_u { + struct epro_bp_state { + u32 in_bp:1; /* [0] Default:0x0 RO */ + u32 out_bp:1; /* [1] Default:0x0 RO */ + u32 inter_bp:1; /* [2] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_BP_STATE_DWLEN]; +} __packed; + +#define NBL_EPRO_BP_HISTORY_ADDR (0xe74b04) +#define NBL_EPRO_BP_HISTORY_DEPTH (1) +#define NBL_EPRO_BP_HISTORY_WIDTH (32) +#define NBL_EPRO_BP_HISTORY_DWLEN (1) +union epro_bp_history_u { + struct epro_bp_history { + u32 in_bp:1; /* [0] Default:0x0 RC */ + u32 out_bp:1; /* [1] Default:0x0 RC */ + u32 inter_bp:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_EPRO_MT_ADDR (0xe75400) +#define NBL_EPRO_MT_DEPTH (16) +#define NBL_EPRO_MT_WIDTH (64) +#define NBL_EPRO_MT_DWLEN (2) +union epro_mt_u { + struct epro_mt { + u32 dport:16; /* [15:0] Default:0x0 RW */ + u32 dqueue:11; /* [26:16] Default:0x0 RW */ + u32 car_en:1; /* [27] Default:0x0 RW */ + u32 car_id:10; /* [37:28] Default:0x0 RW */ + u32 vld:1; /* [38] Default:0x0 RW */ + u32 rsv:25; /* [63:39] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_MT_DWLEN]; 
+} __packed; +#define NBL_EPRO_MT_REG(r) (NBL_EPRO_MT_ADDR + \ + (NBL_EPRO_MT_DWLEN * 4) * (r)) + +#define NBL_EPRO_KG_TCAM_ADDR (0xe75480) +#define NBL_EPRO_KG_TCAM_DEPTH (16) +#define NBL_EPRO_KG_TCAM_WIDTH (64) +#define NBL_EPRO_KG_TCAM_DWLEN (2) +union epro_kg_tcam_u { + struct epro_kg_tcam { + u32 mask:16; /* [15:0] Default:0x0 RW */ + u32 data:16; /* [31:16] Default:0x0 RW */ + u32 valid_bit:1; /* [32] Default:0x0 RW */ + u32 rsv:31; /* [63:33] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_KG_TCAM_DWLEN]; +} __packed; +#define NBL_EPRO_KG_TCAM_REG(r) (NBL_EPRO_KG_TCAM_ADDR + \ + (NBL_EPRO_KG_TCAM_DWLEN * 4) * (r)) + +#define NBL_EPRO_VPT_ADDR (0xe78000) +#define NBL_EPRO_VPT_DEPTH (1024) +#define NBL_EPRO_VPT_WIDTH (64) +#define NBL_EPRO_VPT_DWLEN (2) +union epro_vpt_u { + struct epro_vpt { + u32 cvlan:16; /* [15:0] Default:0x0 RW */ + u32 svlan:16; /* [31:16] Default:0x0 RW */ + u32 fwd:1; /* [32] Default:0x0 RW */ + u32 mirror_en:1; /* [33] Default:0x0 RW */ + u32 mirror_id:4; /* [37:34] Default:0x0 RW */ + u32 car_en:1; /* [38] Default:0x0 RW */ + u32 car_id:10; /* [48:39] Default:0x0 RW */ + u32 pop_vlan:2; /* [50:49] Default:0x0 RW */ + u32 push_vlan:2; /* [52:51] Default:0x0 RW */ + u32 replace_vlan:2; /* [54:53] Default:0x0 RW */ + u32 rss_alg_sel:1; /* [55] Default:0x0 RW */ + u32 rss_key_type_btm:2; /* [57:56] Default:0x0 RW */ + u32 vld:1; /* [58] Default:0x0 RW */ + u32 rsv:5; /* [63:59] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_VPT_DWLEN]; +} __packed; +#define NBL_EPRO_VPT_REG(r) (NBL_EPRO_VPT_ADDR + \ + (NBL_EPRO_VPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_EPT_ADDR (0xe75800) +#define NBL_EPRO_EPT_DEPTH (8) +#define NBL_EPRO_EPT_WIDTH (64) +#define NBL_EPRO_EPT_DWLEN (2) +union epro_ept_u { + struct epro_ept { + u32 cvlan:16; /* [15:0] Default:0x0 RW */ + u32 svlan:16; /* [31:16] Default:0x0 RW */ + u32 fwd:1; /* [32] Default:0x0 RW */ + u32 mirror_en:1; /* [33] Default:0x0 RW */ + u32 mirror_id:4; /* [37:34] Default:0x0 RW */ + u32 pop_vlan:2; /* [39:38] Default:0x0 RW */ + u32 push_vlan:2; /* [41:40] Default:0x0 RW */ + u32 replace_vlan:2; /* [43:42] Default:0x0 RW */ + u32 lag_alg_sel:2; /* [45:44] Default:0x0 RW */ + u32 lag_port_btm:4; /* [49:46] Default:0x0 RW */ + u32 lag_l2_protect_en:1; /* [50] Default:0x0 RW */ + u32 pfc_sch_cos_default:3; /* [53:51] Default:0x0 RW */ + u32 pfc_mode:1; /* [54] Default:0x0 RW */ + u32 vld:1; /* [55] Default:0x0 RW */ + u32 rsv:8; /* [63:56] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_EPT_DWLEN]; +} __packed; +#define NBL_EPRO_EPT_REG(r) (NBL_EPRO_EPT_ADDR + \ + (NBL_EPRO_EPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_AFT_ADDR (0xe75900) +#define NBL_EPRO_AFT_DEPTH (16) +#define NBL_EPRO_AFT_WIDTH (64) +#define NBL_EPRO_AFT_DWLEN (2) +union epro_aft_u { + struct epro_aft { + u32 action_filter_btm_arr[2]; /* [63:0] Default:0x0 RW */ + } __packed info; + u64 data; +} __packed; +#define NBL_EPRO_AFT_REG(r) (NBL_EPRO_AFT_ADDR + \ + (NBL_EPRO_AFT_DWLEN * 4) * (r)) + +#define NBL_EPRO_RSS_PT_ADDR (0xe76000) +#define NBL_EPRO_RSS_PT_DEPTH (1024) +#define NBL_EPRO_RSS_PT_WIDTH (64) +#define NBL_EPRO_RSS_PT_DWLEN (2) +union epro_rss_pt_u { + struct epro_rss_pt { + u32 entry_size:3; /* [2:0] Default:0x0 RW */ + u32 offset1:14; /* [16:3] Default:0x0 RW */ + u32 offset1_vld:1; /* [17:17] Default:0x0 RW */ + u32 offset0:14; /* [31:18] Default:0x0 RW */ + u32 offset0_vld:1; /* [32] Default:0x0 RW */ + u32 vld:1; /* [33] Default:0x0 RW */ + u32 rsv:30; /* [63:34] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_EPRO_RSS_PT_DWLEN]; +} __packed; +#define NBL_EPRO_RSS_PT_REG(r) (NBL_EPRO_RSS_PT_ADDR + \ + (NBL_EPRO_RSS_PT_DWLEN * 4) * (r)) + +#define NBL_EPRO_ECPVPT_ADDR (0xe7a000) +#define NBL_EPRO_ECPVPT_DEPTH (256) +#define NBL_EPRO_ECPVPT_WIDTH (32) +#define NBL_EPRO_ECPVPT_DWLEN (1) +union epro_ecpvpt_u { + struct epro_ecpvpt { + u32 encap_cvlan_vld0:1; /* [0] Default:0x0 RW */ + u32 encap_svlan_vld0:1; /* [1] Default:0x0 RW */ + u32 encap_vlan_vld1_15:30; /* [31:2] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_ECPVPT_DWLEN]; +} __packed; +#define NBL_EPRO_ECPVPT_REG(r) (NBL_EPRO_ECPVPT_ADDR + \ + (NBL_EPRO_ECPVPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_ECPIPT_ADDR (0xe7b000) +#define NBL_EPRO_ECPIPT_DEPTH (128) +#define NBL_EPRO_ECPIPT_WIDTH (32) +#define NBL_EPRO_ECPIPT_DWLEN (1) +union epro_ecpipt_u { + struct epro_ecpipt { + u32 encap_ip_type0:1; /* [0] Default:0x0 RW */ + u32 encap_ip_type1_31:31; /* [31:1] Default:0x0 RW */ + } __packed info; + u32 data[NBL_EPRO_ECPIPT_DWLEN]; +} __packed; +#define NBL_EPRO_ECPIPT_REG(r) (NBL_EPRO_ECPIPT_ADDR + \ + (NBL_EPRO_ECPIPT_DWLEN * 4) * (r)) + +#define NBL_EPRO_RSS_RET_ADDR (0xe7c000) +#define NBL_EPRO_RSS_RET_DEPTH (8192) +#define NBL_EPRO_RSS_RET_WIDTH (32) +#define NBL_EPRO_RSS_RET_DWLEN (1) +union epro_rss_ret_u { + struct epro_rss_ret { + u32 dqueue0:11; /* [10:0] Default:0x0 RW */ + u32 vld0:1; /* [11] Default:0x0 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 dqueue1:11; /* [26:16] Default:0x0 RW */ + u32 vld1:1; /* [27] Default:0x0 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_EPRO_RSS_RET_DWLEN]; +} __packed; +#define NBL_EPRO_RSS_RET_REG(r) (NBL_EPRO_RSS_RET_ADDR + \ + (NBL_EPRO_RSS_RET_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h new file mode 100644 index 0000000000000000000000000000000000000000..37fe59d6ad8a3f16598802c650e66ee1478b958f --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_fem.h @@ -0,0 +1,1485 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_FEM_H +#define NBL_FEM_H 1 + +#include <linux/types.h> + +#define NBL_FEM_BASE (0x00A04000) + +#define NBL_FEM_INT_STATUS_ADDR (0xa04000) +#define NBL_FEM_INT_STATUS_DEPTH (1) +#define NBL_FEM_INT_STATUS_WIDTH (32) +#define NBL_FEM_INT_STATUS_DWLEN (1) +union fem_int_status_u { + struct fem_int_status { + u32 rsv3:2; /* [01:00] Default:0x0 RO */ + u32 fifo_ovf_err:1; /* [02:02] Default:0x0 RWC */ + u32 fifo_udf_err:1; /* [03:03] Default:0x0 RWC */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 bank_cflt_err:1; /* [08:08] Default:0x0 RWC */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_FEM_INT_MASK_ADDR (0xa04004) +#define NBL_FEM_INT_MASK_DEPTH (1) +#define NBL_FEM_INT_MASK_WIDTH (32) +#define NBL_FEM_INT_MASK_DWLEN (1) +union fem_int_mask_u { + struct fem_int_mask { + u32 rsv3:2; /* [01:00] Default:0x0 RO */ + u32 fifo_ovf_err:1; /* [02:02] Default:0x0 RW */ + u32 fifo_udf_err:1; /* [03:03] Default:0x0 RW */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 bank_cflt_err:1; /* [08:08] Default:0x0 RW */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INT_MASK_DWLEN]; +} __packed; + +#define NBL_FEM_INT_SET_ADDR (0xa04008) +#define NBL_FEM_INT_SET_DEPTH (1) +#define NBL_FEM_INT_SET_WIDTH (32) +#define NBL_FEM_INT_SET_DWLEN (1) +union fem_int_set_u { + struct fem_int_set { + u32 rsv3:2; /* [01:00] Default:0x0 RO */ + u32 fifo_ovf_err:1; /* [02:02] Default:0x0 WO */ + u32 fifo_udf_err:1; /* [03:03] Default:0x0 WO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv2:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 bank_cflt_err:1; /* [08:08] Default:0x0 WO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INT_SET_DWLEN]; +} __packed; + +#define NBL_FEM_INIT_DONE_ADDR (0xa0400c) +#define NBL_FEM_INIT_DONE_DEPTH (1) +#define NBL_FEM_INIT_DONE_WIDTH (32) +#define NBL_FEM_INIT_DONE_DWLEN (1) +union fem_init_done_u { + struct fem_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_FEM_CIF_ERR_INFO_ADDR (0xa04040) +#define NBL_FEM_CIF_ERR_INFO_DEPTH (1) +#define NBL_FEM_CIF_ERR_INFO_WIDTH (32) +#define NBL_FEM_CIF_ERR_INFO_DWLEN (1) +union fem_cif_err_info_u { + struct fem_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_FEM_CFG_ERR_INFO_ADDR (0xa04068) +#define NBL_FEM_CFG_ERR_INFO_DEPTH (1) +#define NBL_FEM_CFG_ERR_INFO_WIDTH (32) +#define NBL_FEM_CFG_ERR_INFO_DWLEN (1) +union fem_cfg_err_info_u { + struct fem_cfg_err_info { + u32 addr:24; /* [23:00] Default:0x0 RO */ + u32 id:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define
NBL_FEM_BANK_CFLT_ERR_INFO0_ADDR (0xa04074) +#define NBL_FEM_BANK_CFLT_ERR_INFO0_DEPTH (1) +#define NBL_FEM_BANK_CFLT_ERR_INFO0_WIDTH (32) +#define NBL_FEM_BANK_CFLT_ERR_INFO0_DWLEN (1) +union fem_bank_cflt_err_info0_u { + struct fem_bank_cflt_err_info0 { + u32 addr0:24; /* [23:00] Default:0x0 RO */ + u32 id:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BANK_CFLT_ERR_INFO0_DWLEN]; +} __packed; + +#define NBL_FEM_BANK_CFLT_ERR_INFO1_ADDR (0xa04078) +#define NBL_FEM_BANK_CFLT_ERR_INFO1_DEPTH (1) +#define NBL_FEM_BANK_CFLT_ERR_INFO1_WIDTH (32) +#define NBL_FEM_BANK_CFLT_ERR_INFO1_DWLEN (1) +union fem_bank_cflt_err_info1_u { + struct fem_bank_cflt_err_info1 { + u32 addr1:24; /* [23:00] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BANK_CFLT_ERR_INFO1_DWLEN]; +} __packed; + +#define NBL_FEM_CAR_CTRL_ADDR (0xa04100) +#define NBL_FEM_CAR_CTRL_DEPTH (1) +#define NBL_FEM_CAR_CTRL_WIDTH (32) +#define NBL_FEM_CAR_CTRL_DWLEN (1) +union fem_car_ctrl_u { + struct fem_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_BP_TH_ADDR (0xa04118) +#define NBL_FEM_BP_TH_DEPTH (1) +#define NBL_FEM_BP_TH_WIDTH (32) +#define NBL_FEM_BP_TH_DWLEN (1) +union fem_bp_th_u { + struct fem_bp_th { + u32 th:12; /* [11:00] Default:0xf RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BP_TH_DWLEN]; +} __packed; + +#define NBL_FEM_HT_BANK_SEL_BTM_ADDR (0xa0411c) +#define NBL_FEM_HT_BANK_SEL_BTM_DEPTH (1) +#define NBL_FEM_HT_BANK_SEL_BTM_WIDTH (32) +#define NBL_FEM_HT_BANK_SEL_BTM_DWLEN (1) +union fem_ht_bank_sel_btm_u { + struct fem_ht_bank_sel_btm { + u32 port0_ht_depth:5; /* [04:00] Default:0x8 RW */ + u32 rsv2:3; /* [07:05] Default:0x0 RO */ + u32 port1_ht_depth:5; /* [12:08] Default:0x8 RW */ + u32 rsv1:3; /* [15:13] Default:0x0 RO */ + u32 port2_ht_depth:5; /* [20:16] Default:0x8 RW */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_HT_BANK_SEL_BTM_DWLEN]; +} __packed; + +#define NBL_FEM_INIT_START_ADDR (0xa04180) +#define NBL_FEM_INIT_START_DEPTH (1) +#define NBL_FEM_INIT_START_WIDTH (32) +#define NBL_FEM_INIT_START_DWLEN (1) +union fem_init_start_u { + struct fem_init_start { + u32 start:1; /* [00:00] Default:0x0 WO */ + u32 ht_bank_init:7; /* [07:01] Default:0x0 WO */ + u32 rsv:24; /* [31:08] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_INIT_START_DWLEN]; +} __packed; + +#define NBL_FEM_MHASH_ADDR (0xa04188) +#define NBL_FEM_MHASH_DEPTH (1) +#define NBL_FEM_MHASH_WIDTH (32) +#define NBL_FEM_MHASH_DWLEN (1) +union fem_mhash_u { + struct fem_mhash { + u32 mod_action_id:6; /* [05:00] Default:0x12 RW */ + u32 hash0_action_id:6; /* [11:06] Default:0x13 RW */ + u32 hash1_action_id:6; /* [17:12] Default:0x14 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_MHASH_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_ACCESS_CFG_ADDR (0xa04190) +#define NBL_FEM_CPU_ACCESS_CFG_DEPTH (1) +#define NBL_FEM_CPU_ACCESS_CFG_WIDTH (32) +#define NBL_FEM_CPU_ACCESS_CFG_DWLEN (1) +union fem_cpu_access_cfg_u { + struct fem_cpu_access_cfg { + u32 cpu_access_bp_th:8; /* [7:0] Default:0xf RW */ + u32 rsv1:8; /* [15:8] Default:0x0 RO */ + u32 cpu_access_timeout_th:10; /* [25:16] 
Default:0x50 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_ACCESS_CFG_DWLEN]; +} __packed; + +#define NBL_FEM_HT_BANK_SEL_BITMAP_ADDR (0xa04200) +#define NBL_FEM_HT_BANK_SEL_BITMAP_DEPTH (1) +#define NBL_FEM_HT_BANK_SEL_BITMAP_WIDTH (32) +#define NBL_FEM_HT_BANK_SEL_BITMAP_DWLEN (1) +union fem_ht_bank_sel_bitmap_u { + struct fem_ht_bank_sel_bitmap { + u32 port0_bank_sel:8; /* [7:0] Default:0x1 RW */ + u32 port1_bank_sel:8; /* [15:8] Default:0x6 RW */ + u32 port2_bank_sel:8; /* [23:16] Default:0x78 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_HT_BANK_SEL_BITMAP_DWLEN]; +} __packed; + +#define NBL_FEM_KT_BANK_SEL_BITMAP_ADDR (0xa04204) +#define NBL_FEM_KT_BANK_SEL_BITMAP_DEPTH (1) +#define NBL_FEM_KT_BANK_SEL_BITMAP_WIDTH (32) +#define NBL_FEM_KT_BANK_SEL_BITMAP_DWLEN (1) +union fem_kt_bank_sel_bitmap_u { + struct fem_kt_bank_sel_bitmap { + u32 port0_bank_sel:8; /* [7:0] Default:0x1 RW */ + u32 port1_bank_sel:8; /* [15:8] Default:0x6 RW */ + u32 port2_bank_sel:8; /* [23:16] Default:0xF8 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_KT_BANK_SEL_BITMAP_DWLEN]; +} __packed; + +#define NBL_FEM_AT_BANK_SEL_BITMAP_ADDR (0xa04208) +#define NBL_FEM_AT_BANK_SEL_BITMAP_DEPTH (1) +#define NBL_FEM_AT_BANK_SEL_BITMAP_WIDTH (32) +#define NBL_FEM_AT_BANK_SEL_BITMAP_DWLEN (1) +union fem_at_bank_sel_bitmap_u { + struct fem_at_bank_sel_bitmap { + u32 port0_bank_sel:12; /* [11:0] Default:0x3 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 port1_bank_sel:12; /* [27:16] Default:0x1C RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AT_BANK_SEL_BITMAP_DWLEN]; +} __packed; + +#define NBL_FEM_AT_BANK_SEL_BITMAP2_ADDR (0xa0420c) +#define NBL_FEM_AT_BANK_SEL_BITMAP2_DEPTH (1) +#define NBL_FEM_AT_BANK_SEL_BITMAP2_WIDTH (32) +#define NBL_FEM_AT_BANK_SEL_BITMAP2_DWLEN (1) +union fem_at_bank_sel_bitmap2_u { + struct fem_at_bank_sel_bitmap2 { + u32 port2_bank_sel:12; /* [11:0] Default:0xFE0 RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AT_BANK_SEL_BITMAP2_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_EN_ADDR (0xa04210) +#define NBL_FEM_AGE_EN_DEPTH (1) +#define NBL_FEM_AGE_EN_WIDTH (32) +#define NBL_FEM_AGE_EN_DWLEN (1) +union fem_age_en_u { + struct fem_age_en { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AGE_EN_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_HARD_STEP_ADDR (0xa04214) +#define NBL_FEM_AGE_HARD_STEP_DEPTH (1) +#define NBL_FEM_AGE_HARD_STEP_WIDTH (32) +#define NBL_FEM_AGE_HARD_STEP_DWLEN (1) +union fem_age_hard_step_u { + struct fem_age_hard_step { + u32 data:3; /* [2:0] Default:0x6 RW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AGE_HARD_STEP_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_TIME_UNIT_ADDR (0xa04218) +#define NBL_FEM_AGE_TIME_UNIT_DEPTH (1) +#define NBL_FEM_AGE_TIME_UNIT_WIDTH (32) +#define NBL_FEM_AGE_TIME_UNIT_DWLEN (1) +union fem_age_time_unit_u { + struct fem_age_time_unit { + u32 data:32; /* [31:0] Default:0x17CB5 RW */ + } __packed info; + u32 data[NBL_FEM_AGE_TIME_UNIT_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_INFO_HEAD_ADDR (0xa04220) +#define NBL_FEM_AGE_INFO_HEAD_DEPTH (1) +#define NBL_FEM_AGE_INFO_HEAD_WIDTH (32) +#define NBL_FEM_AGE_INFO_HEAD_DWLEN (1) +union fem_age_info_head_u { + struct fem_age_info_head { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + 
u32 data[NBL_FEM_AGE_INFO_HEAD_DWLEN]; +} __packed; + +#define NBL_FEM_KEY_IN_ADDR (0xa04240) +#define NBL_FEM_KEY_IN_DEPTH (1) +#define NBL_FEM_KEY_IN_WIDTH (32) +#define NBL_FEM_KEY_IN_DWLEN (1) +union fem_key_in_u { + struct fem_key_in { + u32 em0_cap_mode:1; /* [0:0] Default:0x1 RW */ + u32 em1_cap_mode:1; /* [01:01] Default:0x1 RW */ + u32 em2_cap_mode:1; /* [02:02] Default:0x1 RW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_KEY_IN_DWLEN]; +} __packed; + +#define NBL_FEM_CAP_ADDR (0xa04244) +#define NBL_FEM_CAP_DEPTH (1) +#define NBL_FEM_CAP_WIDTH (32) +#define NBL_FEM_CAP_DWLEN (1) +union fem_cap_u { + struct fem_cap { + u32 em0_cap_start:1; /* [0:0] Default:0x0 WO */ + u32 em1_cap_start:1; /* [01:01] Default:0x0 WO */ + u32 em2_cap_start:1; /* [02:02] Default:0x0 WO */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CAP_DWLEN]; +} __packed; + +#define NBL_FEM_HT_ACCESS_CTRL_ADDR (0xa04300) +#define NBL_FEM_HT_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_HT_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_HT_ACCESS_CTRL_DWLEN (1) +union fem_ht_access_ctrl_u { + struct fem_ht_access_ctrl { + u32 addr:17; /* [16:00] Default:0x0 RW */ + u32 port:2; /* [18:17] Default:0x0 RW */ + u32 rsv:10; /* [28:19] Default:0x0 RO */ + u32 access_size:1; /* [29:29] Default:0x0 RW */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_HT_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_HT_ACCESS_ACK_ADDR (0xa04304) +#define NBL_FEM_HT_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_HT_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_HT_ACCESS_ACK_DWLEN (1) +union fem_ht_access_ack_u { + struct fem_ht_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:1; /* [01:01] Default:0x0 RWW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_HT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_HT_ACCESS_DATA_ADDR (0xa04308) +#define NBL_FEM_HT_ACCESS_DATA_DEPTH (4) +#define NBL_FEM_HT_ACCESS_DATA_WIDTH (32) +#define NBL_FEM_HT_ACCESS_DATA_DWLEN (1) +union fem_ht_access_data_u { + struct fem_ht_access_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_HT_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_HT_ACCESS_DATA_REG(r) (NBL_FEM_HT_ACCESS_DATA_ADDR + \ + (NBL_FEM_HT_ACCESS_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_KT_ACCESS_CTRL_ADDR (0xa04340) +#define NBL_FEM_KT_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_KT_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_KT_ACCESS_CTRL_DWLEN (1) +union fem_kt_access_ctrl_u { + struct fem_kt_access_ctrl { + u32 addr:17; /* [16:00] Default:0x0 RW */ + u32 rsv:12; /* [28:17] Default:0x0 RO */ + u32 access_size:1; /* [29:29] Default:0x0 RW */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_KT_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_KT_ACCESS_ACK_ADDR (0xa04344) +#define NBL_FEM_KT_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_KT_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_KT_ACCESS_ACK_DWLEN (1) +union fem_kt_access_ack_u { + struct fem_kt_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:1; /* [01:01] Default:0x0 RWW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_KT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_KT_ACCESS_DATA_ADDR (0xa04348) +#define NBL_FEM_KT_ACCESS_DATA_DEPTH (10) +#define NBL_FEM_KT_ACCESS_DATA_WIDTH (32) +#define 
NBL_FEM_KT_ACCESS_DATA_DWLEN (1) +union fem_kt_access_data_u { + struct fem_kt_access_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_KT_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_KT_ACCESS_DATA_REG(r) (NBL_FEM_KT_ACCESS_DATA_ADDR + \ + (NBL_FEM_KT_ACCESS_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_AT_ACCESS_CTRL_ADDR (0xa04390) +#define NBL_FEM_AT_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_AT_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_AT_ACCESS_CTRL_DWLEN (1) +union fem_at_access_ctrl_u { + struct fem_at_access_ctrl { + u32 addr:17; /* [16:00] Default:0x0 RW */ + u32 rsv:12; /* [28:17] Default:0x0 RO */ + u32 access_size:1; /* [29:29] Default:0x0 RW */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_AT_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_AT_ACCESS_ACK_ADDR (0xa04394) +#define NBL_FEM_AT_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_AT_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_AT_ACCESS_ACK_DWLEN (1) +union fem_at_access_ack_u { + struct fem_at_access_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:1; /* [01:01] Default:0x0 RWW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AT_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_AT_ACCESS_DATA_ADDR (0xa04398) +#define NBL_FEM_AT_ACCESS_DATA_DEPTH (6) +#define NBL_FEM_AT_ACCESS_DATA_WIDTH (32) +#define NBL_FEM_AT_ACCESS_DATA_DWLEN (1) +union fem_at_access_data_u { + struct fem_at_access_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_AT_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_AT_ACCESS_DATA_REG(r) (NBL_FEM_AT_ACCESS_DATA_ADDR + \ + (NBL_FEM_AT_ACCESS_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_ADDR (0xa04400) +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_DEPTH (1) +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_WIDTH (32) +#define NBL_FEM_AGE_TBL_ACCESS_CTRL_DWLEN (1) +union fem_age_tbl_access_ctrl_u { + struct fem_age_tbl_access_ctrl { + u32 addr:17; /* [16:0] Default:0x0 RW */ + u32 rsv:13; /* [29:17] Default:0x0 RO */ + u32 rw:1; /* [30:30] Default:0x0 RW */ + u32 start:1; /* [31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_AGE_TBL_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_TBL_ACCESS_ACK_ADDR (0xa04404) +#define NBL_FEM_AGE_TBL_ACCESS_ACK_DEPTH (1) +#define NBL_FEM_AGE_TBL_ACCESS_ACK_WIDTH (32) +#define NBL_FEM_AGE_TBL_ACCESS_ACK_DWLEN (1) +union fem_age_tbl_access_ack_u { + struct fem_age_tbl_access_ack { + u32 done:1; /* [0] Default:0x0 RC */ + u32 status:1; /* [1] Default:0x0 RWW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_AGE_TBL_ACCESS_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_TBL_ACCESS_DATA_ADDR (0xa04408) +#define NBL_FEM_AGE_TBL_ACCESS_DATA_DEPTH (12) +#define NBL_FEM_AGE_TBL_ACCESS_DATA_WIDTH (32) +#define NBL_FEM_AGE_TBL_ACCESS_DATA_DWLEN (1) +union fem_age_tbl_access_data_u { + struct fem_age_tbl_access_data { + u32 data:32; /* [31:0] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_AGE_TBL_ACCESS_DATA_DWLEN]; +} __packed; +#define NBL_FEM_AGE_TBL_ACCESS_DATA_REG(r) (NBL_FEM_AGE_TBL_ACCESS_DATA_ADDR + \ + (NBL_FEM_AGE_TBL_ACCESS_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_ADDR (0xa04500) +#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH0_CTRL_DWLEN (1) +union fem_cpu_insert_search0_ctrl_u { + struct 
fem_cpu_insert_search0_ctrl { + u32 rsv:31; /* [30:00] Default:0x0 RO */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH0_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_ADDR (0xa04504) +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH0_ACK_DWLEN (1) +union fem_cpu_insert_search0_ack_u { + struct fem_cpu_insert_search0_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH0_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_ADDR (0xa04508) +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_DEPTH (11) +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_DWLEN (1) +union fem_cpu_insert_search0_data_u { + struct fem_cpu_insert_search0_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH0_DATA_DWLEN]; +} __packed; +#define NBL_FEM_CPU_INSERT_SEARCH0_DATA_REG(r) (NBL_FEM_CPU_INSERT_SEARCH0_DATA_ADDR + \ + (NBL_FEM_CPU_INSERT_SEARCH0_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_ADDR (0xa04550) +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH1_CTRL_DWLEN (1) +union fem_cpu_insert_search1_ctrl_u { + struct fem_cpu_insert_search1_ctrl { + u32 rsv:31; /* [30:00] Default:0x0 RO */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH1_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_ADDR (0xa04554) +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH1_ACK_DWLEN (1) +union fem_cpu_insert_search1_ack_u { + struct fem_cpu_insert_search1_ack { + u32 done:1; /* [00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH1_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_ADDR (0xa04558) +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_DEPTH (11) +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_DWLEN (1) +union fem_cpu_insert_search1_data_u { + struct fem_cpu_insert_search1_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH1_DATA_DWLEN]; +} __packed; +#define NBL_FEM_CPU_INSERT_SEARCH1_DATA_REG(r) (NBL_FEM_CPU_INSERT_SEARCH1_DATA_ADDR + \ + (NBL_FEM_CPU_INSERT_SEARCH1_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_ADDR (0xa045a0) +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH2_CTRL_DWLEN (1) +union fem_cpu_insert_search2_ctrl_u { + struct fem_cpu_insert_search2_ctrl { + u32 rsv:31; /* [30:00] Default:0x0 RO */ + u32 start:1; /* [31:31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH2_CTRL_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_ADDR (0xa045a4) +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_DEPTH (1) +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH2_ACK_DWLEN (1) +union fem_cpu_insert_search2_ack_u { + struct fem_cpu_insert_search2_ack { + u32 done:1; /* 
[00:00] Default:0x0 RC */ + u32 status:2; /* [02:01] Default:0x0 RWW */ + u32 rsv:29; /* [31:03] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH2_ACK_DWLEN]; +} __packed; + +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_ADDR (0xa045a8) +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_DEPTH (11) +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_WIDTH (32) +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_DWLEN (1) +union fem_cpu_insert_search2_data_u { + struct fem_cpu_insert_search2_data { + u32 data:32; /* [31:00] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_FEM_CPU_INSERT_SEARCH2_DATA_DWLEN]; +} __packed; +#define NBL_FEM_CPU_INSERT_SEARCH2_DATA_REG(r) (NBL_FEM_CPU_INSERT_SEARCH2_DATA_ADDR + \ + (NBL_FEM_CPU_INSERT_SEARCH2_DATA_DWLEN * 4) * (r)) + +#define NBL_FEM_CFG_TEST_ADDR (0xa0480c) +#define NBL_FEM_CFG_TEST_DEPTH (1) +#define NBL_FEM_CFG_TEST_WIDTH (32) +#define NBL_FEM_CFG_TEST_DWLEN (1) +union fem_cfg_test_u { + struct fem_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_FEM_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_FEM_RCV_CMDQ_ADDR (0xa04818) +#define NBL_FEM_RCV_CMDQ_DEPTH (1) +#define NBL_FEM_RCV_CMDQ_WIDTH (32) +#define NBL_FEM_RCV_CMDQ_DWLEN (1) +union fem_rcv_cmdq_u { + struct fem_rcv_cmdq { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_RCV_CMDQ_DWLEN]; +} __packed; + +#define NBL_FEM_SND_CMDQ_ADDR (0xa0481c) +#define NBL_FEM_SND_CMDQ_DEPTH (1) +#define NBL_FEM_SND_CMDQ_WIDTH (32) +#define NBL_FEM_SND_CMDQ_DWLEN (1) +union fem_snd_cmdq_u { + struct fem_snd_cmdq { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_SND_CMDQ_DWLEN]; +} __packed; + +#define NBL_FEM_CMDQ_PRO_ADDR (0xa04820) +#define NBL_FEM_CMDQ_PRO_DEPTH (1) +#define NBL_FEM_CMDQ_PRO_WIDTH (32) +#define NBL_FEM_CMDQ_PRO_DWLEN (1) +union fem_cmdq_pro_u { + struct fem_cmdq_pro { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_CMDQ_PRO_DWLEN]; +} __packed; + +#define NBL_FEM_PP0_REQ_ADDR (0xa04850) +#define NBL_FEM_PP0_REQ_DEPTH (1) +#define NBL_FEM_PP0_REQ_WIDTH (32) +#define NBL_FEM_PP0_REQ_DWLEN (1) +union fem_pp0_req_u { + struct fem_pp0_req { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP0_REQ_DWLEN]; +} __packed; + +#define NBL_FEM_PP0_ALL_RSP_ADDR (0xa04854) +#define NBL_FEM_PP0_ALL_RSP_DEPTH (1) +#define NBL_FEM_PP0_ALL_RSP_WIDTH (32) +#define NBL_FEM_PP0_ALL_RSP_DWLEN (1) +union fem_pp0_all_rsp_u { + struct fem_pp0_all_rsp { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP0_ALL_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_PP0_RSP_ADDR (0xa04858) +#define NBL_FEM_PP0_RSP_DEPTH (1) +#define NBL_FEM_PP0_RSP_WIDTH (32) +#define NBL_FEM_PP0_RSP_DWLEN (1) +union fem_pp0_rsp_u { + struct fem_pp0_rsp { + u32 miss_cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 err_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP0_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_HT_LOOKUP_ADDR (0xa04878) +#define NBL_FEM_EM0_HT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM0_HT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM0_HT_LOOKUP_DWLEN (1) +union fem_em0_ht_lookup_u { + struct fem_em0_ht_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_HT_HIT_ADDR (0xa0487c) +#define NBL_FEM_EM0_HT_HIT_DEPTH (1) +#define NBL_FEM_EM0_HT_HIT_WIDTH (32) +#define 
NBL_FEM_EM0_HT_HIT_DWLEN (1) +union fem_em0_ht_hit_u { + struct fem_em0_ht_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_TCAM_LOOKUP_ADDR (0xa04880) +#define NBL_FEM_EM0_TCAM_LOOKUP_DEPTH (1) +#define NBL_FEM_EM0_TCAM_LOOKUP_WIDTH (32) +#define NBL_FEM_EM0_TCAM_LOOKUP_DWLEN (1) +union fem_em0_tcam_lookup_u { + struct fem_em0_tcam_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_TCAM_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_TCAM_HIT_ADDR (0xa04884) +#define NBL_FEM_EM0_TCAM_HIT_DEPTH (1) +#define NBL_FEM_EM0_TCAM_HIT_WIDTH (32) +#define NBL_FEM_EM0_TCAM_HIT_DWLEN (1) +union fem_em0_tcam_hit_u { + struct fem_em0_tcam_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_KT_LOOKUP_ADDR (0xa04888) +#define NBL_FEM_EM0_KT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM0_KT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM0_KT_LOOKUP_DWLEN (1) +union fem_em0_kt_lookup_u { + struct fem_em0_kt_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_KT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_KT_HIT_ADDR (0xa0488c) +#define NBL_FEM_EM0_KT_HIT_DEPTH (1) +#define NBL_FEM_EM0_KT_HIT_WIDTH (32) +#define NBL_FEM_EM0_KT_HIT_DWLEN (1) +union fem_em0_kt_hit_u { + struct fem_em0_kt_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_KT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_PP1_REQ_ADDR (0xa048b0) +#define NBL_FEM_PP1_REQ_DEPTH (1) +#define NBL_FEM_PP1_REQ_WIDTH (32) +#define NBL_FEM_PP1_REQ_DWLEN (1) +union fem_pp1_req_u { + struct fem_pp1_req { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP1_REQ_DWLEN]; +} __packed; + +#define NBL_FEM_PP1_ALL_RSP_ADDR (0xa048b4) +#define NBL_FEM_PP1_ALL_RSP_DEPTH (1) +#define NBL_FEM_PP1_ALL_RSP_WIDTH (32) +#define NBL_FEM_PP1_ALL_RSP_DWLEN (1) +union fem_pp1_all_rsp_u { + struct fem_pp1_all_rsp { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP1_ALL_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_PP1_RSP_ADDR (0xa048b8) +#define NBL_FEM_PP1_RSP_DEPTH (1) +#define NBL_FEM_PP1_RSP_WIDTH (32) +#define NBL_FEM_PP1_RSP_DWLEN (1) +union fem_pp1_rsp_u { + struct fem_pp1_rsp { + u32 miss_cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 err_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP1_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_HT_LOOKUP_ADDR (0xa048d8) +#define NBL_FEM_EM1_HT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM1_HT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM1_HT_LOOKUP_DWLEN (1) +union fem_em1_ht_lookup_u { + struct fem_em1_ht_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_HT_HIT_ADDR (0xa048dc) +#define NBL_FEM_EM1_HT_HIT_DEPTH (1) +#define NBL_FEM_EM1_HT_HIT_WIDTH (32) +#define NBL_FEM_EM1_HT_HIT_DWLEN (1) +union fem_em1_ht_hit_u { + struct fem_em1_ht_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_HIT_DWLEN]; +} 
__packed; + +#define NBL_FEM_EM1_TCAM_LOOKUP_ADDR (0xa048e0) +#define NBL_FEM_EM1_TCAM_LOOKUP_DEPTH (1) +#define NBL_FEM_EM1_TCAM_LOOKUP_WIDTH (32) +#define NBL_FEM_EM1_TCAM_LOOKUP_DWLEN (1) +union fem_em1_tcam_lookup_u { + struct fem_em1_tcam_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_TCAM_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_TCAM_HIT_ADDR (0xa048e4) +#define NBL_FEM_EM1_TCAM_HIT_DEPTH (1) +#define NBL_FEM_EM1_TCAM_HIT_WIDTH (32) +#define NBL_FEM_EM1_TCAM_HIT_DWLEN (1) +union fem_em1_tcam_hit_u { + struct fem_em1_tcam_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_KT_LOOKUP_ADDR (0xa048e8) +#define NBL_FEM_EM1_KT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM1_KT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM1_KT_LOOKUP_DWLEN (1) +union fem_em1_kt_lookup_u { + struct fem_em1_kt_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_KT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_KT_HIT_ADDR (0xa048ec) +#define NBL_FEM_EM1_KT_HIT_DEPTH (1) +#define NBL_FEM_EM1_KT_HIT_WIDTH (32) +#define NBL_FEM_EM1_KT_HIT_DWLEN (1) +union fem_em1_kt_hit_u { + struct fem_em1_kt_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_KT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_PP2_REQ_ADDR (0xa04910) +#define NBL_FEM_PP2_REQ_DEPTH (1) +#define NBL_FEM_PP2_REQ_WIDTH (32) +#define NBL_FEM_PP2_REQ_DWLEN (1) +union fem_pp2_req_u { + struct fem_pp2_req { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP2_REQ_DWLEN]; +} __packed; + +#define NBL_FEM_PP2_ALL_RSP_ADDR (0xa04914) +#define NBL_FEM_PP2_ALL_RSP_DEPTH (1) +#define NBL_FEM_PP2_ALL_RSP_WIDTH (32) +#define NBL_FEM_PP2_ALL_RSP_DWLEN (1) +union fem_pp2_all_rsp_u { + struct fem_pp2_all_rsp { + u32 cnt:32; /* [31:00] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP2_ALL_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_PP2_RSP_ADDR (0xa04918) +#define NBL_FEM_PP2_RSP_DEPTH (1) +#define NBL_FEM_PP2_RSP_WIDTH (32) +#define NBL_FEM_PP2_RSP_DWLEN (1) +union fem_pp2_rsp_u { + struct fem_pp2_rsp { + u32 miss_cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 err_cnt:16; /* [31:16] Default:0x0 RCTR */ + } __packed info; + u32 data[NBL_FEM_PP2_RSP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_HT_LOOKUP_ADDR (0xa04938) +#define NBL_FEM_EM2_HT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM2_HT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM2_HT_LOOKUP_DWLEN (1) +union fem_em2_ht_lookup_u { + struct fem_em2_ht_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_HT_HIT_ADDR (0xa0493c) +#define NBL_FEM_EM2_HT_HIT_DEPTH (1) +#define NBL_FEM_EM2_HT_HIT_WIDTH (32) +#define NBL_FEM_EM2_HT_HIT_DWLEN (1) +union fem_em2_ht_hit_u { + struct fem_em2_ht_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_TCAM_LOOKUP_ADDR (0xa04940) +#define NBL_FEM_EM2_TCAM_LOOKUP_DEPTH (1) +#define NBL_FEM_EM2_TCAM_LOOKUP_WIDTH (32) +#define NBL_FEM_EM2_TCAM_LOOKUP_DWLEN (1) +union fem_em2_tcam_lookup_u { + struct 
fem_em2_tcam_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_TCAM_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_TCAM_HIT_ADDR (0xa04944) +#define NBL_FEM_EM2_TCAM_HIT_DEPTH (1) +#define NBL_FEM_EM2_TCAM_HIT_WIDTH (32) +#define NBL_FEM_EM2_TCAM_HIT_DWLEN (1) +union fem_em2_tcam_hit_u { + struct fem_em2_tcam_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_TCAM_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_KT_LOOKUP_ADDR (0xa04948) +#define NBL_FEM_EM2_KT_LOOKUP_DEPTH (1) +#define NBL_FEM_EM2_KT_LOOKUP_WIDTH (32) +#define NBL_FEM_EM2_KT_LOOKUP_DWLEN (1) +union fem_em2_kt_lookup_u { + struct fem_em2_kt_lookup { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_KT_LOOKUP_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_KT_HIT_ADDR (0xa0494c) +#define NBL_FEM_EM2_KT_HIT_DEPTH (1) +#define NBL_FEM_EM2_KT_HIT_WIDTH (32) +#define NBL_FEM_EM2_KT_HIT_DWLEN (1) +union fem_em2_kt_hit_u { + struct fem_em2_kt_hit { + u32 cnt:16; /* [15:00] Default:0x0 RCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_KT_HIT_DWLEN]; +} __packed; + +#define NBL_FEM_AGE_INFO_DROP_ADDR (0xa04950) +#define NBL_FEM_AGE_INFO_DROP_DEPTH (1) +#define NBL_FEM_AGE_INFO_DROP_WIDTH (32) +#define NBL_FEM_AGE_INFO_DROP_DWLEN (1) +union fem_age_info_drop_u { + struct fem_age_info_drop { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_AGE_INFO_DROP_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_PP_KEY_CHANGE_ADDR (0xa04954) +#define NBL_FEM_EM0_PP_KEY_CHANGE_DEPTH (1) +#define NBL_FEM_EM0_PP_KEY_CHANGE_WIDTH (32) +#define NBL_FEM_EM0_PP_KEY_CHANGE_DWLEN (1) +union fem_em0_pp_key_change_u { + struct fem_em0_pp_key_change { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_EM0_PP_KEY_CHANGE_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_PP_KEY_CHANGE_ADDR (0xa04958) +#define NBL_FEM_EM1_PP_KEY_CHANGE_DEPTH (1) +#define NBL_FEM_EM1_PP_KEY_CHANGE_WIDTH (32) +#define NBL_FEM_EM1_PP_KEY_CHANGE_DWLEN (1) +union fem_em1_pp_key_change_u { + struct fem_em1_pp_key_change { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_EM1_PP_KEY_CHANGE_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_PP_KEY_CHANGE_ADDR (0xa0495c) +#define NBL_FEM_EM2_PP_KEY_CHANGE_DEPTH (1) +#define NBL_FEM_EM2_PP_KEY_CHANGE_WIDTH (32) +#define NBL_FEM_EM2_PP_KEY_CHANGE_DWLEN (1) +union fem_em2_pp_key_change_u { + struct fem_em2_pp_key_change { + u32 cnt:32; /* [31:00] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_FEM_EM2_PP_KEY_CHANGE_DWLEN]; +} __packed; + +#define NBL_FEM_BP_STATE_ADDR (0xa04b00) +#define NBL_FEM_BP_STATE_DEPTH (1) +#define NBL_FEM_BP_STATE_WIDTH (32) +#define NBL_FEM_BP_STATE_DWLEN (1) +union fem_bp_state_u { + struct fem_bp_state { + u32 fem_pp0_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp1_bp:1; /* [01:01] Default:0x0 RO */ + u32 fem_pp2_bp:1; /* [02:02] Default:0x0 RO */ + u32 up_cmdq_bp:1; /* [03:03] Default:0x0 RO */ + u32 dn_acl_cmdq_bp:1; /* [04:04] Default:0x0 RO */ + u32 dn_age_msgq_bp:1; /* [05:05] Default:0x0 RO */ + u32 p0_ht0_cpu_acc_bp:1; /* [06:06] Default:0x0 RO */ + u32 p1_ht0_cpu_acc_bp:1; /* [07:07] Default:0x0 RO */ + u32 p2_ht0_cpu_acc_bp:1; /* [08:08] Default:0x0 RO */ + u32 p0_ht1_cpu_acc_bp:1; /* [09:09] Default:0x0 RO */ + u32 
p1_ht1_cpu_acc_bp:1; /* [10:10] Default:0x0 RO */ + u32 p2_ht1_cpu_acc_bp:1; /* [11:11] Default:0x0 RO */ + u32 p0_kt_cpu_acc_bp:1; /* [12:12] Default:0x0 RO */ + u32 p1_kt_cpu_acc_bp:1; /* [13:13] Default:0x0 RO */ + u32 p2_kt_cpu_acc_bp:1; /* [14:14] Default:0x0 RO */ + u32 p0_at_cpu_acc_bp:1; /* [15:15] Default:0x0 RO */ + u32 p1_at_cpu_acc_bp:1; /* [16:16] Default:0x0 RO */ + u32 p2_at_cpu_acc_bp:1; /* [17:17] Default:0x0 RO */ + u32 p0_age_cpu_acc_bp:1; /* [18:18] Default:0x0 RO */ + u32 p1_age_cpu_acc_bp:1; /* [19:19] Default:0x0 RO */ + u32 p2_age_cpu_acc_bp:1; /* [20:20] Default:0x0 RO */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BP_STATE_DWLEN]; +} __packed; + +#define NBL_FEM_BP_HISTORY_ADDR (0xa04b04) +#define NBL_FEM_BP_HISTORY_DEPTH (1) +#define NBL_FEM_BP_HISTORY_WIDTH (32) +#define NBL_FEM_BP_HISTORY_DWLEN (1) +union fem_bp_history_u { + struct fem_bp_history { + u32 fem_pp0_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp1_bp:1; /* [01:01] Default:0x0 RC */ + u32 fem_pp2_bp:1; /* [02:02] Default:0x0 RC */ + u32 up_cmdq_bp:1; /* [03:03] Default:0x0 RC */ + u32 dn_acl_cmdq_bp:1; /* [04:04] Default:0x0 RC */ + u32 dn_age_msgq_bp:1; /* [05:05] Default:0x0 RC */ + u32 p0_ht0_cpu_acc_bp:1; /* [06:06] Default:0x0 RC */ + u32 p1_ht0_cpu_acc_bp:1; /* [07:07] Default:0x0 RC */ + u32 p2_ht0_cpu_acc_bp:1; /* [08:08] Default:0x0 RC */ + u32 p0_ht1_cpu_acc_bp:1; /* [09:09] Default:0x0 RC */ + u32 p1_ht1_cpu_acc_bp:1; /* [10:10] Default:0x0 RC */ + u32 p2_ht1_cpu_acc_bp:1; /* [11:11] Default:0x0 RC */ + u32 p0_kt_cpu_acc_bp:1; /* [12:12] Default:0x0 RC */ + u32 p1_kt_cpu_acc_bp:1; /* [13:13] Default:0x0 RC */ + u32 p2_kt_cpu_acc_bp:1; /* [14:14] Default:0x0 RC */ + u32 p0_at_cpu_acc_bp:1; /* [15:15] Default:0x0 RC */ + u32 p1_at_cpu_acc_bp:1; /* [16:16] Default:0x0 RC */ + u32 p2_at_cpu_acc_bp:1; /* [17:17] Default:0x0 RC */ + u32 p0_age_cpu_acc_bp:1; /* [18:18] Default:0x0 RC */ + u32 p1_age_cpu_acc_bp:1; /* [19:19] Default:0x0 RC */ + u32 p2_age_cpu_acc_bp:1; /* [20:20] Default:0x0 RC */ + u32 rsv:11; /* [31:21] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_LOCK_SEARCH_ADDR (0xa04c00) +#define NBL_FEM_EM0_LOCK_SEARCH_DEPTH (10) +#define NBL_FEM_EM0_LOCK_SEARCH_WIDTH (32) +#define NBL_FEM_EM0_LOCK_SEARCH_DWLEN (1) +union fem_em0_lock_search_u { + struct fem_em0_lock_search { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_LOCK_SEARCH_DWLEN]; +} __packed; +#define NBL_FEM_EM0_LOCK_SEARCH_REG(r) (NBL_FEM_EM0_LOCK_SEARCH_ADDR + \ + (NBL_FEM_EM0_LOCK_SEARCH_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_HT_VALUE_ADDR (0xa04c28) +#define NBL_FEM_EM0_HT_VALUE_DEPTH (1) +#define NBL_FEM_EM0_HT_VALUE_WIDTH (32) +#define NBL_FEM_EM0_HT_VALUE_DWLEN (1) +union fem_em0_ht_value_u { + struct fem_em0_ht_value { + u32 ht0_value:16; /* [15:00] Default:0x0 RO */ + u32 ht1_value:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_VALUE_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_HT_INDEX_ADDR (0xa04c2c) +#define NBL_FEM_EM0_HT_INDEX_DEPTH (1) +#define NBL_FEM_EM0_HT_INDEX_WIDTH (32) +#define NBL_FEM_EM0_HT_INDEX_DWLEN (1) +union fem_em0_ht_index_u { + struct fem_em0_ht_index { + u32 ht0_idx:14; /* [13:00] Default:0x0 RO */ + u32 rsv1:2; /* [15:14] Default:0x0 RO */ + u32 ht1_idx:14; /* [29:16] Default:0x0 RO */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_HT_INDEX_DWLEN]; +} __packed; + +#define 
NBL_FEM_EM1_LOCK_SEARCH_ADDR (0xa04c30) +#define NBL_FEM_EM1_LOCK_SEARCH_DEPTH (10) +#define NBL_FEM_EM1_LOCK_SEARCH_WIDTH (32) +#define NBL_FEM_EM1_LOCK_SEARCH_DWLEN (1) +union fem_em1_lock_search_u { + struct fem_em1_lock_search { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_LOCK_SEARCH_DWLEN]; +} __packed; +#define NBL_FEM_EM1_LOCK_SEARCH_REG(r) (NBL_FEM_EM1_LOCK_SEARCH_ADDR + \ + (NBL_FEM_EM1_LOCK_SEARCH_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_HT_VALUE_ADDR (0xa04c58) +#define NBL_FEM_EM1_HT_VALUE_DEPTH (1) +#define NBL_FEM_EM1_HT_VALUE_WIDTH (32) +#define NBL_FEM_EM1_HT_VALUE_DWLEN (1) +union fem_em1_ht_value_u { + struct fem_em1_ht_value { + u32 ht0_value:16; /* [15:00] Default:0x0 RO */ + u32 ht1_value:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_VALUE_DWLEN]; +} __packed; + +#define NBL_FEM_EM1_HT_INDEX_ADDR (0xa04c5c) +#define NBL_FEM_EM1_HT_INDEX_DEPTH (1) +#define NBL_FEM_EM1_HT_INDEX_WIDTH (32) +#define NBL_FEM_EM1_HT_INDEX_DWLEN (1) +union fem_em1_ht_index_u { + struct fem_em1_ht_index { + u32 ht0_idx:14; /* [13:00] Default:0x0 RO */ + u32 rsv1:2; /* [15:14] Default:0x0 RO */ + u32 ht1_idx:14; /* [29:16] Default:0x0 RO */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_HT_INDEX_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_LOCK_SEARCH_ADDR (0xa04c60) +#define NBL_FEM_EM2_LOCK_SEARCH_DEPTH (10) +#define NBL_FEM_EM2_LOCK_SEARCH_WIDTH (32) +#define NBL_FEM_EM2_LOCK_SEARCH_DWLEN (1) +union fem_em2_lock_search_u { + struct fem_em2_lock_search { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_LOCK_SEARCH_DWLEN]; +} __packed; +#define NBL_FEM_EM2_LOCK_SEARCH_REG(r) (NBL_FEM_EM2_LOCK_SEARCH_ADDR + \ + (NBL_FEM_EM2_LOCK_SEARCH_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_HT_VALUE_ADDR (0xa04c88) +#define NBL_FEM_EM2_HT_VALUE_DEPTH (1) +#define NBL_FEM_EM2_HT_VALUE_WIDTH (32) +#define NBL_FEM_EM2_HT_VALUE_DWLEN (1) +union fem_em2_ht_value_u { + struct fem_em2_ht_value { + u32 ht0_value:16; /* [15:00] Default:0x0 RO */ + u32 ht1_value:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_VALUE_DWLEN]; +} __packed; + +#define NBL_FEM_EM2_HT_INDEX_ADDR (0xa04c8c) +#define NBL_FEM_EM2_HT_INDEX_DEPTH (1) +#define NBL_FEM_EM2_HT_INDEX_WIDTH (32) +#define NBL_FEM_EM2_HT_INDEX_DWLEN (1) +union fem_em2_ht_index_u { + struct fem_em2_ht_index { + u32 ht0_idx:14; /* [13:00] Default:0x0 RO */ + u32 rsv1:2; /* [15:14] Default:0x0 RO */ + u32 ht1_idx:14; /* [29:16] Default:0x0 RO */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_HT_INDEX_DWLEN]; +} __packed; + +#define NBL_FEM_EM0_LOCK_MISS_ADDR (0xa04c90) +#define NBL_FEM_EM0_LOCK_MISS_DEPTH (10) +#define NBL_FEM_EM0_LOCK_MISS_WIDTH (32) +#define NBL_FEM_EM0_LOCK_MISS_DWLEN (1) +union fem_em0_lock_miss_u { + struct fem_em0_lock_miss { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_LOCK_MISS_DWLEN]; +} __packed; +#define NBL_FEM_EM0_LOCK_MISS_REG(r) (NBL_FEM_EM0_LOCK_MISS_ADDR + \ + (NBL_FEM_EM0_LOCK_MISS_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_LOCK_MISS_ADDR (0xa04cb8) +#define NBL_FEM_EM1_LOCK_MISS_DEPTH (10) +#define NBL_FEM_EM1_LOCK_MISS_WIDTH (32) +#define NBL_FEM_EM1_LOCK_MISS_DWLEN (1) +union fem_em1_lock_miss_u { + struct fem_em1_lock_miss { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_LOCK_MISS_DWLEN]; +} __packed; +#define NBL_FEM_EM1_LOCK_MISS_REG(r) 
(NBL_FEM_EM1_LOCK_MISS_ADDR + \ + (NBL_FEM_EM1_LOCK_MISS_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_LOCK_MISS_ADDR (0xa04ce0) +#define NBL_FEM_EM2_LOCK_MISS_DEPTH (10) +#define NBL_FEM_EM2_LOCK_MISS_WIDTH (32) +#define NBL_FEM_EM2_LOCK_MISS_DWLEN (1) +union fem_em2_lock_miss_u { + struct fem_em2_lock_miss { + u32 key:32; /* [31:00] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_LOCK_MISS_DWLEN]; +} __packed; +#define NBL_FEM_EM2_LOCK_MISS_REG(r) (NBL_FEM_EM2_LOCK_MISS_ADDR + \ + (NBL_FEM_EM2_LOCK_MISS_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_PROFILE_TABLE_ADDR (0xa05000) +#define NBL_FEM_EM0_PROFILE_TABLE_DEPTH (16) +#define NBL_FEM_EM0_PROFILE_TABLE_WIDTH (512) +#define NBL_FEM_EM0_PROFILE_TABLE_DWLEN (16) +union fem_em0_profile_table_u { + struct fem_em0_profile_table { + u32 cmd:1; /* [0] Default:0x0 RW */ + u32 key_size:1; /* [1] Default:0x0 RW */ + u32 mask_btm:16; /* [81:2] Default:0x0 RW */ + u32 mask_btm_arr[2]; /* [81:2] Default:0x0 RW */ + u32 hash_sel0:2; /* [83:82] Default:0x0 RW */ + u32 hash_sel1:2; /* [85:84] Default:0x0 RW */ + u32 action0:22; /* [107:86] Default:0x0 RW */ + u32 action1:22; /* [129:108] Default:0x0 RW */ + u32 action2:22; /* [151:130] Default:0x0 RW */ + u32 action3:22; /* [173:152] Default:0x0 RW */ + u32 action4:22; /* [195:174] Default:0x0 RW */ + u32 action5:22; /* [217:196] Default:0x0 RW */ + u32 action6:22; /* [239:218] Default:0x0 RW */ + u32 action7:22; /* [261:240] Default:0x0 RW */ + u32 act_num:4; /* [265:262] Default:0x0 RW */ + u32 vld:1; /* [266] Default:0x0 RW */ + u32 rsv_l:32; /* [511:267] Default:0x0 RO */ + u32 rsv_h:21; /* [511:267] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:267] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_PROFILE_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM0_PROFILE_TABLE_REG(r) (NBL_FEM_EM0_PROFILE_TABLE_ADDR + \ + (NBL_FEM_EM0_PROFILE_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_PROFILE_TABLE_ADDR (0xa06000) +#define NBL_FEM_EM1_PROFILE_TABLE_DEPTH (16) +#define NBL_FEM_EM1_PROFILE_TABLE_WIDTH (512) +#define NBL_FEM_EM1_PROFILE_TABLE_DWLEN (16) +union fem_em1_profile_table_u { + struct fem_em1_profile_table { + u32 cmd:1; /* [0] Default:0x0 RW */ + u32 key_size:1; /* [1] Default:0x0 RW */ + u32 mask_btm:16; /* [81:2] Default:0x0 RW */ + u32 mask_btm_arr[2]; /* [81:2] Default:0x0 RW */ + u32 hash_sel0:2; /* [83:82] Default:0x0 RW */ + u32 hash_sel1:2; /* [85:84] Default:0x0 RW */ + u32 action0:22; /* [107:86] Default:0x0 RW */ + u32 action1:22; /* [129:108] Default:0x0 RW */ + u32 action2:22; /* [151:130] Default:0x0 RW */ + u32 action3:22; /* [173:152] Default:0x0 RW */ + u32 action4:22; /* [195:174] Default:0x0 RW */ + u32 action5:22; /* [217:196] Default:0x0 RW */ + u32 action6:22; /* [239:218] Default:0x0 RW */ + u32 action7:22; /* [261:240] Default:0x0 RW */ + u32 act_num:4; /* [265:262] Default:0x0 RW */ + u32 vld:1; /* [266] Default:0x0 RW */ + u32 rsv_l:32; /* [511:267] Default:0x0 RO */ + u32 rsv_h:21; /* [511:267] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:267] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_PROFILE_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM1_PROFILE_TABLE_REG(r) (NBL_FEM_EM1_PROFILE_TABLE_ADDR + \ + (NBL_FEM_EM1_PROFILE_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_PROFILE_TABLE_ADDR (0xa07000) +#define NBL_FEM_EM2_PROFILE_TABLE_DEPTH (16) +#define NBL_FEM_EM2_PROFILE_TABLE_WIDTH (512) +#define NBL_FEM_EM2_PROFILE_TABLE_DWLEN (16) +union fem_em2_profile_table_u { + struct fem_em2_profile_table { + u32 cmd:1; /* [0] Default:0x0 RW */ + u32 
key_size:1; /* [1] Default:0x0 RW */ + u32 mask_btm:16; /* [81:2] Default:0x0 RW */ + u32 mask_btm_arr[2]; /* [81:2] Default:0x0 RW */ + u32 hash_sel0:2; /* [83:82] Default:0x0 RW */ + u32 hash_sel1:2; /* [85:84] Default:0x0 RW */ + u32 action0:22; /* [107:86] Default:0x0 RW */ + u32 action1:22; /* [129:108] Default:0x0 RW */ + u32 action2:22; /* [151:130] Default:0x0 RW */ + u32 action3:22; /* [173:152] Default:0x0 RW */ + u32 action4:22; /* [195:174] Default:0x0 RW */ + u32 action5:22; /* [217:196] Default:0x0 RW */ + u32 action6:22; /* [239:218] Default:0x0 RW */ + u32 action7:22; /* [261:240] Default:0x0 RW */ + u32 act_num:4; /* [265:262] Default:0x0 RW */ + u32 vld:1; /* [266] Default:0x0 RW */ + u32 rsv_l:32; /* [511:267] Default:0x0 RO */ + u32 rsv_h:21; /* [511:267] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:267] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_PROFILE_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM2_PROFILE_TABLE_REG(r) (NBL_FEM_EM2_PROFILE_TABLE_ADDR + \ + (NBL_FEM_EM2_PROFILE_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_AD_TABLE_ADDR (0xa08000) +#define NBL_FEM_EM0_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM0_AD_TABLE_WIDTH (512) +#define NBL_FEM_EM0_AD_TABLE_DWLEN (16) +union fem_em0_ad_table_u { + struct fem_em0_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv:32; /* [511:352] Default:0x0 RO */ + u32 rsv_arr[4]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_AD_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM0_AD_TABLE_REG(r) (NBL_FEM_EM0_AD_TABLE_ADDR + \ + (NBL_FEM_EM0_AD_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_AD_TABLE_ADDR (0xa09000) +#define NBL_FEM_EM1_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM1_AD_TABLE_WIDTH (512) +#define NBL_FEM_EM1_AD_TABLE_DWLEN (16) +union fem_em1_ad_table_u { + struct fem_em1_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv:32; /* [511:352] Default:0x0 RO */ + u32 rsv_arr[4]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_AD_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM1_AD_TABLE_REG(r) 
(NBL_FEM_EM1_AD_TABLE_ADDR + \ + (NBL_FEM_EM1_AD_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_AD_TABLE_ADDR (0xa0a000) +#define NBL_FEM_EM2_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM2_AD_TABLE_WIDTH (512) +#define NBL_FEM_EM2_AD_TABLE_DWLEN (16) +union fem_em2_ad_table_u { + struct fem_em2_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv:32; /* [511:352] Default:0x0 RO */ + u32 rsv_arr[4]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_AD_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM2_AD_TABLE_REG(r) (NBL_FEM_EM2_AD_TABLE_ADDR + \ + (NBL_FEM_EM2_AD_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM0_TCAM_TABLE_ADDR (0xa0b000) +#define NBL_FEM_EM0_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM0_TCAM_TABLE_WIDTH (256) +#define NBL_FEM_EM0_TCAM_TABLE_DWLEN (8) +union fem_em0_tcam_table_u { + struct fem_em0_tcam_table { + u32 key:32; /* [159:0] Default:0x0 RW */ + u32 key_arr[4]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [255:162] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:162] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM0_TCAM_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM0_TCAM_TABLE_REG(r) (NBL_FEM_EM0_TCAM_TABLE_ADDR + \ + (NBL_FEM_EM0_TCAM_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM1_TCAM_TABLE_ADDR (0xa0c000) +#define NBL_FEM_EM1_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM1_TCAM_TABLE_WIDTH (256) +#define NBL_FEM_EM1_TCAM_TABLE_DWLEN (8) +union fem_em1_tcam_table_u { + struct fem_em1_tcam_table { + u32 key:32; /* [159:0] Default:0x0 RW */ + u32 key_arr[4]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [255:162] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:162] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM1_TCAM_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM1_TCAM_TABLE_REG(r) (NBL_FEM_EM1_TCAM_TABLE_ADDR + \ + (NBL_FEM_EM1_TCAM_TABLE_DWLEN * 4) * (r)) + +#define NBL_FEM_EM2_TCAM_TABLE_ADDR (0xa0d000) +#define NBL_FEM_EM2_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM2_TCAM_TABLE_WIDTH (256) +#define NBL_FEM_EM2_TCAM_TABLE_DWLEN (8) +union fem_em2_tcam_table_u { + struct fem_em2_tcam_table { + u32 key:32; /* [159:0] Default:0x0 RW */ + u32 key_arr[4]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [255:162] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:162] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM2_TCAM_TABLE_DWLEN]; +} __packed; +#define NBL_FEM_EM2_TCAM_TABLE_REG(r) (NBL_FEM_EM2_TCAM_TABLE_ADDR + \ + (NBL_FEM_EM2_TCAM_TABLE_DWLEN * 4) * (r)) + +#endif diff --git 
a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h new file mode 100644 index 0000000000000000000000000000000000000000..416df1273597ab3d3481ca5637f82a81d6941421 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_ipro.h @@ -0,0 +1,1392 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_IPRO_H +#define NBL_IPRO_H 1 + +#include + +#define NBL_IPRO_BASE (0x00B04000) + +#define NBL_IPRO_INT_STATUS_ADDR (0xb04000) +#define NBL_IPRO_INT_STATUS_DEPTH (1) +#define NBL_IPRO_INT_STATUS_WIDTH (32) +#define NBL_IPRO_INT_STATUS_DWLEN (1) +union ipro_int_status_u { + struct ipro_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 cif_err:1; /* [3] Default:0x0 RWC */ + u32 input_err:1; /* [4] Default:0x0 RWC */ + u32 cfg_err:1; /* [5] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_IPRO_INT_MASK_ADDR (0xb04004) +#define NBL_IPRO_INT_MASK_DEPTH (1) +#define NBL_IPRO_INT_MASK_WIDTH (32) +#define NBL_IPRO_INT_MASK_DWLEN (1) +union ipro_int_mask_u { + struct ipro_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 cif_err:1; /* [3] Default:0x0 RW */ + u32 input_err:1; /* [4] Default:0x0 RW */ + u32 cfg_err:1; /* [5] Default:0x0 RW */ + u32 data_ucor_err:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INT_MASK_DWLEN]; +} __packed; + +#define NBL_IPRO_INT_SET_ADDR (0xb04008) +#define NBL_IPRO_INT_SET_DEPTH (1) +#define NBL_IPRO_INT_SET_WIDTH (32) +#define NBL_IPRO_INT_SET_DWLEN (1) +union ipro_int_set_u { + struct ipro_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 cif_err:1; /* [3] Default:0x0 WO */ + u32 input_err:1; /* [4] Default:0x0 WO */ + u32 cfg_err:1; /* [5] Default:0x0 WO */ + u32 data_ucor_err:1; /* [6] Default:0x0 WO */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INT_SET_DWLEN]; +} __packed; + +#define NBL_IPRO_INIT_DONE_ADDR (0xb0400c) +#define NBL_IPRO_INIT_DONE_DEPTH (1) +#define NBL_IPRO_INIT_DONE_WIDTH (32) +#define NBL_IPRO_INIT_DONE_DWLEN (1) +union ipro_init_done_u { + struct ipro_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_IPRO_CIF_ERR_INFO_ADDR (0xb04040) +#define NBL_IPRO_CIF_ERR_INFO_DEPTH (1) +#define NBL_IPRO_CIF_ERR_INFO_WIDTH (32) +#define NBL_IPRO_CIF_ERR_INFO_DWLEN (1) +union ipro_cif_err_info_u { + struct ipro_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_IPRO_INPUT_ERR_INFO_ADDR (0xb04048) +#define NBL_IPRO_INPUT_ERR_INFO_DEPTH (1) +#define NBL_IPRO_INPUT_ERR_INFO_WIDTH (32) +#define NBL_IPRO_INPUT_ERR_INFO_DWLEN (1) +union ipro_input_err_info_u { + struct ipro_input_err_info { + u32 id:2; /* [1:0] 
Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INPUT_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_IPRO_CFG_ERR_INFO_ADDR (0xb04050) +#define NBL_IPRO_CFG_ERR_INFO_DEPTH (1) +#define NBL_IPRO_CFG_ERR_INFO_WIDTH (32) +#define NBL_IPRO_CFG_ERR_INFO_DWLEN (1) +union ipro_cfg_err_info_u { + struct ipro_cfg_err_info { + u32 id:2; /* [1:0] Default:0x0 RO */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_IPRO_CAR_CTRL_ADDR (0xb04100) +#define NBL_IPRO_CAR_CTRL_DEPTH (1) +#define NBL_IPRO_CAR_CTRL_WIDTH (32) +#define NBL_IPRO_CAR_CTRL_DWLEN (1) +union ipro_car_ctrl_u { + struct ipro_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_IPRO_INIT_START_ADDR (0xb04180) +#define NBL_IPRO_INIT_START_DEPTH (1) +#define NBL_IPRO_INIT_START_WIDTH (32) +#define NBL_IPRO_INIT_START_DWLEN (1) +union ipro_init_start_u { + struct ipro_init_start { + u32 init_start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_INIT_START_DWLEN]; +} __packed; + +#define NBL_IPRO_CREDIT_TOKEN_ADDR (0xb041c0) +#define NBL_IPRO_CREDIT_TOKEN_DEPTH (1) +#define NBL_IPRO_CREDIT_TOKEN_WIDTH (32) +#define NBL_IPRO_CREDIT_TOKEN_DWLEN (1) +union ipro_credit_token_u { + struct ipro_credit_token { + u32 up_token_num:8; /* [7:0] Default:0x80 RW */ + u32 down_token_num:8; /* [15:8] Default:0x80 RW */ + u32 up_init_vld:1; /* [16] Default:0x0 WO */ + u32 down_init_vld:1; /* [17] Default:0x0 WO */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_CREDIT_TOKEN_DWLEN]; +} __packed; + +#define NBL_IPRO_AM_SET_FLAG_ADDR (0xb041e0) +#define NBL_IPRO_AM_SET_FLAG_DEPTH (1) +#define NBL_IPRO_AM_SET_FLAG_WIDTH (32) +#define NBL_IPRO_AM_SET_FLAG_DWLEN (1) +union ipro_am_set_flag_u { + struct ipro_am_set_flag { + u32 set_flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_AM_SET_FLAG_DWLEN]; +} __packed; + +#define NBL_IPRO_AM_CLEAR_FLAG_ADDR (0xb041e4) +#define NBL_IPRO_AM_CLEAR_FLAG_DEPTH (1) +#define NBL_IPRO_AM_CLEAR_FLAG_WIDTH (32) +#define NBL_IPRO_AM_CLEAR_FLAG_DWLEN (1) +union ipro_am_clear_flag_u { + struct ipro_am_clear_flag { + u32 clear_flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_AM_CLEAR_FLAG_DWLEN]; +} __packed; + +#define NBL_IPRO_FLAG_OFFSET_0_ADDR (0xb04200) +#define NBL_IPRO_FLAG_OFFSET_0_DEPTH (1) +#define NBL_IPRO_FLAG_OFFSET_0_WIDTH (32) +#define NBL_IPRO_FLAG_OFFSET_0_DWLEN (1) +union ipro_flag_offset_0_u { + struct ipro_flag_offset_0 { + u32 dir_offset_en:1; /* [0] Default:0x1 RW */ + u32 dir_offset:5; /* [5:1] Default:0x00 RW */ + u32 rsv1:2; /* [7:6] Default:0x0 RO */ + u32 phy_flow_offset_en:1; /* [8] Default:0x1 RW */ + u32 phy_flow_offset:5; /* [13:9] Default:0xb RW */ + u32 rsv:18; /* [31:14] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FLAG_OFFSET_0_DWLEN]; +} __packed; + +#define NBL_IPRO_DROP_NXT_STAGE_ADDR (0xb04210) +#define NBL_IPRO_DROP_NXT_STAGE_DEPTH (1) +#define NBL_IPRO_DROP_NXT_STAGE_WIDTH (32) +#define NBL_IPRO_DROP_NXT_STAGE_DWLEN (1) +union ipro_drop_nxt_stage_u { + struct ipro_drop_nxt_stage { + u32 stage:4; /* [3:0] Default:0xf RW */ + u32 rsv:28; /* [31:4] Default:0x0 
RO */ + } __packed info; + u32 data[NBL_IPRO_DROP_NXT_STAGE_DWLEN]; +} __packed; + +#define NBL_IPRO_FWD_ACTION_PRI_ADDR (0xb04220) +#define NBL_IPRO_FWD_ACTION_PRI_DEPTH (1) +#define NBL_IPRO_FWD_ACTION_PRI_WIDTH (32) +#define NBL_IPRO_FWD_ACTION_PRI_DWLEN (1) +union ipro_fwd_action_pri_u { + struct ipro_fwd_action_pri { + u32 dqueue:2; /* [1:0] Default:0x0 RW */ + u32 set_dport:2; /* [3:2] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FWD_ACTION_PRI_DWLEN]; +} __packed; + +#define NBL_IPRO_MTU_CHECK_CTRL_ADDR (0xb0427c) +#define NBL_IPRO_MTU_CHECK_CTRL_DEPTH (1) +#define NBL_IPRO_MTU_CHECK_CTRL_WIDTH (32) +#define NBL_IPRO_MTU_CHECK_CTRL_DWLEN (1) +union ipro_mtu_check_ctrl_u { + struct ipro_mtu_check_ctrl { + u32 set_dport:16; /* [15:0] Default:0xFFFF RW */ + u32 set_dport_pri:2; /* [17:16] Default:0x3 RW */ + u32 proc_done:1; /* [18] Default:0x1 RW */ + u32 rsv:13; /* [31:19] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MTU_CHECK_CTRL_DWLEN]; +} __packed; + +#define NBL_IPRO_MTU_SEL_ADDR (0xb04280) +#define NBL_IPRO_MTU_SEL_DEPTH (8) +#define NBL_IPRO_MTU_SEL_WIDTH (32) +#define NBL_IPRO_MTU_SEL_DWLEN (1) +union ipro_mtu_sel_u { + struct ipro_mtu_sel { + u32 mtu_1:16; /* [15:0] Default:0x0 RW */ + u32 mtu_0:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MTU_SEL_DWLEN]; +} __packed; +#define NBL_IPRO_MTU_SEL_REG(r) (NBL_IPRO_MTU_SEL_ADDR + \ + (NBL_IPRO_MTU_SEL_DWLEN * 4) * (r)) + +#define NBL_IPRO_UDL_PKT_FLT_DMAC_ADDR (0xb04300) +#define NBL_IPRO_UDL_PKT_FLT_DMAC_DEPTH (16) +#define NBL_IPRO_UDL_PKT_FLT_DMAC_WIDTH (64) +#define NBL_IPRO_UDL_PKT_FLT_DMAC_DWLEN (2) +union ipro_udl_pkt_flt_dmac_u { + struct ipro_udl_pkt_flt_dmac { + u32 dmac_l:32; /* [47:0] Default:0x0 RW */ + u32 dmac_h:16; /* [47:0] Default:0x0 RW */ + u32 rsv:16; /* [63:48] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_DMAC_DWLEN]; +} __packed; +#define NBL_IPRO_UDL_PKT_FLT_DMAC_REG(r) (NBL_IPRO_UDL_PKT_FLT_DMAC_ADDR + \ + (NBL_IPRO_UDL_PKT_FLT_DMAC_DWLEN * 4) * (r)) + +#define NBL_IPRO_UDL_PKT_FLT_VLAN_ADDR (0xb04380) +#define NBL_IPRO_UDL_PKT_FLT_VLAN_DEPTH (16) +#define NBL_IPRO_UDL_PKT_FLT_VLAN_WIDTH (32) +#define NBL_IPRO_UDL_PKT_FLT_VLAN_DWLEN (1) +union ipro_udl_pkt_flt_vlan_u { + struct ipro_udl_pkt_flt_vlan { + u32 vlan_0:12; /* [11:0] Default:0x0 RW */ + u32 vlan_1:12; /* [23:12] Default:0x0 RW */ + u32 vlan_layer:2; /* [25:24] Default:0x0 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_VLAN_DWLEN]; +} __packed; +#define NBL_IPRO_UDL_PKT_FLT_VLAN_REG(r) (NBL_IPRO_UDL_PKT_FLT_VLAN_ADDR + \ + (NBL_IPRO_UDL_PKT_FLT_VLAN_DWLEN * 4) * (r)) + +#define NBL_IPRO_UDL_PKT_FLT_CTRL_ADDR (0xb043c0) +#define NBL_IPRO_UDL_PKT_FLT_CTRL_DEPTH (1) +#define NBL_IPRO_UDL_PKT_FLT_CTRL_WIDTH (32) +#define NBL_IPRO_UDL_PKT_FLT_CTRL_DWLEN (1) +union ipro_udl_pkt_flt_ctrl_u { + struct ipro_udl_pkt_flt_ctrl { + u32 vld:16; /* [15:0] Default:0x0 RW */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_CTRL_DWLEN]; +} __packed; + +#define NBL_IPRO_UDL_PKT_FLT_ACTION_ADDR (0xb043c4) +#define NBL_IPRO_UDL_PKT_FLT_ACTION_DEPTH (1) +#define NBL_IPRO_UDL_PKT_FLT_ACTION_WIDTH (32) +#define NBL_IPRO_UDL_PKT_FLT_ACTION_DWLEN (1) +union ipro_udl_pkt_flt_action_u { + struct ipro_udl_pkt_flt_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 
proc_done:1; /* [14] Default:0x0 RW */ + u32 set_dport_en:1; /* [15] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_UDL_PKT_FLT_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_ADDR (0xb043e0) +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_DEPTH (1) +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_WIDTH (32) +#define NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_DWLEN (1) +union ipro_anti_fake_addr_errcode_u { + struct ipro_anti_fake_addr_errcode { + u32 num:4; /* [3:0] Default:0xA RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_DWLEN]; +} __packed; + +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_ADDR (0xb043e4) +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_DEPTH (1) +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_WIDTH (32) +#define NBL_IPRO_ANTI_FAKE_ADDR_ACTION_DWLEN (1) +union ipro_anti_fake_addr_action_u { + struct ipro_anti_fake_addr_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x1 RW */ + u32 set_dport_en:1; /* [15] Default:0x1 RW */ + u32 set_dport:16; /* [31:16] Default:0xFFFF RW */ + } __packed info; + u32 data[NBL_IPRO_ANTI_FAKE_ADDR_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_ADDR (0xb043f0) +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_DEPTH (1) +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_WIDTH (32) +#define NBL_IPRO_VLAN_NUM_CHK_ERRCODE_DWLEN (1) +union ipro_vlan_num_chk_errcode_u { + struct ipro_vlan_num_chk_errcode { + u32 num:4; /* [3:0] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_VLAN_NUM_CHK_ERRCODE_DWLEN]; +} __packed; + +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_ADDR (0xb043f4) +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_DEPTH (1) +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_WIDTH (32) +#define NBL_IPRO_VLAN_NUM_CHK_ACTION_DWLEN (1) +union ipro_vlan_num_chk_action_u { + struct ipro_vlan_num_chk_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x1 RW */ + u32 set_dport_en:1; /* [15] Default:0x1 RW */ + u32 set_dport:16; /* [31:16] Default:0xFFFF RW */ + } __packed info; + u32 data[NBL_IPRO_VLAN_NUM_CHK_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_TCP_STATE_PROBE_ADDR (0xb04400) +#define NBL_IPRO_TCP_STATE_PROBE_DEPTH (1) +#define NBL_IPRO_TCP_STATE_PROBE_WIDTH (32) +#define NBL_IPRO_TCP_STATE_PROBE_DWLEN (1) +union ipro_tcp_state_probe_u { + struct ipro_tcp_state_probe { + u32 up_chk_en:1; /* [0] Default:0x0 RW */ + u32 dn_chk_en:1; /* [1] Default:0x0 RW */ + u32 rsv:14; /* [15:2] Default:0x0 RO */ + u32 up_bitmap:8; /* [23:16] Default:0x0 RW */ + u32 dn_bitmap:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_PROBE_DWLEN]; +} __packed; + +#define NBL_IPRO_TCP_STATE_UP_ACTION_ADDR (0xb04404) +#define NBL_IPRO_TCP_STATE_UP_ACTION_DEPTH (1) +#define NBL_IPRO_TCP_STATE_UP_ACTION_WIDTH (32) +#define NBL_IPRO_TCP_STATE_UP_ACTION_DWLEN (1) +union ipro_tcp_state_up_action_u { + struct ipro_tcp_state_up_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x0 RW */ + u32 set_dport_en:1; /* [15] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_UP_ACTION_DWLEN]; +} 
__packed; + +#define NBL_IPRO_TCP_STATE_DN_ACTION_ADDR (0xb04408) +#define NBL_IPRO_TCP_STATE_DN_ACTION_DEPTH (1) +#define NBL_IPRO_TCP_STATE_DN_ACTION_WIDTH (32) +#define NBL_IPRO_TCP_STATE_DN_ACTION_DWLEN (1) +union ipro_tcp_state_dn_action_u { + struct ipro_tcp_state_dn_action { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 rsv:2; /* [13:12] Default:0x0 RO */ + u32 proc_done:1; /* [14] Default:0x0 RW */ + u32 set_dport_en:1; /* [15] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_DN_ACTION_DWLEN]; +} __packed; + +#define NBL_IPRO_FWD_ACTION_ID_ADDR (0xb04440) +#define NBL_IPRO_FWD_ACTION_ID_DEPTH (1) +#define NBL_IPRO_FWD_ACTION_ID_WIDTH (32) +#define NBL_IPRO_FWD_ACTION_ID_DWLEN (1) +union ipro_fwd_action_id_u { + struct ipro_fwd_action_id { + u32 mirror_index:6; /* [5:0] Default:0x8 RW */ + u32 dport:6; /* [11:6] Default:0x9 RW */ + u32 dqueue:6; /* [17:12] Default:0xA RW */ + u32 car:6; /* [23:18] Default:0x5 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FWD_ACTION_ID_DWLEN]; +} __packed; + +#define NBL_IPRO_PED_ACTION_ID_ADDR (0xb04448) +#define NBL_IPRO_PED_ACTION_ID_DEPTH (1) +#define NBL_IPRO_PED_ACTION_ID_WIDTH (32) +#define NBL_IPRO_PED_ACTION_ID_DWLEN (1) +union ipro_ped_action_id_u { + struct ipro_ped_action_id { + u32 encap:6; /* [5:0] Default:0x2E RW */ + u32 decap:6; /* [11:6] Default:0x2F RW */ + u32 rsv:20; /* [31:12] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_PED_ACTION_ID_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_HIT_ACTION_ADDR (0xb04510) +#define NBL_IPRO_MNG_HIT_ACTION_DEPTH (8) +#define NBL_IPRO_MNG_HIT_ACTION_WIDTH (32) +#define NBL_IPRO_MNG_HIT_ACTION_DWLEN (1) +union ipro_mng_hit_action_u { + struct ipro_mng_hit_action { + u32 data:24; /* [23:0] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_HIT_ACTION_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_HIT_ACTION_REG(r) (NBL_IPRO_MNG_HIT_ACTION_ADDR + \ + (NBL_IPRO_MNG_HIT_ACTION_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DECISION_FLT_0_ADDR (0xb04530) +#define NBL_IPRO_MNG_DECISION_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_DECISION_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_DECISION_FLT_0_DWLEN (1) +union ipro_mng_decision_flt_0_u { + struct ipro_mng_decision_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 pkt_len_and:1; /* [1] Default:0x0 RW */ + u32 flow_ctrl_and:1; /* [2] Default:0x0 RW */ + u32 ncsi_and:1; /* [3] Default:0x0 RW */ + u32 eth_id:2; /* [5:4] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_DECISION_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DECISION_FLT_0_REG(r) (NBL_IPRO_MNG_DECISION_FLT_0_ADDR + \ + (NBL_IPRO_MNG_DECISION_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DECISION_FLT_1_ADDR (0xb04540) +#define NBL_IPRO_MNG_DECISION_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_DECISION_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_DECISION_FLT_1_DWLEN (1) +union ipro_mng_decision_flt_1_u { + struct ipro_mng_decision_flt_1 { + u32 dmac_and:4; /* [3:0] Default:0x0 RW */ + u32 brcast_and:1; /* [4] Default:0x0 RW */ + u32 mulcast_and:1; /* [5] Default:0x0 RW */ + u32 vlan_and:8; /* [13:6] Default:0x0 RW */ + u32 ipv4_dip_and:4; /* [17:14] Default:0x0 RW */ + u32 ipv6_dip_and:4; /* [21:18] Default:0x0 RW */ + u32 ethertype_and:4; /* [25:22] Default:0x0 RW */ + u32 brcast_or:1; /* [26] Default:0x0 RW */ + u32 icmpv4_or:1; /* [27] Default:0x0 
RW */ + u32 mld_or:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_DECISION_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DECISION_FLT_1_REG(r) (NBL_IPRO_MNG_DECISION_FLT_1_ADDR + \ + (NBL_IPRO_MNG_DECISION_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DECISION_FLT_2_ADDR (0xb04550) +#define NBL_IPRO_MNG_DECISION_FLT_2_DEPTH (4) +#define NBL_IPRO_MNG_DECISION_FLT_2_WIDTH (32) +#define NBL_IPRO_MNG_DECISION_FLT_2_DWLEN (1) +union ipro_mng_decision_flt_2_u { + struct ipro_mng_decision_flt_2 { + u32 neighbor_or:4; /* [3:0] Default:0x0 RW */ + u32 port_or:16; /* [19:4] Default:0x0 RW */ + u32 ethertype_or:4; /* [23:20] Default:0x0 RW */ + u32 arp_rsp_or:2; /* [25:24] Default:0x0 RW */ + u32 arp_req_or:2; /* [27:26] Default:0x0 RW */ + u32 dmac_or:4; /* [31:28] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_DECISION_FLT_2_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DECISION_FLT_2_REG(r) (NBL_IPRO_MNG_DECISION_FLT_2_ADDR + \ + (NBL_IPRO_MNG_DECISION_FLT_2_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DMAC_FLT_0_ADDR (0xb04560) +#define NBL_IPRO_MNG_DMAC_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_DMAC_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_DMAC_FLT_0_DWLEN (1) +union ipro_mng_dmac_flt_0_u { + struct ipro_mng_dmac_flt_0 { + u32 data:16; /* [15:0] Default:0x0 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_DMAC_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DMAC_FLT_0_REG(r) (NBL_IPRO_MNG_DMAC_FLT_0_ADDR + \ + (NBL_IPRO_MNG_DMAC_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_DMAC_FLT_1_ADDR (0xb04570) +#define NBL_IPRO_MNG_DMAC_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_DMAC_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_DMAC_FLT_1_DWLEN (1) +union ipro_mng_dmac_flt_1_u { + struct ipro_mng_dmac_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_DMAC_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_DMAC_FLT_1_REG(r) (NBL_IPRO_MNG_DMAC_FLT_1_ADDR + \ + (NBL_IPRO_MNG_DMAC_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_VLAN_FLT_ADDR (0xb04580) +#define NBL_IPRO_MNG_VLAN_FLT_DEPTH (8) +#define NBL_IPRO_MNG_VLAN_FLT_WIDTH (32) +#define NBL_IPRO_MNG_VLAN_FLT_DWLEN (1) +union ipro_mng_vlan_flt_u { + struct ipro_mng_vlan_flt { + u32 data:12; /* [11:0] Default:0x0 RW */ + u32 sel:1; /* [12] Default:0x0 RW */ + u32 nontag:1; /* [13] Default:0x0 RW */ + u32 en:1; /* [14] Default:0x0 RW */ + u32 rsv:17; /* [31:15] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_VLAN_FLT_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_VLAN_FLT_REG(r) (NBL_IPRO_MNG_VLAN_FLT_ADDR + \ + (NBL_IPRO_MNG_VLAN_FLT_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ETHERTYPE_FLT_ADDR (0xb045a0) +#define NBL_IPRO_MNG_ETHERTYPE_FLT_DEPTH (4) +#define NBL_IPRO_MNG_ETHERTYPE_FLT_WIDTH (32) +#define NBL_IPRO_MNG_ETHERTYPE_FLT_DWLEN (1) +union ipro_mng_ethertype_flt_u { + struct ipro_mng_ethertype_flt { + u32 data:16; /* [15:0] Default:0x0 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_ETHERTYPE_FLT_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ETHERTYPE_FLT_REG(r) (NBL_IPRO_MNG_ETHERTYPE_FLT_ADDR + \ + (NBL_IPRO_MNG_ETHERTYPE_FLT_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV4_FLT_0_ADDR (0xb045b0) +#define NBL_IPRO_MNG_IPV4_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_IPV4_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_IPV4_FLT_0_DWLEN (1) +union ipro_mng_ipv4_flt_0_u { + struct ipro_mng_ipv4_flt_0 { + u32 en:1; /* [0] Default:0x0 
RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV4_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV4_FLT_0_REG(r) (NBL_IPRO_MNG_IPV4_FLT_0_ADDR + \ + (NBL_IPRO_MNG_IPV4_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV4_FLT_1_ADDR (0xb045c0) +#define NBL_IPRO_MNG_IPV4_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_IPV4_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_IPV4_FLT_1_DWLEN (1) +union ipro_mng_ipv4_flt_1_u { + struct ipro_mng_ipv4_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV4_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV4_FLT_1_REG(r) (NBL_IPRO_MNG_IPV4_FLT_1_ADDR + \ + (NBL_IPRO_MNG_IPV4_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_0_ADDR (0xb04600) +#define NBL_IPRO_MNG_IPV6_FLT_0_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_0_DWLEN (1) +union ipro_mng_ipv6_flt_0_u { + struct ipro_mng_ipv6_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 mask:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_0_REG(r) (NBL_IPRO_MNG_IPV6_FLT_0_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_1_ADDR (0xb04610) +#define NBL_IPRO_MNG_IPV6_FLT_1_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_1_DWLEN (1) +union ipro_mng_ipv6_flt_1_u { + struct ipro_mng_ipv6_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_1_REG(r) (NBL_IPRO_MNG_IPV6_FLT_1_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_2_ADDR (0xb04620) +#define NBL_IPRO_MNG_IPV6_FLT_2_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_2_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_2_DWLEN (1) +union ipro_mng_ipv6_flt_2_u { + struct ipro_mng_ipv6_flt_2 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_2_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_2_REG(r) (NBL_IPRO_MNG_IPV6_FLT_2_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_2_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_3_ADDR (0xb04630) +#define NBL_IPRO_MNG_IPV6_FLT_3_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_3_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_3_DWLEN (1) +union ipro_mng_ipv6_flt_3_u { + struct ipro_mng_ipv6_flt_3 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_3_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_3_REG(r) (NBL_IPRO_MNG_IPV6_FLT_3_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_3_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_IPV6_FLT_4_ADDR (0xb04640) +#define NBL_IPRO_MNG_IPV6_FLT_4_DEPTH (4) +#define NBL_IPRO_MNG_IPV6_FLT_4_WIDTH (32) +#define NBL_IPRO_MNG_IPV6_FLT_4_DWLEN (1) +union ipro_mng_ipv6_flt_4_u { + struct ipro_mng_ipv6_flt_4 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_IPV6_FLT_4_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_IPV6_FLT_4_REG(r) (NBL_IPRO_MNG_IPV6_FLT_4_ADDR + \ + (NBL_IPRO_MNG_IPV6_FLT_4_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_PORT_FLT_ADDR (0xb04650) +#define NBL_IPRO_MNG_PORT_FLT_DEPTH (16) +#define NBL_IPRO_MNG_PORT_FLT_WIDTH (32) +#define NBL_IPRO_MNG_PORT_FLT_DWLEN (1) +union ipro_mng_port_flt_u { + struct ipro_mng_port_flt { + u32 data:16; /* [15:0] Default:0x0 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 mode:1; /* [17] Default:0x0 RW */ 
+ u32 tcp:1; /* [18] Default:0x0 RW */ + u32 udp:1; /* [19] Default:0x0 RW */ + u32 rsv:12; /* [31:20] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_PORT_FLT_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_PORT_FLT_REG(r) (NBL_IPRO_MNG_PORT_FLT_ADDR + \ + (NBL_IPRO_MNG_PORT_FLT_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_ADDR (0xb04690) +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_DEPTH (2) +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_DWLEN (1) +union ipro_mng_arp_req_flt_0_u { + struct ipro_mng_arp_req_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 op:16; /* [31:16] Default:0x1 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_REQ_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_REQ_FLT_0_REG(r) (NBL_IPRO_MNG_ARP_REQ_FLT_0_ADDR + \ + (NBL_IPRO_MNG_ARP_REQ_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_ADDR (0xb046a0) +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_DEPTH (2) +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_DWLEN (1) +union ipro_mng_arp_req_flt_1_u { + struct ipro_mng_arp_req_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_REQ_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_REQ_FLT_1_REG(r) (NBL_IPRO_MNG_ARP_REQ_FLT_1_ADDR + \ + (NBL_IPRO_MNG_ARP_REQ_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_ADDR (0xb046b0) +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_DEPTH (2) +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_WIDTH (32) +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_DWLEN (1) +union ipro_mng_arp_rsp_flt_0_u { + struct ipro_mng_arp_rsp_flt_0 { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 op:16; /* [31:16] Default:0x2 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_RSP_FLT_0_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_RSP_FLT_0_REG(r) (NBL_IPRO_MNG_ARP_RSP_FLT_0_ADDR + \ + (NBL_IPRO_MNG_ARP_RSP_FLT_0_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_ADDR (0xb046c0) +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_DEPTH (2) +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_WIDTH (32) +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_DWLEN (1) +union ipro_mng_arp_rsp_flt_1_u { + struct ipro_mng_arp_rsp_flt_1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_MNG_ARP_RSP_FLT_1_DWLEN]; +} __packed; +#define NBL_IPRO_MNG_ARP_RSP_FLT_1_REG(r) (NBL_IPRO_MNG_ARP_RSP_FLT_1_ADDR + \ + (NBL_IPRO_MNG_ARP_RSP_FLT_1_DWLEN * 4) * (r)) + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_ADDR (0xb046d0) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_DEPTH (1) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_86_DWLEN (1) +union ipro_mng_neighbor_flt_86_u { + struct ipro_mng_neighbor_flt_86 { + u32 data:8; /* [7:0] Default:0x86 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_86_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_ADDR (0xb046d4) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_DEPTH (1) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_87_DWLEN (1) +union ipro_mng_neighbor_flt_87_u { + struct ipro_mng_neighbor_flt_87 { + u32 data:8; /* [7:0] Default:0x87 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_87_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_ADDR (0xb046d8) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_DEPTH (1) 
+#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_88_DWLEN (1) +union ipro_mng_neighbor_flt_88_u { + struct ipro_mng_neighbor_flt_88 { + u32 data:8; /* [7:0] Default:0x88 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_88_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_ADDR (0xb046dc) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_DEPTH (1) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_WIDTH (32) +#define NBL_IPRO_MNG_NEIGHBOR_FLT_89_DWLEN (1) +union ipro_mng_neighbor_flt_89_u { + struct ipro_mng_neighbor_flt_89 { + u32 data:8; /* [7:0] Default:0x89 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NEIGHBOR_FLT_89_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_82_ADDR (0xb046e0) +#define NBL_IPRO_MNG_MLD_FLT_82_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_82_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_82_DWLEN (1) +union ipro_mng_mld_flt_82_u { + struct ipro_mng_mld_flt_82 { + u32 data:8; /* [7:0] Default:0x82 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_82_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_83_ADDR (0xb046e4) +#define NBL_IPRO_MNG_MLD_FLT_83_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_83_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_83_DWLEN (1) +union ipro_mng_mld_flt_83_u { + struct ipro_mng_mld_flt_83 { + u32 data:8; /* [7:0] Default:0x83 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_83_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_84_ADDR (0xb046e8) +#define NBL_IPRO_MNG_MLD_FLT_84_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_84_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_84_DWLEN (1) +union ipro_mng_mld_flt_84_u { + struct ipro_mng_mld_flt_84 { + u32 data:8; /* [7:0] Default:0x84 RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_84_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MLD_FLT_8F_ADDR (0xb046ec) +#define NBL_IPRO_MNG_MLD_FLT_8F_DEPTH (1) +#define NBL_IPRO_MNG_MLD_FLT_8F_WIDTH (32) +#define NBL_IPRO_MNG_MLD_FLT_8F_DWLEN (1) +union ipro_mng_mld_flt_8f_u { + struct ipro_mng_mld_flt_8f { + u32 data:8; /* [7:0] Default:0x8f RW */ + u32 en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MLD_FLT_8F_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_ICMPV4_FLT_ADDR (0xb046f0) +#define NBL_IPRO_MNG_ICMPV4_FLT_DEPTH (1) +#define NBL_IPRO_MNG_ICMPV4_FLT_WIDTH (32) +#define NBL_IPRO_MNG_ICMPV4_FLT_DWLEN (1) +union ipro_mng_icmpv4_flt_u { + struct ipro_mng_icmpv4_flt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_ICMPV4_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_BRCAST_FLT_ADDR (0xb04700) +#define NBL_IPRO_MNG_BRCAST_FLT_DEPTH (1) +#define NBL_IPRO_MNG_BRCAST_FLT_WIDTH (32) +#define NBL_IPRO_MNG_BRCAST_FLT_DWLEN (1) +union ipro_mng_brcast_flt_u { + struct ipro_mng_brcast_flt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_BRCAST_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_MULCAST_FLT_ADDR (0xb04704) +#define NBL_IPRO_MNG_MULCAST_FLT_DEPTH (1) +#define NBL_IPRO_MNG_MULCAST_FLT_WIDTH (32) +#define 
NBL_IPRO_MNG_MULCAST_FLT_DWLEN (1) +union ipro_mng_mulcast_flt_u { + struct ipro_mng_mulcast_flt { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_MULCAST_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_ADDR (0xb04710) +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_DEPTH (1) +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_WIDTH (32) +#define NBL_IPRO_MNG_FLOW_CTRL_FLT_DWLEN (1) +union ipro_mng_flow_ctrl_flt_u { + struct ipro_mng_flow_ctrl_flt { + u32 data:16; /* [15:0] Default:0x8808 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 bow:1; /* [17] Default:0x0 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_FLOW_CTRL_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_NCSI_FLT_ADDR (0xb04714) +#define NBL_IPRO_MNG_NCSI_FLT_DEPTH (1) +#define NBL_IPRO_MNG_NCSI_FLT_WIDTH (32) +#define NBL_IPRO_MNG_NCSI_FLT_DWLEN (1) +union ipro_mng_ncsi_flt_u { + struct ipro_mng_ncsi_flt { + u32 data:16; /* [15:0] Default:0x88F8 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 bow:1; /* [17] Default:0x1 RW */ + u32 rsv:14; /* [31:18] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_NCSI_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_PKT_LEN_FLT_ADDR (0xb04720) +#define NBL_IPRO_MNG_PKT_LEN_FLT_DEPTH (1) +#define NBL_IPRO_MNG_PKT_LEN_FLT_WIDTH (32) +#define NBL_IPRO_MNG_PKT_LEN_FLT_DWLEN (1) +union ipro_mng_pkt_len_flt_u { + struct ipro_mng_pkt_len_flt { + u32 max:16; /* [15:0] Default:0x800 RW */ + u32 en:1; /* [16] Default:0x0 RW */ + u32 rsv:15; /* [31:17] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_PKT_LEN_FLT_DWLEN]; +} __packed; + +#define NBL_IPRO_FLOW_STOP_ADDR (0xb04810) +#define NBL_IPRO_FLOW_STOP_DEPTH (1) +#define NBL_IPRO_FLOW_STOP_WIDTH (32) +#define NBL_IPRO_FLOW_STOP_DWLEN (1) +union ipro_flow_stop_u { + struct ipro_flow_stop { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_FLOW_STOP_DWLEN]; +} __packed; + +#define NBL_IPRO_TOKEN_NUM_ADDR (0xb04814) +#define NBL_IPRO_TOKEN_NUM_DEPTH (1) +#define NBL_IPRO_TOKEN_NUM_WIDTH (32) +#define NBL_IPRO_TOKEN_NUM_DWLEN (1) +union ipro_token_num_u { + struct ipro_token_num { + u32 dn_cnt:8; /* [7:0] Default:0x80 RO */ + u32 up_cnt:8; /* [15:8] Default:0x80 RO */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_TOKEN_NUM_DWLEN]; +} __packed; + +#define NBL_IPRO_BYPASS_ADDR (0xb04818) +#define NBL_IPRO_BYPASS_DEPTH (1) +#define NBL_IPRO_BYPASS_WIDTH (32) +#define NBL_IPRO_BYPASS_DWLEN (1) +union ipro_bypass_u { + struct ipro_bypass { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_BYPASS_DWLEN]; +} __packed; + +#define NBL_IPRO_RR_REQ_MASK_ADDR (0xb0481c) +#define NBL_IPRO_RR_REQ_MASK_DEPTH (1) +#define NBL_IPRO_RR_REQ_MASK_WIDTH (32) +#define NBL_IPRO_RR_REQ_MASK_DWLEN (1) +union ipro_rr_req_mask_u { + struct ipro_rr_req_mask { + u32 dn:1; /* [0] Default:0x0 RW */ + u32 up:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_RR_REQ_MASK_DWLEN]; +} __packed; + +#define NBL_IPRO_BP_STATE_ADDR (0xb04828) +#define NBL_IPRO_BP_STATE_DEPTH (1) +#define NBL_IPRO_BP_STATE_WIDTH (32) +#define NBL_IPRO_BP_STATE_DWLEN (1) +union ipro_bp_state_u { + struct ipro_bp_state { + u32 pp_up_link_fc:1; /* [0] Default:0x0 RO */ + u32 pp_dn_link_fc:1; /* [1] Default:0x0 RO */ + u32 pp_up_creadit:1; /* [2] Default:0x0 
RO */ + u32 pp_dn_creadit:1; /* [3] Default:0x0 RO */ + u32 mcc_up_creadit:1; /* [4] Default:0x0 RO */ + u32 mcc_dn_creadit:1; /* [5] Default:0x0 RO */ + u32 pp_rdy:1; /* [6] Default:0x1 RO */ + u32 dn_rdy:1; /* [7] Default:0x1 RO */ + u32 up_rdy:1; /* [8] Default:0x1 RO */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_BP_STATE_DWLEN]; +} __packed; + +#define NBL_IPRO_BP_HISTORY_ADDR (0xb0482c) +#define NBL_IPRO_BP_HISTORY_DEPTH (1) +#define NBL_IPRO_BP_HISTORY_WIDTH (32) +#define NBL_IPRO_BP_HISTORY_DWLEN (1) +union ipro_bp_history_u { + struct ipro_bp_history { + u32 pp_rdy:1; /* [0] Default:0x0 RC */ + u32 dn_rdy:1; /* [1] Default:0x0 RC */ + u32 up_rdy:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_IPRO_ERRCODE_TBL_DROP_ADDR (0xb0486c) +#define NBL_IPRO_ERRCODE_TBL_DROP_DEPTH (1) +#define NBL_IPRO_ERRCODE_TBL_DROP_WIDTH (32) +#define NBL_IPRO_ERRCODE_TBL_DROP_DWLEN (1) +union ipro_errcode_tbl_drop_u { + struct ipro_errcode_tbl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ERRCODE_TBL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_SPORT_TBL_DROP_ADDR (0xb04870) +#define NBL_IPRO_SPORT_TBL_DROP_DEPTH (1) +#define NBL_IPRO_SPORT_TBL_DROP_WIDTH (32) +#define NBL_IPRO_SPORT_TBL_DROP_DWLEN (1) +union ipro_sport_tbl_drop_u { + struct ipro_sport_tbl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_SPORT_TBL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_PTYPE_TBL_DROP_ADDR (0xb04874) +#define NBL_IPRO_PTYPE_TBL_DROP_DEPTH (1) +#define NBL_IPRO_PTYPE_TBL_DROP_WIDTH (32) +#define NBL_IPRO_PTYPE_TBL_DROP_DWLEN (1) +union ipro_ptype_tbl_drop_u { + struct ipro_ptype_tbl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_PTYPE_TBL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_UDL_DROP_ADDR (0xb04878) +#define NBL_IPRO_UDL_DROP_DEPTH (1) +#define NBL_IPRO_UDL_DROP_WIDTH (32) +#define NBL_IPRO_UDL_DROP_DWLEN (1) +union ipro_udl_drop_u { + struct ipro_udl_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UDL_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_ANTIFAKE_DROP_ADDR (0xb0487c) +#define NBL_IPRO_ANTIFAKE_DROP_DEPTH (1) +#define NBL_IPRO_ANTIFAKE_DROP_WIDTH (32) +#define NBL_IPRO_ANTIFAKE_DROP_DWLEN (1) +union ipro_antifake_drop_u { + struct ipro_antifake_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ANTIFAKE_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_VLAN_NUM_DROP_ADDR (0xb04880) +#define NBL_IPRO_VLAN_NUM_DROP_DEPTH (1) +#define NBL_IPRO_VLAN_NUM_DROP_WIDTH (32) +#define NBL_IPRO_VLAN_NUM_DROP_DWLEN (1) +union ipro_vlan_num_drop_u { + struct ipro_vlan_num_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_VLAN_NUM_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_TCP_STATE_DROP_ADDR (0xb04884) +#define NBL_IPRO_TCP_STATE_DROP_DEPTH (1) +#define NBL_IPRO_TCP_STATE_DROP_WIDTH (32) +#define NBL_IPRO_TCP_STATE_DROP_DWLEN (1) +union ipro_tcp_state_drop_u { + struct ipro_tcp_state_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 
RO */ + } __packed info; + u32 data[NBL_IPRO_TCP_STATE_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_RAM_ERR_DROP_ADDR (0xb04888) +#define NBL_IPRO_RAM_ERR_DROP_DEPTH (1) +#define NBL_IPRO_RAM_ERR_DROP_WIDTH (32) +#define NBL_IPRO_RAM_ERR_DROP_DWLEN (1) +union ipro_ram_err_drop_u { + struct ipro_ram_err_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_RAM_ERR_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_KG_MISS_ADDR (0xb0488c) +#define NBL_IPRO_KG_MISS_DEPTH (1) +#define NBL_IPRO_KG_MISS_WIDTH (32) +#define NBL_IPRO_KG_MISS_DWLEN (1) +union ipro_kg_miss_u { + struct ipro_kg_miss { + u32 drop_cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 cnt:16; /* [31:16] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_IPRO_KG_MISS_DWLEN]; +} __packed; + +#define NBL_IPRO_MNG_DROP_ADDR (0xb04890) +#define NBL_IPRO_MNG_DROP_DEPTH (1) +#define NBL_IPRO_MNG_DROP_WIDTH (32) +#define NBL_IPRO_MNG_DROP_DWLEN (1) +union ipro_mng_drop_u { + struct ipro_mng_drop { + u32 cnt:16; /* [15:0] Default:0x0 SCTR */ + u32 rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_MNG_DROP_DWLEN]; +} __packed; + +#define NBL_IPRO_MTU_CHECK_DROP_ADDR (0xb04900) +#define NBL_IPRO_MTU_CHECK_DROP_DEPTH (256) +#define NBL_IPRO_MTU_CHECK_DROP_WIDTH (32) +#define NBL_IPRO_MTU_CHECK_DROP_DWLEN (1) +union ipro_mtu_check_drop_u { + struct ipro_mtu_check_drop { + u32 vsi_3:8; /* [7:0] Default:0x0 SCTR */ + u32 vsi_2:8; /* [15:8] Default:0x0 SCTR */ + u32 vsi_1:8; /* [23:16] Default:0x0 SCTR */ + u32 vsi_0:8; /* [31:24] Default:0x0 SCTR */ + } __packed info; + u32 data[NBL_IPRO_MTU_CHECK_DROP_DWLEN]; +} __packed; +#define NBL_IPRO_MTU_CHECK_DROP_REG(r) (NBL_IPRO_MTU_CHECK_DROP_ADDR + \ + (NBL_IPRO_MTU_CHECK_DROP_DWLEN * 4) * (r)) + +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_ADDR (0xb04d08) +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_QUEUE_RAM_ERR_DWLEN (1) +union ipro_last_queue_ram_err_u { + struct ipro_last_queue_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_QUEUE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_ADDR (0xb04d0c) +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_DWLEN (1) +union ipro_last_dn_src_port_ram_err_u { + struct ipro_last_dn_src_port_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_DN_SRC_PORT_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_ADDR (0xb04d10) +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_DWLEN (1) +union ipro_last_up_src_port_ram_err_u { + struct ipro_last_up_src_port_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_UP_SRC_PORT_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_ADDR (0xb04d14) +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_DWLEN (1) +union ipro_last_dn_ptype_ram_err_u { + struct ipro_last_dn_ptype_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_DN_PTYPE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_ADDR (0xb04d18) +#define 
NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_DWLEN (1) +union ipro_last_up_ptype_ram_err_u { + struct ipro_last_up_ptype_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_UP_PTYPE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_ADDR (0xb04d20) +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_KG_PROF_RAM_ERR_DWLEN (1) +union ipro_last_kg_prof_ram_err_u { + struct ipro_last_kg_prof_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_KG_PROF_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_ADDR (0xb04d28) +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_DEPTH (1) +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_WIDTH (32) +#define NBL_IPRO_LAST_ERRCODE_RAM_ERR_DWLEN (1) +union ipro_last_errcode_ram_err_u { + struct ipro_last_errcode_ram_err { + u32 info:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_LAST_ERRCODE_RAM_ERR_DWLEN]; +} __packed; + +#define NBL_IPRO_IN_PKT_CAP_EN_ADDR (0xb04dfc) +#define NBL_IPRO_IN_PKT_CAP_EN_DEPTH (1) +#define NBL_IPRO_IN_PKT_CAP_EN_WIDTH (32) +#define NBL_IPRO_IN_PKT_CAP_EN_DWLEN (1) +union ipro_in_pkt_cap_en_u { + struct ipro_in_pkt_cap_en { + u32 en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_IN_PKT_CAP_EN_DWLEN]; +} __packed; + +#define NBL_IPRO_IN_PKT_CAP_ADDR (0xb04e00) +#define NBL_IPRO_IN_PKT_CAP_DEPTH (64) +#define NBL_IPRO_IN_PKT_CAP_WIDTH (32) +#define NBL_IPRO_IN_PKT_CAP_DWLEN (1) +union ipro_in_pkt_cap_u { + struct ipro_in_pkt_cap { + u32 data:32; /* [31:0] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_IN_PKT_CAP_DWLEN]; +} __packed; +#define NBL_IPRO_IN_PKT_CAP_REG(r) (NBL_IPRO_IN_PKT_CAP_ADDR + \ + (NBL_IPRO_IN_PKT_CAP_DWLEN * 4) * (r)) + +#define NBL_IPRO_ERRCODE_TBL_ADDR (0xb05000) +#define NBL_IPRO_ERRCODE_TBL_DEPTH (16) +#define NBL_IPRO_ERRCODE_TBL_WIDTH (64) +#define NBL_IPRO_ERRCODE_TBL_DWLEN (2) +union ipro_errcode_tbl_u { + struct ipro_errcode_tbl { + u32 dqueue:11; /* [10:0] Default:0x0 RW */ + u32 dqueue_en:1; /* [11] Default:0x0 RW */ + u32 dqueue_pri:2; /* [13:12] Default:0x0 RW */ + u32 set_dport_pri:2; /* [15:14] Default:0x0 RW */ + u32 set_dport:16; /* [31:16] Default:0x0 RW */ + u32 set_dport_en:1; /* [32] Default:0x0 RW */ + u32 proc_done:1; /* [33] Default:0x0 RW */ + u32 vld:1; /* [34] Default:0x0 RW */ + u32 rsv:29; /* [63:35] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_ERRCODE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_ERRCODE_TBL_REG(r) (NBL_IPRO_ERRCODE_TBL_ADDR + \ + (NBL_IPRO_ERRCODE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_DN_PTYPE_TBL_ADDR (0xb06000) +#define NBL_IPRO_DN_PTYPE_TBL_DEPTH (256) +#define NBL_IPRO_DN_PTYPE_TBL_WIDTH (64) +#define NBL_IPRO_DN_PTYPE_TBL_DWLEN (2) +union ipro_dn_ptype_tbl_u { + struct ipro_dn_ptype_tbl { + u32 dn_entry_vld:1; /* [0] Default:0x0 RW */ + u32 dn_mirror_en:1; /* [1] Default:0x0 RW */ + u32 dn_mirror_pri:2; /* [3:2] Default:0x0 RW */ + u32 dn_mirror_id:4; /* [7:4] Default:0x0 RW */ + u32 dn_encap_en:1; /* [8] Default:0x0 RW */ + u32 dn_encap_pri:2; /* [10:9] Default:0x0 RW */ + u32 dn_encap_index:13; /* [23:11] Default:0x0 RW */ + u32 not_used_0:6; /* [29:24] Default:0x0 RW */ + u32 proc_done:1; /* [30] Default:0x0 RW */ + u32 set_dport_en:1; /* [31] Default:0x0 RW */ + u32 set_dport:16; /* [47:32] 
Default:0x0 RW */ + u32 set_dport_pri:2; /* [49:48] Default:0x0 RW */ + u32 dqueue_pri:2; /* [51:50] Default:0x0 RW */ + u32 dqueue:11; /* [62:52] Default:0x0 RW */ + u32 dqueue_en:1; /* [63] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_DN_PTYPE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_DN_PTYPE_TBL_REG(r) (NBL_IPRO_DN_PTYPE_TBL_ADDR + \ + (NBL_IPRO_DN_PTYPE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_UP_PTYPE_TBL_ADDR (0xb06800) +#define NBL_IPRO_UP_PTYPE_TBL_DEPTH (256) +#define NBL_IPRO_UP_PTYPE_TBL_WIDTH (64) +#define NBL_IPRO_UP_PTYPE_TBL_DWLEN (2) +union ipro_up_ptype_tbl_u { + struct ipro_up_ptype_tbl { + u32 up_entry_vld:1; /* [0] Default:0x0 RW */ + u32 up_mirror_en:1; /* [1] Default:0x0 RW */ + u32 up_mirror_pri:2; /* [3:2] Default:0x0 RW */ + u32 up_mirror_id:4; /* [7:4] Default:0x0 RW */ + u32 up_decap_en:1; /* [8] Default:0x0 RW */ + u32 up_decap_pri:2; /* [10:9] Default:0x0 RW */ + u32 not_used_1:19; /* [29:11] Default:0x0 RW */ + u32 proc_done:1; /* [30] Default:0x0 RW */ + u32 set_dport_en:1; /* [31] Default:0x0 RW */ + u32 set_dport:16; /* [47:32] Default:0x0 RW */ + u32 set_dport_pri:2; /* [49:48] Default:0x0 RW */ + u32 dqueue_pri:2; /* [51:50] Default:0x0 RW */ + u32 dqueue:11; /* [62:52] Default:0x0 RW */ + u32 dqueue_en:1; /* [63] Default:0x0 RW */ + } __packed info; + u32 data[NBL_IPRO_UP_PTYPE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_UP_PTYPE_TBL_REG(r) (NBL_IPRO_UP_PTYPE_TBL_ADDR + \ + (NBL_IPRO_UP_PTYPE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_QUEUE_TBL_ADDR (0xb08000) +#define NBL_IPRO_QUEUE_TBL_DEPTH (2048) +#define NBL_IPRO_QUEUE_TBL_WIDTH (32) +#define NBL_IPRO_QUEUE_TBL_DWLEN (1) +union ipro_queue_tbl_u { + struct ipro_queue_tbl { + u32 vsi:10; /* [9:0] Default:0x0 RW */ + u32 vsi_en:1; /* [10] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_QUEUE_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_QUEUE_TBL_REG(r) (NBL_IPRO_QUEUE_TBL_ADDR + \ + (NBL_IPRO_QUEUE_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_UP_SRC_PORT_TBL_ADDR (0xb0b000) +#define NBL_IPRO_UP_SRC_PORT_TBL_DEPTH (4) +#define NBL_IPRO_UP_SRC_PORT_TBL_WIDTH (64) +#define NBL_IPRO_UP_SRC_PORT_TBL_DWLEN (2) +union ipro_up_src_port_tbl_u { + struct ipro_up_src_port_tbl { + u32 entry_vld:1; /* [0] Default:0x0 RW */ + u32 vlan_layer_num_0:2; /* [2:1] Default:0x0 RW */ + u32 vlan_layer_num_1:2; /* [4:3] Default:0x0 RW */ + u32 lag_vld:1; /* [5] Default:0x0 RW */ + u32 lag_id:2; /* [7:6] Default:0x0 RW */ + u32 phy_flow:1; /* [8] Default:0x0 RW */ + u32 mirror_en:1; /* [9] Default:0x0 RW */ + u32 mirror_pr:2; /* [11:10] Default:0x0 RW */ + u32 mirror_id:4; /* [15:12] Default:0x0 RW */ + u32 dqueue_pri:2; /* [17:16] Default:0x0 RW */ + u32 set_dport_pri:2; /* [19:18] Default:0x0 RW */ + u32 dqueue:11; /* [30:20] Default:0x0 RW */ + u32 dqueue_en:1; /* [31] Default:0x0 RW */ + u32 set_dport:16; /* [47:32] Default:0x0 RW */ + u32 set_dport_en:1; /* [48] Default:0x0 RW */ + u32 proc_done:1; /* [49] Default:0x0 RW */ + u32 car_en:1; /* [50] Default:0x0 RW */ + u32 car_pr:2; /* [52:51] Default:0x0 RW */ + u32 car_id:10; /* [62:53] Default:0x0 RW */ + u32 rsv:1; /* [63] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_UP_SRC_PORT_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_UP_SRC_PORT_TBL_REG(r) (NBL_IPRO_UP_SRC_PORT_TBL_ADDR + \ + (NBL_IPRO_UP_SRC_PORT_TBL_DWLEN * 4) * (r)) + +#define NBL_IPRO_DN_SRC_PORT_TBL_ADDR (0xb0c000) +#define NBL_IPRO_DN_SRC_PORT_TBL_DEPTH (1024) +#define NBL_IPRO_DN_SRC_PORT_TBL_WIDTH (128) +#define 
NBL_IPRO_DN_SRC_PORT_TBL_DWLEN (4) +union ipro_dn_src_port_tbl_u { + struct ipro_dn_src_port_tbl { + u32 entry_vld:1; /* [0] Default:0x0 RW */ + u32 mirror_en:1; /* [1] Default:0x0 RW */ + u32 mirror_pr:2; /* [3:2] Default:0x0 RW */ + u32 mirror_id:4; /* [7:4] Default:0x0 RW */ + u32 vlan_layer_num_1:2; /* [9:8] Default:0x0 RW */ + u32 phy_flow:1; /* [10] Default:0x0 RW */ + u32 mtu_sel:4; /* [14:11] Default:0x0 RW */ + u32 addr_check_en:1; /* [15] Default:0x0 RW */ + u32 smac_l:32; /* [47:16] Default:0x0 RW */ + u32 smac_h:16; /* [63:48] Default:0x0 RW */ + u32 dqueue:11; /* [74:64] Default:0x0 RW */ + u32 dqueue_en:1; /* [75] Default:0x0 RW */ + u32 dqueue_pri:2; /* [77:76] Default:0x0 RW */ + u32 set_dport_pri:2; /* [79:78] Default:0x0 RW */ + u32 set_dport:16; /* [95:80] Default:0x0 RW */ + u32 set_dport_en:1; /* [96] Default:0x0 RW */ + u32 proc_done:1; /* [97] Default:0x0 RW */ + u32 not_used_1:2; /* [99:98] Default:0x0 RW */ + u32 rsv:28; /* [127:100] Default:0x0 RO */ + } __packed info; + u32 data[NBL_IPRO_DN_SRC_PORT_TBL_DWLEN]; +} __packed; +#define NBL_IPRO_DN_SRC_PORT_TBL_REG(r) (NBL_IPRO_DN_SRC_PORT_TBL_ADDR + \ + (NBL_IPRO_DN_SRC_PORT_TBL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h new file mode 100644 index 0000000000000000000000000000000000000000..da3e1e6f87260200c3d016f99a5f03fc67dabcbe --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_mcc.h @@ -0,0 +1,407 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_MCC_H +#define NBL_MCC_H 1 + +#include + +#define NBL_MCC_BASE (0x00B44000) + +#define NBL_MCC_INT_STATUS_ADDR (0xb44000) +#define NBL_MCC_INT_STATUS_DEPTH (1) +#define NBL_MCC_INT_STATUS_WIDTH (32) +#define NBL_MCC_INT_STATUS_DWLEN (1) +union mcc_int_status_u { + struct mcc_int_status { + u32 fatal_err:1; /* [0] Default:0x0 RWC */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RWC */ + u32 fsm_err:1; /* [3] Default:0x0 RWC */ + u32 cif_err:1; /* [4] Default:0x0 RWC */ + u32 cfg_err:1; /* [5] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [6] Default:0x0 RWC */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_MCC_INT_MASK_ADDR (0xb44004) +#define NBL_MCC_INT_MASK_DEPTH (1) +#define NBL_MCC_INT_MASK_WIDTH (32) +#define NBL_MCC_INT_MASK_DWLEN (1) +union mcc_int_mask_u { + struct mcc_int_mask { + u32 fatal_err:1; /* [0] Default:0x0 RW */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 RW */ + u32 fsm_err:1; /* [3] Default:0x0 RW */ + u32 cif_err:1; /* [4] Default:0x0 RW */ + u32 cfg_err:1; /* [5] Default:0x0 RW */ + u32 data_ucor_err:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INT_MASK_DWLEN]; +} __packed; + +#define NBL_MCC_INT_SET_ADDR (0xb44008) +#define NBL_MCC_INT_SET_DEPTH (1) +#define NBL_MCC_INT_SET_WIDTH (32) +#define NBL_MCC_INT_SET_DWLEN (1) +union mcc_int_set_u { + struct mcc_int_set { + u32 fatal_err:1; /* [0] Default:0x0 WO */ + u32 fifo_uflw_err:1; /* [1] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [2] Default:0x0 WO */ + u32 fsm_err:1; /* [3] Default:0x0 WO */ + u32 cif_err:1; /* [4] Default:0x0 WO */ + u32 cfg_err:1; /* [5] Default:0x0 WO */ + u32 data_ucor_err:1; /* [6] Default:0x0 
WO */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INT_SET_DWLEN]; +} __packed; + +#define NBL_MCC_INIT_DONE_ADDR (0xb4400c) +#define NBL_MCC_INIT_DONE_DEPTH (1) +#define NBL_MCC_INIT_DONE_WIDTH (32) +#define NBL_MCC_INIT_DONE_DWLEN (1) +union mcc_init_done_u { + struct mcc_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_MCC_CIF_ERR_INFO_ADDR (0xb44040) +#define NBL_MCC_CIF_ERR_INFO_DEPTH (1) +#define NBL_MCC_CIF_ERR_INFO_WIDTH (32) +#define NBL_MCC_CIF_ERR_INFO_DWLEN (1) +union mcc_cif_err_info_u { + struct mcc_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MCC_CFG_ERR_INFO_ADDR (0xb44050) +#define NBL_MCC_CFG_ERR_INFO_DEPTH (1) +#define NBL_MCC_CFG_ERR_INFO_WIDTH (32) +#define NBL_MCC_CFG_ERR_INFO_DWLEN (1) +union mcc_cfg_err_info_u { + struct mcc_cfg_err_info { + u32 id:8; /* [7:0] Default:0x0 RO */ + u32 addr:16; /* [23:8] Default:0x0 RO */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_MCC_CAR_CTRL_ADDR (0xb44100) +#define NBL_MCC_CAR_CTRL_DEPTH (1) +#define NBL_MCC_CAR_CTRL_WIDTH (32) +#define NBL_MCC_CAR_CTRL_DWLEN (1) +union mcc_car_ctrl_u { + struct mcc_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_MCC_TIMEOUT_CFG_ADDR (0xb44140) +#define NBL_MCC_TIMEOUT_CFG_DEPTH (1) +#define NBL_MCC_TIMEOUT_CFG_WIDTH (32) +#define NBL_MCC_TIMEOUT_CFG_DWLEN (1) +union mcc_timeout_cfg_u { + struct mcc_timeout_cfg { + u32 fsm_max_num:32; /* [31:0] Default:0xffffffff RW */ + } __packed info; + u32 data[NBL_MCC_TIMEOUT_CFG_DWLEN]; +} __packed; + +#define NBL_MCC_INIT_START_ADDR (0xb44180) +#define NBL_MCC_INIT_START_DEPTH (1) +#define NBL_MCC_INIT_START_WIDTH (32) +#define NBL_MCC_INIT_START_DWLEN (1) +union mcc_init_start_u { + struct mcc_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_INIT_START_DWLEN]; +} __packed; + +#define NBL_MCC_RATE_CTRL_ADDR (0xb44300) +#define NBL_MCC_RATE_CTRL_DEPTH (1) +#define NBL_MCC_RATE_CTRL_WIDTH (32) +#define NBL_MCC_RATE_CTRL_DWLEN (1) +union mcc_rate_ctrl_u { + struct mcc_rate_ctrl { + u32 rate_ctrl_eth_bandwidth:3; /* [2:0] Default:0x0 RW */ + u32 rate_ctrl_eth_switch:2; /* [4:3] Default:0x0 RW */ + u32 rate_ctrl_gear:3; /* [7:5] Default:0x0 RW */ + u32 rate_ctrl_en:1; /* [8] Default:0x0 RW */ + u32 rsv:23; /* [31:9] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_RATE_CTRL_DWLEN]; +} __packed; + +#define NBL_MCC_CREDIT_ADDR (0xb44400) +#define NBL_MCC_CREDIT_DEPTH (1) +#define NBL_MCC_CREDIT_WIDTH (32) +#define NBL_MCC_CREDIT_DWLEN (1) +union mcc_credit_u { + struct mcc_credit { + u32 mcc_up_credit:5; /* [4:0] Default:0x1d RW */ + u32 mcc_up_vld:1; /* [5] Default:0x0 WO */ + u32 rsv1:10; /* [15:6] Default:0x0 RO */ + u32 mcc_dn_credit:5; /* [20:16] Default:0x1d RW */ + u32 mcc_dn_vld:1; /* [21] Default:0x0 WO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 
data[NBL_MCC_CREDIT_DWLEN]; +} __packed; + +#define NBL_MCC_ACTION_PRIORITY_ADDR (0xb44500) +#define NBL_MCC_ACTION_PRIORITY_DEPTH (1) +#define NBL_MCC_ACTION_PRIORITY_WIDTH (32) +#define NBL_MCC_ACTION_PRIORITY_DWLEN (1) +union mcc_action_priority_u { + struct mcc_action_priority { + u32 statidx_act_pri:2; /* [1:0] Default:0x0 RW */ + u32 dport_act_pri:2; /* [3:2] Default:0x0 RW */ + u32 dqueue_act_pri:2; /* [5:4] Default:0x0 RW */ + u32 rsv:26; /* [31:6] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_ACTION_PRIORITY_DWLEN]; +} __packed; + +#define NBL_MCC_UU_WEIGHT_ADDR (0xb44600) +#define NBL_MCC_UU_WEIGHT_DEPTH (1) +#define NBL_MCC_UU_WEIGHT_WIDTH (32) +#define NBL_MCC_UU_WEIGHT_DWLEN (1) +union mcc_uu_weight_u { + struct mcc_uu_weight { + u32 uu_weight:8; /* [7:0] Default:0x2 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_UU_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_DU_WEIGHT_ADDR (0xb44604) +#define NBL_MCC_DU_WEIGHT_DEPTH (1) +#define NBL_MCC_DU_WEIGHT_WIDTH (32) +#define NBL_MCC_DU_WEIGHT_DWLEN (1) +union mcc_du_weight_u { + struct mcc_du_weight { + u32 du_weight:8; /* [7:0] Default:0x2 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_DU_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_UCH_WEIGHT_ADDR (0xb44608) +#define NBL_MCC_UCH_WEIGHT_DEPTH (1) +#define NBL_MCC_UCH_WEIGHT_WIDTH (32) +#define NBL_MCC_UCH_WEIGHT_DWLEN (1) +union mcc_uch_weight_u { + struct mcc_uch_weight { + u32 uch_weight:8; /* [7:0] Default:0x1 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_UCH_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_DCH_WEIGHT_ADDR (0xb4460c) +#define NBL_MCC_DCH_WEIGHT_DEPTH (1) +#define NBL_MCC_DCH_WEIGHT_WIDTH (32) +#define NBL_MCC_DCH_WEIGHT_DWLEN (1) +union mcc_dch_weight_u { + struct mcc_dch_weight { + u32 dch_weight:8; /* [7:0] Default:0x1 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_DCH_WEIGHT_DWLEN]; +} __packed; + +#define NBL_MCC_SPD_TIMEOUT_TH_ADDR (0xb44740) +#define NBL_MCC_SPD_TIMEOUT_TH_DEPTH (1) +#define NBL_MCC_SPD_TIMEOUT_TH_WIDTH (32) +#define NBL_MCC_SPD_TIMEOUT_TH_DWLEN (1) +union mcc_spd_timeout_th_u { + struct mcc_spd_timeout_th { + u32 timeout_th:8; /* [7:0] Default:0xff RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_SPD_TIMEOUT_TH_DWLEN]; +} __packed; + +#define NBL_MCC_EXT_FLAG_OFFSET_ADDR (0xb44800) +#define NBL_MCC_EXT_FLAG_OFFSET_DEPTH (1) +#define NBL_MCC_EXT_FLAG_OFFSET_WIDTH (32) +#define NBL_MCC_EXT_FLAG_OFFSET_DWLEN (1) +union mcc_ext_flag_offset_u { + struct mcc_ext_flag_offset { + u32 dir_offset:5; /* [4:0] Default:0x00 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_EXT_FLAG_OFFSET_DWLEN]; +} __packed; + +#define NBL_MCC_EXT_MCIDX_ADDR (0xb44804) +#define NBL_MCC_EXT_MCIDX_DEPTH (1) +#define NBL_MCC_EXT_MCIDX_WIDTH (32) +#define NBL_MCC_EXT_MCIDX_DWLEN (1) +union mcc_ext_mcidx_u { + struct mcc_ext_mcidx { + u32 mcidx_act_id:6; /* [5:0] Default:0x0d RW */ + u32 mcidx_vld:1; /* [6] Default:0x1 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_EXT_MCIDX_DWLEN]; +} __packed; + +#define NBL_MCC_MC_ORIGINAL_DPORT_ADDR (0xb44808) +#define NBL_MCC_MC_ORIGINAL_DPORT_DEPTH (1) +#define NBL_MCC_MC_ORIGINAL_DPORT_WIDTH (32) +#define NBL_MCC_MC_ORIGINAL_DPORT_DWLEN (1) +union mcc_mc_original_dport_u { + struct mcc_mc_original_dport { + u32 dport:16; /* [15:0] Default:0x2fef RW */ + u32 
rsv:16; /* [31:16] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_MC_ORIGINAL_DPORT_DWLEN]; +} __packed; + +#define NBL_MCC_AM_SET_FLAGS_ADDR (0xb44900) +#define NBL_MCC_AM_SET_FLAGS_DEPTH (1) +#define NBL_MCC_AM_SET_FLAGS_WIDTH (32) +#define NBL_MCC_AM_SET_FLAGS_DWLEN (1) +union mcc_am_set_flags_u { + struct mcc_am_set_flags { + u32 set_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MCC_AM_SET_FLAGS_DWLEN]; +} __packed; + +#define NBL_MCC_AM_CLEAR_FLAGS_ADDR (0xb44904) +#define NBL_MCC_AM_CLEAR_FLAGS_DEPTH (1) +#define NBL_MCC_AM_CLEAR_FLAGS_WIDTH (32) +#define NBL_MCC_AM_CLEAR_FLAGS_DWLEN (1) +union mcc_am_clear_flags_u { + struct mcc_am_clear_flags { + u32 clear_flags:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MCC_AM_CLEAR_FLAGS_DWLEN]; +} __packed; + +#define NBL_MCC_AM_ACT_ID_ADDR (0xb44a00) +#define NBL_MCC_AM_ACT_ID_DEPTH (1) +#define NBL_MCC_AM_ACT_ID_WIDTH (32) +#define NBL_MCC_AM_ACT_ID_DWLEN (1) +union mcc_am_act_id_u { + struct mcc_am_act_id { + u32 dport_act_id:6; /* [5:0] Default:0x9 RW */ + u32 rsv3:2; /* [7:6] Default:0x0 RO */ + u32 dqueue_act_id:6; /* [13:8] Default:0xa RW */ + u32 rsv2:2; /* [15:14] Default:0x0 RO */ + u32 statidx_act_id:6; /* [21:16] Default:0x10 RW */ + u32 rsv1:2; /* [23:22] Default:0x0 RO */ + u32 mirroridx_act_id:6; /* [29:24] Default:0x08 RW */ + u32 rsv:2; /* [31:30] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_AM_ACT_ID_DWLEN]; +} __packed; + +#define NBL_MCC_QUEUE_EN_CTRL_ADDR (0xb44b00) +#define NBL_MCC_QUEUE_EN_CTRL_DEPTH (1) +#define NBL_MCC_QUEUE_EN_CTRL_WIDTH (32) +#define NBL_MCC_QUEUE_EN_CTRL_DWLEN (1) +union mcc_queue_en_ctrl_u { + struct mcc_queue_en_ctrl { + u32 uuq_en:1; /* [0] Default:0x1 RW */ + u32 duq_en:1; /* [1] Default:0x1 RW */ + u32 umhq_en:1; /* [2] Default:0x1 RW */ + u32 dmhq_en:1; /* [3] Default:0x1 RW */ + u32 umlq_en:1; /* [4] Default:0x1 RW */ + u32 dmlq_en:1; /* [5] Default:0x1 RW */ + u32 uchq_en:1; /* [6] Default:0x1 RW */ + u32 dchq_en:1; /* [7] Default:0x1 RW */ + u32 uclq_en:1; /* [8] Default:0x1 RW */ + u32 dclq_en:1; /* [9] Default:0x1 RW */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_QUEUE_EN_CTRL_DWLEN]; +} __packed; + +#define NBL_MCC_CFG_TEST_ADDR (0xb44c00) +#define NBL_MCC_CFG_TEST_DEPTH (1) +#define NBL_MCC_CFG_TEST_WIDTH (32) +#define NBL_MCC_CFG_TEST_DWLEN (1) +union mcc_cfg_test_u { + struct mcc_cfg_test { + u32 test:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_MCC_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_MCC_BP_STATE_ADDR (0xb44f00) +#define NBL_MCC_BP_STATE_DEPTH (1) +#define NBL_MCC_BP_STATE_WIDTH (32) +#define NBL_MCC_BP_STATE_DWLEN (1) +union mcc_bp_state_u { + struct mcc_bp_state { + u32 in_bp:1; /* [0] Default:0x0 RO */ + u32 out_bp:1; /* [1] Default:0x0 RO */ + u32 inter_bp:1; /* [2] Default:0x0 RO */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_BP_STATE_DWLEN]; +} __packed; + +#define NBL_MCC_BP_HISTORY_ADDR (0xb44f04) +#define NBL_MCC_BP_HISTORY_DEPTH (1) +#define NBL_MCC_BP_HISTORY_WIDTH (32) +#define NBL_MCC_BP_HISTORY_DWLEN (1) +union mcc_bp_history_u { + struct mcc_bp_history { + u32 in_bp:1; /* [0] Default:0x0 RC */ + u32 out_bp:1; /* [1] Default:0x0 RC */ + u32 inter_bp:1; /* [2] Default:0x0 RC */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_MCC_TBL_ADDR (0xb54000) +#define NBL_MCC_TBL_DEPTH (8192) +#define NBL_MCC_TBL_WIDTH (64) +#define 
NBL_MCC_TBL_DWLEN (2) +union mcc_tbl_u { + struct mcc_tbl { + u32 dport_act:16; /* [15:0] Default:0x0 RW */ + u32 dqueue_act:11; /* [26:16] Default:0x0 RW */ + u32 dqueue_en:1; /* [27] Default:0x0 RW */ + u32 dqueue_rsv:4; /* [31:28] Default:0x0 RO */ + u32 statid_act:11; /* [42:32] Default:0x0 RW */ + u32 statid_filter:1; /* [43] Default:0x0 RW */ + u32 flowid_filter:1; /* [44] Default:0x0 RW */ + u32 stateid_rsv:3; /* [47:45] Default:0x0 RO */ + u32 next_pntr:13; /* [60:48] Default:0x0 RW */ + u32 tail:1; /* [61] Default:0x0 RW */ + u32 vld:1; /* [62] Default:0x0 RW */ + u32 rsv:1; /* [63] Default:0x0 RO */ + } __packed info; + u32 data[NBL_MCC_TBL_DWLEN]; +} __packed; +#define NBL_MCC_TBL_REG(r) (NBL_MCC_TBL_ADDR + \ + (NBL_MCC_TBL_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h new file mode 100644 index 0000000000000000000000000000000000000000..690c6ce96d8462d765a4de31905e16fe9b328289 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp0.h @@ -0,0 +1,614 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PP0_H +#define NBL_PP0_H 1 + +#include + +#define NBL_PP0_BASE (0x00B14000) + +#define NBL_PP0_INT_STATUS_ADDR (0xb14000) +#define NBL_PP0_INT_STATUS_DEPTH (1) +#define NBL_PP0_INT_STATUS_WIDTH (32) +#define NBL_PP0_INT_STATUS_DWLEN (1) +union pp0_int_status_u { + struct pp0_int_status { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PP0_INT_MASK_ADDR (0xb14004) +#define NBL_PP0_INT_MASK_DEPTH (1) +#define NBL_PP0_INT_MASK_WIDTH (32) +#define NBL_PP0_INT_MASK_DWLEN (1) +union pp0_int_mask_u { + struct pp0_int_mask { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PP0_INT_SET_ADDR (0xb14008) +#define NBL_PP0_INT_SET_DEPTH (1) +#define NBL_PP0_INT_SET_WIDTH (32) +#define NBL_PP0_INT_SET_DWLEN (1) +union pp0_int_set_u { + struct pp0_int_set { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* 
[09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INT_SET_DWLEN]; +} __packed; + +#define NBL_PP0_INIT_DONE_ADDR (0xb1400c) +#define NBL_PP0_INIT_DONE_DEPTH (1) +#define NBL_PP0_INIT_DONE_WIDTH (32) +#define NBL_PP0_INIT_DONE_DWLEN (1) +union pp0_init_done_u { + struct pp0_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PP0_CFG_ERR_INFO_ADDR (0xb14038) +#define NBL_PP0_CFG_ERR_INFO_DEPTH (1) +#define NBL_PP0_CFG_ERR_INFO_WIDTH (32) +#define NBL_PP0_CFG_ERR_INFO_DWLEN (1) +union pp0_cfg_err_info_u { + struct pp0_cfg_err_info { + u32 id:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP0_CIF_ERR_INFO_ADDR (0xb14040) +#define NBL_PP0_CIF_ERR_INFO_DEPTH (1) +#define NBL_PP0_CIF_ERR_INFO_WIDTH (32) +#define NBL_PP0_CIF_ERR_INFO_DWLEN (1) +union pp0_cif_err_info_u { + struct pp0_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP0_CAR_CTRL_ADDR (0xb14100) +#define NBL_PP0_CAR_CTRL_DEPTH (1) +#define NBL_PP0_CAR_CTRL_WIDTH (32) +#define NBL_PP0_CAR_CTRL_DWLEN (1) +union pp0_car_ctrl_u { + struct pp0_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PP0_MODE_ADDR (0xb14104) +#define NBL_PP0_MODE_DEPTH (1) +#define NBL_PP0_MODE_WIDTH (32) +#define NBL_PP0_MODE_DWLEN (1) +union pp0_mode_u { + struct pp0_mode { + u32 bypass:1; /* [0] Default:0x0 RW */ + u32 internal_loopback_en:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_MODE_DWLEN]; +} __packed; + +#define NBL_PP0_SET_FLAGS0_ADDR (0xb14108) +#define NBL_PP0_SET_FLAGS0_DEPTH (1) +#define NBL_PP0_SET_FLAGS0_WIDTH (32) +#define NBL_PP0_SET_FLAGS0_DWLEN (1) +union pp0_set_flags0_u { + struct pp0_set_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_SET_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP0_SET_FLAGS1_ADDR (0xb1410c) +#define NBL_PP0_SET_FLAGS1_DEPTH (1) +#define NBL_PP0_SET_FLAGS1_WIDTH (32) +#define NBL_PP0_SET_FLAGS1_DWLEN (1) +union pp0_set_flags1_u { + struct pp0_set_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_SET_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP0_CLEAR_FLAGS0_ADDR (0xb14110) +#define NBL_PP0_CLEAR_FLAGS0_DEPTH (1) +#define NBL_PP0_CLEAR_FLAGS0_WIDTH (32) +#define NBL_PP0_CLEAR_FLAGS0_DWLEN (1) +union pp0_clear_flags0_u { + struct pp0_clear_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_CLEAR_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP0_CLEAR_FLAGS1_ADDR (0xb14114) +#define NBL_PP0_CLEAR_FLAGS1_DEPTH (1) +#define NBL_PP0_CLEAR_FLAGS1_WIDTH (32) +#define NBL_PP0_CLEAR_FLAGS1_DWLEN (1) +union pp0_clear_flags1_u { + struct pp0_clear_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_CLEAR_FLAGS1_DWLEN]; +} __packed; + +#define 
NBL_PP0_ACTION_PRIORITY0_ADDR (0xb14118) +#define NBL_PP0_ACTION_PRIORITY0_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY0_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY0_DWLEN (1) +union pp0_action_priority0_u { + struct pp0_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY1_ADDR (0xb1411c) +#define NBL_PP0_ACTION_PRIORITY1_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY1_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY1_DWLEN (1) +union pp0_action_priority1_u { + struct pp0_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY2_ADDR (0xb14120) +#define NBL_PP0_ACTION_PRIORITY2_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY2_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY2_DWLEN (1) +union pp0_action_priority2_u { + struct pp0_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY2_DWLEN]; +} __packed; + 
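+/*
+ * Usage sketch (added for illustration; not part of the generated file).
+ * Each register above pairs a bit-field view ("info") with a raw dword
+ * view ("data") sized by its _DWLEN macro: fill the bit fields, then
+ * write the dwords out through MMIO.  struct nbl_hw and
+ * nbl_hw_write_regs() below are assumed stand-ins for the driver's real
+ * device handle and register-write helper.
+ */
+struct nbl_hw;
+void nbl_hw_write_regs(struct nbl_hw *hw, u32 addr, u32 *data, u32 dwlen);
+
+static inline void nbl_pp0_set_action_id3_pri(struct nbl_hw *hw, u32 pri)
+{
+	union pp0_action_priority0_u reg = { .data = { 0 } };
+
+	reg.info.action_id3_pri = pri;	/* 2-bit priority, bits [1:0] */
+	nbl_hw_write_regs(hw, NBL_PP0_ACTION_PRIORITY0_ADDR,
+			  reg.data, NBL_PP0_ACTION_PRIORITY0_DWLEN);
+}
+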
+#define NBL_PP0_ACTION_PRIORITY3_ADDR (0xb14124) +#define NBL_PP0_ACTION_PRIORITY3_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY3_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY3_DWLEN (1) +union pp0_action_priority3_u { + struct pp0_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY4_ADDR (0xb14128) +#define NBL_PP0_ACTION_PRIORITY4_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY4_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY4_DWLEN (1) +union pp0_action_priority4_u { + struct pp0_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY5_ADDR (0xb1412c) +#define NBL_PP0_ACTION_PRIORITY5_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY5_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY5_DWLEN (1) +union pp0_action_priority5_u { + struct pp0_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY6_ADDR (0xb14130) +#define NBL_PP0_ACTION_PRIORITY6_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY6_WIDTH (32) +#define 
NBL_PP0_ACTION_PRIORITY6_DWLEN (1) +union pp0_action_priority6_u { + struct pp0_action_priority6 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_PP0_ACTION_PRIORITY7_ADDR (0xb14134) +#define NBL_PP0_ACTION_PRIORITY7_DEPTH (1) +#define NBL_PP0_ACTION_PRIORITY7_WIDTH (32) +#define NBL_PP0_ACTION_PRIORITY7_DWLEN (1) +union pp0_action_priority7_u { + struct pp0_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_PP0_CPU_ACCESS_ADDR (0xb1416c) +#define NBL_PP0_CPU_ACCESS_DEPTH (1) +#define NBL_PP0_CPU_ACCESS_WIDTH (32) +#define NBL_PP0_CPU_ACCESS_DWLEN (1) +union pp0_cpu_access_u { + struct pp0_cpu_access { + u32 bp_th:10; /* [9:0] Default:0x34 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 timeout_th:10; /* [25:16] Default:0x100 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_CPU_ACCESS_DWLEN]; +} __packed; + +#define NBL_PP0_RDMA_BYPASS_ADDR (0xb14170) +#define NBL_PP0_RDMA_BYPASS_DEPTH (1) +#define NBL_PP0_RDMA_BYPASS_WIDTH (32) +#define NBL_PP0_RDMA_BYPASS_DWLEN (1) +union pp0_rdma_bypass_u { + struct pp0_rdma_bypass { + u32 rdma_flag_offset:5; /* [4:0] Default:0x0 RW */ + u32 dn_bypass_en:1; /* [5] Default:0x0 RW */ + u32 up_bypass_en:1; /* [6] Default:0x0 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 dir_flag_offset:5; /* [12:8] Default:0x0 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_RDMA_BYPASS_DWLEN]; +} __packed; + +#define NBL_PP0_INIT_START_ADDR (0xb141fc) +#define NBL_PP0_INIT_START_DEPTH (1) +#define NBL_PP0_INIT_START_WIDTH (32) +#define NBL_PP0_INIT_START_DWLEN (1) +union pp0_init_start_u { + struct pp0_init_start { + u32 en:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_INIT_START_DWLEN]; +} __packed; + +#define NBL_PP0_BP_SET_ADDR (0xb14200) +#define NBL_PP0_BP_SET_DEPTH (1) +#define 
NBL_PP0_BP_SET_WIDTH (32) +#define NBL_PP0_BP_SET_DWLEN (1) +union pp0_bp_set_u { + struct pp0_bp_set { + u32 pp_up:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_SET_DWLEN]; +} __packed; + +#define NBL_PP0_BP_MASK_ADDR (0xb14204) +#define NBL_PP0_BP_MASK_DEPTH (1) +#define NBL_PP0_BP_MASK_WIDTH (32) +#define NBL_PP0_BP_MASK_DWLEN (1) +union pp0_bp_mask_u { + struct pp0_bp_mask { + u32 dn_pp:1; /* [00:00] Default:0x0 RW */ + u32 fem_pp:1; /* [01:01] Default:0x0 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_MASK_DWLEN]; +} __packed; + +#define NBL_PP0_BP_STATE_ADDR (0xb14308) +#define NBL_PP0_BP_STATE_DEPTH (1) +#define NBL_PP0_BP_STATE_WIDTH (32) +#define NBL_PP0_BP_STATE_DWLEN (1) +union pp0_bp_state_u { + struct pp0_bp_state { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RO */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RO */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_STATE_DWLEN]; +} __packed; + +#define NBL_PP0_BP_HISTORY_ADDR (0xb1430c) +#define NBL_PP0_BP_HISTORY_DEPTH (1) +#define NBL_PP0_BP_HISTORY_WIDTH (32) +#define NBL_PP0_BP_HISTORY_DWLEN (1) +union pp0_bp_history_u { + struct pp0_bp_history { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RC */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RC */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_PP0_CFG_TEST_ADDR (0xb1442c) +#define NBL_PP0_CFG_TEST_DEPTH (1) +#define NBL_PP0_CFG_TEST_WIDTH (32) +#define NBL_PP0_CFG_TEST_DWLEN (1) +union pp0_cfg_test_u { + struct pp0_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION0_ADDR (0xb14430) +#define NBL_PP0_ABNORMAL_ACTION0_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION0_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION0_DWLEN (1) +union pp0_abnormal_action0_u { + struct pp0_abnormal_action0 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION0_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION1_ADDR (0xb14434) +#define NBL_PP0_ABNORMAL_ACTION1_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION1_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION1_DWLEN (1) +union pp0_abnormal_action1_u { + struct pp0_abnormal_action1 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION1_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION2_ADDR (0xb14438) +#define NBL_PP0_ABNORMAL_ACTION2_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION2_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION2_DWLEN (1) +union pp0_abnormal_action2_u { + struct pp0_abnormal_action2 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION2_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION3_ADDR (0xb1443c) +#define NBL_PP0_ABNORMAL_ACTION3_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION3_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION3_DWLEN (1) +union pp0_abnormal_action3_u { + struct pp0_abnormal_action3 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO 
*/ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION3_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION4_ADDR (0xb14440) +#define NBL_PP0_ABNORMAL_ACTION4_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION4_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION4_DWLEN (1) +union pp0_abnormal_action4_u { + struct pp0_abnormal_action4 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION4_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION5_ADDR (0xb14444) +#define NBL_PP0_ABNORMAL_ACTION5_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION5_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION5_DWLEN (1) +union pp0_abnormal_action5_u { + struct pp0_abnormal_action5 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION5_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION6_ADDR (0xb14448) +#define NBL_PP0_ABNORMAL_ACTION6_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION6_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION6_DWLEN (1) +union pp0_abnormal_action6_u { + struct pp0_abnormal_action6 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION6_DWLEN]; +} __packed; + +#define NBL_PP0_ABNORMAL_ACTION7_ADDR (0xb1444c) +#define NBL_PP0_ABNORMAL_ACTION7_DEPTH (1) +#define NBL_PP0_ABNORMAL_ACTION7_WIDTH (32) +#define NBL_PP0_ABNORMAL_ACTION7_DWLEN (1) +union pp0_abnormal_action7_u { + struct pp0_abnormal_action7 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_ABNORMAL_ACTION7_DWLEN]; +} __packed; + +#define NBL_PP0_FWD_DPORT_ACTION_ADDR (0xb14450) +#define NBL_PP0_FWD_DPORT_ACTION_DEPTH (1) +#define NBL_PP0_FWD_DPORT_ACTION_WIDTH (32) +#define NBL_PP0_FWD_DPORT_ACTION_DWLEN (1) +union pp0_fwd_dport_action_u { + struct pp0_fwd_dport_action { + u32 action_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP0_FWD_DPORT_ACTION_DWLEN]; +} __packed; + +#define NBL_PP0_RDMA_VSI_BTM_ADDR (0xb14454) +#define NBL_PP0_RDMA_VSI_BTM_DEPTH (32) +#define NBL_PP0_RDMA_VSI_BTM_WIDTH (32) +#define NBL_PP0_RDMA_VSI_BTM_DWLEN (1) +union pp0_rdma_vsi_btm_u { + struct pp0_rdma_vsi_btm { + u32 btm:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP0_RDMA_VSI_BTM_DWLEN]; +} __packed; +#define NBL_PP0_RDMA_VSI_BTM_REG(r) (NBL_PP0_RDMA_VSI_BTM_ADDR + \ + (NBL_PP0_RDMA_VSI_BTM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h new file mode 100644 index 0000000000000000000000000000000000000000..d909fd0df59ae29dafc8b92c997f5521e8b4928a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp1.h @@ -0,0 +1,696 @@ +// Code generated by interstellar. DO NOT EDIT. 
+// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PP1_H +#define NBL_PP1_H 1 + +#include + +#define NBL_PP1_BASE (0x00B24000) + +#define NBL_PP1_INT_STATUS_ADDR (0xb24000) +#define NBL_PP1_INT_STATUS_DEPTH (1) +#define NBL_PP1_INT_STATUS_WIDTH (32) +#define NBL_PP1_INT_STATUS_DWLEN (1) +union pp1_int_status_u { + struct pp1_int_status { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PP1_INT_MASK_ADDR (0xb24004) +#define NBL_PP1_INT_MASK_DEPTH (1) +#define NBL_PP1_INT_MASK_WIDTH (32) +#define NBL_PP1_INT_MASK_DWLEN (1) +union pp1_int_mask_u { + struct pp1_int_mask { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PP1_INT_SET_ADDR (0xb24008) +#define NBL_PP1_INT_SET_DEPTH (1) +#define NBL_PP1_INT_SET_WIDTH (32) +#define NBL_PP1_INT_SET_DWLEN (1) +union pp1_int_set_u { + struct pp1_int_set { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INT_SET_DWLEN]; +} __packed; + +#define NBL_PP1_INIT_DONE_ADDR (0xb2400c) +#define NBL_PP1_INIT_DONE_DEPTH (1) +#define NBL_PP1_INIT_DONE_WIDTH (32) +#define NBL_PP1_INIT_DONE_DWLEN (1) +union pp1_init_done_u { + struct pp1_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PP1_CFG_ERR_INFO_ADDR (0xb24038) +#define NBL_PP1_CFG_ERR_INFO_DEPTH (1) +#define NBL_PP1_CFG_ERR_INFO_WIDTH (32) +#define NBL_PP1_CFG_ERR_INFO_DWLEN (1) +union pp1_cfg_err_info_u { + struct pp1_cfg_err_info { + u32 id:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP1_CIF_ERR_INFO_ADDR (0xb24040) +#define NBL_PP1_CIF_ERR_INFO_DEPTH (1) +#define NBL_PP1_CIF_ERR_INFO_WIDTH (32) +#define NBL_PP1_CIF_ERR_INFO_DWLEN (1) +union pp1_cif_err_info_u { + struct pp1_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] 
Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP1_CAR_CTRL_ADDR (0xb24100) +#define NBL_PP1_CAR_CTRL_DEPTH (1) +#define NBL_PP1_CAR_CTRL_WIDTH (32) +#define NBL_PP1_CAR_CTRL_DWLEN (1) +union pp1_car_ctrl_u { + struct pp1_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PP1_MODE_ADDR (0xb24104) +#define NBL_PP1_MODE_DEPTH (1) +#define NBL_PP1_MODE_WIDTH (32) +#define NBL_PP1_MODE_DWLEN (1) +union pp1_mode_u { + struct pp1_mode { + u32 bypass:1; /* [0] Default:0x0 RW */ + u32 internal_loopback_en:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_MODE_DWLEN]; +} __packed; + +#define NBL_PP1_SET_FLAGS0_ADDR (0xb24108) +#define NBL_PP1_SET_FLAGS0_DEPTH (1) +#define NBL_PP1_SET_FLAGS0_WIDTH (32) +#define NBL_PP1_SET_FLAGS0_DWLEN (1) +union pp1_set_flags0_u { + struct pp1_set_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_SET_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP1_SET_FLAGS1_ADDR (0xb2410c) +#define NBL_PP1_SET_FLAGS1_DEPTH (1) +#define NBL_PP1_SET_FLAGS1_WIDTH (32) +#define NBL_PP1_SET_FLAGS1_DWLEN (1) +union pp1_set_flags1_u { + struct pp1_set_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_SET_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP1_CLEAR_FLAGS0_ADDR (0xb24110) +#define NBL_PP1_CLEAR_FLAGS0_DEPTH (1) +#define NBL_PP1_CLEAR_FLAGS0_WIDTH (32) +#define NBL_PP1_CLEAR_FLAGS0_DWLEN (1) +union pp1_clear_flags0_u { + struct pp1_clear_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_CLEAR_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP1_CLEAR_FLAGS1_ADDR (0xb24114) +#define NBL_PP1_CLEAR_FLAGS1_DEPTH (1) +#define NBL_PP1_CLEAR_FLAGS1_WIDTH (32) +#define NBL_PP1_CLEAR_FLAGS1_DWLEN (1) +union pp1_clear_flags1_u { + struct pp1_clear_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_CLEAR_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY0_ADDR (0xb24118) +#define NBL_PP1_ACTION_PRIORITY0_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY0_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY0_DWLEN (1) +union pp1_action_priority0_u { + struct pp1_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY1_ADDR (0xb2411c) +#define 
NBL_PP1_ACTION_PRIORITY1_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY1_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY1_DWLEN (1) +union pp1_action_priority1_u { + struct pp1_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY2_ADDR (0xb24120) +#define NBL_PP1_ACTION_PRIORITY2_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY2_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY2_DWLEN (1) +union pp1_action_priority2_u { + struct pp1_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY2_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY3_ADDR (0xb24124) +#define NBL_PP1_ACTION_PRIORITY3_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY3_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY3_DWLEN (1) +union pp1_action_priority3_u { + struct pp1_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY4_ADDR (0xb24128) +#define NBL_PP1_ACTION_PRIORITY4_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY4_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY4_DWLEN (1) +union pp1_action_priority4_u { + 
struct pp1_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY5_ADDR (0xb2412c) +#define NBL_PP1_ACTION_PRIORITY5_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY5_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY5_DWLEN (1) +union pp1_action_priority5_u { + struct pp1_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY6_ADDR (0xb24130) +#define NBL_PP1_ACTION_PRIORITY6_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY6_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY6_DWLEN (1) +union pp1_action_priority6_u { + struct pp1_action_priority6 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_PP1_ACTION_PRIORITY7_ADDR (0xb24134) +#define NBL_PP1_ACTION_PRIORITY7_DEPTH (1) +#define NBL_PP1_ACTION_PRIORITY7_WIDTH (32) +#define NBL_PP1_ACTION_PRIORITY7_DWLEN (1) +union 
pp1_action_priority7_u { + struct pp1_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_PP1_CPU_ACCESS_ADDR (0xb2416c) +#define NBL_PP1_CPU_ACCESS_DEPTH (1) +#define NBL_PP1_CPU_ACCESS_WIDTH (32) +#define NBL_PP1_CPU_ACCESS_DWLEN (1) +union pp1_cpu_access_u { + struct pp1_cpu_access { + u32 bp_th:10; /* [9:0] Default:0x34 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 timeout_th:10; /* [25:16] Default:0x100 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_CPU_ACCESS_DWLEN]; +} __packed; + +#define NBL_PP1_RDMA_BYPASS_ADDR (0xb24170) +#define NBL_PP1_RDMA_BYPASS_DEPTH (1) +#define NBL_PP1_RDMA_BYPASS_WIDTH (32) +#define NBL_PP1_RDMA_BYPASS_DWLEN (1) +union pp1_rdma_bypass_u { + struct pp1_rdma_bypass { + u32 rdma_flag_offset:5; /* [4:0] Default:0x0 RW */ + u32 dn_bypass_en:1; /* [5] Default:0x0 RW */ + u32 up_bypass_en:1; /* [6] Default:0x0 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 dir_flag_offset:5; /* [12:8] Default:0x0 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_RDMA_BYPASS_DWLEN]; +} __packed; + +#define NBL_PP1_INIT_START_ADDR (0xb241fc) +#define NBL_PP1_INIT_START_DEPTH (1) +#define NBL_PP1_INIT_START_WIDTH (32) +#define NBL_PP1_INIT_START_DWLEN (1) +union pp1_init_start_u { + struct pp1_init_start { + u32 en:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_INIT_START_DWLEN]; +} __packed; + +#define NBL_PP1_BP_SET_ADDR (0xb24200) +#define NBL_PP1_BP_SET_DEPTH (1) +#define NBL_PP1_BP_SET_WIDTH (32) +#define NBL_PP1_BP_SET_DWLEN (1) +union pp1_bp_set_u { + struct pp1_bp_set { + u32 pp_up:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_SET_DWLEN]; +} __packed; + +#define NBL_PP1_BP_MASK_ADDR (0xb24204) +#define NBL_PP1_BP_MASK_DEPTH (1) +#define NBL_PP1_BP_MASK_WIDTH (32) +#define NBL_PP1_BP_MASK_DWLEN (1) +union pp1_bp_mask_u { + struct pp1_bp_mask { + u32 dn_pp:1; /* [00:00] Default:0x0 RW */ + u32 fem_pp:1; /* [01:01] Default:0x0 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_MASK_DWLEN]; +} __packed; + +#define NBL_PP1_BP_STATE_ADDR (0xb24308) +#define NBL_PP1_BP_STATE_DEPTH (1) +#define NBL_PP1_BP_STATE_WIDTH (32) +#define NBL_PP1_BP_STATE_DWLEN (1) +union pp1_bp_state_u { + struct pp1_bp_state { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RO */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RO */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_STATE_DWLEN]; +} __packed; + +#define NBL_PP1_BP_HISTORY_ADDR (0xb2430c) +#define NBL_PP1_BP_HISTORY_DEPTH (1) +#define 
NBL_PP1_BP_HISTORY_DEPTH (1) +#define 
NBL_PP1_BP_HISTORY_WIDTH (32) +#define NBL_PP1_BP_HISTORY_DWLEN (1) +union pp1_bp_history_u { + struct pp1_bp_history { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RC */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RC */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_PP1_CFG_TEST_ADDR (0xb2442c) +#define NBL_PP1_CFG_TEST_DEPTH (1) +#define NBL_PP1_CFG_TEST_WIDTH (32) +#define NBL_PP1_CFG_TEST_DWLEN (1) +union pp1_cfg_test_u { + struct pp1_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION0_ADDR (0xb24430) +#define NBL_PP1_ABNORMAL_ACTION0_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION0_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION0_DWLEN (1) +union pp1_abnormal_action0_u { + struct pp1_abnormal_action0 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION0_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION1_ADDR (0xb24434) +#define NBL_PP1_ABNORMAL_ACTION1_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION1_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION1_DWLEN (1) +union pp1_abnormal_action1_u { + struct pp1_abnormal_action1 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION1_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION2_ADDR (0xb24438) +#define NBL_PP1_ABNORMAL_ACTION2_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION2_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION2_DWLEN (1) +union pp1_abnormal_action2_u { + struct pp1_abnormal_action2 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION2_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION3_ADDR (0xb2443c) +#define NBL_PP1_ABNORMAL_ACTION3_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION3_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION3_DWLEN (1) +union pp1_abnormal_action3_u { + struct pp1_abnormal_action3 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION3_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION4_ADDR (0xb24440) +#define NBL_PP1_ABNORMAL_ACTION4_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION4_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION4_DWLEN (1) +union pp1_abnormal_action4_u { + struct pp1_abnormal_action4 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION4_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION5_ADDR (0xb24444) +#define NBL_PP1_ABNORMAL_ACTION5_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION5_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION5_DWLEN (1) +union pp1_abnormal_action5_u { + struct pp1_abnormal_action5 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION5_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION6_ADDR (0xb24448) +#define NBL_PP1_ABNORMAL_ACTION6_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION6_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION6_DWLEN (1) +union pp1_abnormal_action6_u { + struct pp1_abnormal_action6 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } 
__packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION6_DWLEN]; +} __packed; + +#define NBL_PP1_ABNORMAL_ACTION7_ADDR (0xb2444c) +#define NBL_PP1_ABNORMAL_ACTION7_DEPTH (1) +#define NBL_PP1_ABNORMAL_ACTION7_WIDTH (32) +#define NBL_PP1_ABNORMAL_ACTION7_DWLEN (1) +union pp1_abnormal_action7_u { + struct pp1_abnormal_action7 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_ABNORMAL_ACTION7_DWLEN]; +} __packed; + +#define NBL_PP1_FWD_DPORT_ACTION_ADDR (0xb24450) +#define NBL_PP1_FWD_DPORT_ACTION_DEPTH (1) +#define NBL_PP1_FWD_DPORT_ACTION_WIDTH (32) +#define NBL_PP1_FWD_DPORT_ACTION_DWLEN (1) +union pp1_fwd_dport_action_u { + struct pp1_fwd_dport_action { + u32 action_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP1_FWD_DPORT_ACTION_DWLEN]; +} __packed; + +#define NBL_PP1_RDMA_VSI_BTM_ADDR (0xb24454) +#define NBL_PP1_RDMA_VSI_BTM_DEPTH (32) +#define NBL_PP1_RDMA_VSI_BTM_WIDTH (32) +#define NBL_PP1_RDMA_VSI_BTM_DWLEN (1) +union pp1_rdma_vsi_btm_u { + struct pp1_rdma_vsi_btm { + u32 btm:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP1_RDMA_VSI_BTM_DWLEN]; +} __packed; +#define NBL_PP1_RDMA_VSI_BTM_REG(r) (NBL_PP1_RDMA_VSI_BTM_ADDR + \ + (NBL_PP1_RDMA_VSI_BTM_DWLEN * 4) * (r)) + +#define NBL_PP1_KGEN_KEY_PRF_ADDR (0xb25000) +#define NBL_PP1_KGEN_KEY_PRF_DEPTH (16) +#define NBL_PP1_KGEN_KEY_PRF_WIDTH (512) +#define NBL_PP1_KGEN_KEY_PRF_DWLEN (16) +union pp1_kgen_key_prf_u { + struct pp1_kgen_key_prf { + u32 ext4_0_src:10; + u32 ext4_0_dst:7; + u32 ext4_1_src:10; + u32 ext4_1_dst:7; + u32 ext4_2_src:10; + u32 ext4_2_dst:7; + u32 ext4_3_src:10; + u32 ext4_3_dst:7; + u32 ext8_0_src:9; + u32 ext8_0_dst:6; + u32 ext8_1_src:9; + u32 ext8_1_dst:6; + u32 ext8_2_src:9; + u32 ext8_2_dst:6; + u32 ext8_3_src:9; + u32 ext8_3_dst:6; + u32 ext8_4_src:9; + u32 ext8_4_dst:6; + u32 ext8_5_src:9; + u32 ext8_5_dst:6; + u32 ext8_6_src:9; + u32 ext8_6_dst:6; + u32 ext8_7_src:9; + u32 ext8_7_dst:6; + u32 ext16_0_src:8; + u32 ext16_0_dst:5; + u32 ext16_1_src:8; + u32 ext16_1_dst:5; + u32 ext16_2_src:8; + u32 ext16_2_dst:5; + u32 ext16_3_src:8; + u32 ext16_3_dst:5; + u32 ext32_0_src:7; + u32 ext32_0_dst:4; + u32 ext32_1_src:7; + u32 ext32_1_dst:4; + u32 ext32_2_src:7; + u32 ext32_2_dst:4; + u32 ext32_3_src:7; + u32 ext32_3_dst:4; + u32 sp_2_en:1; + u32 sp_2_src_offset:3; + u32 sp_2_dst_offset:8; + u32 sp_4_en:1; + u32 sp_4_src_offset:2; + u32 sp_4_dst_offset:7; + u32 sp_8_en:1; + u32 sp_8_src_offset:1; + u32 sp_8_dst_offset:6; + u32 fwdact0_en:1; + u32 fwdact0_id:6; + u32 fwdact0_dst_offset:5; + u32 fwdact1_en:1; + u32 fwdact1_id:6; + u32 fwdact1_dst_offset:5; + u32 bts_en0:1; + u32 bts_data0:1; + u32 bts_des_offset0:9; + u32 bts_en1:1; + u32 bts_data1:1; + u32 bts_des_offset1:9; + u32 bts_en2:1; + u32 bts_data2:1; + u32 bts_des_offset2:9; + u32 bts_en3:1; + u32 bts_data3:1; + u32 bts_des_offset3:9; + u32 rsv1:2; + u32 rsv[4]; + } __packed info; + u32 data[NBL_PP1_KGEN_KEY_PRF_DWLEN]; +}; + +#define NBL_PP1_KGEN_KEY_PRF_REG(r) (NBL_PP1_KGEN_KEY_PRF_ADDR + \ + (NBL_PP1_KGEN_KEY_PRF_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h new file mode 100644 index 0000000000000000000000000000000000000000..71e98b61584f57302c67f5d532e3d73551b21d5f --- /dev/null +++ 
b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_pp2.h @@ -0,0 +1,614 @@ +// Code generated by interstellar. DO NOT EDIT. +// Compatible with leonis RTL tag 0710 + +#ifndef NBL_PP2_H +#define NBL_PP2_H 1 + +#include + +#define NBL_PP2_BASE (0x00B34000) + +#define NBL_PP2_INT_STATUS_ADDR (0xb34000) +#define NBL_PP2_INT_STATUS_DEPTH (1) +#define NBL_PP2_INT_STATUS_WIDTH (32) +#define NBL_PP2_INT_STATUS_DWLEN (1) +union pp2_int_status_u { + struct pp2_int_status { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RWC */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RWC */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RWC */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RWC */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RWC */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_INT_STATUS_DWLEN]; +} __packed; + +#define NBL_PP2_INT_MASK_ADDR (0xb34004) +#define NBL_PP2_INT_MASK_DEPTH (1) +#define NBL_PP2_INT_MASK_WIDTH (32) +#define NBL_PP2_INT_MASK_DWLEN (1) +union pp2_int_mask_u { + struct pp2_int_mask { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 RW */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 RW */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 RW */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 RW */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 RW */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_INT_MASK_DWLEN]; +} __packed; + +#define NBL_PP2_INT_SET_ADDR (0xb34008) +#define NBL_PP2_INT_SET_DEPTH (1) +#define NBL_PP2_INT_SET_WIDTH (32) +#define NBL_PP2_INT_SET_DWLEN (1) +union pp2_int_set_u { + struct pp2_int_set { + u32 rsv5:1; /* [00:00] Default:0x0 RO */ + u32 fifo_uflw_err:1; /* [01:01] Default:0x0 WO */ + u32 fifo_dflw_err:1; /* [02:02] Default:0x0 WO */ + u32 rsv4:1; /* [03:03] Default:0x0 RO */ + u32 cif_err:1; /* [04:04] Default:0x0 WO */ + u32 rsv3:1; /* [05:05] Default:0x0 RO */ + u32 cfg_err:1; /* [06:06] Default:0x0 WO */ + u32 data_ucor_err:1; /* [07:07] Default:0x0 WO */ + u32 rsv2:1; /* [08:08] Default:0x0 RO */ + u32 rsv1:1; /* [09:09] Default:0x0 RO */ + u32 rsv:22; /* [31:10] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_INT_SET_DWLEN]; +} __packed; + +#define NBL_PP2_INIT_DONE_ADDR (0xb3400c) +#define NBL_PP2_INIT_DONE_DEPTH (1) +#define NBL_PP2_INIT_DONE_WIDTH (32) +#define NBL_PP2_INIT_DONE_DWLEN (1) +union pp2_init_done_u { + struct pp2_init_done { + u32 done:1; /* [00:00] Default:0x0 RO */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_PP2_CFG_ERR_INFO_ADDR (0xb34038) +#define NBL_PP2_CFG_ERR_INFO_DEPTH (1) +#define NBL_PP2_CFG_ERR_INFO_WIDTH (32) +#define NBL_PP2_CFG_ERR_INFO_DWLEN (1) +union pp2_cfg_err_info_u { + struct pp2_cfg_err_info { + u32 id:1; /* [0:0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP2_CIF_ERR_INFO_ADDR (0xb34040) +#define NBL_PP2_CIF_ERR_INFO_DEPTH (1) +#define NBL_PP2_CIF_ERR_INFO_WIDTH (32) +#define NBL_PP2_CIF_ERR_INFO_DWLEN (1) +union pp2_cif_err_info_u { + 
struct pp2_cif_err_info { + u32 addr:30; /* [29:00] Default:0x0 RO */ + u32 wr_err:1; /* [30:30] Default:0x0 RO */ + u32 ucor_err:1; /* [31:31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_PP2_CAR_CTRL_ADDR (0xb34100) +#define NBL_PP2_CAR_CTRL_DEPTH (1) +#define NBL_PP2_CAR_CTRL_WIDTH (32) +#define NBL_PP2_CAR_CTRL_DWLEN (1) +union pp2_car_ctrl_u { + struct pp2_car_ctrl { + u32 sctr_car:1; /* [00:00] Default:0x1 RW */ + u32 rctr_car:1; /* [01:01] Default:0x1 RW */ + u32 rc_car:1; /* [02:02] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [03:03] Default:0x1 RW */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_PP2_MODE_ADDR (0xb34104) +#define NBL_PP2_MODE_DEPTH (1) +#define NBL_PP2_MODE_WIDTH (32) +#define NBL_PP2_MODE_DWLEN (1) +union pp2_mode_u { + struct pp2_mode { + u32 bypass:1; /* [0] Default:0x0 RW */ + u32 internal_loopback_en:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_MODE_DWLEN]; +} __packed; + +#define NBL_PP2_SET_FLAGS0_ADDR (0xb34108) +#define NBL_PP2_SET_FLAGS0_DEPTH (1) +#define NBL_PP2_SET_FLAGS0_WIDTH (32) +#define NBL_PP2_SET_FLAGS0_DWLEN (1) +union pp2_set_flags0_u { + struct pp2_set_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_SET_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP2_SET_FLAGS1_ADDR (0xb3410c) +#define NBL_PP2_SET_FLAGS1_DEPTH (1) +#define NBL_PP2_SET_FLAGS1_WIDTH (32) +#define NBL_PP2_SET_FLAGS1_DWLEN (1) +union pp2_set_flags1_u { + struct pp2_set_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_SET_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP2_CLEAR_FLAGS0_ADDR (0xb34110) +#define NBL_PP2_CLEAR_FLAGS0_DEPTH (1) +#define NBL_PP2_CLEAR_FLAGS0_WIDTH (32) +#define NBL_PP2_CLEAR_FLAGS0_DWLEN (1) +union pp2_clear_flags0_u { + struct pp2_clear_flags0 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_CLEAR_FLAGS0_DWLEN]; +} __packed; + +#define NBL_PP2_CLEAR_FLAGS1_ADDR (0xb34114) +#define NBL_PP2_CLEAR_FLAGS1_DEPTH (1) +#define NBL_PP2_CLEAR_FLAGS1_WIDTH (32) +#define NBL_PP2_CLEAR_FLAGS1_DWLEN (1) +union pp2_clear_flags1_u { + struct pp2_clear_flags1 { + u32 data:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_CLEAR_FLAGS1_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY0_ADDR (0xb34118) +#define NBL_PP2_ACTION_PRIORITY0_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY0_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY0_DWLEN (1) +union pp2_action_priority0_u { + struct pp2_action_priority0 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } 
__packed info; + u32 data[NBL_PP2_ACTION_PRIORITY0_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY1_ADDR (0xb3411c) +#define NBL_PP2_ACTION_PRIORITY1_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY1_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY1_DWLEN (1) +union pp2_action_priority1_u { + struct pp2_action_priority1 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY1_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY2_ADDR (0xb34120) +#define NBL_PP2_ACTION_PRIORITY2_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY2_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY2_DWLEN (1) +union pp2_action_priority2_u { + struct pp2_action_priority2 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY2_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY3_ADDR (0xb34124) +#define NBL_PP2_ACTION_PRIORITY3_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY3_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY3_DWLEN (1) +union pp2_action_priority3_u { + struct pp2_action_priority3 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY3_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY4_ADDR (0xb34128) +#define 
NBL_PP2_ACTION_PRIORITY4_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY4_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY4_DWLEN (1) +union pp2_action_priority4_u { + struct pp2_action_priority4 { + u32 action_id3_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id4_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id5_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id6_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id7_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id8_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id9_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id10_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id11_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id12_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id13_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id14_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id15_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id16_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id17_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id18_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY4_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY5_ADDR (0xb3412c) +#define NBL_PP2_ACTION_PRIORITY5_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY5_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY5_DWLEN (1) +union pp2_action_priority5_u { + struct pp2_action_priority5 { + u32 action_id19_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id20_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id21_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id22_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id23_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id24_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id25_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id26_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id27_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id28_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id29_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id30_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id31_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id32_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id33_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id34_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY5_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY6_ADDR (0xb34130) +#define NBL_PP2_ACTION_PRIORITY6_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY6_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY6_DWLEN (1) +union pp2_action_priority6_u { + struct pp2_action_priority6 { + u32 action_id35_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id36_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id37_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id38_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id39_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id40_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id41_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id42_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id43_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id44_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id45_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id46_pri:2; /* [23:22] Default:0x0 RW */ + u32 action_id47_pri:2; /* [25:24] Default:0x0 RW */ + u32 action_id48_pri:2; /* [27:26] Default:0x0 RW */ + u32 action_id49_pri:2; /* [29:28] Default:0x0 RW */ + u32 action_id50_pri:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY6_DWLEN]; +} __packed; + +#define NBL_PP2_ACTION_PRIORITY7_ADDR (0xb34134) 
+#define NBL_PP2_ACTION_PRIORITY7_DEPTH (1) +#define NBL_PP2_ACTION_PRIORITY7_WIDTH (32) +#define NBL_PP2_ACTION_PRIORITY7_DWLEN (1) +union pp2_action_priority7_u { + struct pp2_action_priority7 { + u32 action_id51_pri:2; /* [01:00] Default:0x0 RW */ + u32 action_id52_pri:2; /* [03:02] Default:0x0 RW */ + u32 action_id53_pri:2; /* [05:04] Default:0x0 RW */ + u32 action_id54_pri:2; /* [07:06] Default:0x0 RW */ + u32 action_id55_pri:2; /* [09:08] Default:0x0 RW */ + u32 action_id56_pri:2; /* [11:10] Default:0x0 RW */ + u32 action_id57_pri:2; /* [13:12] Default:0x0 RW */ + u32 action_id58_pri:2; /* [15:14] Default:0x0 RW */ + u32 action_id59_pri:2; /* [17:16] Default:0x0 RW */ + u32 action_id60_pri:2; /* [19:18] Default:0x0 RW */ + u32 action_id61_pri:2; /* [21:20] Default:0x0 RW */ + u32 action_id62_pri:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ACTION_PRIORITY7_DWLEN]; +} __packed; + +#define NBL_PP2_CPU_ACCESS_ADDR (0xb3416c) +#define NBL_PP2_CPU_ACCESS_DEPTH (1) +#define NBL_PP2_CPU_ACCESS_WIDTH (32) +#define NBL_PP2_CPU_ACCESS_DWLEN (1) +union pp2_cpu_access_u { + struct pp2_cpu_access { + u32 bp_th:10; /* [9:0] Default:0x34 RW */ + u32 rsv1:6; /* [15:10] Default:0x0 RO */ + u32 timeout_th:10; /* [25:16] Default:0x100 RW */ + u32 rsv:6; /* [31:26] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_CPU_ACCESS_DWLEN]; +} __packed; + +#define NBL_PP2_RDMA_BYPASS_ADDR (0xb34170) +#define NBL_PP2_RDMA_BYPASS_DEPTH (1) +#define NBL_PP2_RDMA_BYPASS_WIDTH (32) +#define NBL_PP2_RDMA_BYPASS_DWLEN (1) +union pp2_rdma_bypass_u { + struct pp2_rdma_bypass { + u32 rdma_flag_offset:5; /* [4:0] Default:0x0 RW */ + u32 dn_bypass_en:1; /* [5] Default:0x0 RW */ + u32 up_bypass_en:1; /* [6] Default:0x0 RW */ + u32 rsv1:1; /* [7] Default:0x0 RO */ + u32 dir_flag_offset:5; /* [12:8] Default:0x0 RW */ + u32 rsv:19; /* [31:13] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_RDMA_BYPASS_DWLEN]; +} __packed; + +#define NBL_PP2_INIT_START_ADDR (0xb341fc) +#define NBL_PP2_INIT_START_DEPTH (1) +#define NBL_PP2_INIT_START_WIDTH (32) +#define NBL_PP2_INIT_START_DWLEN (1) +union pp2_init_start_u { + struct pp2_init_start { + u32 en:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_INIT_START_DWLEN]; +} __packed; + +#define NBL_PP2_BP_SET_ADDR (0xb34200) +#define NBL_PP2_BP_SET_DEPTH (1) +#define NBL_PP2_BP_SET_WIDTH (32) +#define NBL_PP2_BP_SET_DWLEN (1) +union pp2_bp_set_u { + struct pp2_bp_set { + u32 pp_up:1; /* [00:00] Default:0x0 RW */ + u32 rsv:31; /* [31:01] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_BP_SET_DWLEN]; +} __packed; + +#define NBL_PP2_BP_MASK_ADDR (0xb34204) +#define NBL_PP2_BP_MASK_DEPTH (1) +#define NBL_PP2_BP_MASK_WIDTH (32) +#define NBL_PP2_BP_MASK_DWLEN (1) +union pp2_bp_mask_u { + struct pp2_bp_mask { + u32 dn_pp:1; /* [00:00] Default:0x0 RW */ + u32 fem_pp:1; /* [01:01] Default:0x0 RW */ + u32 rsv:30; /* [31:02] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_BP_MASK_DWLEN]; +} __packed; + +#define NBL_PP2_BP_STATE_ADDR (0xb34308) +#define NBL_PP2_BP_STATE_DEPTH (1) +#define NBL_PP2_BP_STATE_WIDTH (32) +#define NBL_PP2_BP_STATE_DWLEN (1) +union pp2_bp_state_u { + struct pp2_bp_state { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RO */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RO */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RO */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RO */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + 
u32 data[NBL_PP2_BP_STATE_DWLEN]; +} __packed; + +#define NBL_PP2_BP_HISTORY_ADDR (0xb3430c) +#define NBL_PP2_BP_HISTORY_DEPTH (1) +#define NBL_PP2_BP_HISTORY_WIDTH (32) +#define NBL_PP2_BP_HISTORY_DWLEN (1) +union pp2_bp_history_u { + struct pp2_bp_history { + u32 dn_pp_bp:1; /* [00:00] Default:0x0 RC */ + u32 fem_pp_bp:1; /* [01:01] Default:0x0 RC */ + u32 pp_up_bp:1; /* [02:02] Default:0x0 RC */ + u32 inter_pp_bp:1; /* [03:03] Default:0x0 RC */ + u32 rsv:28; /* [31:04] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_BP_HISTORY_DWLEN]; +} __packed; + +#define NBL_PP2_CFG_TEST_ADDR (0xb3442c) +#define NBL_PP2_CFG_TEST_DEPTH (1) +#define NBL_PP2_CFG_TEST_WIDTH (32) +#define NBL_PP2_CFG_TEST_DWLEN (1) +union pp2_cfg_test_u { + struct pp2_cfg_test { + u32 test:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_CFG_TEST_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION0_ADDR (0xb34430) +#define NBL_PP2_ABNORMAL_ACTION0_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION0_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION0_DWLEN (1) +union pp2_abnormal_action0_u { + struct pp2_abnormal_action0 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION0_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION1_ADDR (0xb34434) +#define NBL_PP2_ABNORMAL_ACTION1_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION1_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION1_DWLEN (1) +union pp2_abnormal_action1_u { + struct pp2_abnormal_action1 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION1_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION2_ADDR (0xb34438) +#define NBL_PP2_ABNORMAL_ACTION2_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION2_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION2_DWLEN (1) +union pp2_abnormal_action2_u { + struct pp2_abnormal_action2 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION2_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION3_ADDR (0xb3443c) +#define NBL_PP2_ABNORMAL_ACTION3_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION3_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION3_DWLEN (1) +union pp2_abnormal_action3_u { + struct pp2_abnormal_action3 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION3_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION4_ADDR (0xb34440) +#define NBL_PP2_ABNORMAL_ACTION4_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION4_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION4_DWLEN (1) +union pp2_abnormal_action4_u { + struct pp2_abnormal_action4 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION4_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION5_ADDR (0xb34444) +#define NBL_PP2_ABNORMAL_ACTION5_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION5_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION5_DWLEN (1) +union pp2_abnormal_action5_u { + struct pp2_abnormal_action5 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION5_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION6_ADDR (0xb34448) +#define NBL_PP2_ABNORMAL_ACTION6_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION6_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION6_DWLEN (1) +union 
pp2_abnormal_action6_u { + struct pp2_abnormal_action6 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION6_DWLEN]; +} __packed; + +#define NBL_PP2_ABNORMAL_ACTION7_ADDR (0xb3444c) +#define NBL_PP2_ABNORMAL_ACTION7_DEPTH (1) +#define NBL_PP2_ABNORMAL_ACTION7_WIDTH (32) +#define NBL_PP2_ABNORMAL_ACTION7_DWLEN (1) +union pp2_abnormal_action7_u { + struct pp2_abnormal_action7 { + u32 data:22; /* [21:00] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_ABNORMAL_ACTION7_DWLEN]; +} __packed; + +#define NBL_PP2_FWD_DPORT_ACTION_ADDR (0xb34450) +#define NBL_PP2_FWD_DPORT_ACTION_DEPTH (1) +#define NBL_PP2_FWD_DPORT_ACTION_WIDTH (32) +#define NBL_PP2_FWD_DPORT_ACTION_DWLEN (1) +union pp2_fwd_dport_action_u { + struct pp2_fwd_dport_action { + u32 action_id:6; /* [05:00] Default:0x9 RW */ + u32 rsv:26; /* [31:06] Default:0x0 RO */ + } __packed info; + u32 data[NBL_PP2_FWD_DPORT_ACTION_DWLEN]; +} __packed; + +#define NBL_PP2_RDMA_VSI_BTM_ADDR (0xb34454) +#define NBL_PP2_RDMA_VSI_BTM_DEPTH (32) +#define NBL_PP2_RDMA_VSI_BTM_WIDTH (32) +#define NBL_PP2_RDMA_VSI_BTM_DWLEN (1) +union pp2_rdma_vsi_btm_u { + struct pp2_rdma_vsi_btm { + u32 btm:32; /* [31:00] Default:0x0 RW */ + } __packed info; + u32 data[NBL_PP2_RDMA_VSI_BTM_DWLEN]; +} __packed; +#define NBL_PP2_RDMA_VSI_BTM_REG(r) (NBL_PP2_RDMA_VSI_BTM_ADDR + \ + (NBL_PP2_RDMA_VSI_BTM_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_uprbac.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_uprbac.h new file mode 100644 index 0000000000000000000000000000000000000000..13e8a6aa23923ef1ad930d9676e809bef35eb0cf --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/base/nbl_ppe_uprbac.h @@ -0,0 +1,849 @@ +// Code generated by interstellar. DO NOT EDIT. 
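+/*
+ * Conventions used by the generated blocks in this file: *_ADDR is the byte
+ * offset of entry 0, *_DEPTH the number of entries, *_WIDTH the entry width
+ * in bits, *_DWLEN the entry width in 32-bit dwords, and *_REG(r) the byte
+ * offset of entry r, i.e. ADDR + DWLEN * 4 * r.  Each union overlays a
+ * bit-field view (info) on a raw dword view (data), so an entry can be
+ * composed field by field and transferred as dwords.  A minimal
+ * read-modify-write sketch, assuming hypothetical rd32()/wr32() register
+ * helpers that are not defined in this header:
+ *
+ *	union uprbac_mode_u mode;
+ *
+ *	mode.data[0] = rd32(hw, NBL_UPRBAC_MODE_ADDR);
+ *	mode.info.work_mode = 0;
+ *	wr32(hw, NBL_UPRBAC_MODE_ADDR, mode.data[0]);
+ */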
+// Compatible with leonis RTL tag 0710
+
+#ifndef NBL_UPRBAC_H
+#define NBL_UPRBAC_H 1
+
+#include <linux/types.h>
+
+#define NBL_UPRBAC_BASE (0x0000C000)
+
+#define NBL_UPRBAC_INT_STATUS_ADDR (0xc000)
+#define NBL_UPRBAC_INT_STATUS_DEPTH (1)
+#define NBL_UPRBAC_INT_STATUS_WIDTH (32)
+#define NBL_UPRBAC_INT_STATUS_DWLEN (1)
+union uprbac_int_status_u {
+	struct uprbac_int_status {
+		u32 fatal_err:1; /* [0] Default:0x0 RWC */
+		u32 fifo_underflow:1; /* [1] Default:0x0 RWC */
+		u32 fifo_overflow:1; /* [2] Default:0x0 RWC */
+		u32 cif_err:1; /* [3] Default:0x0 RWC */
+		u32 cfg_err:1; /* [4] Default:0x0 RWC */
+		u32 ucor_err:1; /* [5] Default:0x0 RWC */
+		u32 cor_err:1; /* [6] Default:0x0 RWC */
+		u32 soft_lifetime:1; /* [7] Default:0x0 RWC */
+		u32 hard_lifetime:1; /* [8] Default:0x0 RWC */
+		u32 esn_replay:1; /* [9] Default:0x0 RWC */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPRBAC_INT_STATUS_DWLEN];
+} __packed;
+
+#define NBL_UPRBAC_INT_MASK_ADDR (0xc004)
+#define NBL_UPRBAC_INT_MASK_DEPTH (1)
+#define NBL_UPRBAC_INT_MASK_WIDTH (32)
+#define NBL_UPRBAC_INT_MASK_DWLEN (1)
+union uprbac_int_mask_u {
+	struct uprbac_int_mask {
+		u32 fatal_err:1; /* [0] Default:0x0 RW */
+		u32 fifo_underflow:1; /* [1] Default:0x0 RW */
+		u32 fifo_overflow:1; /* [2] Default:0x0 RW */
+		u32 cif_err:1; /* [3] Default:0x0 RW */
+		u32 cfg_err:1; /* [4] Default:0x0 RW */
+		u32 ucor_err:1; /* [5] Default:0x0 RW */
+		u32 cor_err:1; /* [6] Default:0x0 RW */
+		u32 soft_lifetime:1; /* [7] Default:0x0 RW */
+		u32 hard_lifetime:1; /* [8] Default:0x0 RW */
+		u32 esn_replay:1; /* [9] Default:0x0 RW */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPRBAC_INT_MASK_DWLEN];
+} __packed;
+
+#define NBL_UPRBAC_INT_SET_ADDR (0xc008)
+#define NBL_UPRBAC_INT_SET_DEPTH (1)
+#define NBL_UPRBAC_INT_SET_WIDTH (32)
+#define NBL_UPRBAC_INT_SET_DWLEN (1)
+union uprbac_int_set_u {
+	struct uprbac_int_set {
+		u32 fatal_err:1; /* [0] Default:0x0 WO */
+		u32 fifo_underflow:1; /* [1] Default:0x0 WO */
+		u32 fifo_overflow:1; /* [2] Default:0x0 WO */
+		u32 cif_err:1; /* [3] Default:0x0 WO */
+		u32 cfg_err:1; /* [4] Default:0x0 WO */
+		u32 ucor_err:1; /* [5] Default:0x0 WO */
+		u32 cor_err:1; /* [6] Default:0x0 WO */
+		u32 soft_lifetime:1; /* [7] Default:0x0 WO */
+		u32 hard_lifetime:1; /* [8] Default:0x0 WO */
+		u32 esn_replay:1; /* [9] Default:0x0 WO */
+		u32 rsv:22; /* [31:10] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPRBAC_INT_SET_DWLEN];
+} __packed;
+
+#define NBL_UPRBAC_LIFETIME_INFO_ADDR (0xc014)
+#define NBL_UPRBAC_LIFETIME_INFO_DEPTH (1)
+#define NBL_UPRBAC_LIFETIME_INFO_WIDTH (32)
+#define NBL_UPRBAC_LIFETIME_INFO_DWLEN (1)
+union uprbac_lifetime_info_u {
+	struct uprbac_lifetime_info {
+		u32 soft_sad_index:11; /* [10:0] Default:0x0 RO */
+		u32 rsv1:5; /* [15:11] Default:0x0 RO */
+		u32 hard_sad_index:11; /* [26:16] Default:0x0 RO */
+		u32 rsv:5; /* [31:27] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPRBAC_LIFETIME_INFO_DWLEN];
+} __packed;
+
+#define NBL_UPRBAC_UCOR_ERR_INFO_ADDR (0xc024)
+#define NBL_UPRBAC_UCOR_ERR_INFO_DEPTH (1)
+#define NBL_UPRBAC_UCOR_ERR_INFO_WIDTH (32)
+#define NBL_UPRBAC_UCOR_ERR_INFO_DWLEN (1)
+union uprbac_ucor_err_info_u {
+	struct uprbac_ucor_err_info {
+		u32 addr:16; /* [15:0] Default:0x0 RO */
+		u32 id:6; /* [21:16] Default:0x0 RO */
+		u32 rsv:10; /* [31:22] Default:0x0 RO */
+	} __packed info;
+	u32 data[NBL_UPRBAC_UCOR_ERR_INFO_DWLEN];
+} __packed;
+
+#define NBL_UPRBAC_COR_ERR_INFO_ADDR (0xc02c)
+#define
NBL_UPRBAC_COR_ERR_INFO_DEPTH (1) +#define NBL_UPRBAC_COR_ERR_INFO_WIDTH (32) +#define NBL_UPRBAC_COR_ERR_INFO_DWLEN (1) +union uprbac_cor_err_info_u { + struct uprbac_cor_err_info { + u32 addr:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_COR_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPRBAC_CFG_ERR_INFO_ADDR (0xc034) +#define NBL_UPRBAC_CFG_ERR_INFO_DEPTH (1) +#define NBL_UPRBAC_CFG_ERR_INFO_WIDTH (32) +#define NBL_UPRBAC_CFG_ERR_INFO_DWLEN (1) +union uprbac_cfg_err_info_u { + struct uprbac_cfg_err_info { + u32 addr:16; /* [15:0] Default:0x0 RO */ + u32 id:6; /* [21:16] Default:0x0 RO */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_CFG_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPRBAC_CIF_ERR_INFO_ADDR (0xc040) +#define NBL_UPRBAC_CIF_ERR_INFO_DEPTH (1) +#define NBL_UPRBAC_CIF_ERR_INFO_WIDTH (32) +#define NBL_UPRBAC_CIF_ERR_INFO_DWLEN (1) +union uprbac_cif_err_info_u { + struct uprbac_cif_err_info { + u32 addr:30; /* [29:0] Default:0x0 RO */ + u32 wr_err:1; /* [30] Default:0x0 RO */ + u32 ucor_err:1; /* [31] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_CIF_ERR_INFO_DWLEN]; +} __packed; + +#define NBL_UPRBAC_CAR_CTRL_ADDR (0xc100) +#define NBL_UPRBAC_CAR_CTRL_DEPTH (1) +#define NBL_UPRBAC_CAR_CTRL_WIDTH (32) +#define NBL_UPRBAC_CAR_CTRL_DWLEN (1) +union uprbac_car_ctrl_u { + struct uprbac_car_ctrl { + u32 sctr_car:1; /* [0] Default:0x1 RW */ + u32 rctr_car:1; /* [1] Default:0x1 RW */ + u32 rc_car:1; /* [2] Default:0x1 RW */ + u32 tbl_rc_car:1; /* [3] Default:0x1 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_CAR_CTRL_DWLEN]; +} __packed; + +#define NBL_UPRBAC_MODE_ADDR (0xc110) +#define NBL_UPRBAC_MODE_DEPTH (1) +#define NBL_UPRBAC_MODE_WIDTH (32) +#define NBL_UPRBAC_MODE_DWLEN (1) +union uprbac_mode_u { + struct uprbac_mode { + u32 work_mode:1; /* [0] Default:0x1 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_MODE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ENABLE_ADDR (0xc114) +#define NBL_UPRBAC_ENABLE_DEPTH (1) +#define NBL_UPRBAC_ENABLE_WIDTH (32) +#define NBL_UPRBAC_ENABLE_DWLEN (1) +union uprbac_enable_u { + struct uprbac_enable { + u32 prbac:1; /* [0] Default:0x0 RW */ + u32 padding_check:1; /* [1] Default:0x1 RW */ + u32 pa_am:1; /* [2] Default:0x0 RW */ + u32 dm_am:1; /* [3] Default:0x0 RW */ + u32 icv_err:1; /* [4] Default:0x1 RW */ + u32 pad_err:1; /* [5] Default:0x1 RW */ + u32 ipv6_nat_csm0:1; /* [6] Default:0x0 RW */ + u32 rsv:25; /* [31:7] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_ENABLE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_CLK_GATE_ADDR (0xc118) +#define NBL_UPRBAC_CLK_GATE_DEPTH (1) +#define NBL_UPRBAC_CLK_GATE_WIDTH (32) +#define NBL_UPRBAC_CLK_GATE_DWLEN (1) +union uprbac_clk_gate_u { + struct uprbac_clk_gate { + u32 clk_en:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_CLK_GATE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_INIT_START_ADDR (0xc124) +#define NBL_UPRBAC_INIT_START_DEPTH (1) +#define NBL_UPRBAC_INIT_START_WIDTH (32) +#define NBL_UPRBAC_INIT_START_DWLEN (1) +union uprbac_init_start_u { + struct uprbac_init_start { + u32 start:1; /* [0] Default:0x0 WO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_INIT_START_DWLEN]; +} __packed; + +#define NBL_UPRBAC_INIT_DONE_ADDR (0xc128) +#define 
NBL_UPRBAC_INIT_DONE_DEPTH (1) +#define NBL_UPRBAC_INIT_DONE_WIDTH (32) +#define NBL_UPRBAC_INIT_DONE_DWLEN (1) +union uprbac_init_done_u { + struct uprbac_init_done { + u32 done:1; /* [0] Default:0x0 RO */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_INIT_DONE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_NAT_ADDR (0xc12c) +#define NBL_UPRBAC_NAT_DEPTH (1) +#define NBL_UPRBAC_NAT_WIDTH (32) +#define NBL_UPRBAC_NAT_DWLEN (1) +union uprbac_nat_u { + struct uprbac_nat { + u32 enable:1; /* [0] Default:0x0 RW */ + u32 rsv:15; /* [15:1] Default:0x0 RO */ + u32 dport:16; /* [31:16] Default:4500 RW */ + } __packed info; + u32 data[NBL_UPRBAC_NAT_DWLEN]; +} __packed; + +#define NBL_UPRBAC_VLAN_TYPE0_ADDR (0xc130) +#define NBL_UPRBAC_VLAN_TYPE0_DEPTH (1) +#define NBL_UPRBAC_VLAN_TYPE0_WIDTH (32) +#define NBL_UPRBAC_VLAN_TYPE0_DWLEN (1) +union uprbac_vlan_type0_u { + struct uprbac_vlan_type0 { + u32 tpid0:16; /* [15:0] Default:0x88A8 RW */ + u32 tpid1:16; /* [31:16] Default:0x9100 RW */ + } __packed info; + u32 data[NBL_UPRBAC_VLAN_TYPE0_DWLEN]; +} __packed; + +#define NBL_UPRBAC_VLAN_TYPE1_ADDR (0xc134) +#define NBL_UPRBAC_VLAN_TYPE1_DEPTH (1) +#define NBL_UPRBAC_VLAN_TYPE1_WIDTH (32) +#define NBL_UPRBAC_VLAN_TYPE1_DWLEN (1) +union uprbac_vlan_type1_u { + struct uprbac_vlan_type1 { + u32 tpid2:16; /* [15:0] Default:0x0 RW */ + u32 tpid3:16; /* [31:16] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_VLAN_TYPE1_DWLEN]; +} __packed; + +#define NBL_UPRBAC_VLAN_ENABLE_ADDR (0xc140) +#define NBL_UPRBAC_VLAN_ENABLE_DEPTH (1) +#define NBL_UPRBAC_VLAN_ENABLE_WIDTH (32) +#define NBL_UPRBAC_VLAN_ENABLE_DWLEN (1) +union uprbac_vlan_enable_u { + struct uprbac_vlan_enable { + u32 tpid0:1; /* [0] Default:0x1 RW */ + u32 tpid1:1; /* [1] Default:0x1 RW */ + u32 tpid2:1; /* [2] Default:0x0 RW */ + u32 tpid3:1; /* [3] Default:0x0 RW */ + u32 rsv:28; /* [31:4] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_VLAN_ENABLE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_DROP_ADDR (0xc180) +#define NBL_UPRBAC_DROP_DEPTH (1) +#define NBL_UPRBAC_DROP_WIDTH (32) +#define NBL_UPRBAC_DROP_DWLEN (1) +union uprbac_drop_u { + struct uprbac_drop { + u32 prbac_bp:1; /* [0] Default:0x1 RW */ + u32 prbac_err:1; /* [1] Default:0x1 RW */ + u32 prbac_replay:1; /* [2] Default:0x1 RW */ + u32 md_drop:1; /* [3] Default:0x1 RW */ + u32 md_errcode:1; /* [4] Default:0x1 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_DROP_DWLEN]; +} __packed; + +#define NBL_UPRBAC_WRR_WT_ADDR (0xc200) +#define NBL_UPRBAC_WRR_WT_DEPTH (1) +#define NBL_UPRBAC_WRR_WT_WIDTH (32) +#define NBL_UPRBAC_WRR_WT_DWLEN (1) +union uprbac_wrr_wt_u { + struct uprbac_wrr_wt { + u32 eth0:4; /* [3:0] Default:0x1 RW */ + u32 eth1:4; /* [7:4] Default:0x1 RW */ + u32 eth2:4; /* [11:8] Default:0x1 RW */ + u32 eth3:4; /* [15:12] Default:0x1 RW */ + u32 normal:8; /* [23:16] Default:12 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_WRR_WT_DWLEN]; +} __packed; + +#define NBL_UPRBAC_SAD_LIFETIME_ADDR (0xc204) +#define NBL_UPRBAC_SAD_LIFETIME_DEPTH (1) +#define NBL_UPRBAC_SAD_LIFETIME_WIDTH (32) +#define NBL_UPRBAC_SAD_LIFETIME_DWLEN (1) +union uprbac_sad_lifetime_u { + struct uprbac_sad_lifetime { + u32 sad_index:11; /* [10:0] Default:0x0 RW */ + u32 rsv2:5; /* [15:11] Default:0x0 RW */ + u32 msb_value:1; /* [16] Default:0x1 RW */ + u32 flag_value:1; /* [17] Default:0x1 RW */ + u32 rsv1:2; /* [19:18] Default:0x0 RO */ + u32 msb_wen:1; /* [20] Default:0x0 
RWW */ + u32 flag_wen:1; /* [21] Default:0x0 RWW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_SAD_LIFETIME_DWLEN]; +} __packed; + +#define NBL_UPRBAC_LIFETIME_DIFF_ADDR (0xc208) +#define NBL_UPRBAC_LIFETIME_DIFF_DEPTH (1) +#define NBL_UPRBAC_LIFETIME_DIFF_WIDTH (32) +#define NBL_UPRBAC_LIFETIME_DIFF_DWLEN (1) +union uprbac_lifetime_diff_u { + struct uprbac_lifetime_diff { + u32 value:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_LIFETIME_DIFF_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ERRCODE_ADDR (0xc20c) +#define NBL_UPRBAC_ERRCODE_DEPTH (1) +#define NBL_UPRBAC_ERRCODE_WIDTH (32) +#define NBL_UPRBAC_ERRCODE_DWLEN (1) +union uprbac_errcode_u { + struct uprbac_errcode { + u32 icv_err:4; /* [3:0] Default:6 RW */ + u32 pad_err:4; /* [7:4] Default:7 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_ERRCODE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_PADDING_DAT0_ADDR (0xc300) +#define NBL_UPRBAC_PADDING_DAT0_DEPTH (1) +#define NBL_UPRBAC_PADDING_DAT0_WIDTH (32) +#define NBL_UPRBAC_PADDING_DAT0_DWLEN (1) +union uprbac_padding_dat0_u { + struct uprbac_padding_dat0 { + u32 data:32; /* [31:0] Default:0x01020304 RW */ + } __packed info; + u32 data[NBL_UPRBAC_PADDING_DAT0_DWLEN]; +} __packed; + +#define NBL_UPRBAC_PADDING_DAT1_ADDR (0xc304) +#define NBL_UPRBAC_PADDING_DAT1_DEPTH (1) +#define NBL_UPRBAC_PADDING_DAT1_WIDTH (32) +#define NBL_UPRBAC_PADDING_DAT1_DWLEN (1) +union uprbac_padding_dat1_u { + struct uprbac_padding_dat1 { + u32 data:32; /* [31:0] Default:0x05060708 RW */ + } __packed info; + u32 data[NBL_UPRBAC_PADDING_DAT1_DWLEN]; +} __packed; + +#define NBL_UPRBAC_PADDING_DAT2_ADDR (0xc308) +#define NBL_UPRBAC_PADDING_DAT2_DEPTH (1) +#define NBL_UPRBAC_PADDING_DAT2_WIDTH (32) +#define NBL_UPRBAC_PADDING_DAT2_DWLEN (1) +union uprbac_padding_dat2_u { + struct uprbac_padding_dat2 { + u32 data:32; /* [31:0] Default:0x090a0b0c RW */ + } __packed info; + u32 data[NBL_UPRBAC_PADDING_DAT2_DWLEN]; +} __packed; + +#define NBL_UPRBAC_PADDING_DAT3_ADDR (0xc30c) +#define NBL_UPRBAC_PADDING_DAT3_DEPTH (1) +#define NBL_UPRBAC_PADDING_DAT3_WIDTH (32) +#define NBL_UPRBAC_PADDING_DAT3_DWLEN (1) +union uprbac_padding_dat3_u { + struct uprbac_padding_dat3 { + u32 data:32; /* [31:0] Default:0x0d0e0f10 RW */ + } __packed info; + u32 data[NBL_UPRBAC_PADDING_DAT3_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ACTION_PRI0_ADDR (0xc310) +#define NBL_UPRBAC_ACTION_PRI0_DEPTH (1) +#define NBL_UPRBAC_ACTION_PRI0_WIDTH (32) +#define NBL_UPRBAC_ACTION_PRI0_DWLEN (1) +union uprbac_action_pri0_u { + struct uprbac_action_pri0 { + u32 id3:2; /* [1:0] Default:0x0 RW */ + u32 id4:2; /* [3:2] Default:0x0 RW */ + u32 id5:2; /* [5:4] Default:0x0 RW */ + u32 id6:2; /* [7:6] Default:0x0 RW */ + u32 id7:2; /* [9:8] Default:0x0 RW */ + u32 id8:2; /* [11:10] Default:0x0 RW */ + u32 id9:2; /* [13:12] Default:0x0 RW */ + u32 id10:2; /* [15:14] Default:0x0 RW */ + u32 id11:2; /* [17:16] Default:0x0 RW */ + u32 id12:2; /* [19:18] Default:0x0 RW */ + u32 id13:2; /* [21:20] Default:0x0 RW */ + u32 id14:2; /* [23:22] Default:0x0 RW */ + u32 id15:2; /* [25:24] Default:0x0 RW */ + u32 id16:2; /* [27:26] Default:0x0 RW */ + u32 id17:2; /* [29:28] Default:0x3 RW */ + u32 id18:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_ACTION_PRI0_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ACTION_PRI1_ADDR (0xc314) +#define NBL_UPRBAC_ACTION_PRI1_DEPTH (1) +#define NBL_UPRBAC_ACTION_PRI1_WIDTH (32) +#define 
NBL_UPRBAC_ACTION_PRI1_DWLEN (1) +union uprbac_action_pri1_u { + struct uprbac_action_pri1 { + u32 id19:2; /* [1:0] Default:0x0 RW */ + u32 id20:2; /* [3:2] Default:0x0 RW */ + u32 id21:2; /* [5:4] Default:0x0 RW */ + u32 id22:2; /* [7:6] Default:0x0 RW */ + u32 id23:2; /* [9:8] Default:0x0 RW */ + u32 id24:2; /* [11:10] Default:0x0 RW */ + u32 id25:2; /* [13:12] Default:0x0 RW */ + u32 id26:2; /* [15:14] Default:0x0 RW */ + u32 id27:2; /* [17:16] Default:0x0 RW */ + u32 id28:2; /* [19:18] Default:0x0 RW */ + u32 id29:2; /* [21:20] Default:0x0 RW */ + u32 id30:2; /* [23:22] Default:0x0 RW */ + u32 id31:2; /* [25:24] Default:0x0 RW */ + u32 id32:2; /* [27:26] Default:0x0 RW */ + u32 id33:2; /* [29:28] Default:0x0 RW */ + u32 id34:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_ACTION_PRI1_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ACTION_PRI2_ADDR (0xc318) +#define NBL_UPRBAC_ACTION_PRI2_DEPTH (1) +#define NBL_UPRBAC_ACTION_PRI2_WIDTH (32) +#define NBL_UPRBAC_ACTION_PRI2_DWLEN (1) +union uprbac_action_pri2_u { + struct uprbac_action_pri2 { + u32 id35:2; /* [1:0] Default:0x0 RW */ + u32 id36:2; /* [3:2] Default:0x0 RW */ + u32 id37:2; /* [5:4] Default:0x0 RW */ + u32 id38:2; /* [7:6] Default:0x0 RW */ + u32 id39:2; /* [9:8] Default:0x0 RW */ + u32 id40:2; /* [11:10] Default:0x0 RW */ + u32 id41:2; /* [13:12] Default:0x0 RW */ + u32 id42:2; /* [15:14] Default:0x0 RW */ + u32 id43:2; /* [17:16] Default:0x0 RW */ + u32 id44:2; /* [19:18] Default:0x0 RW */ + u32 id45:2; /* [21:20] Default:0x0 RW */ + u32 id46:2; /* [23:22] Default:0x0 RW */ + u32 id47:2; /* [25:24] Default:0x0 RW */ + u32 id48:2; /* [27:26] Default:0x0 RW */ + u32 id49:2; /* [29:28] Default:0x0 RW */ + u32 id50:2; /* [31:30] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_ACTION_PRI2_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ACTION_PRI3_ADDR (0xc31c) +#define NBL_UPRBAC_ACTION_PRI3_DEPTH (1) +#define NBL_UPRBAC_ACTION_PRI3_WIDTH (32) +#define NBL_UPRBAC_ACTION_PRI3_DWLEN (1) +union uprbac_action_pri3_u { + struct uprbac_action_pri3 { + u32 id51:2; /* [1:0] Default:0x0 RW */ + u32 id52:2; /* [3:2] Default:0x0 RW */ + u32 id53:2; /* [5:4] Default:0x0 RW */ + u32 id54:2; /* [7:6] Default:0x0 RW */ + u32 id55:2; /* [9:8] Default:0x0 RW */ + u32 id56:2; /* [11:10] Default:0x0 RW */ + u32 id57:2; /* [13:12] Default:0x0 RW */ + u32 id58:2; /* [15:14] Default:0x0 RW */ + u32 id59:2; /* [17:16] Default:0x0 RW */ + u32 id60:2; /* [19:18] Default:0x0 RW */ + u32 id61:2; /* [21:20] Default:0x0 RW */ + u32 id62:2; /* [23:22] Default:0x0 RW */ + u32 rsv:8; /* [31:24] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_ACTION_PRI3_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ACTION0_ADDR (0xc328) +#define NBL_UPRBAC_ACTION0_DEPTH (1) +#define NBL_UPRBAC_ACTION0_WIDTH (32) +#define NBL_UPRBAC_ACTION0_DWLEN (1) +union uprbac_action0_u { + struct uprbac_action0 { + u32 action:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_ACTION0_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ACTION1_ADDR (0xc32c) +#define NBL_UPRBAC_ACTION1_DEPTH (1) +#define NBL_UPRBAC_ACTION1_WIDTH (32) +#define NBL_UPRBAC_ACTION1_DWLEN (1) +union uprbac_action1_u { + struct uprbac_action1 { + u32 action:22; /* [21:0] Default:0x0 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_ACTION1_DWLEN]; +} __packed; + +#define NBL_UPRBAC_ACTION2_ADDR (0xc330) +#define NBL_UPRBAC_ACTION2_DEPTH (1) +#define NBL_UPRBAC_ACTION2_WIDTH (32) 
+#define NBL_UPRBAC_ACTION2_DWLEN (1) +union uprbac_action2_u { + struct uprbac_action2 { + u32 action:22; /* [21:0] Default:0x110000 RW */ + u32 rsv:10; /* [31:22] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_ACTION2_DWLEN]; +} __packed; + +#define NBL_UPRBAC_PRBAC_SET_FLAG_ADDR (0xc334) +#define NBL_UPRBAC_PRBAC_SET_FLAG_DEPTH (1) +#define NBL_UPRBAC_PRBAC_SET_FLAG_WIDTH (32) +#define NBL_UPRBAC_PRBAC_SET_FLAG_DWLEN (1) +union uprbac_prbac_set_flag_u { + struct uprbac_prbac_set_flag { + u32 flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_PRBAC_SET_FLAG_DWLEN]; +} __packed; + +#define NBL_UPRBAC_PRBAC_CLR_FLAG_ADDR (0xc338) +#define NBL_UPRBAC_PRBAC_CLR_FLAG_DEPTH (1) +#define NBL_UPRBAC_PRBAC_CLR_FLAG_WIDTH (32) +#define NBL_UPRBAC_PRBAC_CLR_FLAG_DWLEN (1) +union uprbac_prbac_clr_flag_u { + struct uprbac_prbac_clr_flag { + u32 flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_PRBAC_CLR_FLAG_DWLEN]; +} __packed; + +#define NBL_UPRBAC_NORMAL_SET_FLAG_ADDR (0xc33c) +#define NBL_UPRBAC_NORMAL_SET_FLAG_DEPTH (1) +#define NBL_UPRBAC_NORMAL_SET_FLAG_WIDTH (32) +#define NBL_UPRBAC_NORMAL_SET_FLAG_DWLEN (1) +union uprbac_normal_set_flag_u { + struct uprbac_normal_set_flag { + u32 flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_NORMAL_SET_FLAG_DWLEN]; +} __packed; + +#define NBL_UPRBAC_NORMAL_CLR_FLAG_ADDR (0xc340) +#define NBL_UPRBAC_NORMAL_CLR_FLAG_DEPTH (1) +#define NBL_UPRBAC_NORMAL_CLR_FLAG_WIDTH (32) +#define NBL_UPRBAC_NORMAL_CLR_FLAG_DWLEN (1) +union uprbac_normal_clr_flag_u { + struct uprbac_normal_clr_flag { + u32 flag:32; /* [31:0] Default:0x0 RW */ + } __packed info; + u32 data[NBL_UPRBAC_NORMAL_CLR_FLAG_DWLEN]; +} __packed; + +#define NBL_UPRBAC_UPCALL_ENABLE_ADDR (0xc380) +#define NBL_UPRBAC_UPCALL_ENABLE_DEPTH (1) +#define NBL_UPRBAC_UPCALL_ENABLE_WIDTH (32) +#define NBL_UPRBAC_UPCALL_ENABLE_DWLEN (1) +union uprbac_upcall_enable_u { + struct uprbac_upcall_enable { + u32 enable:1; /* [0] Default:0x0 RW */ + u32 rsv:31; /* [31:1] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_UPCALL_ENABLE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_HT_ACCESS_CTRL_ADDR (0xc3a0) +#define NBL_UPRBAC_HT_ACCESS_CTRL_DEPTH (1) +#define NBL_UPRBAC_HT_ACCESS_CTRL_WIDTH (32) +#define NBL_UPRBAC_HT_ACCESS_CTRL_DWLEN (1) +union uprbac_ht_access_ctrl_u { + struct uprbac_ht_access_ctrl { + u32 ptr:2; /* [1:0] Default:0x0 RW */ + u32 rsv1:2; /* [3:2] Default:0x0 RO */ + u32 choice:1; /* [4] Default:0x1 RW */ + u32 rsv:27; /* [31:5] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_HT_ACCESS_CTRL_DWLEN]; +} __packed; + +#define NBL_UPRBAC_CPU_INSERT_SEARCH_CTRL_ADDR (0xc3a4) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_CTRL_DEPTH (1) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_CTRL_WIDTH (32) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_CTRL_DWLEN (1) +union uprbac_cpu_insert_search_ctrl_u { + struct uprbac_cpu_insert_search_ctrl { + u32 rsv:31; /* [30:0] Default:0x0 RO */ + u32 start:1; /* [31] Default:0x0 WO */ + } __packed info; + u32 data[NBL_UPRBAC_CPU_INSERT_SEARCH_CTRL_DWLEN]; +} __packed; + +#define NBL_UPRBAC_CPU_INSERT_SEARCH_ACK_ADDR (0xc3a8) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_ACK_DEPTH (1) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_ACK_WIDTH (32) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_ACK_DWLEN (1) +union uprbac_cpu_insert_search_ack_u { + struct uprbac_cpu_insert_search_ack { + u32 done:1; /* [0] Default:0x0 RC */ + u32 status:2; /* [2:1] Default:0x0 RWW */ + u32 rsv:29; /* [31:3] Default:0x0 RO */ + } 
__packed info; + u32 data[NBL_UPRBAC_CPU_INSERT_SEARCH_ACK_DWLEN]; +} __packed; + +#define NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_ADDR (0xc3b0) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_DEPTH (5) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_WIDTH (32) +#define NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_DWLEN (1) +union uprbac_cpu_insert_search_data_u { + struct uprbac_cpu_insert_search_data { + u32 data:32; /* [31:0] Default:0x0 RWW */ + } __packed info; + u32 data[NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_DWLEN]; +} __packed; +#define NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_REG(r) (NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_ADDR + \ + (NBL_UPRBAC_CPU_INSERT_SEARCH_DATA_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_XOFF_TO_URMUX_ADDR (0xc430) +#define NBL_UPRBAC_XOFF_TO_URMUX_DEPTH (1) +#define NBL_UPRBAC_XOFF_TO_URMUX_WIDTH (32) +#define NBL_UPRBAC_XOFF_TO_URMUX_DWLEN (1) +union uprbac_xoff_to_urmux_u { + struct uprbac_xoff_to_urmux { + u32 bp_set:1; /* [0] Default:0x0 RW */ + u32 bp_mask:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_XOFF_TO_URMUX_DWLEN]; +} __packed; + +#define NBL_UPRBAC_XOFF_FROM_UPA_ADDR (0xc434) +#define NBL_UPRBAC_XOFF_FROM_UPA_DEPTH (1) +#define NBL_UPRBAC_XOFF_FROM_UPA_WIDTH (32) +#define NBL_UPRBAC_XOFF_FROM_UPA_DWLEN (1) +union uprbac_xoff_from_upa_u { + struct uprbac_xoff_from_upa { + u32 bp_set:1; /* [0] Default:0x0 RW */ + u32 bp_mask:1; /* [1] Default:0x0 RW */ + u32 rsv:30; /* [31:2] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_XOFF_FROM_UPA_DWLEN]; +} __packed; + +#define NBL_UPRBAC_WIDE_TABLE_TIME_ADDR (0xd000) +#define NBL_UPRBAC_WIDE_TABLE_TIME_DEPTH (1) +#define NBL_UPRBAC_WIDE_TABLE_TIME_WIDTH (32) +#define NBL_UPRBAC_WIDE_TABLE_TIME_DWLEN (1) +union uprbac_wide_table_time_u { + struct uprbac_wide_table_time { + u32 xoff_th:8; /* [7:0] Default:0x80 RW */ + u32 rsv:24; /* [31:8] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_WIDE_TABLE_TIME_DWLEN]; +} __packed; + +#define NBL_UPRBAC_EM_PROFILE_TABLE_ADDR (0xe000) +#define NBL_UPRBAC_EM_PROFILE_TABLE_DEPTH (1) +#define NBL_UPRBAC_EM_PROFILE_TABLE_WIDTH (64) +#define NBL_UPRBAC_EM_PROFILE_TABLE_DWLEN (2) +union uprbac_em_profile_table_u { + struct uprbac_em_profile_table { + u32 pp_cmd_type:1; /* [0] Default:0x0 RW */ + u32 key_size:1; /* [1] Default:0x0 RW */ + u32 mask_btm_l:32; /* [41:2] Default:0x0 RW */ + u32 mask_btm_h:8; /* [41:2] Default:0x0 RW */ + u32 hash_sel0:2; /* [43:42] Default:0x0 RW */ + u32 hash_sel1:2; /* [45:44] Default:0x0 RW */ + u32 action0:1; /* [46] Default:0x0 RW */ + u32 act_num:4; /* [50:47] Default:0x0 RW */ + u32 vld:1; /* [51] Default:0x0 RW */ + u32 rsv:12; /* [63:52] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_EM_PROFILE_TABLE_DWLEN]; +} __packed; + +#define NBL_UPRBAC_EM_TCAM_TABLE_ADDR (0xe800) +#define NBL_UPRBAC_EM_TCAM_TABLE_DEPTH (64) +#define NBL_UPRBAC_EM_TCAM_TABLE_WIDTH (128) +#define NBL_UPRBAC_EM_TCAM_TABLE_DWLEN (4) +union uprbac_em_tcam_table_u { + struct uprbac_em_tcam_table { + u32 key_dat:16; /* [79:0] Default:0x0 RW */ + u32 key_dat_arr[2]; /* [79:0] Default:0x0 RW */ + u32 key_vld:1; /* [80] Default:0x0 RW */ + u32 key_size:1; /* [81] Default:0x0 RW */ + u32 rsv_l:32; /* [127:82] Default:0x0 RO */ + u32 rsv_h:14; /* [127:82] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_EM_TCAM_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_EM_TCAM_TABLE_REG(r) (NBL_UPRBAC_EM_TCAM_TABLE_ADDR + \ + (NBL_UPRBAC_EM_TCAM_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_EM_AD_TABLE_ADDR (0xf000) +#define 
NBL_UPRBAC_EM_AD_TABLE_DEPTH (64) +#define NBL_UPRBAC_EM_AD_TABLE_WIDTH (32) +#define NBL_UPRBAC_EM_AD_TABLE_DWLEN (1) +union uprbac_em_ad_table_u { + struct uprbac_em_ad_table { + u32 sad_index:11; /* [10:0] Default:0x0 RW */ + u32 rsv:21; /* [31:11] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_EM_AD_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_EM_AD_TABLE_REG(r) (NBL_UPRBAC_EM_AD_TABLE_ADDR + \ + (NBL_UPRBAC_EM_AD_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_HT0_TABLE_ADDR (0x10000) +#define NBL_UPRBAC_HT0_TABLE_DEPTH (512) +#define NBL_UPRBAC_HT0_TABLE_WIDTH (128) +#define NBL_UPRBAC_HT0_TABLE_DWLEN (4) +union uprbac_ht0_table_u { + struct uprbac_ht0_table { + u32 table:20; /* [83:0] Default:0x0 RW */ + u32 table_arr[2]; /* [83:0] Default:0x0 RW */ + u32 rsv_l:32; /* [127:84] Default:0x0 RO */ + u32 rsv_h:12; /* [127:84] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_HT0_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_HT0_TABLE_REG(r) (NBL_UPRBAC_HT0_TABLE_ADDR + \ + (NBL_UPRBAC_HT0_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_HT1_TABLE_ADDR (0x14000) +#define NBL_UPRBAC_HT1_TABLE_DEPTH (512) +#define NBL_UPRBAC_HT1_TABLE_WIDTH (128) +#define NBL_UPRBAC_HT1_TABLE_DWLEN (4) +union uprbac_ht1_table_u { + struct uprbac_ht1_table { + u32 table:20; /* [83:0] Default:0x0 RW */ + u32 table_arr[2]; /* [83:0] Default:0x0 RW */ + u32 rsv_l:32; /* [127:84] Default:0x0 RO */ + u32 rsv_h:12; /* [127:84] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_HT1_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_HT1_TABLE_REG(r) (NBL_UPRBAC_HT1_TABLE_ADDR + \ + (NBL_UPRBAC_HT1_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_KT_TABLE_ADDR (0x1c000) +#define NBL_UPRBAC_KT_TABLE_DEPTH (2048) +#define NBL_UPRBAC_KT_TABLE_WIDTH (256) +#define NBL_UPRBAC_KT_TABLE_DWLEN (8) +union uprbac_kt_table_u { + struct uprbac_kt_table { + u32 key:32; /* [159:0] Default:0x0 RW */ + u32 key_arr[4]; /* [159:0] Default:0x0 RW */ + u32 sad_index:11; /* [170:160] Default:0x0 RW */ + u32 rsv:21; /* [255:171] Default:0x0 RO */ + u32 rsv_arr[2]; /* [255:171] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_KT_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_KT_TABLE_REG(r) (NBL_UPRBAC_KT_TABLE_ADDR + \ + (NBL_UPRBAC_KT_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_SAD_BOTTOM_TABLE_ADDR (0x2c000) +#define NBL_UPRBAC_SAD_BOTTOM_TABLE_DEPTH (2048) +#define NBL_UPRBAC_SAD_BOTTOM_TABLE_WIDTH (128) +#define NBL_UPRBAC_SAD_BOTTOM_TABLE_DWLEN (4) +union uprbac_sad_bottom_table_u { + struct uprbac_sad_bottom_table { + u32 sn:32; /* [31:0] Default:0x0 RW */ + u32 esn:32; /* [63:32] Default:0x0 RW */ + u32 esn_overlap:1; /* [64] Default:0x0 RW */ + u32 esn_enable:1; /* [65] Default:0x0 RW */ + u32 rsv_l:32; /* [127:66] Default:0x0 RO */ + u32 rsv_h:30; /* [127:66] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_SAD_BOTTOM_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_SAD_BOTTOM_TABLE_REG(r) (NBL_UPRBAC_SAD_BOTTOM_TABLE_ADDR + \ + (NBL_UPRBAC_SAD_BOTTOM_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_SAD_LIFETIME_TABLE_ADDR (0x3c000) +#define NBL_UPRBAC_SAD_LIFETIME_TABLE_DEPTH (2048) +#define NBL_UPRBAC_SAD_LIFETIME_TABLE_WIDTH (128) +#define NBL_UPRBAC_SAD_LIFETIME_TABLE_DWLEN (4) +union uprbac_sad_lifetime_table_u { + struct uprbac_sad_lifetime_table { + u32 diff:32; /* [31:0] Default:0x0 RW */ + u32 cnt:32; /* [63:32] Default:0x0 RW */ + u32 flag:1; /* [64] Default:0x0 RW */ + u32 unit:1; /* [65] Default:0x0 RW */ + u32 enable:1; /* [66] Default:0x0 RW */ + u32 rsv_l:32; /* [127:67] 
Default:0x0 RO */ + u32 rsv_h:29; /* [127:67] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_SAD_LIFETIME_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_SAD_LIFETIME_TABLE_REG(r) (NBL_UPRBAC_SAD_LIFETIME_TABLE_ADDR + \ + (NBL_UPRBAC_SAD_LIFETIME_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_ADDR (0x4c000) +#define NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_DEPTH (2048) +#define NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_WIDTH (512) +#define NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_DWLEN (16) +union uprbac_sad_crypto_info_table_u { + struct uprbac_sad_crypto_info_table { + u32 key_arr[8]; /* [255:0] Default:0x0 RW */ + u32 salt:32; /* [287:256] Default:0x0 RW */ + u32 crypto_type:3; /* [290:288] Default:0x0 RW */ + u32 tunnel_mode:1; /* [291] Default:0x0 RW */ + u32 icv_len:2; /* [293:292] Default:0x0 RW */ + u32 rsv:26; /* [511:294] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:294] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_REG(r) (NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_ADDR + \ + (NBL_UPRBAC_SAD_CRYPTO_INFO_TABLE_DWLEN * 4) * (r)) + +#define NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_ADDR (0x6c000) +#define NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_DEPTH (2048) +#define NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_WIDTH (512) +#define NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_DWLEN (16) +union uprbac_sad_slide_window_table_u { + struct uprbac_sad_slide_window_table { + u32 bitmap_arr[8]; /* [255:0] Default:0x0 RW */ + u32 choice:2; /* [257:256] Default:0x0 RW */ + u32 enable:1; /* [258] Default:0x0 RW */ + u32 rsv_l:32; /* [511:259] Default:0x0 RO */ + u32 rsv_h:29; /* [511:259] Default:0x0 RO */ + u32 rsv_arr[6]; /* [511:259] Default:0x0 RO */ + } __packed info; + u32 data[NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_DWLEN]; +} __packed; +#define NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_REG(r) (NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_ADDR + \ + (NBL_UPRBAC_SAD_SLIDE_WINDOW_TABLE_DWLEN * 4) * (r)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..8614cd58694ef1c3f0aab2a5baf078a07c6fd005 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan
+ */
+
+#include "nbl_fc_leonis.h"
+#include "nbl_fc.h"
+
+static inline void nbl_fc_get_cmd_hdr(struct nbl_fc_mgt *mgt)
+{
+	static const struct nbl_cmd_hdr g_cmd_hdr[] = {
+		[NBL_ACL_STATID_READ] = {NBL_BLOCK_PPE, NBL_MODULE_ACL,
+					 NBL_TABLE_ACL_STATID, NBL_CMD_OP_READ},
+		[NBL_ACL_FLOWID_READ] = {NBL_BLOCK_PPE, NBL_MODULE_ACL,
+					 NBL_TABLE_ACL_FLOWID, NBL_CMD_OP_READ}
+	};
+
+	memcpy(mgt->cmd_hdr, g_cmd_hdr, sizeof(g_cmd_hdr));
+}
+
+static void nbl_fc_get_spec_sz(u16 *hit_sz, u16 *bytes_sz)
+{
+	*hit_sz = NBL_SPEC_STAT_HIT_SIZE;
+	*bytes_sz = NBL_SPEC_STAT_BYTES_SIZE;
+}
+
+static void nbl_fc_get_flow_sz(u16 *hit_sz, u16 *bytes_sz)
+{
+	*hit_sz = NBL_FLOW_STAT_HIT_SIZE;
+	*bytes_sz = NBL_FLOW_STAT_BYTES_SIZE;
+}
+
+static void nbl_fc_get_spec_stats(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes)
+{
+	NBL_GET_SPEC_STAT_HITS(counter->cache.packets, counter->lastpackets, pkts);
+	NBL_GET_SPEC_STAT_BYTES(counter->cache.bytes, counter->lastbytes, bytes);
+}
+
+static void nbl_fc_get_flow_stats(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes)
+{
+	NBL_GET_FLOW_STAT_HITS(counter->cache.packets, counter->lastpackets, pkts);
+	NBL_GET_FLOW_STAT_BYTES(counter->cache.bytes, counter->lastbytes, bytes);
+}
+
+static int nbl_fc_update_flow_stats(struct nbl_fc_mgt *mgt,
+				    struct nbl_flow_query_counter *counter_array,
+				    u32 flow_num, u32 clear, enum nbl_pp_fc_type fc_type)
+{
+	int ret = 0;
+	u32 idx = 0;
+	u16 hit_size;
+	u16 bytes_size;
+	union nbl_cmd_acl_flowid_u fquery_out;
+	union nbl_cmd_acl_statid_u squery_out;
+	struct nbl_stats_data data_info = { 0 };
+	struct nbl_cmd_content cmd = { 0 };
+	struct nbl_cmd_hdr hdr = mgt->cmd_hdr[NBL_ACL_FLOWID_READ];
+
+	memset(&fquery_out, 0, sizeof(fquery_out));
+	memset(&squery_out, 0, sizeof(squery_out));
+
+	cmd.out_va = &fquery_out;
+	if (fc_type == NBL_FC_SPEC_TYPE) {
+		hdr = mgt->cmd_hdr[NBL_ACL_STATID_READ];
+		cmd.out_va = &squery_out;
+		mgt->fc_ops.get_spec_stat_sz(&hit_size, &bytes_size);
+	} else {
+		mgt->fc_ops.get_flow_stat_sz(&hit_size, &bytes_size);
+	}
+
+	cmd.in_va = counter_array->counter_id;
+	cmd.in_params = (clear << NBL_FLOW_STAT_CLR_OFT) |
+			((flow_num - 1) & NBL_FLOW_STAT_NUM_MASK);
+	cmd.in_length = NBL_CMDQ_ACL_STAT_BASE_LEN;
+
+	ret = nbl_tc_call_inst_cmdq(mgt->common->tc_inst_id, (void *)&hdr, (void *)&cmd);
+	if (ret)
+		goto cmd_send_error;
+
+	/* a clear request only flushes the hardware counters; no cache update is needed */
+	if (clear) {
+		nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc flush hw-stats success");
+		return 0;
+	}
+
+	for (idx = 0; idx < flow_num; idx++) {
+		if (fc_type == NBL_FC_SPEC_TYPE) {
+			memcpy(&data_info.bytes, squery_out.info.all_data[idx].bytes, bytes_size);
+			memcpy(&data_info.packets, &squery_out.info.all_data[idx].hits, hit_size);
+		} else {
+			memcpy(&data_info.bytes, fquery_out.info.all_data[idx].bytes, bytes_size);
+			memcpy(&data_info.packets, &fquery_out.info.all_data[idx].hits, hit_size);
+		}
+		data_info.flow_id = counter_array->counter_id[idx];
+		nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc get %u-%lu: packets:%llu-bytes:%llu\n",
+			  data_info.flow_id, counter_array->cookie[idx],
+			  data_info.packets, data_info.bytes);
+		ret = nbl_fc_set_stats(mgt, &data_info, counter_array->cookie[idx]);
+		if (ret)
+			goto set_stat_error;
+	}
+
+	return 0;
+
+cmd_send_error:
+	nbl_err(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc get hw stats failed. ret %d", ret);
+	return ret;
+
+set_stat_error:
+	nbl_debug(mgt->common, NBL_DEBUG_FLOW, "nbl flow fc set flow stats failed."
+ " count_id:%u, cookie: %lu, ret(%u): %d", counter_array->counter_id[idx], + counter_array->cookie[idx], idx, ret); + return ret; +} + +static void nbl_fc_init_ops_leonis(struct nbl_fc_mgt *mgt) +{ + mgt->fc_ops.get_spec_stat_sz = &nbl_fc_get_spec_sz; + mgt->fc_ops.get_flow_stat_sz = &nbl_fc_get_flow_sz; + mgt->fc_ops.get_spec_stats = &nbl_fc_get_spec_stats; + mgt->fc_ops.get_flow_stats = &nbl_fc_get_flow_stats; + mgt->fc_ops.update_stats = &nbl_fc_update_flow_stats; +} + +int nbl_fc_add_stats_leonis(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie) +{ + return nbl_fc_add_stats(priv, fc_type, cookie); +} + +int nbl_fc_del_stats_leonis(void *priv, unsigned long cookie) +{ + return nbl_fc_del_stats(priv, cookie); +} + +int nbl_fc_setup_ops_leonis(struct nbl_resource_ops *res_ops) +{ + return nbl_fc_setup_ops(res_ops); +} + +void nbl_fc_remove_ops_leonis(struct nbl_resource_ops *res_ops) +{ + return nbl_fc_remove_ops(res_ops); +} + +int nbl_fc_mgt_start_leonis(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_fc_mgt **fc_mgt; + struct device *dev; + int ret = -ENOMEM; + struct nbl_fc_mgt *mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_common_info *common; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + fc_mgt = &NBL_RES_MGT_TO_COUNTER_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + ret = phy_ops->init_acl_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc init phy-stats failed"); + return ret; + } + + ret = nbl_fc_setup_mgt(dev, fc_mgt); + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, "nbl flow fc init mgt failed"); + return ret; + } + + mgt = (*fc_mgt); + mgt->common = common; + nbl_fc_init_ops_leonis(mgt); + nbl_fc_get_cmd_hdr(mgt); + return nbl_fc_mgt_start(mgt); +} + +void nbl_fc_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt) +{ + return nbl_fc_mgt_stop(res_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..5b58b8bfdfa24b5141acd442e55c3ec1e6a3bfe9 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_fc_leonis.h @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_FC_LEONIS_H_ +#define _NBL_FC_LEONIS_H_ +#include "nbl_resource.h" +#include "nbl_core.h" +#include "nbl_hw.h" + +#define NBL_FLOW_STAT_HIT_SIZE 5 +#define NBL_FLOW_STAT_BYTES_SIZE 6 +#define NBL_SPEC_STAT_HIT_SIZE 8 +#define NBL_SPEC_STAT_BYTES_SIZE 8 + +#define NBL_FLOW_STATS_BYTES_WIDE (0xffffffffffff) +#define NBL_FLOW_STATS_HITS_WIDE (0xffffffffff) +#define NBL_GET_FLOW_STAT_BYTES(_cur_v, _pre_v, _v) do \ +{ \ + typeof(_v) v = _v; \ + typeof(_cur_v) cur_v = _cur_v; \ + typeof(_pre_v) pre_v = _pre_v; \ + if (cur_v >= pre_v) \ + *v = cur_v - pre_v; \ + else \ + *v = NBL_FLOW_STATS_BYTES_WIDE - pre_v + cur_v; \ +} while (0) + +#define NBL_GET_FLOW_STAT_HITS(_cur_v, _pre_v, _v) do \ +{ \ + typeof(_v) v = _v; \ + typeof(_cur_v) cur_v = _cur_v; \ + typeof(_pre_v) pre_v = _pre_v; \ + if (cur_v >= pre_v) \ + *v = cur_v - pre_v; \ + else \ + *v = NBL_FLOW_STATS_HITS_WIDE - pre_v + cur_v; \ +} while (0) + +#define NBL_SPEC_STATS_BYTES_WIDE (0xffffffffffffffff) +#define NBL_SPEC_STATS_HITS_WIDE (0xffffffffffffffff) +#define NBL_GET_SPEC_STAT_BYTES(_cur_v, _pre_v, _v) do \ +{ \ + typeof(_v) v = _v; \ + typeof(_cur_v) cur_v = _cur_v; \ + typeof(_pre_v) pre_v = _pre_v; \ + if (cur_v >= pre_v) \ + *v = cur_v - pre_v; \ + else \ + *v = NBL_SPEC_STATS_BYTES_WIDE - pre_v + cur_v; \ +} while (0) + +#define NBL_GET_SPEC_STAT_HITS(_cur_v, _pre_v, _v) do \ +{ \ + typeof(_v) v = _v; \ + typeof(_cur_v) cur_v = _cur_v; \ + typeof(_pre_v) pre_v = _pre_v; \ + if (cur_v >= pre_v) \ + *v = cur_v - pre_v; \ + else \ + *v = NBL_SPEC_STATS_HITS_WIDE - pre_v + cur_v; \ +} while (0) + +#pragma pack(1) +/* CMDQ data content for ACL-FLOW ID */ +struct nbl_cmd_acl_stat_flowid_addr { + u32 addr:17; + u32 rsv:15; +} __packed; + +struct nbl_cmd_acl_stat_flowid_data { + u8 bytes[NBL_FLOW_STAT_BYTES_SIZE]; + u8 hits[NBL_FLOW_STAT_HIT_SIZE]; + u8 rsv; + +} __packed; + +union nbl_cmd_acl_flowid_u { + struct nbl_cmd_acl_flowid { + struct nbl_cmd_acl_stat_flowid_addr all_addr[NBL_FLOW_COUNT_NUM]; + struct nbl_cmd_acl_stat_flowid_data all_data[NBL_FLOW_COUNT_NUM]; + } __packed info; +#define NBL_CMD_ACL_FLOWID_TAB_WIDTH (sizeof(struct nbl_cmd_acl_flowid) \ + / sizeof(u32)) + u32 data[NBL_CMD_ACL_FLOWID_TAB_WIDTH]; +}; + +/* CMDQ data content for ACL-STAT ID */ +struct nbl_cmd_acl_stat_statid_addr { + u32 addr:11; + u32 rsv:21; +} __packed; + +struct nbl_cmd_acl_stat_statid_data { + u8 bytes[NBL_SPEC_STAT_BYTES_SIZE]; + u8 hits[NBL_SPEC_STAT_HIT_SIZE]; +} __packed; + +union nbl_cmd_acl_statid_u { + struct nbl_cmd_acl_statid { + struct nbl_cmd_acl_stat_statid_addr all_addr[NBL_FLOW_COUNT_NUM]; + struct nbl_cmd_acl_stat_statid_data all_data[NBL_FLOW_COUNT_NUM]; + } __packed info; +#define NBL_CMD_ACL_STATID_TAB_WIDTH (sizeof(struct nbl_cmd_acl_statid) \ + / sizeof(u32)) + u32 data[NBL_CMD_ACL_STATID_TAB_WIDTH]; +}; + +#pragma pack() + +int nbl_fc_add_stats_leonis(void *priv, enum nbl_pp_fc_type fc_type, unsigned long cookie); +int nbl_fc_del_stats_leonis(void *priv, unsigned long cookie); +int nbl_fc_setup_ops_leonis(struct nbl_resource_ops *res_ops); +void nbl_fc_remove_ops_leonis(struct nbl_resource_ops *res_ops); +int nbl_fc_mgt_start_leonis(struct nbl_resource_mgt *res_mgt); +void nbl_fc_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c new file mode 100644 index 
0000000000000000000000000000000000000000..38824b8b14c00341d8440886a4906d3a9f4cfdf2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.c @@ -0,0 +1,2037 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_flow_leonis.h" +#include "nbl_p4_actions.h" +#include "nbl_resource_leonis.h" + +static u32 nbl_flow_cfg_action_set_dport(u16 upcall_flag, u16 port_type, u16 vsi, u16 next_stg_sel) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = upcall_flag; + set_dport.dport.up.port_type = port_type; + set_dport.dport.up.port_id = vsi; + set_dport.dport.up.next_stg_sel = next_stg_sel; + + return set_dport.data + (NBL_ACT_SET_DPORT << 16); +} + +static u16 nbl_flow_cfg_action_set_dport_mcc_eth(u8 eth) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; + set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; + set_dport.dport.down.lag_vld = 0; + set_dport.dport.down.eth_vld = 1; + set_dport.dport.down.eth_id = eth; + + return set_dport.data; +} + +static u16 nbl_flow_cfg_action_set_dport_mcc_vsi(u16 vsi) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.port_id = vsi; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_ACL_S0; + + return set_dport.data; +} + +static u16 nbl_flow_cfg_action_set_dport_mcc_lag(u16 lag_id) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; + set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; + set_dport.dport.down.lag_vld = 1; + set_dport.dport.down.eth_vld = 0; + set_dport.dport.down.lag_id = lag_id; + + return set_dport.data; +} + +static u32 nbl_flow_cfg_action_set_dport_mcc_bmc(void) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.port_type = SET_DPORT_TYPE_SP_PORT; + set_dport.dport.up.port_id = NBL_FLOW_MCC_BMC_DPORT; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + + return set_dport.data + (NBL_ACT_SET_DPORT << 16); +} + +static int nbl_flow_cfg_action_mcc(u16 mcc_id, u32 *action0, u32 *action1) +{ + union nbl_action_data mcc_idx_act = {.data = 0}, set_aux_act = {.data = 0}; + + mcc_idx_act.mcc_idx.mcc_id = mcc_id; + *action0 = (u32)mcc_idx_act.data + (NBL_ACT_SET_MCC << 16); + + set_aux_act.set_aux.sub_id = NBL_SET_AUX_SET_AUX; + set_aux_act.set_aux.nstg_vld = 1; + set_aux_act.set_aux.nstg_val = NBL_NEXT_STG_MCC; + + *action1 = (u32)set_aux_act.data + (NBL_ACT_SET_AUX_FIELD << 16); + + return 0; +} + +static int nbl_flow_cfg_action_up_tnl(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); + + return 0; +} + +static int nbl_flow_cfg_action_lldp_lacp_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); + + return 0; +} + +static int nbl_flow_cfg_action_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = 
nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_NONE); + + return 0; +} + +static int nbl_flow_cfg_action_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + *action1 = 0; + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_NML_FWD, SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_EPRO); + + return 0; +} + +static int nbl_flow_cfg_action_l2_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_action_l2_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_action_l3_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_action_l3_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + return nbl_flow_cfg_action_mcc(param.mcc_id, action0, action1); +} + +static int nbl_flow_cfg_up_tnl_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + u64 dst_mac = 0; + u8 sport; + u8 reverse_mac[ETH_ALEN]; + + nbl_convert_mac(param.mac, reverse_mac); + + memset(kt_data->hash_key, 0x0, sizeof(kt_data->hash_key)); + ether_addr_copy((u8 *)&dst_mac, reverse_mac); + + kt_data->info.dst_mac = dst_mac; + kt_data->info.svlan_id = param.vid; + kt_data->info.template = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_lldp_lacp_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_lldp_lacp_data_u *kt_data = (union nbl_l2_phy_lldp_lacp_data_u *)data; + u8 sport; + + kt_data->info.template = NBL_EM0_PT_PHY_UP_LLDP_LACP; + + kt_data->info.ether_type = param.ether_type; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + u64 dst_mac = 0; + u8 sport; + u8 reverse_mac[ETH_ALEN]; + + nbl_convert_mac(param.mac, reverse_mac); + + memset(kt_data->hash_key, 0x0, sizeof(kt_data->hash_key)); + ether_addr_copy((u8 *)&dst_mac, reverse_mac); + + kt_data->info.dst_mac = dst_mac; + kt_data->info.svlan_id = param.vid; + kt_data->info.template = NBL_EM0_PT_PHY_UP_UNICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_down_data_u *kt_data = (union nbl_l2_phy_down_data_u *)data; + u64 dst_mac = 0; + u8 sport; + u8 reverse_mac[ETH_ALEN]; + + nbl_convert_mac(param.mac, reverse_mac); + + memset(kt_data->hash_key, 0x0, sizeof(kt_data->hash_key)); + ether_addr_copy((u8 *)&dst_mac, reverse_mac); + + kt_data->info.dst_mac = dst_mac; + kt_data->info.svlan_id = param.vid; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_UNICAST_L2; + kt_data->info.padding = 0; + + sport = param.vsi >> 8; + if (eth_mode == NBL_TWO_ETHERNET_PORT) + sport &= 0xFE; + kt_data->info.sport = sport; + + return 0; +} + +static 
int nbl_flow_cfg_l2_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; + u8 sport; + + kt_data->info.dst_mac = 0xFFFFFFFFFFFF; + kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_l2_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; + u8 sport; + + kt_data->info.dst_mac = 0xFFFFFFFFFFFF; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L2; + kt_data->info.padding = 0; + + sport = param.eth; + if (eth_mode == NBL_TWO_ETHERNET_PORT) + sport &= 0xFE; + kt_data->info.sport = sport; + + return 0; +} + +static int nbl_flow_cfg_l3_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; + u8 sport; + + kt_data->info.dst_mac = 0x3333; + kt_data->info.template = NBL_EM0_PT_PHY_UP_MULTICAST_L3; + kt_data->info.padding = 0; + + sport = param.eth; + kt_data->info.sport = sport + NBL_SPORT_ETH_OFFSET; + + return 0; +} + +static int nbl_flow_cfg_l3_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; + u8 sport; + + kt_data->info.dst_mac = 0x3333; + kt_data->info.template = NBL_EM0_PT_PHY_DOWN_MULTICAST_L3; + kt_data->info.padding = 0; + + sport = param.eth; + if (eth_mode == NBL_TWO_ETHERNET_PORT) + sport &= 0xFE; + kt_data->info.sport = sport; + + return 0; +} + +static void nbl_flow_cfg_kt_action_up_tnl(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_lldp_lacp_up(union nbl_common_data_u *data, + u32 action0, u32 action1) +{ + union nbl_l2_phy_lldp_lacp_data_u *kt_data = (union nbl_l2_phy_lldp_lacp_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_up(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_up_data_u *kt_data = (union nbl_l2_phy_up_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_down(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_down_data_u *kt_data = (union nbl_l2_phy_down_data_u *)data; + + kt_data->info.act0 = action0; +} + +static void nbl_flow_cfg_kt_action_l2_up(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_up_multi_data_u *kt_data = (union nbl_l2_phy_up_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static void nbl_flow_cfg_kt_action_l2_down(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l2_phy_down_multi_data_u *kt_data = (union nbl_l2_phy_down_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static void nbl_flow_cfg_kt_action_l3_up(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l3_phy_up_multi_data_u *kt_data = (union nbl_l3_phy_up_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static void 
nbl_flow_cfg_kt_action_l3_down(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_l3_phy_down_multi_data_u *kt_data = (union nbl_l3_phy_down_multi_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +static int nbl_flow_cfg_action_tls_up(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + union nbl_action_data set_prbac_idx = {.data = 0}; + + set_prbac_idx.prbac_idx.prbac_id = (u16)param.index; + + *action0 = set_prbac_idx.data + (NBL_ACT_SET_PRBAC << 16); + + return 0; +} + +static int nbl_flow_cfg_tls_up_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_phy_ul4s_data_u *kt_data = (union nbl_phy_ul4s_data_u *)data; + u16 sport, dport; + + sport = param.eth + NBL_SPORT_ETH_OFFSET; + dport = (0x2 << 10) + param.vsi; + + if (param.type == NBL_KT_HALF_MODE) { + kt_data->ipv4_info.template = NBL_EM0_PT_PHY_UL4S_IPV4; + kt_data->ipv4_info.sip_high = param.data[1] >> 4; + kt_data->ipv4_info.sip_low = param.data[1]; + kt_data->ipv4_info.dip_high = param.data[5] >> 4; + kt_data->ipv4_info.dip_low = param.data[5]; + kt_data->ipv4_info.l4_sport = param.data[9]; + kt_data->ipv4_info.l4_dport = param.data[10]; + kt_data->ipv4_info.sport = sport; + } else { + kt_data->ipv6_info.template = NBL_EM0_PT_PHY_UL4S_IPV6; + kt_data->ipv6_info.sip1 = ((u64)param.data[1] << 28) + (param.data[2] >> 4); + kt_data->ipv6_info.sip2 = ((u64)param.data[2] << 60) + + ((u64)param.data[3] << 28) + (param.data[4] >> 4); + kt_data->ipv6_info.sip3 = param.data[4]; + kt_data->ipv6_info.l4_sport = param.data[9]; + kt_data->ipv6_info.l4_dport = param.data[10]; + kt_data->ipv6_info.dport = dport; + kt_data->ipv6_info.sport = sport; + } + + return 0; +} + +static void nbl_flow_cfg_kt_action_tls_up(union nbl_common_data_u *data, u32 action0, u32 action1) +{ + union nbl_phy_ul4s_data_u *kt_data = (union nbl_phy_ul4s_data_u *)data; + + kt_data->ipv4_info.act0 = action0; +} + +static int nbl_flow_cfg_action_ipsec_down(struct nbl_flow_param param, u32 *action0, u32 *action1) +{ + union nbl_action_data set_prbac_idx = {.data = 0}; + + set_prbac_idx.prbac_idx.prbac_id = (u16)param.index; + + *action0 = set_prbac_idx.data + (NBL_ACT_SET_PRBAC << 16); + + return 0; +} + +static int nbl_flow_cfg_ipsec_down_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_phy_dprbac_data_u *kt_data = (union nbl_phy_dprbac_data_u *)data; + u16 sport; + + sport = param.eth; + if (param.type == NBL_KT_HALF_MODE) { + kt_data->ipv4_info.template = NBL_EM0_PT_PHY_DPRBAC_IPV4; + kt_data->ipv4_info.sip_high = param.data[1] >> 4; + kt_data->ipv4_info.sip_low = param.data[1]; + kt_data->ipv4_info.dip_high = param.data[5] >> 4; + kt_data->ipv4_info.dip_low = param.data[5]; + kt_data->ipv4_info.sport = sport; + } else { + kt_data->ipv6_info.template = NBL_EM0_PT_PHY_DPRBAC_IPV6; + kt_data->ipv6_info.sip1 = (param.data[1] >> 4); + kt_data->ipv6_info.sip2 = ((u64)param.data[1] << 60) + ((u64)param.data[2] << 28) + + (param.data[3] >> 4); + kt_data->ipv6_info.sip3 = ((u64)param.data[3] << 32) + param.data[4]; + kt_data->ipv6_info.dip1 = (param.data[5] >> 4); + kt_data->ipv6_info.dip2 = ((u64)param.data[5] << 60) + ((u64)param.data[6] << 28) + + (param.data[7] >> 4); + kt_data->ipv6_info.dip3 = ((u64)param.data[7] << 32) + param.data[8]; + kt_data->ipv6_info.sport = sport; + } + + return 0; +} + +static void nbl_flow_cfg_kt_action_ipsec_down(union nbl_common_data_u *data, + u32 action0, u32 action1) +{ + union 
nbl_phy_dprbac_data_u *kt_data = (union nbl_phy_dprbac_data_u *)data; + + kt_data->ipv4_info.act0 = action0; +} + +static int nbl_flow_cfg_action_nd_upcall(struct nbl_flow_param param, + u32 *action0, u32 *action1) +{ + *action1 = 0; + + /* For TC, jump to ACL, the upcall action will be overwritten; + * For PMD, upcall and jump to EPRO, skipping ACL + */ + if (param.for_pmd) + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_UPCALL, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_EPRO); + else + *action0 = nbl_flow_cfg_action_set_dport(AUX_FWD_TYPE_UPCALL, + SET_DPORT_TYPE_VSI_HOST, + param.vsi, NEXT_STG_SEL_ACL_S0); + return 0; +} + +static int nbl_flow_cfg_nd_upcall_key_value(union nbl_common_data_u *data, + struct nbl_flow_param param, u8 eth_mode) +{ + union nbl_nd_upcall_data_u *kt_data = (union nbl_nd_upcall_data_u *)data; + + kt_data->info.template = NBL_EM0_PT_PMD_ND_UPCALL; + kt_data->info.ptype = param.priv_data; + + return 0; +} + +static void nbl_flow_cfg_kt_action_nd_upcall(union nbl_common_data_u *data, + u32 action0, u32 action1) +{ + union nbl_nd_upcall_data_u *kt_data = (union nbl_nd_upcall_data_u *)data; + + kt_data->info.act0 = action0; + kt_data->info.act1 = action1; +} + +#define NBL_FLOW_OPS_ARR_ENTRY(type, action_func, kt_func, kt_action_func) \ + [type] = {.cfg_action = action_func, .cfg_key = kt_func, \ + .cfg_kt_action = kt_action_func} +static const struct nbl_flow_rule_cfg_ops cfg_ops[] = { + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_UP_TNL, + nbl_flow_cfg_action_up_tnl, + nbl_flow_cfg_up_tnl_key_value, + nbl_flow_cfg_kt_action_up_tnl), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_LLDP_LACP_UP, + nbl_flow_cfg_action_lldp_lacp_up, + nbl_flow_cfg_lldp_lacp_up_key_value, + nbl_flow_cfg_kt_action_lldp_lacp_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_UP, + nbl_flow_cfg_action_up, + nbl_flow_cfg_up_key_value, + nbl_flow_cfg_kt_action_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_DOWN, + nbl_flow_cfg_action_down, + nbl_flow_cfg_down_key_value, + nbl_flow_cfg_kt_action_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_UP, + nbl_flow_cfg_action_l2_up, + nbl_flow_cfg_l2_up_key_value, + nbl_flow_cfg_kt_action_l2_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L2_DOWN, + nbl_flow_cfg_action_l2_down, + nbl_flow_cfg_l2_down_key_value, + nbl_flow_cfg_kt_action_l2_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_UP, + nbl_flow_cfg_action_l3_up, + nbl_flow_cfg_l3_up_key_value, + nbl_flow_cfg_kt_action_l3_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_L3_DOWN, + nbl_flow_cfg_action_l3_down, + nbl_flow_cfg_l3_down_key_value, + nbl_flow_cfg_kt_action_l3_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_TLS_UP, + nbl_flow_cfg_action_tls_up, + nbl_flow_cfg_tls_up_key_value, + nbl_flow_cfg_kt_action_tls_up), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_IPSEC_DOWN, + nbl_flow_cfg_action_ipsec_down, + nbl_flow_cfg_ipsec_down_key_value, + nbl_flow_cfg_kt_action_ipsec_down), + NBL_FLOW_OPS_ARR_ENTRY(NBL_FLOW_PMD_ND_UPCALL, + nbl_flow_cfg_action_nd_upcall, + nbl_flow_cfg_nd_upcall_key_value, + nbl_flow_cfg_kt_action_nd_upcall), +}; + +static int nbl_flow_alloc_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_fem_entry *flow) +{ + u32 flow_id; + + if (flow->flow_type == NBL_KT_HALF_MODE) { + flow_id = find_first_zero_bit(flow_mgt->flow_id_bitmap, NBL_MACVLAN_TABLE_LEN); + if (flow_id == NBL_MACVLAN_TABLE_LEN) + return -ENOSPC; + set_bit(flow_id, flow_mgt->flow_id_bitmap); + } else { + flow_id = nbl_common_find_available_idx(flow_mgt->flow_id_bitmap, + NBL_MACVLAN_TABLE_LEN, 2, 2); + if (flow_id == NBL_MACVLAN_TABLE_LEN) + return -ENOSPC; + 
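+		/* A full-mode key spans two KT entries, so a full-mode flow
+		 * reserves two consecutive flow ids (the trailing "2, 2"
+		 * arguments presumably ask nbl_common_find_available_idx()
+		 * for a 2-aligned run of two free bits) and sets both below.
+		 */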
set_bit(flow_id, flow_mgt->flow_id_bitmap); + set_bit(flow_id + 1, flow_mgt->flow_id_bitmap); + } + + flow->flow_id = flow_id; + return 0; +} + +static void nbl_flow_free_flow_id(struct nbl_flow_mgt *flow_mgt, struct nbl_flow_fem_entry *flow) +{ + if (flow->flow_id == U16_MAX) + return; + + if (flow->flow_type == NBL_KT_HALF_MODE) { + clear_bit(flow->flow_id, flow_mgt->flow_id_bitmap); + flow->flow_id = 0xFFFF; + } else { + clear_bit(flow->flow_id, flow_mgt->flow_id_bitmap); + clear_bit(flow->flow_id + 1, flow_mgt->flow_id_bitmap); + flow->flow_id = 0xFFFF; + } +} + +static int nbl_flow_alloc_tcam_id(struct nbl_flow_mgt *flow_mgt, + struct nbl_tcam_item *tcam_item) +{ + u32 tcam_id; + + tcam_id = find_first_zero_bit(flow_mgt->tcam_id, NBL_TCAM_TABLE_LEN); + if (tcam_id == NBL_TCAM_TABLE_LEN) + return -ENOSPC; + + set_bit(tcam_id, flow_mgt->tcam_id); + tcam_item->tcam_index = tcam_id; + + return 0; +} + +static void nbl_flow_free_tcam_id(struct nbl_flow_mgt *flow_mgt, + struct nbl_tcam_item *tcam_item) +{ + clear_bit(tcam_item->tcam_index, flow_mgt->tcam_id); + tcam_item->tcam_index = 0; +} + +static void nbl_flow_set_mt_input(struct nbl_mt_input *mt_input, union nbl_common_data_u *kt_data, + u8 type, u16 flow_id) +{ + int i; + u16 key_len; + + key_len = ((type) == NBL_KT_HALF_MODE ? NBL_KT_BYTE_HALF_LEN : NBL_KT_BYTE_LEN); + for (i = 0; i < key_len; i++) + mt_input->key[i] = kt_data->hash_key[key_len - 1 - i]; + + mt_input->tbl_id = flow_id + NBL_EM_PHY_KT_OFFSET; + mt_input->depth = 0; + mt_input->power = 10; +} + +static void nbl_flow_key_hash(struct nbl_flow_fem_entry *flow, struct nbl_mt_input *mt_input) +{ + u16 ht0_hash = 0; + u16 ht1_hash = 0; + + ht0_hash = NBL_CRC16_CCITT(mt_input->key, NBL_KT_BYTE_LEN); + ht1_hash = NBL_CRC16_IBM(mt_input->key, NBL_KT_BYTE_LEN); + flow->ht0_hash = nbl_hash_transfer(ht0_hash, mt_input->power, mt_input->depth); + flow->ht1_hash = nbl_hash_transfer(ht1_hash, mt_input->power, mt_input->depth); +} + +static bool nbl_pp_ht0_ht1_search(struct nbl_flow_ht_mng *pp_ht0_mng, u16 ht0_hash, + struct nbl_flow_ht_mng *pp_ht1_mng, u16 ht1_hash, + struct nbl_common_info *common) +{ + struct nbl_flow_ht_tbl *node0 = NULL; + struct nbl_flow_ht_tbl *node1 = NULL; + u16 i = 0; + bool is_find = false; + + node0 = pp_ht0_mng->hash_map[ht0_hash]; + if (node0) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node0->key[i].vid && node0->key[i].ht_other_index == ht1_hash) { + is_find = true; + nbl_info(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + node0->key[i].vid, node0->key[i].kt_index); + return is_find; + } + + node1 = pp_ht1_mng->hash_map[ht1_hash]; + if (node1) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node1->key[i].vid && node1->key[i].ht_other_index == ht0_hash) { + is_find = true; + nbl_info(common, NBL_DEBUG_FLOW, + "Conflicted ht on vid %d and kt_index %u\n", + node1->key[i].vid, node1->key[i].kt_index); + return is_find; + } + + return is_find; +} + +static bool nbl_flow_check_ht_conflict(struct nbl_flow_ht_mng *pp_ht0_mng, + struct nbl_flow_ht_mng *pp_ht1_mng, + u16 ht0_hash, u16 ht1_hash, struct nbl_common_info *common) +{ + return nbl_pp_ht0_ht1_search(pp_ht0_mng, ht0_hash, pp_ht1_mng, ht1_hash, common); +} + +static int nbl_flow_find_ht_avail_table(struct nbl_flow_ht_mng *pp_ht0_mng, + struct nbl_flow_ht_mng *pp_ht1_mng, + u16 ht0_hash, u16 ht1_hash) +{ + struct nbl_flow_ht_tbl *pp_ht0_node = NULL; + struct nbl_flow_ht_tbl *pp_ht1_node = NULL; + + pp_ht0_node = pp_ht0_mng->hash_map[ht0_hash]; + pp_ht1_node = 
pp_ht1_mng->hash_map[ht1_hash];
+
+	/* Prefer the table whose bucket still has head-room: a bucket is
+	 * considered loaded once it holds more than NBL_HASH_CFT_AVL entries
+	 * and unusable at NBL_HASH_CFT_MAX; -1 sends the rule to the TCAM.
+	 */
+	if (!pp_ht0_node && !pp_ht1_node) {
+		return 0;
+	} else if (pp_ht0_node && !pp_ht1_node) {
+		if (pp_ht0_node->ref_cnt >= NBL_HASH_CFT_AVL)
+			return 1;
+		else
+			return 0;
+	} else if (!pp_ht0_node && pp_ht1_node) {
+		if (pp_ht1_node->ref_cnt >= NBL_HASH_CFT_AVL)
+			return 0;
+		else
+			return 1;
+	} else {
+		if (pp_ht0_node->ref_cnt <= NBL_HASH_CFT_AVL ||
+		    (pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL &&
+		     pp_ht0_node->ref_cnt < NBL_HASH_CFT_MAX &&
+		     pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL))
+			return 0;
+		else if ((pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL &&
+			  pp_ht1_node->ref_cnt <= NBL_HASH_CFT_AVL) ||
+			 (pp_ht0_node->ref_cnt == NBL_HASH_CFT_MAX &&
+			  pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL &&
+			  pp_ht1_node->ref_cnt < NBL_HASH_CFT_MAX))
+			return 1;
+		else
+			return -1;
+	}
+}
+
+static int nbl_flow_insert_pp_ht(struct nbl_flow_ht_mng *pp_ht_mng,
+				 u16 hash, u16 hash_other, u32 key_index)
+{
+	struct nbl_flow_ht_tbl *node;
+	int i;
+
+	node = pp_ht_mng->hash_map[hash];
+	if (!node) {
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		if (!node)
+			return -ENOSPC;
+		pp_ht_mng->hash_map[hash] = node;
+	}
+
+	for (i = 0; i < NBL_HASH_CFT_MAX; i++) {
+		if (node->key[i].vid == 0) {
+			node->key[i].vid = 1;
+			node->key[i].ht_other_index = hash_other;
+			node->key[i].kt_index = key_index;
+			node->ref_cnt++;
+			break;
+		}
+	}
+
+	/* Every way of this bucket is already in use */
+	if (i == NBL_HASH_CFT_MAX)
+		return -ENOSPC;
+
+	return i;
+}
+
+static int nbl_flow_add_ht(struct nbl_ht_item *ht_item, struct nbl_flow_fem_entry *flow,
+			   u32 key_index, struct nbl_flow_ht_mng *pp_ht_mng, u8 ht_table)
+{
+	u16 ht_hash;
+	u16 ht_other_hash;
+	int bucket;
+
+	ht_hash = ht_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash;
+	ht_other_hash = ht_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash;
+
+	/* hash_bucket is a u16, so the error check must be made on a signed
+	 * local before the assignment; testing ht_item->hash_bucket < 0
+	 * could never fire. Propagate the failure so the caller does not
+	 * program the hardware with a zeroed ht_item.
+	 */
+	bucket = nbl_flow_insert_pp_ht(pp_ht_mng, ht_hash, ht_other_hash, key_index);
+	if (bucket < 0)
+		return bucket;
+
+	ht_item->hash_bucket = bucket;
+	ht_item->ht_table = ht_table;
+	ht_item->key_index = key_index;
+	ht_item->ht0_hash = flow->ht0_hash;
+	ht_item->ht1_hash = flow->ht1_hash;
+
+	flow->hash_bucket = ht_item->hash_bucket;
+	flow->hash_table = ht_item->ht_table;
+
+	return 0;
+}
+
+static void nbl_flow_del_ht(struct nbl_ht_item *ht_item, struct nbl_flow_fem_entry *flow,
+			    struct nbl_flow_ht_mng *pp_ht_mng)
+{
+	struct nbl_flow_ht_tbl *pp_ht_node = NULL;
+	u16 ht_hash;
+	u16 ht_other_hash;
+	int i;
+
+	ht_hash = ht_item->ht_table == NBL_HT0 ? flow->ht0_hash : flow->ht1_hash;
+	ht_other_hash = ht_item->ht_table == NBL_HT0 ? flow->ht1_hash : flow->ht0_hash;
+
+	pp_ht_node = pp_ht_mng->hash_map[ht_hash];
+	if (!pp_ht_node)
+		return;
+
+	for (i = 0; i < NBL_HASH_CFT_MAX; i++) {
+		if (pp_ht_node->key[i].vid == 1 &&
+		    pp_ht_node->key[i].ht_other_index == ht_other_hash) {
+			memset(&pp_ht_node->key[i], 0, sizeof(pp_ht_node->key[i]));
+			pp_ht_node->ref_cnt--;
+			break;
+		}
+	}
+
+	if (!pp_ht_node->ref_cnt) {
+		kfree(pp_ht_node);
+		pp_ht_mng->hash_map[ht_hash] = NULL;
+	}
+}
+
+static int nbl_flow_send_2hw(struct nbl_resource_mgt *res_mgt, struct nbl_ht_item ht_item,
+			     struct nbl_kt_item kt_item, u8 key_type)
+{
+	struct nbl_phy_ops *phy_ops;
+	u16 hash, hash_other;
+	int ret = 0;
+
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	ret = phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key,
+			      ht_item.key_index, key_type);
+	if (ret)
+		goto set_kt_fail;
+
+	hash = ht_item.ht_table == NBL_HT0 ? ht_item.ht0_hash : ht_item.ht1_hash;
+	hash_other = ht_item.ht_table == NBL_HT0 ?
ht_item.ht1_hash : ht_item.ht0_hash; + ret = phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, hash_other, ht_item.ht_table, + ht_item.hash_bucket, ht_item.key_index, 1); + if (ret) + goto set_ht_fail; + + ret = phy_ops->search_key(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + kt_item.kt_data.hash_key, key_type); + if (ret) + goto search_fail; + + return 0; + +search_fail: + ret = phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, 0, ht_item.ht_table, + ht_item.hash_bucket, 0, 0); +set_ht_fail: + memset(kt_item.kt_data.hash_key, 0, sizeof(kt_item.kt_data.hash_key)); + phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key, + ht_item.key_index, key_type); +set_kt_fail: + return ret; +} + +static int nbl_flow_del_2hw(struct nbl_resource_mgt *res_mgt, struct nbl_ht_item ht_item, + struct nbl_kt_item kt_item, u8 key_type) +{ + struct nbl_phy_ops *phy_ops; + u16 hash; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + memset(kt_item.kt_data.hash_key, 0, sizeof(kt_item.kt_data.hash_key)); + phy_ops->set_kt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), kt_item.kt_data.hash_key, + ht_item.key_index, key_type); + + hash = ht_item.ht_table == NBL_HT0 ? ht_item.ht0_hash : ht_item.ht1_hash; + phy_ops->set_ht(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hash, 0, ht_item.ht_table, + ht_item.hash_bucket, 0, 0); + + return 0; +} + +static void nbl_flow_cfg_tcam(struct nbl_tcam_item *tcam_item, struct nbl_ht_item *ht_item, + struct nbl_kt_item *kt_item, u32 action0, u32 action1) +{ + tcam_item->key_mode = NBL_KT_HALF_MODE; + tcam_item->pp_type = NBL_PT_PP0; + tcam_item->tcam_action[0] = action0; + tcam_item->tcam_action[1] = action1; + memcpy(&tcam_item->ht_item, ht_item, sizeof(struct nbl_ht_item)); + memcpy(&tcam_item->kt_item, kt_item, sizeof(struct nbl_kt_item)); +} + +static int nbl_flow_add_tcam(struct nbl_resource_mgt *res_mgt, struct nbl_tcam_item tcam_item) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->add_tcam(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_item.tcam_index, + tcam_item.kt_item.kt_data.hash_key, tcam_item.tcam_action, + tcam_item.key_mode, NBL_PT_PP0); +} + +static void nbl_flow_del_tcam(struct nbl_resource_mgt *res_mgt, struct nbl_tcam_item tcam_item) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->del_tcam(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), tcam_item.tcam_index, + tcam_item.key_mode, NBL_PT_PP0); +} + +static int nbl_flow_add_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_param param, + s32 type, struct nbl_flow_fem_entry *flow) +{ + struct nbl_flow_mgt *flow_mgt; + struct nbl_phy_ops *phy_ops; + struct nbl_common_info *common; + struct nbl_mt_input mt_input; + struct nbl_ht_item ht_item; + struct nbl_kt_item kt_item; + struct nbl_tcam_item *tcam_item = NULL; + struct nbl_flow_ht_mng *pp_ht_mng = NULL; + u32 action0, action1; + u32 cost = 0; + int ht_table; + int ret = 0; + + memset(&mt_input, 0, sizeof(mt_input)); + memset(&ht_item, 0, sizeof(ht_item)); + memset(&kt_item, 0, sizeof(kt_item)); + + tcam_item = kzalloc(sizeof(*tcam_item), GFP_KERNEL); + if (!tcam_item) + return -ENOMEM; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + flow->flow_type = param.type; + flow->type = type; + flow->flow_id = 0xFFFF; + + if (type >= NBL_FLOW_TYPE_MAX && type < NBL_FLOW_ACCEL_MAX) { + if (flow->flow_type == NBL_KT_FULL_MODE) + cost = 2; + else + cost = 1; + + if ((flow_mgt->accel_flow_count + cost) > NBL_MACVLAN_TABLE_LEN / 2) { 
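+		/* Accelerated (kTLS/IPsec) rules share the MACVLAN flow-id
+		 * space: a full-mode key costs two flow ids, a half-mode key
+		 * one, and accel rules may take at most half of the table.
+		 */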
+			ret = -ENOSPC;
+			goto free_mem;
+		}
+	}
+
+	ret = nbl_flow_alloc_flow_id(flow_mgt, flow);
+	if (ret)
+		goto free_mem;
+
+	ret = cfg_ops[type].cfg_action(param, &action0, &action1);
+	if (ret)
+		goto free_flow_id;
+
+	ret = cfg_ops[type].cfg_key(&kt_item.kt_data, param, NBL_COMMON_TO_ETH_MODE(common));
+	if (ret)
+		goto free_flow_id;
+
+	nbl_flow_set_mt_input(&mt_input, &kt_item.kt_data, param.type, flow->flow_id);
+	nbl_flow_key_hash(flow, &mt_input);
+
+	if (nbl_flow_check_ht_conflict(&flow_mgt->pp0_ht0_mng, &flow_mgt->pp0_ht1_mng,
+				       flow->ht0_hash, flow->ht1_hash, common))
+		flow->tcam_flag = true;
+
+	ht_table = nbl_flow_find_ht_avail_table(&flow_mgt->pp0_ht0_mng,
+						&flow_mgt->pp0_ht1_mng,
+						flow->ht0_hash, flow->ht1_hash);
+	if (ht_table < 0)
+		flow->tcam_flag = true;
+
+	if (!flow->tcam_flag) {
+		pp_ht_mng = ht_table == NBL_HT0 ? &flow_mgt->pp0_ht0_mng
+						: &flow_mgt->pp0_ht1_mng;
+		ret = nbl_flow_add_ht(&ht_item, flow, mt_input.tbl_id, pp_ht_mng, ht_table);
+		if (ret)
+			goto free_flow_id;
+
+		cfg_ops[type].cfg_kt_action(&kt_item.kt_data, action0, action1);
+		ret = nbl_flow_send_2hw(res_mgt, ht_item, kt_item, param.type);
+	} else {
+		ret = nbl_flow_alloc_tcam_id(flow_mgt, tcam_item);
+		if (ret)
+			goto free_flow_id;
+
+		nbl_flow_cfg_tcam(tcam_item, &ht_item, &kt_item, action0, action1);
+		flow->tcam_index = tcam_item->tcam_index;
+
+		ret = nbl_flow_add_tcam(res_mgt, *tcam_item);
+	}
+
+	if (!ret) {
+		flow_mgt->accel_flow_count += cost;
+		goto free_mem;
+	}
+
+	/* Roll back only what was actually set up: the earlier failures jump
+	 * straight to free_flow_id so that a never-allocated TCAM index is
+	 * not freed and nbl_flow_del_ht() is not called with a NULL manager.
+	 */
+	if (flow->tcam_flag)
+		nbl_flow_free_tcam_id(flow_mgt, tcam_item);
+	else
+		nbl_flow_del_ht(&ht_item, flow, pp_ht_mng);
+
+free_flow_id:
+	nbl_flow_free_flow_id(flow_mgt, flow);
+
+free_mem:
+	kfree(tcam_item);
+
+	return ret;
+}
+
+static void nbl_flow_del_flow(struct nbl_resource_mgt *res_mgt, struct nbl_flow_fem_entry *flow)
+{
+	struct nbl_flow_mgt *flow_mgt;
+	struct nbl_phy_ops *phy_ops;
+	struct nbl_ht_item ht_item;
+	struct nbl_kt_item kt_item;
+	struct nbl_tcam_item tcam_item;
+	struct nbl_flow_ht_mng *pp_ht_mng = NULL;
+
+	if (flow->flow_id == 0xFFFF)
+		return;
+
+	memset(&ht_item, 0, sizeof(ht_item));
+	memset(&kt_item, 0, sizeof(kt_item));
+	memset(&tcam_item, 0, sizeof(tcam_item));
+
+	flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	if (!flow->tcam_flag) {
+		ht_item.ht_table = flow->hash_table;
+		ht_item.ht0_hash = flow->ht0_hash;
+		ht_item.ht1_hash = flow->ht1_hash;
+		ht_item.hash_bucket = flow->hash_bucket;
+
+		pp_ht_mng = flow->hash_table == NBL_HT0 ?
&flow_mgt->pp0_ht0_mng
+							: &flow_mgt->pp0_ht1_mng;
+
+		nbl_flow_del_ht(&ht_item, flow, pp_ht_mng);
+		nbl_flow_del_2hw(res_mgt, ht_item, kt_item, flow->flow_type);
+	} else {
+		tcam_item.tcam_index = flow->tcam_index;
+		nbl_flow_del_tcam(res_mgt, tcam_item);
+		nbl_flow_free_tcam_id(flow_mgt, &tcam_item);
+	}
+
+	nbl_flow_free_flow_id(flow_mgt, flow);
+
+	if (flow->type >= NBL_FLOW_TYPE_MAX && flow->type < NBL_FLOW_ACCEL_MAX) {
+		if (flow->flow_type == NBL_KT_FULL_MODE)
+			flow_mgt->accel_flow_count -= 2;
+		else
+			flow_mgt->accel_flow_count -= 1;
+	}
+}
+
+static int nbl_flow_add_mcc_node(struct nbl_flow_multi_group *multi_group,
+				 struct nbl_resource_mgt *res_mgt,
+				 int type, u16 data, u16 mcc_id, u16 head)
+{
+	struct nbl_flow_mcc_node *mcc_node = NULL, *mcc_head = NULL;
+	struct nbl_phy_ops *phy_ops;
+	u16 prev_mcc_id, mcc_action;
+	int ret = 0;
+
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	mcc_node = kzalloc(sizeof(*mcc_node), GFP_KERNEL);
+	if (!mcc_node)
+		return -ENOMEM;
+
+	switch (type) {
+	case NBL_MCC_INDEX_BOND:
+	case NBL_MCC_INDEX_ETH:
+		mcc_action = nbl_flow_cfg_action_set_dport_mcc_eth((u8)data);
+		break;
+	case NBL_MCC_INDEX_VSI:
+		mcc_action = nbl_flow_cfg_action_set_dport_mcc_vsi(data);
+		break;
+	case NBL_MCC_INDEX_BMC:
+		mcc_action = nbl_flow_cfg_action_set_dport_mcc_bmc();
+		break;
+	default:
+		/* Don't leak mcc_node on an unknown index type */
+		kfree(mcc_node);
+		return -EINVAL;
+	}
+
+	mcc_node->mcc_id = mcc_id;
+	mcc_node->mcc_head = head;
+
+	/* mcc_head must init before mcc_list */
+	if (head) {
+		list_add_tail(&mcc_node->node, &multi_group->mcc_head);
+		prev_mcc_id = NBL_MCC_ID_INVALID;
+
+		WARN_ON(!nbl_list_empty(&multi_group->mcc_list));
+		ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id,
+				       prev_mcc_id, mcc_action);
+		goto check_ret;
+	}
+
+	list_add_tail(&mcc_node->node, &multi_group->mcc_list);
+
+	if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list))
+		prev_mcc_id = NBL_MCC_ID_INVALID;
+	else
+		prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id;
+
+	/* first add mcc_list */
+	if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(&multi_group->mcc_head)) {
+		list_for_each_entry(mcc_head, &multi_group->mcc_head, node) {
+			prev_mcc_id = mcc_head->mcc_id;
+			ret |= phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id,
+						prev_mcc_id, mcc_action);
+		}
+		goto check_ret;
+	}
+
+	ret = phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+			       mcc_id, prev_mcc_id, mcc_action);
+check_ret:
+	if (ret) {
+		list_del(&mcc_node->node);
+		kfree(mcc_node);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void nbl_flow_del_mcc_node(struct nbl_flow_multi_group *multi_group,
+				  struct nbl_resource_mgt *res_mgt,
+				  struct nbl_flow_mcc_node *mcc_node)
+{
+	struct nbl_phy_ops *phy_ops;
+	struct nbl_flow_mcc_node *mcc_head = NULL;
+	u16 prev_mcc_id, next_mcc_id;
+
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	if (nbl_list_entry_is_head(mcc_node, &multi_group->mcc_list, node) ||
+	    nbl_list_entry_is_head(mcc_node, &multi_group->mcc_head, node))
+		return;
+
+	if (mcc_node->mcc_head) {
+		WARN_ON(!nbl_list_empty(&multi_group->mcc_list));
+		prev_mcc_id = NBL_MCC_ID_INVALID;
+		next_mcc_id = NBL_MCC_ID_INVALID;
+		phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id,
+				 prev_mcc_id, next_mcc_id);
+		goto free_node;
+	}
+
+	if (nbl_list_is_first(&mcc_node->node, &multi_group->mcc_list))
+		prev_mcc_id = NBL_MCC_ID_INVALID;
+	else
+		prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id;
+
+	if (nbl_list_is_last(&mcc_node->node, &multi_group->mcc_list))
+		next_mcc_id = NBL_MCC_ID_INVALID;
+	else
+		next_mcc_id =
list_next_entry(mcc_node, node)->mcc_id;
+
+	if (prev_mcc_id == NBL_MCC_ID_INVALID && !nbl_list_empty(&multi_group->mcc_head)) {
+		list_for_each_entry(mcc_head, &multi_group->mcc_head, node) {
+			prev_mcc_id = mcc_head->mcc_id;
+			phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					 mcc_node->mcc_id, prev_mcc_id, next_mcc_id);
+		}
+		goto free_node;
+	}
+
+	phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id,
+			 prev_mcc_id, next_mcc_id);
+free_node:
+	list_del(&mcc_node->node);
+	kfree(mcc_node);
+}
+
+static void nbl_flow_macvlan_node_del_action_func(void *priv, void *x_key, void *y_key,
+						  void *data)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data;
+	int i;
+
+	for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++)
+		nbl_flow_del_flow(res_mgt, &rule_data->entry[i]);
+}
+
+static int nbl_flow_add_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt;
+	struct nbl_common_info *common;
+	struct nbl_flow_l2_data *rule_data;
+	void *mac_hash_tbl;
+	struct nbl_flow_param param = {0};
+	int i;
+	int ret;
+	u16 eth_id;
+	u16 node_num;
+
+	flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi);
+	mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id];
+	node_num = nbl_common_get_hash_xy_node_num(mac_hash_tbl);
+	if (node_num >= flow_mgt->unicast_mac_threshold)
+		return -ENOSPC;
+
+	if (nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan))
+		return -EEXIST;
+
+	rule_data = kzalloc(sizeof(*rule_data), GFP_KERNEL);
+	if (!rule_data)
+		return -ENOMEM;
+
+	param.mac = mac;
+	param.vid = vlan;
+	param.eth = eth_id;
+	param.vsi = vsi;
+
+	for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++) {
+		if (nbl_flow_add_flow(res_mgt, param, i, &rule_data->entry[i]))
+			break;
+	}
+	if (i != NBL_FLOW_MACVLAN_MAX) {
+		while (--i >= 0)
+			nbl_flow_del_flow(res_mgt, &rule_data->entry[i]);
+		goto rule_err;
+	}
+
+	rule_data->vsi = vsi;
+	ret = nbl_common_alloc_hash_xy_node(mac_hash_tbl, mac, &vlan, rule_data);
+	if (ret)
+		goto node_err;
+
+	/* The hash table keeps its own copy of rule_data, so the local
+	 * buffer can be freed on success as well.
+	 */
+	kfree(rule_data);
+
+	return 0;
+
+node_err:
+	for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++)
+		nbl_flow_del_flow(res_mgt, &rule_data->entry[i]);
+rule_err:
+	kfree(rule_data);
+	return -EFAULT;
+}
+
+static void nbl_flow_del_macvlan(void *priv, u8 *mac, u16 vlan, u16 vsi)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt;
+	struct nbl_flow_l2_data *rule_data;
+	void *mac_hash_tbl;
+	int i;
+	u16 eth_id;
+
+	flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi);
+	mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id];
+
+	rule_data = nbl_common_get_hash_xy_node(mac_hash_tbl, mac, &vlan);
+	if (!rule_data)
+		return;
+
+	if (rule_data->vsi != vsi)
+		return;
+
+	for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++)
+		nbl_flow_del_flow(res_mgt, &rule_data->entry[i]);
+
+	nbl_common_free_hash_xy_node(mac_hash_tbl, mac, &vlan);
+}
+
+static int nbl_flow_add_lag(void *priv, u16 vsi)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_flow_lacp_rule *rule;
+	struct nbl_flow_param param = {0};
+
+	list_for_each_entry(rule, &flow_mgt->lacp_list, node)
+		if (rule->vsi == vsi)
+			return 0;
+
+	rule =
kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + param.vsi = vsi; + param.ether_type = ETH_P_SLOW; + + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_LLDP_LACP_UP, &rule->entry)) { + nbl_err(common, NBL_DEBUG_FLOW, "Fail to add lag flow for vsi %d", vsi); + kfree(rule); + return -EFAULT; + } + + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->lacp_list); + + return 0; +} + +static void nbl_flow_del_lag(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_lacp_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->lacp_list, node) + if (rule->vsi == vsi) + break; + + if (nbl_list_entry_is_head(rule, &flow_mgt->lacp_list, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->entry); + + list_del(&rule->node); + kfree(rule); +} + +static int nbl_flow_add_lldp(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_flow_lldp_rule *rule; + struct nbl_flow_param param = {0}; + + list_for_each_entry(rule, &flow_mgt->lldp_list, node) + if (rule->vsi == vsi) + return 0; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + param.vsi = vsi; + param.ether_type = ETH_P_LLDP; + + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_LLDP_LACP_UP, &rule->entry)) { + nbl_err(common, NBL_DEBUG_FLOW, "Fail to add lldp flow for vsi %d", vsi); + kfree(rule); + return -EFAULT; + } + + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->lldp_list); + + return 0; +} + +static void nbl_flow_del_lldp(void *priv, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_lldp_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->lldp_list, node) + if (rule->vsi == vsi) + break; + + if (nbl_list_entry_is_head(rule, &flow_mgt->lldp_list, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->entry); + + list_del(&rule->node); + kfree(rule); +} + +static int nbl_flow_cfg_lag_mcc(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_multi_group *multi_group; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_flow_mcc_index_key index_key = {0}; + u16 mcc_id, mcc_action; + + multi_group = &flow_mgt->multi_flow[eth_id]; + NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_ETH, eth_id); + mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL); + + mcc_action = enable ? 
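+		     /* Repoint the per-eth MCC node at the bond while the lag
+		      * is enabled, and back at the plain eth port otherwise.
+		      */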
nbl_flow_cfg_action_set_dport_mcc_lag(lag_id)
+			   : nbl_flow_cfg_action_set_dport_mcc_eth(eth_id);
+
+	return phy_ops->cfg_lag_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_id, mcc_action);
+}
+
+static int nbl_flow_add_multi_rule(void *priv, u16 vsi)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_flow_multi_group *multi_group;
+	struct nbl_flow_mcc_index_key index_key = {0};
+	u16 mcc_id;
+	u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi);
+
+	NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi);
+	mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL);
+
+	multi_group = &flow_mgt->multi_flow[eth];
+
+	return nbl_flow_add_mcc_node(multi_group, res_mgt, NBL_MCC_INDEX_VSI, vsi, mcc_id, 0);
+}
+
+static void nbl_flow_del_multi_rule(void *priv, u16 vsi)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_flow_multi_group *multi_group;
+	struct nbl_flow_mcc_node *mcc_node;
+	struct nbl_flow_mcc_index_key index_key = {0};
+	u8 eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi);
+	u16 mcc_id;
+
+	NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_VSI, vsi);
+	mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL);
+	nbl_common_free_index(flow_mgt->mcc_tbl_priv, &index_key);
+
+	multi_group = &flow_mgt->multi_flow[eth];
+
+	list_for_each_entry(mcc_node, &multi_group->mcc_list, node)
+		if (mcc_node->mcc_id == mcc_id) {
+			nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node);
+			return;
+		}
+}
+
+static int nbl_flow_add_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth)
+{
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_flow_multi_group *multi_group = &flow_mgt->multi_flow[eth];
+	struct nbl_flow_mcc_index_key index_key = {0};
+	struct nbl_flow_param param_down = {0}, param_up = {0};
+	struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe;
+	int i, ret;
+
+	/* The structure for MCC list is:
+	 *
+	 *                 l2/l3_mc_up
+	 *                      |
+	 *                      |
+	 *     BMC ->           |
+	 *               PF -> VF -> ...
+	 *     ETH ->           |
+	 *                      |
+	 *                      |
+	 *                 l2/l3_mc_down
+	 *
+	 * So the up mc pkts are sent to the BMC and need not be broadcast
+	 * back to the eth port, while the down mc pkts are sent to the eth
+	 * port but not to the BMC.
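+	 *
+	 * For example, an up mc pkt starts replication at the BMC head node
+	 * and is copied to every PF/VF node chained behind it, while a down
+	 * mc pkt starts at the ETH head node and reaches the same PF/VF
+	 * chain.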
+ */ + NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_ETH, eth); + param_down.mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL); + param_down.eth = eth; + + NBL_FLOW_MCC_INDEX_KEY_INIT(&index_key, NBL_MCC_INDEX_BMC, eth); + param_up.mcc_id = nbl_common_get_index(flow_mgt->mcc_tbl_priv, &index_key, NULL); + param_up.eth = eth; + + multi_group = &flow_mgt->multi_flow[eth]; + for (i = 0; i < NBL_FLOW_TYPE_MAX - NBL_FLOW_MACVLAN_MAX; i++) { + if (i % 2) + ret = nbl_flow_add_flow(res_mgt, param_down, i + NBL_FLOW_MACVLAN_MAX, + &multi_group->entry[i]); + else + ret = nbl_flow_add_flow(res_mgt, param_up, i + NBL_FLOW_MACVLAN_MAX, + &multi_group->entry[i]); + if (ret) + goto add_macvlan_fail; + } + + ret = nbl_flow_add_mcc_node(multi_group, res_mgt, NBL_MCC_INDEX_BMC, + NBL_FLOW_MCC_BMC_DPORT, param_up.mcc_id, 1); + if (ret) + goto add_mcc_bmc_fail; + + ret = nbl_flow_add_mcc_node(multi_group, res_mgt, NBL_MCC_INDEX_ETH, eth, + param_down.mcc_id, 1); + if (ret) + goto add_mcc_eth_fail; + + multi_group->ether_id = eth; + multi_group->mcc_id = param_up.mcc_id; + + return 0; + +add_mcc_eth_fail: + list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_head, node) + nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); +add_mcc_bmc_fail: +add_macvlan_fail: + while (--i >= 0) + nbl_flow_del_flow(res_mgt, &multi_group->entry[i]); + return ret; +} + +static void nbl_flow_del_multi_group(struct nbl_resource_mgt *res_mgt, u8 eth) +{ + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_multi_group *multi_group = &flow_mgt->multi_flow[eth]; + struct nbl_flow_mcc_node *mcc_node, *mcc_node_safe; + int i; + + if (!multi_group->mcc_id) + return; + + for (i = NBL_FLOW_MACVLAN_MAX; i < NBL_FLOW_TYPE_MAX; i++) + nbl_flow_del_flow(res_mgt, &multi_group->entry[i - NBL_FLOW_MACVLAN_MAX]); + + list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_list, node) + nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); + + list_for_each_entry_safe(mcc_node, mcc_node_safe, &multi_group->mcc_head, node) + nbl_flow_del_mcc_node(multi_group, res_mgt, mcc_node); + + memset(multi_group, 0, sizeof(*multi_group)); + INIT_LIST_HEAD(&multi_group->mcc_list); + INIT_LIST_HEAD(&multi_group->mcc_head); +} + +static void nbl_flow_remove_multi_group(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) + nbl_flow_del_multi_group(res_mgt, i); +} + +static int nbl_flow_setup_multi_group(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i, ret = 0; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + ret = nbl_flow_add_multi_group(res_mgt, i); + if (ret) + goto fail; + } + + return 0; + +fail: + nbl_flow_remove_multi_group(res_mgt); + return ret; +} + +static int nbl_res_flow_cfg_duppkt_mcc(void *priv, struct nbl_lag_member_list_param *param) +{ + return 0; +} + +static int nbl_flow_macvlan_node_vsi_match_func(void *condition, void *x_key, void *y_key, + void *data) +{ + u16 vsi = *(u16 *)condition; + struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data; + + return rule_data->vsi == vsi ? 
0 : -1;
+}
+
+static void nbl_flow_clear_accel_flow(void *priv, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_flow_dipsec_rule *dipsec_rule, *dipsec_rule_safe;
+	struct nbl_flow_ul4s_rule *ul4s_rule, *ul4s_rule_safe;
+
+	list_for_each_entry_safe(dipsec_rule, dipsec_rule_safe, &flow_mgt->dprbac_head, node)
+		if (dipsec_rule->vsi == vsi_id) {
+			nbl_flow_del_flow(res_mgt, &dipsec_rule->dipsec_entry);
+			list_del(&dipsec_rule->node);
+			kfree(dipsec_rule);
+		}
+
+	list_for_each_entry_safe(ul4s_rule, ul4s_rule_safe, &flow_mgt->ul4s_head, node)
+		if (ul4s_rule->vsi == vsi_id) {
+			nbl_flow_del_flow(res_mgt, &ul4s_rule->ul4s_entry);
+			list_del(&ul4s_rule->node);
+			kfree(ul4s_rule);
+		}
+}
+
+static void nbl_flow_clear_flow(void *priv, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	void *mac_hash_tbl;
+	struct nbl_hash_xy_tbl_scan_key scan_key;
+	u8 eth_id;
+
+	eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id);
+	mac_hash_tbl = flow_mgt->mac_hash_tbl[eth_id];
+
+	NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_DELETE, NBL_HASH_TBL_ALL_SCAN,
+				      false, NULL, NULL, &vsi_id,
+				      &nbl_flow_macvlan_node_vsi_match_func, res_mgt,
+				      &nbl_flow_macvlan_node_del_action_func);
+	nbl_common_scan_hash_xy_node(mac_hash_tbl, &scan_key);
+
+	nbl_flow_del_multi_rule(res_mgt, vsi_id);
+}
+
+static const char template_name[NBL_FLOW_TYPE_MAX][16] = {
+	"up_tnl",
+	"up",
+	"down",
+	"l2_mc_up",
+	"l2_mc_down",
+	"l3_mc_up",
+	"l3_mc_down"
+};
+
+static void nbl_flow_id_dump(struct seq_file *m, struct nbl_flow_fem_entry *entry,
+			     const char *title)
+{
+	seq_printf(m, "%s: flow_id %u, ht0 0x%x, ht1 0x%x, table: %u, bucket: %u\n", title,
+		   entry->flow_id, entry->ht0_hash, entry->ht1_hash,
+		   entry->hash_table, entry->hash_bucket);
+}
+
+static void nbl_flow_macvlan_node_show_action_func(void *priv, void *x_key, void *y_key,
+						   void *data)
+{
+	struct seq_file *m = (struct seq_file *)priv;
+	u8 *mac = (u8 *)x_key;
+	u16 vlan = *(u16 *)y_key;
+	struct nbl_flow_l2_data *rule_data = (struct nbl_flow_l2_data *)data;
+	int i;
+
+	seq_printf(m, "\nvsi %d, vlan %d MAC address %02X:%02X:%02X:%02X:%02X:%02X\n",
+		   rule_data->vsi, vlan, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+	for (i = 0; i < NBL_FLOW_MACVLAN_MAX; i++)
+		nbl_flow_id_dump(m, &rule_data->entry[i], template_name[i]);
+}
+
+static void nbl_flow_dump_flow(void *priv, struct seq_file *m)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct nbl_flow_multi_group *multi_group;
+	struct nbl_flow_lldp_rule *lldp_rule;
+	struct nbl_flow_lacp_rule *lacp_rule;
+	struct nbl_hash_xy_tbl_scan_key scan_key;
+	int i, j;
+
+	for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) {
+		multi_group = &flow_mgt->multi_flow[i];
+		/* "+ !i" skips the leading newline before the first port */
+		seq_printf(m, "\nether_id %d, mcc_id %d, status %u\n" + !i,
+			   multi_group->ether_id, multi_group->mcc_id,
+			   multi_group->network_status);
+		for (j = NBL_FLOW_MACVLAN_MAX; j < NBL_FLOW_TYPE_MAX; j++)
+			nbl_flow_id_dump(m, &multi_group->entry[j - NBL_FLOW_MACVLAN_MAX],
+					 template_name[j]);
+	}
+
+	NBL_HASH_XY_TBL_SCAN_KEY_INIT(&scan_key, NBL_HASH_TBL_OP_SHOW, NBL_HASH_TBL_ALL_SCAN,
+				      false, NULL, NULL, NULL, NULL, m,
+				      &nbl_flow_macvlan_node_show_action_func);
+	for (i = 0; i <
NBL_MAX_ETHERNET; i++) + nbl_common_scan_hash_xy_node(flow_mgt->mac_hash_tbl[i], &scan_key); + + seq_puts(m, "\n"); + + list_for_each_entry(lldp_rule, &flow_mgt->lldp_list, node) + seq_printf(m, "LLDP rule: vsi %d\n", lldp_rule->vsi); + + seq_puts(m, "\n"); + list_for_each_entry(lacp_rule, &flow_mgt->lacp_list, node) + seq_printf(m, "LACP rule: vsi %d\n", lacp_rule->vsi); +} + +static int nbl_flow_add_ktls_rx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_common_info *common; + struct nbl_flow_ul4s_rule *rule; + struct nbl_flow_param param = {0}; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + + list_for_each_entry(rule, &flow_mgt->ul4s_head, node) + if (rule->index == index) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + param.index = index; + param.data = data; + param.vsi = vsi; + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + if (data[0] == AF_INET6) + param.type = NBL_KT_FULL_MODE; + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_TLS_UP, &rule->ul4s_entry)) { + kfree(rule); + return -EFAULT; + } + + rule->index = index; + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->ul4s_head); + + return 0; +} + +static void nbl_flow_del_ktls_rx_flow(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_ul4s_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->ul4s_head, node) + if (rule->index == index) + break; + + if (nbl_list_entry_is_head(rule, &flow_mgt->ul4s_head, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->ul4s_entry); + list_del(&rule->node); + kfree(rule); +} + +static int nbl_flow_add_ipsec_tx_flow(void *priv, u32 index, u32 *data, u16 vsi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_common_info *common; + struct nbl_flow_dipsec_rule *rule; + struct nbl_flow_param param = {0}; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + + list_for_each_entry(rule, &flow_mgt->dprbac_head, node) + if (rule->index == index) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + param.index = index; + param.data = data; + param.vsi = vsi; + param.eth = nbl_res_vsi_id_to_eth_id(res_mgt, vsi); + if (data[0] == AF_INET6) + param.type = NBL_KT_FULL_MODE; + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_IPSEC_DOWN, &rule->dipsec_entry)) { + kfree(rule); + return -EFAULT; + } + + rule->index = index; + rule->vsi = vsi; + list_add_tail(&rule->node, &flow_mgt->dprbac_head); + + return 0; +} + +static void nbl_flow_del_ipsec_tx_flow(void *priv, u32 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_flow_mgt *flow_mgt; + struct nbl_flow_dipsec_rule *rule; + + flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + + list_for_each_entry(rule, &flow_mgt->dprbac_head, node) + if (rule->index == index) + break; + + if (nbl_list_entry_is_head(rule, &flow_mgt->dprbac_head, node)) + return; + + nbl_flow_del_flow(res_mgt, &rule->dipsec_entry); + list_del(&rule->node); + kfree(rule); +} + +static void nbl_res_flr_clear_accel_flow(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id 
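+	/* VF function ids are numbered after the NBL_MAX_PF physical
+	 * functions, hence the offset on the next line.
+	 */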
+ NBL_MAX_PF; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + + if (nbl_res_vf_is_active(priv, func_id)) + nbl_flow_clear_accel_flow(priv, vsi_id); +} + +static void nbl_res_flr_clear_flow(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id + NBL_MAX_PF; + u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE); + + if (nbl_res_vf_is_active(priv, func_id)) + nbl_flow_clear_flow(priv, vsi_id); +} + +static void nbl_res_flow_del_nd_upcall_flow(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_flow_nd_upcall_rule *rule = NULL; + int i; + + info->nd_upcall_refnt--; + if (info->nd_upcall_refnt > 0) { + nbl_info(common, NBL_DEBUG_FLOW, "nd upcall flow reference count %d", + info->nd_upcall_refnt); + return; + } + + rule = list_entry(flow_mgt->nd_upcall_list.next, struct nbl_flow_nd_upcall_rule, node); + if (nbl_list_entry_is_head(rule, &flow_mgt->nd_upcall_list, node)) + return; + + for (i = 0; i < NBL_FLOW_PMD_ND_UPCALL_FLOW_NUM; i++) + nbl_flow_del_flow(res_mgt, &rule->entry[i]); + + list_del(&rule->node); + kfree(rule); + nbl_info(common, NBL_DEBUG_FLOW, "deleting all flows for nd upcall"); +} + +static int nbl_res_flow_add_nd_upcall_flow(void *priv, u16 vsi, bool for_pmd) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_flow_nd_upcall_rule *rule; + struct nbl_flow_param param = {0}; + + /* TC case: use refcount to track adding flow */ + if (info->nd_upcall_refnt && !for_pmd) { + info->nd_upcall_refnt++; + nbl_info(common, NBL_DEBUG_FLOW, "tc: nd upcall flow reference count %d", + info->nd_upcall_refnt); + return 0; + } + + /* PMD case: if nd flows exist, simply delete them and add flow again */ + if (info->nd_upcall_refnt && for_pmd) { + nbl_info(common, NBL_DEBUG_FLOW, "pmd active: nd upcall flow will be reset"); + nbl_res_flow_del_nd_upcall_flow(priv); + } + + nbl_info(common, NBL_DEBUG_FLOW, "adding all flows for nd upcall"); + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + param.vsi = vsi; + param.for_pmd = for_pmd; + param.ether_type = ETH_P_IPV6; + param.priv_data = NBL_DUPPKT_PTYPE_NA; + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_PMD_ND_UPCALL, + &rule->entry[NBL_FLOW_PMD_ND_UPCALL_NA])) { + nbl_err(common, NBL_DEBUG_FLOW, "Fail to add icmpv6 na flow for vsi %d", vsi); + kfree(rule); + return -EFAULT; + } + + param.priv_data = NBL_DUPPKT_PTYPE_NS; + if (nbl_flow_add_flow(res_mgt, param, NBL_FLOW_PMD_ND_UPCALL, + &rule->entry[NBL_FLOW_PMD_ND_UPCALL_NS])) { + nbl_flow_del_flow(res_mgt, &rule->entry[NBL_FLOW_PMD_ND_UPCALL_NA]); + nbl_err(common, NBL_DEBUG_FLOW, "Fail to add icmpv6 ns flow for vsi %d", vsi); + kfree(rule); + return -EFAULT; + } + + info->nd_upcall_refnt++; + list_add_tail(&rule->node, &flow_mgt->nd_upcall_list); + return 0; +} + +/* NBL_FLOW_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. 
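+ *
+ * Each user defines NBL_FLOW_SET_OPS before expanding NBL_FLOW_OPS_TBL:
+ * nbl_flow_setup_ops_leonis() makes
+ * NBL_FLOW_SET_OPS(add_macvlan, nbl_flow_add_macvlan) expand to
+ * "res_ops->NBL_NAME(add_macvlan) = nbl_flow_add_macvlan;", while
+ * nbl_flow_remove_ops_leonis() expands the same table to NULL the
+ * pointers out again.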
+ */
+#define NBL_FLOW_OPS_TBL \
+do { \
+	NBL_FLOW_SET_OPS(add_macvlan, nbl_flow_add_macvlan); \
+	NBL_FLOW_SET_OPS(del_macvlan, nbl_flow_del_macvlan); \
+	NBL_FLOW_SET_OPS(add_lag_flow, nbl_flow_add_lag); \
+	NBL_FLOW_SET_OPS(del_lag_flow, nbl_flow_del_lag); \
+	NBL_FLOW_SET_OPS(add_lldp_flow, nbl_flow_add_lldp); \
+	NBL_FLOW_SET_OPS(del_lldp_flow, nbl_flow_del_lldp); \
+	NBL_FLOW_SET_OPS(cfg_lag_mcc, nbl_flow_cfg_lag_mcc); \
+	NBL_FLOW_SET_OPS(add_multi_rule, nbl_flow_add_multi_rule); \
+	NBL_FLOW_SET_OPS(del_multi_rule, nbl_flow_del_multi_rule); \
+	NBL_FLOW_SET_OPS(setup_multi_group, nbl_flow_setup_multi_group); \
+	NBL_FLOW_SET_OPS(remove_multi_group, nbl_flow_remove_multi_group); \
+	NBL_FLOW_SET_OPS(clear_accel_flow, nbl_flow_clear_accel_flow); \
+	NBL_FLOW_SET_OPS(clear_flow, nbl_flow_clear_flow); \
+	NBL_FLOW_SET_OPS(dump_flow, nbl_flow_dump_flow); \
+	NBL_FLOW_SET_OPS(add_ktls_rx_flow, nbl_flow_add_ktls_rx_flow); \
+	NBL_FLOW_SET_OPS(del_ktls_rx_flow, nbl_flow_del_ktls_rx_flow); \
+	NBL_FLOW_SET_OPS(add_ipsec_tx_flow, nbl_flow_add_ipsec_tx_flow); \
+	NBL_FLOW_SET_OPS(del_ipsec_tx_flow, nbl_flow_del_ipsec_tx_flow); \
+	NBL_FLOW_SET_OPS(flr_clear_accel_flow, nbl_res_flr_clear_accel_flow); \
+	NBL_FLOW_SET_OPS(flr_clear_flows, nbl_res_flr_clear_flow); \
+	NBL_FLOW_SET_OPS(cfg_duppkt_mcc, nbl_res_flow_cfg_duppkt_mcc); \
+	NBL_FLOW_SET_OPS(add_nd_upcall_flow, nbl_res_flow_add_nd_upcall_flow); \
+	NBL_FLOW_SET_OPS(del_nd_upcall_flow, nbl_res_flow_del_nd_upcall_flow); \
+} while (0)
+
+static void nbl_flow_remove_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_flow_mgt *flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	int i;
+	struct nbl_hash_xy_tbl_del_key del_key;
+
+	nbl_common_remove_index_table(flow_mgt->mcc_tbl_priv, NULL);
+
+	NBL_HASH_XY_TBL_DEL_KEY_INIT(&del_key, res_mgt, &nbl_flow_macvlan_node_del_action_func);
+	for (i = 0; i < NBL_MAX_ETHERNET; i++)
+		nbl_common_remove_hash_xy_table(flow_mgt->mac_hash_tbl[i], &del_key);
+
+	devm_kfree(dev, flow_mgt->flow_id_bitmap);
+	devm_kfree(dev, flow_mgt);
+	NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = NULL;
+}
+
+static int nbl_flow_setup_mgt(struct device *dev, struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_index_tbl_key mcc_tbl_key;
+	struct nbl_hash_xy_tbl_key macvlan_tbl_key;
+	struct nbl_flow_mgt *flow_mgt;
+	struct nbl_eth_info *eth_info;
+	int i;
+
+	flow_mgt = devm_kzalloc(dev, sizeof(struct nbl_flow_mgt), GFP_KERNEL);
+	if (!flow_mgt)
+		return -ENOMEM;
+
+	NBL_RES_MGT_TO_FLOW_MGT(res_mgt) = flow_mgt;
+
+	flow_mgt->flow_id_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(NBL_MACVLAN_TABLE_LEN),
+						sizeof(long), GFP_KERNEL);
+	if (!flow_mgt->flow_id_bitmap)
+		goto setup_mgt_failed;
+
+	NBL_INDEX_TBL_KEY_INIT(&mcc_tbl_key, dev, NBL_FLOW_MCC_INDEX_START,
+			       NBL_FLOW_MCC_INDEX_SIZE, sizeof(struct nbl_flow_mcc_index_key));
+	flow_mgt->mcc_tbl_priv = nbl_common_init_index_table(&mcc_tbl_key);
+	if (!flow_mgt->mcc_tbl_priv)
+		goto setup_mgt_failed;
+
+	NBL_HASH_XY_TBL_KEY_INIT(&macvlan_tbl_key, dev, ETH_ALEN, sizeof(u16),
+				 sizeof(struct nbl_flow_l2_data),
+				 NBL_MACVLAN_TBL_BUCKET_SIZE, NBL_MACVLAN_X_AXIS_BUCKET_SIZE,
+				 NBL_MACVLAN_Y_AXIS_BUCKET_SIZE, false);
+	for (i = 0; i < NBL_MAX_ETHERNET; i++) {
+		flow_mgt->mac_hash_tbl[i] = nbl_common_init_hash_xy_table(&macvlan_tbl_key);
+		if (!flow_mgt->mac_hash_tbl[i])
+			goto setup_mgt_failed;
+	}
+
+	for (i = 0; i < NBL_MAX_ETHERNET; i++) {
+		INIT_LIST_HEAD(&flow_mgt->multi_flow[i].mcc_list);
+		INIT_LIST_HEAD(&flow_mgt->multi_flow[i].mcc_head);
+	}
+
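+	/* Rule bookkeeping: per-vsi LLDP/LACP entries plus the kTLS (ul4s),
+	 * IPsec (dprbac) and ND-upcall accelerated rule lists.
+	 */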
+	INIT_LIST_HEAD(&flow_mgt->lldp_list);
+	INIT_LIST_HEAD(&flow_mgt->lacp_list);
+	INIT_LIST_HEAD(&flow_mgt->ul4s_head);
+	INIT_LIST_HEAD(&flow_mgt->dprbac_head);
+	INIT_LIST_HEAD(&flow_mgt->nd_upcall_list);
+
+	eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	flow_mgt->unicast_mac_threshold = NBL_TOTAL_MACVLAN_NUM / eth_info->eth_num;
+
+	return 0;
+
+setup_mgt_failed:
+	nbl_flow_remove_mgt(dev, res_mgt);
+	return -ENOMEM;
+}
+
+int nbl_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_phy_ops *phy_ops;
+	struct device *dev;
+	int ret = 0;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	ret = nbl_flow_setup_mgt(dev, res_mgt);
+	if (ret)
+		goto setup_mgt_fail;
+
+	ret = phy_ops->init_fem(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+	if (ret)
+		goto init_fem_fail;
+
+	return 0;
+
+init_fem_fail:
+	nbl_flow_remove_mgt(dev, res_mgt);
+setup_mgt_fail:
+	return ret;
+}
+
+void nbl_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_flow_mgt *flow_mgt;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	flow_mgt = NBL_RES_MGT_TO_FLOW_MGT(res_mgt);
+	if (!flow_mgt)
+		return;
+
+	nbl_flow_remove_mgt(dev, res_mgt);
+}
+
+int nbl_flow_setup_ops_leonis(struct nbl_resource_ops *res_ops)
+{
+#define NBL_FLOW_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0)
+	NBL_FLOW_OPS_TBL;
+#undef NBL_FLOW_SET_OPS
+
+	return 0;
+}
+
+void nbl_flow_remove_ops_leonis(struct nbl_resource_ops *res_ops)
+{
+#define NBL_FLOW_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = NULL; } while (0)
+	NBL_FLOW_OPS_TBL;
+#undef NBL_FLOW_SET_OPS
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h
new file mode 100644
index 0000000000000000000000000000000000000000..c16b90ae1c567ecfe1be9df39423872bd60e1886
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_flow_leonis.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ +#ifndef _NBL_FLOW_LEONIS_H_ +#define _NBL_FLOW_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_resource.h" + +#define NBL_EM_PHY_KT_OFFSET (0x1F000) +#define NBL_TOTAL_MACVLAN_NUM 2048 +#define NBL_MAX_ACTION_NUM 16 + +#define NBL_MCC_NUM_PER_SWITCH 256 + +#define NBL_FLOW_MCC_PXE_SIZE 8 +#define NBL_FLOW_MCC_INDEX_SIZE (1024 - NBL_FLOW_MCC_PXE_SIZE) +#define NBL_FLOW_MCC_INDEX_START (7 * 1024) +#define NBL_FLOW_MCC_BMC_DPORT 0x30D + +#define NBL_MACVLAN_TBL_BUCKET_SIZE 64 +#define NBL_MACVLAN_X_AXIS_BUCKET_SIZE 64 +#define NBL_MACVLAN_Y_AXIS_BUCKET_SIZE 16 + +enum nbl_flow_mcc_index_type { + NBL_MCC_INDEX_ETH, + NBL_MCC_INDEX_VSI, + NBL_MCC_INDEX_BOND, + NBL_MCC_INDEX_BMC, +}; + +struct nbl_flow_mcc_index_key { + enum nbl_flow_mcc_index_type type; + union { + u8 eth_id; + u16 vsi_id; + u32 data; + }; +}; + +#define NBL_FLOW_MCC_INDEX_KEY_INIT(key, key_type_arg, value_arg) \ +do { \ + typeof(key) __key = key; \ + typeof(key_type_arg) __type = key_type_arg; \ + typeof(value_arg) __value = value_arg; \ + __key->type = __type; \ + if (__type == NBL_MCC_INDEX_ETH) \ + __key->eth_id = __value; \ + else if (__type == NBL_MCC_INDEX_VSI || __type == NBL_MCC_INDEX_BOND) \ + __key->vsi_id = __value; \ + else \ + __key->data = __value; \ +} while (0) + +#pragma pack(1) + +#define NBL_DUPPKT_PTYPE_NA 135 +#define NBL_DUPPKT_PTYPE_NS 136 + +struct nbl_flow_l2_data { + struct nbl_flow_fem_entry entry[NBL_FLOW_MACVLAN_MAX]; + u16 vsi; +}; + +union nbl_l2_phy_up_data_u { + struct nbl_l2_phy_up_data { + u32 act0:22; + u64 rsv1:62; + u32 padding:4; + u32 sport:4; + u32 svlan_id:16; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_UP_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_UP_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_up_data)]; +}; + +union nbl_l2_phy_lldp_lacp_data_u { + struct nbl_l2_phy_lldp_lacp_data { + u32 act0:22; + u32 rsv1:2; + u8 padding[14]; + u32 sport:4; + u32 ether_type:16; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_LLDP_LACP_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_lldp_lacp_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_LLDP_LACP_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_lldp_lacp_data)]; +}; + +union nbl_l2_phy_down_data_u { + struct nbl_l2_phy_down_data { + u32 act0:22; + u32 rsv2:10; + u64 rsv1:52; + u32 padding:6; + u32 sport:2; + u32 svlan_id:16; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_DOWN_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_DOWN_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_data)]; +}; + +union nbl_l2_phy_up_multi_data_u { + struct nbl_l2_phy_up_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:20; + u64 rsv1:36; + u32 padding:4; + u32 sport:4; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_up_multi_data) \ + / sizeof(u32)) + u32 data[NBL_L2_PHY_UP_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_up_multi_data)]; +}; + +union nbl_l2_phy_down_multi_data_u { + struct nbl_l2_phy_down_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:20; + u64 rsv1:36; + u32 padding:6; + u32 sport:2; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l2_phy_down_multi_data) \ + / sizeof(u32)) + u32 
data[NBL_L2_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_phy_down_multi_data)]; +}; + +union nbl_l3_phy_up_multi_data_u { + struct nbl_l3_phy_up_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:20; + u64 rsv1:60; + u32 padding:12; + u32 sport:4; + u64 dst_mac:16; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_up_multi_data) \ + / sizeof(u32)) + u32 data[NBL_L3_PHY_UP_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_phy_up_multi_data)]; +}; + +union nbl_l3_phy_down_multi_data_u { + struct nbl_l3_phy_down_multi_data { + u32 act0:22; + u32 act1:22; + u32 rsv3:20; + u64 rsv2; + u64 rsv1:4; + u32 padding:6; + u32 sport:2; + u64 dst_mac:16; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH (sizeof(struct nbl_l3_phy_down_multi_data) \ + / sizeof(u32)) + u32 data[NBL_L3_PHY_DOWN_MULTI_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_phy_down_multi_data)]; +}; + +union nbl_phy_ul4s_data_u { + struct nbl_phy_ul4s_ipv4 { + u32 act0:22; + u32 rsv2:10; + u32 rsv1:24; + u32 sport:4; + u32 l4_dport:16; + u32 l4_sport:16; + u32 dip_low:4; + u32 dip_high:28; + u32 sip_low:4; + u32 sip_high:28; + u32 template:4; + u32 rsv[5]; + } __packed ipv4_info; + struct nbl_phy_ul4s_ipv6 { + u64 act0:22; + u64 rsv3:42; + u64 rsv2; + u64 rsv1:8; + u64 sport:4; + u64 dport:16; + u64 l4_dport:16; + u64 l4_sport:16; + u64 sip3:4; + u64 sip2; + u64 sip1:60; + u64 template:4; + } __packed ipv6_info; + u32 data[NBL_KT_BYTE_LEN / 4]; + u8 hash_key[NBL_KT_BYTE_LEN]; +}; + +union nbl_phy_dprbac_data_u { + struct nbl_phy_dprbac_ipv4 { + u32 act0:22; + u32 rsv2:10; + u64 rsv1:56; + u32 padding:2; + u32 sport:2; + u32 dip_low:4; + u32 dip_high:28; + u32 sip_low:4; + u32 sip_high:28; + u32 template:4; + u32 rsv[5]; + } __packed ipv4_info; + struct nbl_phy_dprbac_ipv6 { + u32 act0:22; + u32 rsv2:10; + u64 rsv1:24; + u32 padding:2; + u32 sport:2; + u64 dip3:36; + u64 dip2; + u64 dip1:28; + u64 sip3:36; + u64 sip2; + u64 sip1:28; + u32 template:4; + } __packed ipv6_info; + u32 data[NBL_KT_BYTE_LEN / 4]; + u8 hash_key[NBL_KT_BYTE_LEN]; +}; + +union nbl_nd_upcall_data_u { + struct nbl_nd_upcall_data { + u32 act0:22; + u32 act1:22; + u32 rsv2:4; + u8 padding[10]; + u32 rsv1:12; + u32 ptype:16; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_PMD_ND_UPCALL_DATA_TAB_WIDTH (sizeof(struct nbl_nd_upcall_data) \ + / sizeof(u32)) + u32 data[NBL_PMD_ND_UPCALL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_nd_upcall_data)]; +}; + +union nbl_common_data_u { + struct nbl_common_data { + u32 rsv[10]; + } __packed info; +#define NBL_COMMON_DATA_TAB_WIDTH (sizeof(struct nbl_common_data) \ + / sizeof(u32)) + u32 data[NBL_COMMON_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_common_data)]; +}; + +#pragma pack() + +struct nbl_flow_param { + u8 *mac; + u8 type; + u8 eth; + u16 ether_type; + u16 vid; + u16 vsi; + u16 mcc_id; + u32 index; + u32 *data; + u32 priv_data; + bool for_pmd; +}; + +struct nbl_mt_input { + u8 key[NBL_KT_BYTE_LEN]; + u8 at_num; + u8 kt_left_num; + u32 tbl_id; + u16 depth; + u16 power; +}; + +struct nbl_ht_item { + u16 ht0_hash; + u16 ht1_hash; + u16 hash_bucket; + u32 key_index; + u8 ht_table; +}; + +struct nbl_kt_item { + union nbl_common_data_u kt_data; +}; + +struct nbl_tcam_item { + struct nbl_ht_item ht_item; + struct nbl_kt_item kt_item; + u32 tcam_action[NBL_MAX_ACTION_NUM]; + bool tcam_flag; + u8 key_mode; + u8 pp_type; + u32 *pp_tcam_count; + u16 
tcam_index;
+};
+
+struct nbl_tcam_ad_item {
+	u32 action[NBL_MAX_ACTION_NUM];
+};
+
+struct nbl_flow_rule_cfg_ops {
+	int (*cfg_action)(struct nbl_flow_param param, u32 *action0, u32 *action1);
+	int (*cfg_key)(union nbl_common_data_u *data,
+		       struct nbl_flow_param param, u8 eth_mode);
+	void (*cfg_kt_action)(union nbl_common_data_u *data, u32 action0, u32 action1);
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c
new file mode 100644
index 0000000000000000000000000000000000000000..1d1f05fc88f788cd30cbc0b88d706f1cc9923ca7
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.c
@@ -0,0 +1,6954 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#include "nbl_phy_leonis.h"
+#include "nbl_hw/nbl_p4_actions.h"
+#include "nbl_hw/nbl_hw_leonis/base/nbl_datapath.h"
+#include "nbl_hw/nbl_hw_leonis/base/nbl_ppe.h"
+#include "nbl_hw/nbl_hw_leonis/base/nbl_intf.h"
+#include "nbl_hw/nbl_hw_leonis/base/nbl_datapath_dped.h"
+
+static int dvn_descreq_num_cfg = DEFAULT_DVN_DESCREQ_NUMCFG; /* default 8 and 8 */
+module_param(dvn_descreq_num_cfg, int, 0);
+MODULE_PARM_DESC(dvn_descreq_num_cfg,
+		 "bit[31:16]: split ring, support 8/16; bit[15:0]: packed ring, support 8/12/16/20/24/28/32");
+
+static u32 nbl_phy_get_quirks(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = priv;
+	u32 quirks;
+
+	nbl_hw_read_mbx_regs(phy_mgt, NBL_LEONIS_QUIRKS_OFFSET,
+			     (u8 *)&quirks, sizeof(u32));
+
+	if (quirks == NBL_LEONIS_ILLEGAL_REG_VALUE)
+		return 0;
+
+	return quirks;
+}
+
+static int nbl_send_kt_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_kt_acc_ctrl_u *kt_ctrl,
+			    u8 *data, struct nbl_common_info *common)
+{
+	union nbl_fem_kt_acc_ack_u kt_ack = {.info = {0}};
+	u32 times = 3;
+
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_ACC_DATA, data, NBL_KT_PHY_L2_DW_LEN);
+	nbl_debug(common, NBL_DEBUG_FLOW, "Set kt = %08x-%08x-%08x-%08x-%08x",
+		  ((u32 *)data)[0], ((u32 *)data)[1], ((u32 *)data)[2],
+		  ((u32 *)data)[3], ((u32 *)data)[4]);
+
+	kt_ctrl->info.rw = NBL_ACC_MODE_WRITE;
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_ACC_CTRL,
+			  kt_ctrl->data, NBL_FEM_KT_ACC_CTRL_TBL_WIDTH);
+
+	do {
+		nbl_hw_read_regs(phy_mgt, NBL_FEM_KT_ACC_ACK, kt_ack.data,
+				 NBL_FEM_KT_ACC_ACK_TBL_WIDTH);
+		if (!kt_ack.info.done) {
+			times--;
+			usleep_range(100, 200);
+		} else {
+			break;
+		}
+	} while (times);
+
+	if (!times) {
+		nbl_err(common, NBL_DEBUG_FLOW, "Config kt flow table failed");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int nbl_send_ht_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_ht_acc_ctrl_u *ht_ctrl,
+			    u8 *data, struct nbl_common_info *common)
+{
+	union nbl_fem_ht_acc_ack_u ht_ack = {.info = {0}};
+	u32 times = 3;
+
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_ACC_DATA, data, NBL_FEM_HT_ACC_DATA_TBL_WIDTH);
+	nbl_debug(common, NBL_DEBUG_FLOW, "Set ht data = %x", *(u32 *)data);
+
+	ht_ctrl->info.rw = NBL_ACC_MODE_WRITE;
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_ACC_CTRL,
+			  ht_ctrl->data, NBL_FEM_HT_ACC_CTRL_TBL_WIDTH);
+
+	do {
+		nbl_hw_read_regs(phy_mgt, NBL_FEM_HT_ACC_ACK, ht_ack.data,
+				 NBL_FEM_HT_ACC_ACK_TBL_WIDTH);
+		if (!ht_ack.info.done) {
+			times--;
+			usleep_range(100, 200);
+		} else {
+			break;
+		}
+	} while (times);
+
+	if (!times) {
+		nbl_err(common, NBL_DEBUG_FLOW, "Config ht flow table failed");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void nbl_check_kt_data(struct
nbl_phy_mgt *phy_mgt, union nbl_fem_kt_acc_ctrl_u *kt_ctrl, + struct nbl_common_info *common) +{ + union nbl_fem_kt_acc_ack_u ack = {.info = {0}}; + u32 data[10] = {0}; + + kt_ctrl->info.rw = NBL_ACC_MODE_READ; + kt_ctrl->info.access_size = NBL_ACC_SIZE_320B; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_ACC_CTRL, kt_ctrl->data, + NBL_FEM_KT_ACC_CTRL_TBL_WIDTH); + + nbl_hw_read_regs(phy_mgt, NBL_FEM_KT_ACC_ACK, ack.data, NBL_FEM_KT_ACC_ACK_TBL_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Check kt done:%u status:%u.", + ack.info.done, ack.info.status); + if (ack.info.done) { + nbl_hw_read_regs(phy_mgt, NBL_FEM_KT_ACC_DATA, (u8 *)data, NBL_KT_PHY_L2_DW_LEN); + nbl_debug(common, NBL_DEBUG_FLOW, "Check kt data:0x%x-%x-%x-%x-%x-%x-%x-%x-%x-%x.", + data[9], data[8], data[7], data[6], data[5], + data[4], data[3], data[2], data[1], data[0]); + } +} + +static void nbl_check_ht_data(struct nbl_phy_mgt *phy_mgt, union nbl_fem_ht_acc_ctrl_u *ht_ctrl, + struct nbl_common_info *common) +{ + union nbl_fem_ht_acc_ack_u ack = {.info = {0}}; + u32 data[4] = {0}; + + ht_ctrl->info.rw = NBL_ACC_MODE_READ; + ht_ctrl->info.access_size = NBL_ACC_SIZE_128B; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_ACC_CTRL, ht_ctrl->data, + NBL_FEM_HT_ACC_CTRL_TBL_WIDTH); + + nbl_hw_read_regs(phy_mgt, NBL_FEM_HT_ACC_ACK, ack.data, NBL_FEM_HT_ACC_ACK_TBL_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Check ht done:%u status:%u.", + ack.info.done, ack.info.status); + if (ack.info.done) { + nbl_hw_read_regs(phy_mgt, NBL_FEM_HT_ACC_DATA, + (u8 *)data, NBL_FEM_HT_ACC_DATA_TBL_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Check ht data:0x%x-%x-%x-%x.", + data[0], data[1], data[2], data[3]); + } +} + +static void nbl_phy_fem_set_bank(struct nbl_phy_mgt *phy_mgt) +{ + u32 bank_sel = 0; + + /* HT bank sel */ + bank_sel = HT_PORT0_BANK_SEL | HT_PORT1_BANK_SEL << NBL_8BIT + | HT_PORT2_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_BANK_SEL_BITMAP, (u8 *)&bank_sel, sizeof(bank_sel)); + + /* KT bank sel */ + bank_sel = KT_PORT0_BANK_SEL | KT_PORT1_BANK_SEL << NBL_8BIT + | KT_PORT2_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_BANK_SEL_BITMAP, (u8 *)&bank_sel, sizeof(bank_sel)); + + /* AT bank sel */ + bank_sel = AT_PORT0_BANK_SEL | AT_PORT1_BANK_SEL << NBL_16BIT; + nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP, (u8 *)&bank_sel, sizeof(bank_sel)); + bank_sel = AT_PORT2_BANK_SEL; + nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP2, (u8 *)&bank_sel, sizeof(bank_sel)); +} + +static void nbl_phy_fem_clear_tcam_ad(struct nbl_phy_mgt *phy_mgt) +{ + union fem_em_tcam_table_u tcam_table; + union fem_em_ad_table_u ad_table = {.info = {0}}; + int i; + int j; + + memset(&tcam_table, 0, sizeof(tcam_table)); + + for (i = 0; i < NBL_PT_LEN; i++) { + for (j = 0; j < NBL_TCAM_TABLE_LEN; j++) { + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(i, j), + tcam_table.hash_key, sizeof(tcam_table)); + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_AD_TABLE_REG(i, j), + ad_table.hash_key, sizeof(ad_table)); + nbl_hw_rd32(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(i, 1)); + } + } +} + +static int nbl_phy_fem_em0_pt_phy_l2_init(struct nbl_phy_mgt *phy_mgt, int pt_idx) +{ + union nbl_fem_profile_tbl_u em0_pt_tbl = {.info = {0}}; + + em0_pt_tbl.info.pt_vld = 1; + em0_pt_tbl.info.pt_hash_sel0 = 0; + em0_pt_tbl.info.pt_hash_sel1 = 3; + + switch (pt_idx) { + case NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = 
NBL_EM_PT_MASK1_LEN_12; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UP_UNICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_12; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_DOWN_UNICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_4; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UP_MULTICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_68; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_DOWN_MULTICAST_L2: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_60; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_UP_MULTICAST_L3: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_36; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_DOWN_MULTICAST_L3: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_LEN_28; + em0_pt_tbl.info.pt_act_num = 2; + break; + case NBL_EM0_PT_PHY_DPRBAC_IPV4: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_0; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_DPRBAC_IPV6: + em0_pt_tbl.info.pt_key_size = 1; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_64 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_128; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UL4S_IPV4: + em0_pt_tbl.info.pt_key_size = 0; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_32; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + case NBL_EM0_PT_PHY_UL4S_IPV6: + em0_pt_tbl.info.pt_key_size = 1; + em0_pt_tbl.info.pt_mask_bmap0 = NBL_EM_PT_MASK_LEN_0 >> 2; + em0_pt_tbl.info.pt_mask_bmap1 = NBL_EM_PT_MASK1_LEN_112; + em0_pt_tbl.info.pt_mask_bmap2 = NBL_EM_PT_MASK2_SEC_72; + em0_pt_tbl.info.pt_act_num = 1; + break; + default: + return -EOPNOTSUPP; + } + + nbl_hw_write_regs(phy_mgt, NBL_FEM0_PROFILE_TABLE(pt_idx), em0_pt_tbl.data, + NBL_FEM_PROFILE_TBL_WIDTH); + return 0; +} + +static __maybe_unused int nbl_phy_fem_em0_pt_init(struct nbl_phy_mgt *phy_mgt) +{ + int i, ret = 0; + + for (i = NBL_EM0_PT_PHY_UP_TUNNEL_UNICAST_L2; i <= NBL_EM0_PT_PHY_UL4S_IPV6; i++) { + ret = nbl_phy_fem_em0_pt_phy_l2_init(phy_mgt, i); + if (ret) + return ret; + } + + return 0; +} + +static int nbl_phy_set_ht(void *priv, u16 hash, u16 
hash_other, u8 ht_table, + u8 bucket, u32 key_index, u8 valid) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common; + union nbl_fem_ht_acc_data_u ht = {.info = {0}}; + union nbl_fem_ht_acc_ctrl_u ht_ctrl = {.info = {0}}; + + common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + + ht.info.vld = valid; + ht.info.hash = hash_other; + ht.info.kt_index = key_index; + + ht_ctrl.info.ht_id = ht_table == NBL_HT0 ? NBL_ACC_HT0 : NBL_ACC_HT1; + ht_ctrl.info.entry_id = hash; + ht_ctrl.info.bucket_id = bucket; + ht_ctrl.info.port = NBL_PT_PP0; + ht_ctrl.info.access_size = NBL_ACC_SIZE_32B; + ht_ctrl.info.start = 1; + + if (nbl_send_ht_data(phy_mgt, &ht_ctrl, ht.data, common)) + return -EIO; + + nbl_check_ht_data(phy_mgt, &ht_ctrl, common); + return 0; +} + +static int nbl_phy_set_kt(void *priv, u8 *key, u32 key_index, u8 key_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common; + union nbl_fem_kt_acc_ctrl_u kt_ctrl = {.info = {0}}; + + common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + + kt_ctrl.info.addr = key_index; + kt_ctrl.info.access_size = key_type == NBL_KT_HALF_MODE ? NBL_ACC_SIZE_160B + : NBL_ACC_SIZE_320B; + kt_ctrl.info.start = 1; + + if (nbl_send_kt_data(phy_mgt, &kt_ctrl, key, common)) + return -EIO; + + nbl_check_kt_data(phy_mgt, &kt_ctrl, common); + return 0; +} + +static int nbl_phy_search_key(void *priv, u8 *key, u8 key_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common; + union nbl_search_ctrl_u s_ctrl = {.info = {0}}; + union nbl_search_ack_u s_ack = {.info = {0}}; + u8 key_data[NBL_KT_BYTE_LEN] = {0}; + u8 search_key[NBL_FEM_SEARCH_KEY_LEN] = {0}; + u8 data[NBL_FEM_SEARCH_KEY_LEN] = {0}; + u8 times = 3; + + common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + + if (key_type == NBL_KT_HALF_MODE) + memcpy(key_data, key, NBL_KT_BYTE_HALF_LEN); + else + memcpy(key_data, key, NBL_KT_BYTE_LEN); + + key_data[0] &= KT_MASK_LEN32_ACTION_INFO; + key_data[1] &= KT_MASK_LEN12_ACTION_INFO; + if (key_type == NBL_KT_HALF_MODE) + memcpy(&search_key[20], key_data, NBL_KT_BYTE_HALF_LEN); + else + memcpy(search_key, key_data, NBL_KT_BYTE_LEN); + + nbl_debug(common, NBL_DEBUG_FLOW, "Search key:0x%x-%x-%x-%x-%x-%x-%x-%x-%x-%x", + ((u32 *)search_key)[9], ((u32 *)search_key)[8], + ((u32 *)search_key)[7], ((u32 *)search_key)[6], + ((u32 *)search_key)[5], ((u32 *)search_key)[4], + ((u32 *)search_key)[3], ((u32 *)search_key)[2], + ((u32 *)search_key)[1], ((u32 *)search_key)[0]); + nbl_hw_write_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_DATA, search_key, NBL_FEM_SEARCH_KEY_LEN); + + s_ctrl.info.start = 1; + nbl_hw_write_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_CTRL, (u8 *)&s_ctrl, + NBL_SEARCH_CTRL_WIDTH); + + do { + nbl_hw_read_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_ACK, + s_ack.data, NBL_SEARCH_ACK_WIDTH); + nbl_debug(common, NBL_DEBUG_FLOW, "Search key ack:done:%u status:%u.", + s_ack.info.done, s_ack.info.status); + + if (!s_ack.info.done) { + times--; + usleep_range(100, 200); + } else { + nbl_hw_read_regs(phy_mgt, NBL_FEM_INSERT_SEARCH0_DATA, + data, NBL_FEM_SEARCH_KEY_LEN); + nbl_debug(common, NBL_DEBUG_FLOW, + "Search key data:0x%x-%x-%x-%x-%x-%x-%x-%x-%x-%x-%x.", + ((u32 *)data)[10], ((u32 *)data)[9], + ((u32 *)data)[8], ((u32 *)data)[7], + ((u32 *)data)[6], ((u32 *)data)[5], + ((u32 *)data)[4], ((u32 *)data)[3], + ((u32 *)data)[2], ((u32 *)data)[1], + ((u32 *)data)[0]); + break; + } + } while (times); + + if (!times) { + nbl_err(common, NBL_DEBUG_PHY, "Search ht/kt failed."); + return -EAGAIN; + 
} + + return 0; +} + +static int nbl_phy_add_tcam(void *priv, u32 index, u8 *key, u32 *action, u8 key_type, u8 pp_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union fem_em_tcam_table_u tcam_table; + union fem_em_tcam_table_u tcam_table_second; + union fem_em_ad_table_u ad_table; + + memset(&tcam_table, 0, sizeof(tcam_table)); + memset(&tcam_table_second, 0, sizeof(tcam_table_second)); + memset(&ad_table, 0, sizeof(ad_table)); + + memcpy(tcam_table.info.key, key, NBL_KT_BYTE_HALF_LEN); + tcam_table.info.key_vld = 1; + + if (key_type == NBL_KT_FULL_MODE) { + tcam_table.info.key_size = 1; + memcpy(tcam_table_second.info.key, &key[5], NBL_KT_BYTE_HALF_LEN); + tcam_table_second.info.key_vld = 1; + tcam_table_second.info.key_size = 1; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index + 1), + tcam_table_second.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + } + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index), + tcam_table.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + + ad_table.info.action0 = action[0]; + ad_table.info.action1 = action[1]; + ad_table.info.action2 = action[2]; + ad_table.info.action3 = action[3]; + ad_table.info.action4 = action[4]; + ad_table.info.action5 = action[5]; + ad_table.info.action6 = action[6]; + ad_table.info.action7 = action[7]; + ad_table.info.action8 = action[8]; + ad_table.info.action9 = action[9]; + ad_table.info.action10 = action[10]; + ad_table.info.action11 = action[11]; + ad_table.info.action12 = action[12]; + ad_table.info.action13 = action[13]; + ad_table.info.action14 = action[14]; + ad_table.info.action15 = action[15]; + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_AD_TABLE_REG(pp_type, index), + ad_table.hash_key, NBL_FLOW_AD_TOTAL_LEN); + + return 0; +} + +static void nbl_phy_del_tcam(void *priv, u32 index, u8 key_type, u8 pp_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union fem_em_tcam_table_u tcam_table; + union fem_em_tcam_table_u tcam_table_second; + union fem_em_ad_table_u ad_table; + + memset(&tcam_table, 0, sizeof(tcam_table)); + memset(&tcam_table_second, 0, sizeof(tcam_table_second)); + memset(&ad_table, 0, sizeof(ad_table)); + if (key_type == NBL_KT_FULL_MODE) + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index + 1), + tcam_table_second.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_TCAM_TABLE_REG(pp_type, index), + tcam_table.hash_key, NBL_FLOW_TCAM_TOTAL_LEN); + + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM_AD_TABLE_REG(pp_type, index), + ad_table.hash_key, NBL_FLOW_AD_TOTAL_LEN); +} + +static int nbl_phy_add_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mcc_tbl node = {0}; + + node.vld = 1; + node.next_pntr = 0; + node.tail = 1; + node.stateid_filter = 1; + node.flowid_filter = 1; + node.dport_act = action; + + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); + if (prev_mcc_id != NBL_MCC_ID_INVALID) { + nbl_hw_read_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + node.next_pntr = mcc_id; + node.tail = 0; + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + } + + return 0; +} + +static void nbl_phy_del_mcc(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mcc_tbl node = {0}; + + if (prev_mcc_id != NBL_MCC_ID_INVALID) { + nbl_hw_read_regs(phy_mgt, 
NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + + if (next_mcc_id != NBL_MCC_ID_INVALID) { + node.next_pntr = next_mcc_id; + } else { + node.next_pntr = 0; + node.tail = 1; + } + + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(prev_mcc_id), + (u8 *)&node, sizeof(node)); + } + + memset(&node, 0, sizeof(node)); + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); +} + +static int nbl_phy_add_tnl_encap(void *priv, const u8 encap_buf[], u16 encap_idx, + union nbl_flow_encap_offset_tbl_u encap_idx_info) +{ + u8 id; + u8 temp = 0; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u8 send_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN] = { 0 }; + + memcpy(send_buf, encap_buf, NBL_FLOW_ACTION_ENCAP_MAX_LEN); + + for (id = 0; id < NBL_FLOW_ACTION_ENCAP_HALF_LEN; id++) { + temp = send_buf[id]; + send_buf[id] = send_buf[NBL_FLOW_ACTION_ENCAP_MAX_LEN - 1 - id]; + send_buf[NBL_FLOW_ACTION_ENCAP_MAX_LEN - 1 - id] = temp; + } + + memcpy(&send_buf[NBL_FLOW_ACTION_ENCAP_MAX_LEN], + encap_idx_info.data, NBL_FLOW_ACTION_ENCAP_OFFSET_LEN); + + nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_TNL_REG(encap_idx), + (u8 *)send_buf, NBL_FLOW_ACTION_ENCAP_TOTAL_LEN); + + return 0; +} + +static void nbl_phy_del_tnl_encap(void *priv, u16 encap_idx) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u8 send_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN] = { 0 }; + + nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_TNL_REG(encap_idx), + (u8 *)send_buf, NBL_FLOW_ACTION_ENCAP_TOTAL_LEN); +} + +static int nbl_phy_init_fem(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fem_ht_size_table_u ht_size = {.info = {0}}; + u32 fem_start = NBL_FEM_INIT_START_KERN; + int ret = 0; + + nbl_hw_write_regs(phy_mgt, NBL_FEM_INIT_START, (u8 *)&fem_start, sizeof(fem_start)); + + nbl_phy_fem_set_bank(phy_mgt); + + ht_size.info.pp0_size = HT_PORT0_BTM; + ht_size.info.pp1_size = HT_PORT1_BTM; + ht_size.info.pp2_size = HT_PORT2_BTM; + nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_SIZE_REG, ht_size.data, NBL_FEM_HT_SIZE_TBL_WIDTH); + + nbl_phy_fem_clear_tcam_ad(phy_mgt); + + /*ret = nbl_phy_fem_em0_pt_init(phy_mgt);*/ + return ret; +} + +static void nbl_configure_dped_checksum(struct nbl_phy_mgt *phy_mgt) +{ + union dped_l4_ck_cmd_40_u l4_ck_cmd_40; + + /* DPED dped_l4_ck_cmd_40 for sctp */ + nbl_hw_read_regs(phy_mgt, NBL_DPED_L4_CK_CMD_40_ADDR, + (u8 *)&l4_ck_cmd_40, sizeof(l4_ck_cmd_40)); + l4_ck_cmd_40.info.en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_40_ADDR, + (u8 *)&l4_ck_cmd_40, sizeof(l4_ck_cmd_40)); +} + +static int nbl_dped_init(struct nbl_phy_mgt *phy_mgt) +{ + nbl_hw_wr32(phy_mgt, NBL_DPED_VLAN_OFFSET, 0xC); + nbl_hw_wr32(phy_mgt, NBL_DPED_DSCP_OFFSET_0, 0x8); + nbl_hw_wr32(phy_mgt, NBL_DPED_DSCP_OFFSET_1, 0x4); + + // dped checksum offload + nbl_configure_dped_checksum(phy_mgt); + + return 0; +} + +static int nbl_uped_init(struct nbl_phy_mgt *phy_mgt) +{ + struct ped_hw_edit_profile hw_edit; + + nbl_hw_read_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u8 *)&hw_edit, sizeof(hw_edit)); + hw_edit.l3_len = 0; + nbl_hw_write_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(5), (u8 *)&hw_edit, sizeof(hw_edit)); + + nbl_hw_read_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u8 *)&hw_edit, sizeof(hw_edit)); + hw_edit.l3_len = 1; + nbl_hw_write_regs(phy_mgt, NBL_UPED_HW_EDT_PROF_TABLE(6), (u8 *)&hw_edit, sizeof(hw_edit)); + + return 0; +} + +static void nbl_shaping_eth_init(struct nbl_phy_mgt *phy_mgt, u8 eth_id, u8 speed) +{ + struct nbl_shaping_dport 
dport = {0}; + struct nbl_shaping_dvn_dport dvn_dport = {0}; + struct nbl_shaping_rdma_dport rdma_dport = {0}; + u32 rate, half_rate; + + if (speed == NBL_FW_PORT_SPEED_100G) { + rate = NBL_SHAPING_DPORT_100G_RATE; + half_rate = NBL_SHAPING_DPORT_HALF_100G_RATE; + } else { + rate = NBL_SHAPING_DPORT_25G_RATE; + half_rate = NBL_SHAPING_DPORT_HALF_25G_RATE; + } + + dport.cir = rate; + dport.pir = rate; + dport.depth = max(dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + dport.cbs = dport.depth; + dport.pbs = dport.depth; + dport.valid = 1; + + dvn_dport.cir = half_rate; + dvn_dport.pir = rate; + dvn_dport.depth = dport.depth; + dvn_dport.cbs = dvn_dport.depth; + dvn_dport.pbs = dvn_dport.depth; + dvn_dport.valid = 1; + + rdma_dport.cir = half_rate; + rdma_dport.pir = rate; + rdma_dport.depth = dport.depth; + rdma_dport.cbs = rdma_dport.depth; + rdma_dport.pbs = rdma_dport.depth; + rdma_dport.valid = 1; + + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u8 *)&dport, sizeof(dport)); + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id), + (u8 *)&dvn_dport, sizeof(dvn_dport)); + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id), + (u8 *)&rdma_dport, sizeof(rdma_dport)); +} + +static int nbl_shaping_init(struct nbl_phy_mgt *phy_mgt, u8 speed) +{ + struct dsch_psha_en psha_en = {0}; + int i; + + for (i = 0; i < NBL_MAX_ETHERNET; i++) + nbl_shaping_eth_init(phy_mgt, i, speed); + + psha_en.en = 0xF; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR, (u8 *)&psha_en, sizeof(psha_en)); + + return 0; +} + +static int nbl_dsch_qid_max_init(struct nbl_phy_mgt *phy_mgt) +{ + struct dsch_vn_quanta quanta = {0}; + + quanta.h_qua = NBL_HOST_QUANTA; + quanta.e_qua = NBL_ECPU_QUANTA; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_QUANTA_ADDR, + (u8 *)&quanta, sizeof(quanta)); + nbl_hw_wr32(phy_mgt, NBL_DSCH_HOST_QID_MAX, NBL_MAX_QUEUE_ID); + + nbl_hw_wr32(phy_mgt, NBL_DVN_ECPU_QUEUE_NUM, 0); + nbl_hw_wr32(phy_mgt, NBL_UVN_ECPU_QUEUE_NUM, 0); + + return 0; +} + +static int nbl_ustore_init(struct nbl_phy_mgt *phy_mgt, u8 eth_num) +{ + struct ustore_pkt_len pkt_len; + struct nbl_ustore_port_drop_th drop_th; + int i; + + nbl_hw_read_regs(phy_mgt, NBL_USTORE_PKT_LEN_ADDR, (u8 *)&pkt_len, sizeof(pkt_len)); + /* min arp packet length 42 (14 + 28) */ + pkt_len.min = 42; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_PKT_LEN_ADDR, (u8 *)&pkt_len, sizeof(pkt_len)); + + drop_th.en = 1; + if (eth_num == 1) + drop_th.disc_th = NBL_USTORE_SIGNLE_ETH_DROP_TH; + else if (eth_num == 2) + drop_th.disc_th = NBL_USTORE_DUAL_ETH_DROP_TH; + else + drop_th.disc_th = NBL_USTORE_QUAD_ETH_DROP_TH; + + for (i = 0; i < 4; i++) + nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_DROP_TH_REG_ARR(i), + (u8 *)&drop_th, sizeof(drop_th)); + + return 0; +} + +static int nbl_dstore_init(struct nbl_phy_mgt *phy_mgt, u8 speed) +{ + struct dstore_d_dport_fc_th fc_th; + struct dstore_port_drop_th drop_th; + struct dstore_disc_bp_th bp_th; + int i; + + for (i = 0; i < 6; i++) { + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i), + (u8 *)&drop_th, sizeof(drop_th)); + drop_th.en = 0; + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_PORT_DROP_TH_REG(i), + (u8 *)&drop_th, sizeof(drop_th)); + } + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_DISC_BP_TH, + (u8 *)&bp_th, sizeof(bp_th)); + bp_th.en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_DISC_BP_TH, + (u8 *)&bp_th, sizeof(bp_th)); + + for (i = 0; i < 4; i++) { + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i), + (u8 *)&fc_th, sizeof(fc_th)); + if (speed == NBL_FW_PORT_SPEED_100G) { + 
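/* 100G ports use their own XOFF/XON flow-control thresholds */ + 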
fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G; + } else { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH; + } + + fc_th.fc_en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(i), + (u8 *)&fc_th, sizeof(fc_th)); + } + + return 0; +} + +static int nbl_ul4s_init(struct nbl_phy_mgt *phy_mgt) +{ + struct ul4s_sch_pad sch_pad; + + nbl_hw_read_regs(phy_mgt, NBL_UL4S_SCH_PAD_ADDR, (u8 *)&sch_pad, sizeof(sch_pad)); + sch_pad.en = 1; + nbl_hw_write_regs(phy_mgt, NBL_UL4S_SCH_PAD_ADDR, (u8 *)&sch_pad, sizeof(sch_pad)); + + return 0; +} + +static void nbl_dvn_descreq_num_cfg(struct nbl_phy_mgt *phy_mgt, u32 descreq_num) +{ + struct nbl_dvn_descreq_num_cfg descreq_num_cfg = { 0 }; + u32 packed_ring_prefetch_num = descreq_num & 0xffff; + u32 split_ring_prefetch_num = (descreq_num >> 16) & 0xffff; + + /* bit[15:0]: packed-ring descriptor prefetch depth, clamped to 8..32 */ + packed_ring_prefetch_num = clamp(packed_ring_prefetch_num, 8U, 32U); + descreq_num_cfg.packed_l1_num = (packed_ring_prefetch_num - 8) / 4; + + /* bit[31:16]: split-ring descriptor prefetch depth, clamped to 8..16 */ + split_ring_prefetch_num = clamp(split_ring_prefetch_num, 8U, 16U); + descreq_num_cfg.avring_cfg_num = split_ring_prefetch_num > 8 ? 1 : 0; + + nbl_hw_write_regs(phy_mgt, NBL_DVN_DESCREQ_NUM_CFG, + (u8 *)&descreq_num_cfg, sizeof(descreq_num_cfg)); +} + +static int nbl_dvn_init(struct nbl_phy_mgt *phy_mgt, u8 speed) +{ + struct nbl_dvn_desc_wr_merge_timeout timeout = {0}; + struct nbl_dvn_dif_req_rd_ro_flag ro_flag = {0}; + + timeout.cfg_cycle = DEFAULT_DVN_DESC_WR_MERGE_TIMEOUT_MAX; + nbl_hw_write_regs(phy_mgt, NBL_DVN_DESC_WR_MERGE_TIMEOUT, + (u8 *)&timeout, sizeof(timeout)); + + ro_flag.rd_desc_ro_en = 1; + ro_flag.rd_data_ro_en = 1; + ro_flag.rd_avring_ro_en = 1; + nbl_hw_write_regs(phy_mgt, NBL_DVN_DIF_REQ_RD_RO_FLAG, + (u8 *)&ro_flag, sizeof(ro_flag)); + + if (speed == NBL_FW_PORT_SPEED_100G) + nbl_dvn_descreq_num_cfg(phy_mgt, DEFAULT_DVN_100G_DESCREQ_NUMCFG); + else + nbl_dvn_descreq_num_cfg(phy_mgt, dvn_descreq_num_cfg); + + return 0; +} + +static int nbl_uvn_init(struct nbl_phy_mgt *phy_mgt) +{ + struct pci_dev *pdev; + struct uvn_queue_err_mask mask = {0}; + struct uvn_dif_req_ro_flag flag = {0}; + struct uvn_desc_prefetch_init prefetch_init = {0}; + u32 timeout = 119760; /* ~200us: 200000ns / 1.67ns per cycle */ + u32 quirks; + + pdev = NBL_COMMON_TO_PDEV(phy_mgt->common); + nbl_hw_wr32(phy_mgt, NBL_UVN_DESC_RD_WAIT, timeout); + + flag.avail_rd = 1; + flag.desc_rd = 1; + flag.pkt_wr = 1; + flag.desc_wr = 0; + nbl_hw_write_regs(phy_mgt, NBL_UVN_DIF_REQ_RO_FLAG, (u8 *)&flag, sizeof(flag)); + + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_MASK, (u8 *)&mask, sizeof(mask)); + mask.dif_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_ERR_MASK, (u8 *)&mask, sizeof(mask)); + + prefetch_init.num = NBL_UVN_DESC_PREFETCH_NUM; + prefetch_init.sel = 0; + + quirks = nbl_phy_get_quirks(phy_mgt); + + if (performance_mode & BIT(NBL_QUIRKS_UVN_PREFETCH_ALIGN) || + !(quirks & BIT(NBL_QUIRKS_UVN_PREFETCH_ALIGN))) + prefetch_init.sel = 1; + + nbl_hw_write_regs(phy_mgt, NBL_UVN_DESC_PREFETCH_INIT, + (u8 *)&prefetch_init, sizeof(prefetch_init)); + + return 0; +} + +static int nbl_uqm_init(struct nbl_phy_mgt *phy_mgt) +{ + u32 cnt = 0; + int i; + + nbl_hw_write_regs(phy_mgt, NBL_UQM_FWD_DROP_CNT, (u8 *)&cnt, sizeof(cnt)); + + nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_PKT_CNT, (u8 *)&cnt, sizeof(cnt)); + 
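/* zero the remaining per-type, per-port, and per-dport UQM drop counters */ + 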
nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_PKT_SLICE_CNT, (u8 *)&cnt, sizeof(cnt)); + nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_PKT_LEN_ADD_CNT, (u8 *)&cnt, sizeof(cnt)); + nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_HEAD_PNTR_ADD_CNT, (u8 *)&cnt, sizeof(cnt)); + nbl_hw_write_regs(phy_mgt, NBL_UQM_DROP_WEIGHT_ADD_CNT, (u8 *)&cnt, sizeof(cnt)); + + for (i = 0; i < NBL_UQM_PORT_DROP_DEPTH; i++) { + nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_PKT_CNT + (sizeof(cnt) * i), + (u8 *)&cnt, sizeof(cnt)); + nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_PKT_SLICE_CNT + (sizeof(cnt) * i), + (u8 *)&cnt, sizeof(cnt)); + nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_PKT_LEN_ADD_CNT + (sizeof(cnt) * i), + (u8 *)&cnt, sizeof(cnt)); + nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_HEAD_PNTR_ADD_CNT + (sizeof(cnt) * i), + (u8 *)&cnt, sizeof(cnt)); + nbl_hw_write_regs(phy_mgt, NBL_UQM_PORT_DROP_WEIGHT_ADD_CNT + (sizeof(cnt) * i), + (u8 *)&cnt, sizeof(cnt)); + } + + for (i = 0; i < NBL_UQM_DPORT_DROP_DEPTH; i++) + nbl_hw_write_regs(phy_mgt, NBL_UQM_DPORT_DROP_CNT + (sizeof(cnt) * i), + (u8 *)&cnt, sizeof(cnt)); + + return 0; +} + +static int nbl_dp_init(struct nbl_phy_mgt *phy_mgt, u8 speed, u8 eth_num) +{ + nbl_dped_init(phy_mgt); + nbl_uped_init(phy_mgt); + nbl_shaping_init(phy_mgt, speed); + nbl_dsch_qid_max_init(phy_mgt); + nbl_ustore_init(phy_mgt, eth_num); + nbl_dstore_init(phy_mgt, speed); + nbl_ul4s_init(phy_mgt); + nbl_dvn_init(phy_mgt, speed); + nbl_uvn_init(phy_mgt); + nbl_uqm_init(phy_mgt); + + return 0; +} + +static void nbl_epro_mirror_act_pri_init(struct nbl_phy_mgt *phy_mgt, + struct nbl_epro_mirror_act_pri *cfg) +{ + struct nbl_epro_mirror_act_pri epro_mirror_act_pri_def = { + .car_idx_pri = EPRO_MIRROR_ACT_CARIDX_PRI, + .dqueue_pri = EPRO_MIRROR_ACT_DQUEUE_PRI, + .dport_pri = EPRO_MIRROR_ACT_DPORT_PRI, + .rsv = 0 + }; + + if (cfg) + epro_mirror_act_pri_def = *cfg; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MIRROR_ACT_PRI_REG, (u8 *)&epro_mirror_act_pri_def, 1); +} + +static struct nbl_epro_action_filter_tbl epro_action_filter_tbl_def[NBL_FWD_TYPE_MAX] = { + [NBL_FWD_TYPE_NORMAL] = { + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MIRRIDX)}, + [NBL_FWD_TYPE_CPU_ASSIGNED] = { + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MIRRIDX) + }, + [NBL_FWD_TYPE_UPCALL] = {0}, + [NBL_FWD_TYPE_SRC_MIRROR] = { + BIT(NBL_MD_ACTION_FLOWID0) | BIT(NBL_MD_ACTION_FLOWID1) | + BIT(NBL_MD_ACTION_RSSIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_VNI0) | + BIT(NBL_MD_ACTION_VNI1) | BIT(NBL_MD_ACTION_PRBAC_IDX) | + BIT(NBL_MD_ACTION_L4S_IDX) | BIT(NBL_MD_ACTION_DP_HASH0) | + BIT(NBL_MD_ACTION_DP_HASH1) | BIT(NBL_MD_ACTION_MDF_PRI) | + BIT(NBL_MD_ACTION_FLOW_CARIDX) | + ((u64)0xffffffff << 32)}, + [NBL_FWD_TYPE_OTHER_MIRROR] = { + BIT(NBL_MD_ACTION_FLOWID0) | BIT(NBL_MD_ACTION_FLOWID1) | + BIT(NBL_MD_ACTION_RSSIDX) | BIT(NBL_MD_ACTION_TABLE_INDEX) | + BIT(NBL_MD_ACTION_MCIDX) | BIT(NBL_MD_ACTION_VNI0) | + BIT(NBL_MD_ACTION_VNI1) | BIT(NBL_MD_ACTION_PRBAC_IDX) | + BIT(NBL_MD_ACTION_L4S_IDX) | BIT(NBL_MD_ACTION_DP_HASH0) | + BIT(NBL_MD_ACTION_DP_HASH1) | BIT(NBL_MD_ACTION_MDF_PRI)}, + [NBL_FWD_TYPE_MNG] = {0}, + [NBL_FWD_TYPE_GLB_LB] = {0}, + [NBL_FWD_TYPE_DROP] = {0}, +}; + +static void nbl_epro_action_filter_cfg(struct nbl_phy_mgt *phy_mgt, u32 fwd_type, + struct nbl_epro_action_filter_tbl *cfg) +{ + if (fwd_type >= NBL_FWD_TYPE_MAX) { + pr_err("fwd_type %u exceeds the max num %u.", fwd_type, NBL_FWD_TYPE_MAX); + return; + } + 
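/* commit this fwd_type's action-filter bitmap into the EPRO table */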
+ nbl_hw_write_regs(phy_mgt, NBL_EPRO_ACTION_FILTER_TABLE(fwd_type), + (u8 *)cfg, sizeof(*cfg)); +} + +static int nbl_epro_init(struct nbl_phy_mgt *phy_mgt) +{ + u32 fwd_type = 0; + + nbl_epro_mirror_act_pri_init(phy_mgt, NULL); + + for (fwd_type = 0; fwd_type < NBL_FWD_TYPE_MAX; fwd_type++) + nbl_epro_action_filter_cfg(phy_mgt, fwd_type, + &epro_action_filter_tbl_def[fwd_type]); + + return 0; +} + +static int nbl_ppe_init(struct nbl_phy_mgt *phy_mgt) +{ + nbl_epro_init(phy_mgt); + + return 0; +} + +static int nbl_host_padpt_init(struct nbl_phy_mgt *phy_mgt) +{ + /* padpt flow control register */ + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP, 0x10400); + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_PD_DN, 0x10080); + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_PH_DN, 0x10010); + nbl_hw_wr32(phy_mgt, NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN, 0x10010); + + return 0; +} + +/* set padpt debug reg to cap for aged stop */ +static void nbl_host_pcap_init(struct nbl_phy_mgt *phy_mgt) +{ + int addr; + + /* tx */ + nbl_hw_wr32(phy_mgt, 0x15a4204, 0x4); + nbl_hw_wr32(phy_mgt, 0x15a4208, 0x10); + + for (addr = 0x15a4300; addr <= 0x15a4338; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a433c, 0xdf000000); + + for (addr = 0x15a4340; addr <= 0x15a437c; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + + /* rx */ + nbl_hw_wr32(phy_mgt, 0x15a4804, 0x4); + nbl_hw_wr32(phy_mgt, 0x15a4808, 0x20); + + for (addr = 0x15a4940; addr <= 0x15a4978; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a497c, 0x0a000000); + + for (addr = 0x15a4900; addr <= 0x15a4938; addr += 4) + nbl_hw_wr32(phy_mgt, addr, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a493c, 0xbe000000); + + nbl_hw_wr32(phy_mgt, 0x15a420c, 0x1); + nbl_hw_wr32(phy_mgt, 0x15a480c, 0x1); + nbl_hw_wr32(phy_mgt, 0x15a420c, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a480c, 0x0); + nbl_hw_wr32(phy_mgt, 0x15a4200, 0x1); + nbl_hw_wr32(phy_mgt, 0x15a4800, 0x1); +} + +static int nbl_intf_init(struct nbl_phy_mgt *phy_mgt) +{ + nbl_host_padpt_init(phy_mgt); + nbl_host_pcap_init(phy_mgt); + + return 0; +} + +static int nbl_phy_init_chip_module(void *priv, u8 eth_speed, u8 eth_num) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY, "phy_chip_init"); + + nbl_dp_init(phy_mgt, eth_speed, eth_num); + nbl_ppe_init(phy_mgt); + nbl_intf_init(phy_mgt); + + phy_mgt->version = nbl_hw_rd32(phy_mgt, 0x1300904); + + return 0; +} + +static int nbl_phy_init_qid_map_table(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_virtio_qid_map_table info = {0}, info2 = {0}; + struct device *dev = NBL_PHY_MGT_TO_DEV(phy_mgt); + u16 i, j, k; + + memset(&info, 0, sizeof(info)); + info.local_qid = 0x1FF; + info.notify_addr_l = 0x7FFFFF; + info.notify_addr_h = 0xFFFFFFFF; + info.global_qid = 0xFFF; + info.ctrlq_flag = 0X1; + info.rsv1 = 0; + info.rsv2 = 0; + + for (k = 0; k < 2; k++) { /* 0 is primary table , 1 is standby table */ + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + j = 0; + do { + nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_QID_MAP_REG_ARR(k, i), + (u8 *)&info, sizeof(info)); + nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_QID_MAP_REG_ARR(k, i), + (u8 *)&info2, sizeof(info2)); + if (likely(!memcmp(&info, &info2, sizeof(info)))) + break; + j++; + } while (j < NBL_REG_WRITE_MAX_TRY_TIMES); + + if (j == NBL_REG_WRITE_MAX_TRY_TIMES) + dev_err(dev, "Write to qid map table entry %hu failed\n", i); + } + } + + return 0; +} + +static int 
nbl_phy_set_qid_map_table(void *priv, void *data, int qid_map_select) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_qid_map_param *param = (struct nbl_qid_map_param *)data; + struct nbl_virtio_qid_map_table info = {0}, info_data = {0}; + struct nbl_queue_table_select select = {0}; + u64 reg; + int i, j; + + if (phy_mgt->hw_status) + return 0; + + for (i = 0; i < param->len; i++) { + j = 0; + + info.local_qid = param->qid_map[i].local_qid; + info.notify_addr_l = param->qid_map[i].notify_addr_l; + info.notify_addr_h = param->qid_map[i].notify_addr_h; + info.global_qid = param->qid_map[i].global_qid; + info.ctrlq_flag = param->qid_map[i].ctrlq_flag; + + do { + reg = NBL_PCOMPLETER_QID_MAP_REG_ARR(qid_map_select, param->start + i); + nbl_hw_write_regs(phy_mgt, reg, (u8 *)(&info), sizeof(info)); + nbl_hw_read_regs(phy_mgt, reg, (u8 *)(&info_data), sizeof(info_data)); + if (likely(!memcmp(&info, &info_data, sizeof(info)))) + break; + j++; + } while (j < NBL_REG_WRITE_MAX_TRY_TIMES); + + if (j == NBL_REG_WRITE_MAX_TRY_TIMES) + nbl_err(common, NBL_DEBUG_QUEUE, "Write to qid map table entry %d failed\n", + param->start + i); + } + + select.select = qid_map_select; + nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG, + (u8 *)&select, sizeof(select)); + + return 0; +} + +static int nbl_phy_set_qid_map_ready(void *priv, bool ready) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_table_ready queue_table_ready = {0}; + + queue_table_ready.ready = ready; + nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_QUEUE_TABLE_READY_REG, + (u8 *)&queue_table_ready, sizeof(queue_table_ready)); + + return 0; +} + +static int nbl_phy_cfg_ipro_queue_tbl(void *priv, u16 queue_id, u16 vsi_id, u8 enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_queue_tbl ipro_queue_tbl = {0}; + + ipro_queue_tbl.vsi_en = enable; + ipro_queue_tbl.vsi_id = vsi_id; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_QUEUE_TBL(queue_id), + (u8 *)&ipro_queue_tbl, sizeof(ipro_queue_tbl)); + + return 0; +} + +static int nbl_phy_cfg_ipro_dn_sport_tbl(void *priv, u16 vsi_id, u16 dst_eth_id, + u16 bmode, bool binit) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + + if (binit) { + dpsport.entry_vld = 1; + dpsport.phy_flow = 1; + dpsport.set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; + dpsport.set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; + dpsport.set_dport.dport.down.lag_vld = 0; + dpsport.set_dport.dport.down.eth_vld = 1; + dpsport.set_dport.dport.down.eth_id = dst_eth_id; + dpsport.vlan_layer_num_1 = 3; + dpsport.set_dport_en = 1; + } else { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + } + + if (bmode == BRIDGE_MODE_VEPA) + dpsport.set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; + else + dpsport.set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_NONE; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + return 0; +} + +static int nbl_phy_set_vnet_queue_info(void *priv, struct nbl_vnet_queue_info_param *param, + u16 queue_id) +{ + struct nbl_phy_mgt_leonis *phy_mgt_leonis = (struct nbl_phy_mgt_leonis *)priv; + struct nbl_phy_mgt *phy_mgt = &phy_mgt_leonis->phy_mgt; + struct nbl_host_vnet_qinfo host_vnet_qinfo = {0}; + + 
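/* propagate the caller's PCI BDF and MSI-X mapping into the queue entry */ + 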
host_vnet_qinfo.function_id = param->function_id; + host_vnet_qinfo.device_id = param->device_id; + host_vnet_qinfo.bus_id = param->bus_id; + host_vnet_qinfo.valid = param->valid; + host_vnet_qinfo.msix_idx = param->msix_idx; + host_vnet_qinfo.msix_idx_valid = param->msix_idx_valid; + if (phy_mgt_leonis->ro_enable) { + host_vnet_qinfo.ido_en = 1; + host_vnet_qinfo.rlo_en = 1; + } + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + + return 0; +} + +static int nbl_phy_clear_vnet_queue_info(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_host_vnet_qinfo host_vnet_qinfo = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + return 0; +} + +static int nbl_phy_cfg_vnet_qinfo_log(void *priv, u16 queue_id, bool vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_host_vnet_qinfo host_vnet_qinfo = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + host_vnet_qinfo.log_en = vld; + nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id), + (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo)); + + return 0; +} + +static int nbl_phy_reset_dvn_cfg(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_dvn_queue_reset queue_reset = {0}; + struct nbl_dvn_queue_reset_done queue_reset_done = {0}; + int i = 0; + + queue_reset.dvn_queue_index = queue_id; + queue_reset.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DVN_QUEUE_RESET_REG, + (u8 *)&queue_reset, sizeof(queue_reset)); + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + while (!queue_reset_done.flag) { + i++; + if (!(i % 10)) { + nbl_err(common, NBL_DEBUG_QUEUE, "Waited too long for tx queue reset to complete"); + break; + } + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + } + + nbl_debug(common, NBL_DEBUG_QUEUE, "dvn:%u cfg reset succeeded, waited %d * 5us\n", queue_id, i); + return 0; +} + +static int nbl_phy_reset_uvn_cfg(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_uvn_queue_reset queue_reset = {0}; + struct nbl_uvn_queue_reset_done queue_reset_done = {0}; + int i = 0; + + queue_reset.index = queue_id; + queue_reset.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_RESET_REG, + (u8 *)&queue_reset, sizeof(queue_reset)); + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + while (!queue_reset_done.flag) { + i++; + if (!(i % 10)) { + nbl_err(common, NBL_DEBUG_QUEUE, "Waited too long for rx queue reset to complete"); + break; + } + + udelay(5); + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_RESET_DONE_REG, + (u8 *)&queue_reset_done, sizeof(queue_reset_done)); + } + + nbl_debug(common, NBL_DEBUG_QUEUE, "uvn:%u cfg reset succeeded, waited %d * 5us\n", queue_id, i); + return 0; +} + +static int nbl_phy_restore_dvn_context(void *priv, u16 queue_id, u16 split, u16 last_avail_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = 
NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct dvn_queue_context cxt = {0}; + + cxt.dvn_ring_wrap_counter = last_avail_index >> 15; + if (split) + cxt.dvn_avail_ring_read = last_avail_index; + else + cxt.dvn_l1_ring_read = last_avail_index & 0x7FFF; + + nbl_hw_write_regs(phy_mgt, NBL_DVN_QUEUE_CXT_TABLE_ARR(queue_id), (u8 *)&cxt, sizeof(cxt)); + nbl_info(common, NBL_DEBUG_QUEUE, "config tx ring: %u, last avail idx: %u\n", + queue_id, last_avail_index); + + return 0; +} + +static int nbl_phy_restore_uvn_context(void *priv, u16 queue_id, u16 split, u16 last_avail_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct uvn_queue_cxt cxt = {0}; + + cxt.wrap_count = last_avail_index >> 15; + if (split) + cxt.queue_head = last_avail_index; + else + cxt.queue_head = last_avail_index & 0x7FFF; + + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_CXT_TABLE_ARR(queue_id), (u8 *)&cxt, sizeof(cxt)); + nbl_info(common, NBL_DEBUG_QUEUE, "config rx ring: %u, last avail idx: %u\n", + queue_id, last_avail_index); + + return 0; +} + +static int nbl_phy_get_tx_queue_cfg(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct dvn_queue_table info = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + queue_cfg->desc = info.dvn_queue_baddr; + queue_cfg->avail = info.dvn_avail_baddr; + queue_cfg->used = info.dvn_used_baddr; + queue_cfg->size = info.dvn_queue_size; + queue_cfg->split = info.dvn_queue_type; + queue_cfg->extend_header = info.dvn_extend_header_en; + + return 0; +} + +static int nbl_phy_get_rx_queue_cfg(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct uvn_queue_table info = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + queue_cfg->desc = info.queue_baddr; + queue_cfg->avail = info.avail_baddr; + queue_cfg->used = info.used_baddr; + queue_cfg->size = info.queue_size_mask_pow; + queue_cfg->split = info.queue_type; + queue_cfg->extend_header = info.extend_header_en; + queue_cfg->half_offload_en = info.half_offload_en; + queue_cfg->rxcsum = info.guest_csum_en; + + return 0; +} + +static int nbl_phy_cfg_tx_queue(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct dvn_queue_table info = {0}; + + info.dvn_queue_baddr = queue_cfg->desc; + if (!queue_cfg->split && !queue_cfg->extend_header) + queue_cfg->avail = queue_cfg->avail | 3; + info.dvn_avail_baddr = queue_cfg->avail; + info.dvn_used_baddr = queue_cfg->used; + info.dvn_queue_size = ilog2(queue_cfg->size); + info.dvn_queue_type = queue_cfg->split; + info.dvn_queue_en = 1; + info.dvn_extend_header_en = queue_cfg->extend_header; + + nbl_hw_write_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + return 0; +} + +static int nbl_phy_cfg_rx_queue(void *priv, void *data, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_queue_cfg_param *queue_cfg = (struct nbl_queue_cfg_param *)data; + struct uvn_queue_table info = {0}; + + info.queue_baddr = queue_cfg->desc; + info.avail_baddr = queue_cfg->avail; + info.used_baddr 
= queue_cfg->used; + info.queue_size_mask_pow = ilog2(queue_cfg->size); + info.queue_type = queue_cfg->split; + info.extend_header_en = queue_cfg->extend_header; + info.half_offload_en = queue_cfg->half_offload_en; + info.guest_csum_en = queue_cfg->rxcsum; + info.queue_enable = 1; + + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + + return 0; +} + +static bool nbl_phy_check_q2tc(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_q2tc_cfg_tbl info; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + return info.vld; +} + +static int nbl_phy_cfg_q2tc_netid(void *priv, u16 queue_id, u16 netid, u16 vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_q2tc_cfg_tbl info; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + info.tcid = (info.tcid & 0x7) | (netid << 3); + info.vld = vld; + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_cfg_q2tc_tcid(void *priv, u16 queue_id, u16 tcid) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_q2tc_cfg_tbl info; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + info.tcid = (info.tcid & 0xFFF8) | tcid; + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(queue_id), + (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_set_tc_wgt(void *priv, u16 func_id, u8 *weight, u16 num_tc) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union dsch_vn_tc_wgt_cfg_tbl_u wgt_cfg = {.info = {0}}; + int i; + + for (i = 0; i < num_tc; i++) + wgt_cfg.data[i] = weight[i]; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_TC_WGT_CFG_TABLE_REG_ARR(func_id), + wgt_cfg.data, sizeof(wgt_cfg)); + + return 0; +} + +static void nbl_phy_active_shaping(void *priv, u16 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net shaping_net = {0}; + struct dsch_vn_sha2net_map_tbl sha2net = {0}; + struct dsch_vn_net2sha_map_tbl net2sha = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + if (!shaping_net.depth) + return; + + sha2net.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); + + shaping_net.valid = 1; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + net2sha.vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); +} + +static void nbl_phy_deactive_shaping(void *priv, u16 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net shaping_net = {0}; + struct dsch_vn_sha2net_map_tbl sha2net = {0}; + struct dsch_vn_net2sha_map_tbl net2sha = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + shaping_net.valid = 0; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); +} + +static int 
nbl_phy_set_shaping(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net shaping_net = {0}; + struct dsch_vn_sha2net_map_tbl sha2net = {0}; + struct dsch_vn_net2sha_map_tbl net2sha = {0}; + + if (vld) { + sha2net.vld = active; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); + } else { + net2sha.vld = vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); + } + + /* cfg shaping cir/pir */ + if (vld) { + shaping_net.valid = active; + /* total_tx_rate is in Mbps */ + /* a cir value of 1 represents 1 Mbps */ + shaping_net.cir = total_tx_rate; + /* pir equals cir */ + shaping_net.pir = shaping_net.cir; + shaping_net.depth = max(shaping_net.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + shaping_net.cbs = shaping_net.depth; + shaping_net.pbs = shaping_net.depth; + } + + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(func_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + if (!vld) { + sha2net.vld = vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(func_id), + (u8 *)&sha2net, sizeof(sha2net)); + } else { + net2sha.vld = active; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(func_id), + (u8 *)&net2sha, sizeof(net2sha)); + } + + return 0; +} + +static void nbl_phy_set_offload_shaping(struct nbl_phy_mgt *phy_mgt, + struct nbl_chan_regs_info *reg_info, u32 *value) +{ + struct nbl_shaping_net *shaping_net; + struct dsch_vn_sha2net_map_tbl *sha2net; + struct dsch_vn_net2sha_map_tbl *net2sha; + struct dsch_vn_n2g_cfg_tbl dsch_info = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_N2G_CFG_TABLE_REG_ARR(reg_info->depth), + (u8 *)&dsch_info, sizeof(dsch_info)); + + switch (reg_info->tbl_name) { + case NBL_FLOW_SHAPING_NET_REG: + shaping_net = (struct nbl_shaping_net *)value; + shaping_net->valid &= dsch_info.vld; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET(reg_info->depth), + (u8 *)shaping_net, sizeof(*shaping_net)); + break; + case NBL_FLOW_DSCH_VN_NET2SHA_MAP_TBL_REG: + sha2net = (struct dsch_vn_sha2net_map_tbl *)value; + sha2net->vld &= dsch_info.vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(reg_info->depth), + (u8 *)sha2net, sizeof(*sha2net)); + break; + case NBL_FLOW_DSCH_VN_SHA2NET_MAP_TBL_REG: + net2sha = (struct dsch_vn_net2sha_map_tbl *)value; + net2sha->vld &= dsch_info.vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(reg_info->depth), + (u8 *)net2sha, sizeof(*net2sha)); + break; + } +} + +static void nbl_phy_set_shaping_dport_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_net shaping_net = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), + (u8 *)&shaping_net, sizeof(shaping_net)); + + if (vld) + shaping_net.valid = 1; + else + shaping_net.valid = 0; + + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), + (u8 *)&shaping_net, sizeof(shaping_net)); +} + +static void nbl_phy_set_dport_fc_th_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dstore_d_dport_fc_th fc_th = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + + if (vld) + fc_th.fc_en = 1; + else + fc_th.fc_en = 0; + + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(eth_id), + (u8 *)&fc_th, sizeof(fc_th)); +} + +static int 
nbl_phy_cfg_dsch_net_to_group(void *priv, u16 func_id, u16 group_id, u16 vld) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dsch_vn_n2g_cfg_tbl info = {0}; + + info.grpid = group_id; + info.vld = vld; + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_N2G_CFG_TABLE_REG_ARR(func_id), + (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_cfg_epro_rss_ret(void *priv, u32 index, u8 size_type, u32 q_num, u16 *queue_list) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_epro_rss_ret_tbl rss_ret = {0}; + u32 table_id, table_end, group_count, odd_num, queue_id = 0; + + group_count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << size_type; + if (group_count > 256) { + nbl_err(common, NBL_DEBUG_QUEUE, + "RSS group entry size type %u exceeds the max value %u", + size_type, NBL_EPRO_RSS_ENTRY_SIZE_256); + return -EINVAL; + } + + if (q_num > group_count) { + nbl_err(common, NBL_DEBUG_QUEUE, + "q_num %u exceeds the rss group count %u\n", q_num, group_count); + return -EINVAL; + } + if (index >= NBL_EPRO_RSS_RET_TBL_DEPTH || + (index + group_count) > NBL_EPRO_RSS_RET_TBL_DEPTH) { + nbl_err(common, NBL_DEBUG_QUEUE, + "index %u exceeds the max table entry %u, entry size: %u\n", + index, NBL_EPRO_RSS_RET_TBL_DEPTH, group_count); + return -EINVAL; + } + + table_id = index / 2; + table_end = (index + group_count) / 2; + odd_num = index % 2; + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + + if (odd_num) { + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + table_id++; + } + + queue_id = queue_id % q_num; + for (; table_id < table_end; table_id++) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + rss_ret.vld1 = 1; + rss_ret.dqueue1 = queue_list[queue_id++]; + queue_id = queue_id % q_num; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + + if (odd_num) { + rss_ret.vld0 = 1; + rss_ret.dqueue0 = queue_list[queue_id++]; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + } + + return 0; +} + +static struct nbl_epro_rss_key epro_rss_key_def = { + .key0 = 0x6d5a6d5a6d5a6d5a, + .key1 = 0x6d5a6d5a6d5a6d5a, + .key2 = 0x6d5a6d5a6d5a6d5a, + .key3 = 0x6d5a6d5a6d5a6d5a, + .key4 = 0x6d5a6d5a6d5a6d5a, +}; + +static int nbl_phy_init_epro_rss_key(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_KEY_REG, + (u8 *)&epro_rss_key_def, sizeof(epro_rss_key_def)); + + return 0; +} + +static void nbl_phy_read_epro_rss_key(void *priv, u8 *rss_key) +{ + nbl_hw_read_regs(priv, NBL_EPRO_RSS_KEY_REG, + rss_key, sizeof(struct nbl_epro_rss_key)); +} + +static void nbl_phy_read_rss_indir(void *priv, u16 vsi_id, u32 *rss_indir, + u16 rss_ret_base, u16 rss_entry_size) +{ + struct nbl_epro_rss_ret_tbl rss_ret = {0}; + int i = 0; + u32 table_id, table_end, group_count, odd_num; + + group_count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size; + table_id = rss_ret_base / 2; + table_end = (rss_ret_base + group_count) / 2; + odd_num = rss_ret_base % 2; + + if (odd_num) { + nbl_hw_read_regs(priv, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + 
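/* an odd base index means the first entry sits in the high half of the pair */ + 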
rss_indir[i++] = rss_ret.dqueue1; + } + + for (; table_id < table_end; table_id++) { + nbl_hw_read_regs(priv, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + rss_indir[i++] = rss_ret.dqueue0; + rss_indir[i++] = rss_ret.dqueue1; + } + + if (odd_num) { + nbl_hw_read_regs(priv, NBL_EPRO_RSS_RET_TABLE(table_id), + (u8 *)&rss_ret, sizeof(rss_ret)); + rss_indir[i++] = rss_ret.dqueue0; + } +} + +static void nbl_phy_get_rss_alg_sel(void *priv, u8 eth_id, u8 *alg_sel) +{ + struct nbl_epro_ept_tbl ept_tbl = {0}; + + nbl_hw_read_regs(priv, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + + if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_TOEPLITZ_HASH) + *alg_sel = ETH_RSS_HASH_TOP; + else if (ept_tbl.lag_alg_sel == NBL_EPRO_RSS_ALG_CRC32) + *alg_sel = ETH_RSS_HASH_CRC32; +} + +static int nbl_phy_init_epro_vpt_tbl(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; + + epro_vpt_tbl.vld = 1; + epro_vpt_tbl.fwd = NBL_EPRO_FWD_TYPE_DROP; + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_TOEPLITZ_HASH; + epro_vpt_tbl.rss_key_type_ipv4 = NBL_EPRO_RSS_KEY_TYPE_IPV4_L4; + epro_vpt_tbl.rss_key_type_ipv6 = NBL_EPRO_RSS_KEY_TYPE_IPV6_L4; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), + (u8 *)&epro_vpt_tbl, + sizeof(struct nbl_epro_vpt_tbl)); + + return 0; +} + +static int nbl_phy_set_epro_rss_default(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_vpt_tbl epro_vpt_tbl = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + epro_vpt_tbl.rss_alg_sel = NBL_EPRO_RSS_ALG_TOEPLITZ_HASH; + epro_vpt_tbl.rss_key_type_ipv4 = NBL_EPRO_RSS_KEY_TYPE_IPV4_L4; + epro_vpt_tbl.rss_key_type_ipv6 = NBL_EPRO_RSS_KEY_TYPE_IPV6_L4; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), + (u8 *)&epro_vpt_tbl, + sizeof(struct nbl_epro_vpt_tbl)); + return 0; +} + +static int nbl_phy_set_epro_rss_pt(void *priv, u16 vsi_id, u16 rss_ret_base, u16 rss_entry_size) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_rss_pt_tbl epro_rss_pt_tbl = {0}; + struct nbl_epro_vpt_tbl epro_vpt_tbl; + + epro_rss_pt_tbl.vld = 1; + epro_rss_pt_tbl.entry_size = rss_entry_size; + epro_rss_pt_tbl.offset0_vld = 1; + epro_rss_pt_tbl.offset0 = rss_ret_base; + epro_rss_pt_tbl.offset1_vld = 0; + epro_rss_pt_tbl.offset1 = 0; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_PT_TABLE(vsi_id), (u8 *)&epro_rss_pt_tbl, + sizeof(epro_rss_pt_tbl)); + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + epro_vpt_tbl.fwd = NBL_EPRO_FWD_TYPE_NORMAL; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + return 0; +} + +static int nbl_phy_clear_epro_rss_pt(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_rss_pt_tbl epro_rss_pt_tbl = {0}; + struct nbl_epro_vpt_tbl epro_vpt_tbl; + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_PT_TABLE(vsi_id), (u8 *)&epro_rss_pt_tbl, + sizeof(epro_rss_pt_tbl)); + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + epro_vpt_tbl.fwd = NBL_EPRO_FWD_TYPE_DROP; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_TABLE(vsi_id), (u8 *)&epro_vpt_tbl, + sizeof(epro_vpt_tbl)); + + return 0; +} + +static int nbl_phy_disable_dvn(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt 
*phy_mgt = (struct nbl_phy_mgt *)priv; + struct dvn_queue_table info = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + info.dvn_queue_en = 0; + nbl_hw_write_regs(phy_mgt, NBL_DVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + return 0; +} + +static int nbl_phy_disable_uvn(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct uvn_queue_table info = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_UVN_QUEUE_TABLE_ARR(queue_id), (u8 *)&info, sizeof(info)); + return 0; +} + +static bool nbl_phy_is_txq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id) +{ + struct dsch_vn_tc_q_list_tbl tc_q_list = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_TC_Q_LIST_TABLE_REG_ARR(queue_id), + (u8 *)&tc_q_list, sizeof(tc_q_list)); + if (!tc_q_list.regi && !tc_q_list.fly && !tc_q_list.vld) + return true; + + return false; +} + +static bool nbl_phy_is_rxq_drain_out(struct nbl_phy_mgt *phy_mgt, u16 queue_id) +{ + struct uvn_desc_cxt cache_ctx = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_UVN_DESC_CXT_TABLE_ARR(queue_id), + (u8 *)&cache_ctx, sizeof(cache_ctx)); + if (cache_ctx.cache_pref_num_prev == cache_ctx.cache_pref_num_post) + return true; + + return false; +} + +static int nbl_phy_lso_dsch_drain(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + int i = 0; + + do { + if (nbl_phy_is_txq_drain_out(phy_mgt, queue_id)) + break; + + usleep_range(10, 20); + } while (++i < NBL_DRAIN_WAIT_TIMES); + + if (i >= NBL_DRAIN_WAIT_TIMES) { + nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u lso dsch drain timeout\n", queue_id); + return -1; + } + + return 0; +} + +static int nbl_phy_rsc_cache_drain(void *priv, u16 queue_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + int i = 0; + + do { + if (nbl_phy_is_rxq_drain_out(phy_mgt, queue_id)) + break; + + usleep_range(10, 20); + } while (++i < NBL_DRAIN_WAIT_TIMES); + + if (i >= NBL_DRAIN_WAIT_TIMES) { + nbl_err(common, NBL_DEBUG_QUEUE, "nbl queue %u rsc cache drain timeout\n", + queue_id); + return -1; + } + + return 0; +} + +static u16 nbl_phy_save_dvn_ctx(void *priv, u16 queue_id, u16 split) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct dvn_queue_context dvn_ctx = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_QUEUE_CXT_TABLE_ARR(queue_id), + (u8 *)&dvn_ctx, sizeof(dvn_ctx)); + + nbl_debug(common, NBL_DEBUG_QUEUE, "DVNQ save ctx: %d packed: %08x %08x split: %08x\n", + queue_id, dvn_ctx.dvn_ring_wrap_counter, dvn_ctx.dvn_l1_ring_read, + dvn_ctx.dvn_avail_ring_idx); + + if (split) + return (dvn_ctx.dvn_avail_ring_idx); + else + return (dvn_ctx.dvn_l1_ring_read & 0x7FFF) | (dvn_ctx.dvn_ring_wrap_counter << 15); +} + +static u16 nbl_phy_save_uvn_ctx(void *priv, u16 queue_id, u16 split, u16 queue_size) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct uvn_queue_cxt queue_cxt = {0}; + struct uvn_desc_cxt desc_cxt = {0}; + u16 cache_diff, queue_head, wrap_count; + + nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_CXT_TABLE_ARR(queue_id), + (u8 *)&queue_cxt, sizeof(queue_cxt)); + nbl_hw_read_regs(phy_mgt, NBL_UVN_DESC_CXT_TABLE_ARR(queue_id), + (u8 *)&desc_cxt, sizeof(desc_cxt)); + + nbl_debug(common, NBL_DEBUG_QUEUE, + "UVN save ctx: %d 
cache_tail: %08x cache_head %08x queue_head: %08x\n", + queue_id, desc_cxt.cache_tail, desc_cxt.cache_head, queue_cxt.queue_head); + + cache_diff = (desc_cxt.cache_tail - desc_cxt.cache_head + 64) & (0x3F); + queue_head = (queue_cxt.queue_head - cache_diff + 65536) & (0xFFFF); + if (queue_size) + wrap_count = !((queue_head / queue_size) & 0x1); + else + return 0xffff; + + nbl_debug(common, NBL_DEBUG_QUEUE, "UVN save ctx: %d packed: %08x %08x split: %08x\n", + queue_id, wrap_count, queue_head, queue_head); + + if (split) + return (queue_head); + else + return (queue_head & 0x7FFF) | (wrap_count << 15); +} + +static void nbl_phy_get_rx_queue_err_stats(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats) +{ + queue_err_stats->uvn_stat_pkt_drop = + nbl_hw_rd32(priv, NBL_UVN_STATIS_PKT_DROP(queue_id)); +} + +static void nbl_phy_get_tx_queue_err_stats(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats) +{ + struct nbl_dvn_stat_cnt dvn_stat_cnt; + + nbl_hw_read_regs(priv, NBL_DVN_STAT_CNT(queue_id), + (u8 *)&dvn_stat_cnt, sizeof(dvn_stat_cnt)); + queue_err_stats->dvn_pkt_drop_cnt = dvn_stat_cnt.dvn_pkt_drop_cnt; +} + +static void nbl_phy_setup_queue_switch(void *priv, u16 eth_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_upsport_tbl upsport = {0}; + struct nbl_epro_ept_tbl ept_tbl = {0}; + struct dsch_vn_g2p_cfg_tbl info = {0}; + + upsport.phy_flow = 1; + upsport.entry_vld = 1; + upsport.set_dport_en = 1; + upsport.set_dport_pri = 0; + upsport.vlan_layer_num_0 = 3; + upsport.vlan_layer_num_1 = 3; + /* default we close promisc */ + upsport.set_dport.data = 0xFFF; + + ept_tbl.vld = 1; + ept_tbl.fwd = 1; + + info.vld = 1; + info.port = (eth_id << 1); + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + + nbl_hw_write_regs(phy_mgt, NBL_DSCH_VN_G2P_CFG_TABLE_REG_ARR(eth_id), + (u8 *)&info, sizeof(info)); +} + +static int nbl_phy_cfg_phy_flow(void *priv, u16 vsi_id, u16 count, u8 eth_id, bool status) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_upsport_tbl upsport = {0}; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + int i = 0; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), (u8 *)&upsport, sizeof(upsport)); + + upsport.phy_flow = !status; + upsport.set_dport_en = !status; + if (!status) { + upsport.entry_vld = 1; + upsport.mirror_en = 0; + upsport.car_en = 0; + } + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); + + for (i = vsi_id; i < vsi_id + count; i++) { + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(i), + (u8 *)&dpsport, sizeof(dpsport)); + + dpsport.phy_flow = !status; + dpsport.set_dport_en = !status; + if (!status) { + dpsport.entry_vld = 1; + dpsport.mirror_en = 0; + dpsport.dqueue_en = 0; + } + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(i), + (u8 *)&dpsport, sizeof(dpsport)); + } + + return 0; +} + +static int nbl_phy_cfg_eth_port_priority_replace(void *priv, u8 eth_id, bool status) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_port_pri_mdf_en_cfg pri_mdf_en_cfg = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg), + sizeof(pri_mdf_en_cfg)); + switch (eth_id) { + case 0: + pri_mdf_en_cfg.eth0 = status; + break; + case 1: + pri_mdf_en_cfg.eth1 = status; + break; + 
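+	/*
+	 * Cases 2 and 3 below mirror cases 0 and 1: each Ethernet port owns
+	 * one enable field in NBL_EPRO_PORT_PRI_MDF_EN.  If the four fields
+	 * were exposed as a single bitmap (an assumption, not how the struct
+	 * is declared here), the switch could collapse to a sketch like
+	 *
+	 *	reg &= ~BIT(eth_id);
+	 *	reg |= status ? BIT(eth_id) : 0;
+	 *
+	 * the explicit per-field switch is kept to match the register layout.
+	 */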
case 2: + pri_mdf_en_cfg.eth2 = status; + break; + case 3: + pri_mdf_en_cfg.eth3 = status; + break; + default: + break; + } + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg), + sizeof(pri_mdf_en_cfg)); + return 0; +} + +static void nbl_phy_init_pfc(void *priv, u8 ether_ports) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_cos_map cos_map = {0}; + struct nbl_upa_pri_sel_conf sel_conf = {0}; + struct nbl_upa_pri_conf conf_table = {0}; + struct nbl_dqm_rxmac_tx_port_bp_en_cfg dqm_port_bp_en = {0}; + struct nbl_dqm_rxmac_tx_cos_bp_en_cfg dqm_cos_bp_en = {0}; + struct nbl_uqm_rx_cos_bp_en_cfg uqm_rx_cos_bp_en = {0}; + struct nbl_uqm_tx_cos_bp_en_cfg uqm_tx_cos_bp_en = {0}; + struct nbl_ustore_port_fc_th ustore_port_fc_th = {0}; + struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0}; + struct nbl_epro_port_pri_mdf_en_cfg pri_mdf_en_cfg = {0}; + int i, j; + + /* DQM */ + /* set default bp_mode: port */ + /* TX bp: dqm send received ETH RX Pause to DSCH */ + /* dqm rxmac_tx_port_bp_en */ + dqm_port_bp_en.eth0 = 1; + dqm_port_bp_en.eth1 = 1; + dqm_port_bp_en.eth2 = 1; + dqm_port_bp_en.eth3 = 1; + nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_PORT_BP_EN, + (u8 *)(&dqm_port_bp_en), sizeof(dqm_port_bp_en)); + + /* TX bp: dqm does not send received ETH RX PFC to DSCH */ + /* dqm rxmac_tx_cos_bp_en */ + dqm_cos_bp_en.eth0 = 0; + dqm_cos_bp_en.eth1 = 0; + dqm_cos_bp_en.eth2 = 0; + dqm_cos_bp_en.eth3 = 0; + nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_COS_BP_EN, + (u8 *)(&dqm_cos_bp_en), sizeof(dqm_cos_bp_en)); + + /* UQM */ + /* RX bp: uqm receive loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h port bp */ + /* uqm rx_port_bp_en_cfg is ok */ + /* RX bp: uqm receive loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h port bp */ + /* uqm tx_port_bp_en_cfg is ok */ + + /* RX bp: uqm receive loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h cos bp */ + /* uqm rx_cos_bp_en */ + uqm_rx_cos_bp_en.vld_l = 0xFFFFFFFF; + uqm_rx_cos_bp_en.vld_h = 0xFFFF; + nbl_hw_write_regs(phy_mgt, NBL_UQM_RX_COS_BP_EN, (u8 *)(&uqm_rx_cos_bp_en), + sizeof(uqm_rx_cos_bp_en)); + + /* RX bp: uqm send received loopback/emp/rdma_e/rdma_h/l4s_e/l4s_h cos bp to USTORE */ + /* uqm tx_cos_bp_en */ + uqm_tx_cos_bp_en.vld_l = 0xFFFFFFFF; + uqm_tx_cos_bp_en.vld_h = 0xFF; + nbl_hw_write_regs(phy_mgt, NBL_UQM_TX_COS_BP_EN, (u8 *)(&uqm_tx_cos_bp_en), + sizeof(uqm_tx_cos_bp_en)); + + /* TX bp: DSCH dp0-3 response to DQM dp0-3 pfc/port bp */ + /* dsch_dpt_pfc_map_vnh default value is ok */ + /* TX bp: DSCH response to DQM cos bp, pkt_cos -> sch_cos map table */ + /* dsch vn_host_dpx_prixx_p2s_map_cfg is ok */ + + /* downstream: modify packet pri, disabled by default */ + /* epro port_pri_mdf_en */ + pri_mdf_en_cfg.eth0 = 0; + pri_mdf_en_cfg.eth1 = 0; + pri_mdf_en_cfg.eth2 = 0; + pri_mdf_en_cfg.eth3 = 0; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_PORT_PRI_MDF_EN, (u8 *)(&pri_mdf_en_cfg), + sizeof(pri_mdf_en_cfg)); + + for (i = 0; i < ether_ports; i++) { + /* set default bp_mode: port */ + /* RX bp: USTORE port bp th, enable send pause frame */ + /* ustore port_fc_th */ + ustore_port_fc_th.xoff_th = 0x190; + ustore_port_fc_th.xon_th = 0x190; + ustore_port_fc_th.fc_set = 0; + ustore_port_fc_th.fc_en = 1; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_FC_TH_REG_ARR(i), + (u8 *)(&ustore_port_fc_th), sizeof(ustore_port_fc_th)); + + for (j = 0; j < 8; j++) { + /* RX bp: ustore cos bp th, disable send pfc frame */ + /* ustore cos_fc_th */ + ustore_cos_fc_th.xoff_th = 0x64; + ustore_cos_fc_th.xon_th = 0x64; + ustore_cos_fc_th.fc_set = 0; +
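+			/*
+			 * 0x64 programs both thresholds to 100 buffer units
+			 * (USTORE cells, assumed): back-pressure asserts once
+			 * occupancy rises above xoff_th and releases when it
+			 * falls back below xon_th.  fc_en stays 0 on the next
+			 * line, so per-priority PFC frames are disabled by
+			 * default and only enabled later through
+			 * nbl_phy_configure_pfc().
+			 */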
ustore_cos_fc_th.fc_en = 0; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(i * 8 + j), + (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th)); + + /* downstream: sch_cos->pkt_cos or sch_cos->dscp */ + /* epro sch_cos_map */ + cos_map.pkt_cos = j; + cos_map.dscp = j << 3; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_SCH_COS_MAP_TABLE(i, j), + (u8 *)(&cos_map), sizeof(cos_map)); + } + } + + /* upstream: pkt dscp/802.1p -> sch_cos */ + for (i = 0; i < ether_ports; i++) { + /* upstream: when pfc_mode is 802.1p, vlan pri -> sch_cos map table */ + /* upa pri_conf_table */ + conf_table.pri0 = 0; + conf_table.pri1 = 1; + conf_table.pri2 = 2; + conf_table.pri3 = 3; + conf_table.pri4 = 4; + conf_table.pri5 = 5; + conf_table.pri6 = 6; + conf_table.pri7 = 7; + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_CONF_TABLE(i * 8), + (u8 *)(&conf_table), sizeof(conf_table)); + + /* upstream: set default pfc_mode is 802.1p, use outer vlan */ + /* upa pri_sel_conf */ + sel_conf.pri_sel = (1 << 4 | 1 << 3); + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_SEL_CONF_TABLE(i), + (u8 *)(&sel_conf), sizeof(sel_conf)); + } +} + +static void nbl_phy_configure_pfc(void *priv, u8 eth_id, u8 *pfc) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dqm_rxmac_tx_port_bp_en_cfg dqm_port_bp_en = {0}; + struct nbl_dqm_rxmac_tx_cos_bp_en_cfg dqm_cos_bp_en = {0}; + struct nbl_ustore_port_fc_th ustore_port_fc_th = {0}; + struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0}; + struct nbl_epro_cos_map cos_map = {0}; + u32 enable = 0; + u32 cos_en = 0; + int i; + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { + if (pfc[i]) + enable = 1; + cos_en |= pfc[i] << i; + } + + /* set rx */ + nbl_hw_read_regs(phy_mgt, NBL_DQM_RXMAC_TX_PORT_BP_EN, + (u8 *)(&dqm_port_bp_en), sizeof(dqm_port_bp_en)); + nbl_hw_read_regs(phy_mgt, NBL_DQM_RXMAC_TX_COS_BP_EN, + (u8 *)(&dqm_cos_bp_en), sizeof(dqm_cos_bp_en)); + + switch (eth_id) { + case 0: + dqm_port_bp_en.eth0 = !enable; + dqm_cos_bp_en.eth0 = cos_en; + break; + case 1: + dqm_port_bp_en.eth1 = !enable; + dqm_cos_bp_en.eth1 = cos_en; + break; + case 2: + dqm_port_bp_en.eth2 = !enable; + dqm_cos_bp_en.eth2 = cos_en; + break; + case 3: + dqm_port_bp_en.eth3 = !enable; + dqm_cos_bp_en.eth3 = cos_en; + break; + default: + return; + } + + nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_PORT_BP_EN, + (u8 *)(&dqm_port_bp_en), sizeof(dqm_port_bp_en)); + nbl_hw_write_regs(phy_mgt, NBL_DQM_RXMAC_TX_COS_BP_EN, + (u8 *)(&dqm_cos_bp_en), sizeof(dqm_cos_bp_en)); + + /* set tx */ + nbl_hw_read_regs(phy_mgt, NBL_USTORE_PORT_FC_TH_REG_ARR(eth_id), + (u8 *)(&ustore_port_fc_th), sizeof(ustore_port_fc_th)); + ustore_port_fc_th.fc_en = !enable; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_PORT_FC_TH_REG_ARR(eth_id), + (u8 *)(&ustore_port_fc_th), sizeof(ustore_port_fc_th)); + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { + nbl_hw_read_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + i), + (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th)); + ustore_cos_fc_th.fc_en = pfc[i]; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + i), + (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th)); + + /* downstream: sch_cos->pkt_cos or sch_cos->dscp */ + /* epro sch_cos_map */ + cos_map.pkt_cos = i; + cos_map.dscp = i << 3; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_SCH_COS_MAP_TABLE(eth_id, i), + (u8 *)(&cos_map), sizeof(cos_map)); + } +} + +static void nbl_phy_configure_trust(void *priv, u8 eth_id, u8 trust, u8 *dscp2prio_map) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct 
nbl_upa_pri_sel_conf sel_conf = {0}; + struct nbl_upa_pri_conf conf_table = {0}; + struct nbl_epro_ept_tbl ept_tbl = {0}; + int i; + + if (trust) { /* dscp */ + nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + ept_tbl.pfc_mode = 1; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + + for (i = 0; i < NBL_MAX_PFC_PRIORITIES; i++) { + conf_table.pri0 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES]; + conf_table.pri1 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 1]; + conf_table.pri2 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 2]; + conf_table.pri3 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 3]; + conf_table.pri4 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 4]; + conf_table.pri5 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 5]; + conf_table.pri6 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 6]; + conf_table.pri7 = dscp2prio_map[i * NBL_MAX_PFC_PRIORITIES + 7]; + + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_CONF_TABLE(eth_id * 8 + i), + (u8 *)(&conf_table), sizeof(conf_table)); + } + + sel_conf.pri_sel = (1 << 3); + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_SEL_CONF_TABLE(eth_id), + (u8 *)(&sel_conf), sizeof(sel_conf)); + } else { + /* upstream: when pfc_mode is 802.1p, vlan pri -> sch_cos map table */ + /* upa pri_conf_table */ + nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + ept_tbl.pfc_mode = 0; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(eth_id), (u8 *)&ept_tbl, + sizeof(struct nbl_epro_ept_tbl)); + conf_table.pri0 = 0; + conf_table.pri1 = 1; + conf_table.pri2 = 2; + conf_table.pri3 = 3; + conf_table.pri4 = 4; + conf_table.pri5 = 5; + conf_table.pri6 = 6; + conf_table.pri7 = 7; + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_CONF_TABLE(eth_id * 8), + (u8 *)(&conf_table), sizeof(conf_table)); + + /* upstream: set default pfc_mode is 802.1p, use outer vlan */ + /* upa pri_sel_conf */ + sel_conf.pri_sel = (1 << 4 | 1 << 3); + nbl_hw_write_regs(phy_mgt, NBL_UPA_PRI_SEL_CONF_TABLE(eth_id), + (u8 *)(&sel_conf), sizeof(sel_conf)); + } +} + +static void nbl_phy_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) +{ + nbl_phy_configure_pfc(priv, eth_id, pfc); + nbl_phy_configure_trust(priv, eth_id, trust, dscp2prio_map); +} + +static int nbl_phy_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0}; + + if (xoff > NBL_MAX_USTORE_COS_FC_TH || xon > NBL_MAX_USTORE_COS_FC_TH || + xoff <= 0 || xon <= 0) + return -EINVAL; + + nbl_hw_read_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + prio), + (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th)); + ustore_cos_fc_th.xoff_th = xoff; + ustore_cos_fc_th.xon_th = xon; + nbl_hw_write_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + prio), + (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th)); + + return 0; +} + +static void nbl_phy_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ustore_cos_fc_th ustore_cos_fc_th = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_USTORE_COS_FC_TH_REG_ARR(eth_id * 8 + prio), + (u8 *)(&ustore_cos_fc_th), sizeof(ustore_cos_fc_th)); + *xoff = ustore_cos_fc_th.xoff_th; + *xon = ustore_cos_fc_th.xon_th; +} + +static void nbl_phy_enable_mailbox_irq(void *priv, u16 func_id, bool enable_msix, + u16 global_vector_id) 
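+/*
+ * Typical mailbox bring-up order, as an illustrative sketch only (ring
+ * addresses, sizes and the vector id are hypothetical placeholders):
+ *
+ *	nbl_phy_config_mailbox_rxq(priv, rx_ring_dma, size_bwid);
+ *	nbl_phy_config_mailbox_txq(priv, tx_ring_dma, size_bwid);
+ *	nbl_phy_enable_mailbox_irq(priv, func_id, true, vector_id);
+ *	nbl_phy_update_mailbox_queue_tail_ptr(priv, tail, 1);
+ *
+ * The doorbell written by the last call packs the producer index into the
+ * high half-word and the local queue id into the low one,
+ * value = (tail << 16) | local_qid, with local_qid 0 = rx and 1 = tx.
+ */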
+{ + struct nbl_mailbox_qinfo_map_table mb_qinfo_map = { 0 }; + + nbl_hw_read_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id), + (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map)); + + if (enable_msix) { + mb_qinfo_map.msix_idx = global_vector_id; + mb_qinfo_map.msix_idx_valid = 1; + } else { + mb_qinfo_map.msix_idx = 0; + mb_qinfo_map.msix_idx_valid = 0; + } + + nbl_hw_write_regs(priv, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id), + (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map)); +} + +static void nbl_abnormal_intr_init(struct nbl_phy_mgt *phy_mgt) +{ + struct nbl_fem_int_mask fem_mask = {0}; + struct nbl_epro_int_mask epro_mask = {0}; + u32 top_ctrl_mask = 0xFFFFFFFF; + + /* Mask and clear fem cfg_err */ + nbl_hw_read_regs(phy_mgt, NBL_FEM_INT_MASK, (u8 *)&fem_mask, sizeof(fem_mask)); + fem_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_FEM_INT_MASK, (u8 *)&fem_mask, sizeof(fem_mask)); + + memset(&fem_mask, 0, sizeof(fem_mask)); + fem_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_FEM_INT_STATUS, (u8 *)&fem_mask, sizeof(fem_mask)); + + nbl_hw_read_regs(phy_mgt, NBL_FEM_INT_MASK, (u8 *)&fem_mask, sizeof(fem_mask)); + + /* Mask and clear epro cfg_err */ + nbl_hw_read_regs(phy_mgt, NBL_EPRO_INT_MASK, (u8 *)&epro_mask, sizeof(epro_mask)); + epro_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_INT_MASK, (u8 *)&epro_mask, sizeof(epro_mask)); + + memset(&epro_mask, 0, sizeof(epro_mask)); + epro_mask.cfg_err = 1; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_INT_STATUS, (u8 *)&epro_mask, sizeof(epro_mask)); + + /* Mask and clear all top_ctrl abnormal intrs. + * TODO: might not need this + */ + nbl_hw_write_regs(phy_mgt, NBL_TOP_CTRL_INT_MASK, + (u8 *)&top_ctrl_mask, sizeof(top_ctrl_mask)); + + nbl_hw_write_regs(phy_mgt, NBL_TOP_CTRL_INT_STATUS, + (u8 *)&top_ctrl_mask, sizeof(top_ctrl_mask)); +} + +static void nbl_phy_enable_abnormal_irq(void *priv, bool enable_msix, + u16 global_vector_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_abnormal_msix_vector abnormal_msix_vector = { 0 }; + u32 abnormal_timeout = 0x927C0; /* 600000, 1ms */ + u32 quirks; + + if (enable_msix) { + abnormal_msix_vector.idx = global_vector_id; + abnormal_msix_vector.vld = 1; + } + + quirks = nbl_phy_get_quirks(phy_mgt); + + if (performance_mode & BIT(NBL_QUIRKS_NO_TOE) || + !(quirks & BIT(NBL_QUIRKS_NO_TOE))) + abnormal_timeout = 0x3938700; /* 1s */ + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_ABNORMAL_TIMEOUT, + (u8 *)&abnormal_timeout, sizeof(abnormal_timeout)); + + nbl_hw_write_regs(phy_mgt, NBL_PADPT_ABNORMAL_MSIX_VEC, + (u8 *)&abnormal_msix_vector, sizeof(abnormal_msix_vector)); + + nbl_abnormal_intr_init(phy_mgt); +} + +static void nbl_phy_enable_msix_irq(void *priv, u16 global_vector_id) +{ + struct nbl_msix_notify msix_notify = { 0 }; + + msix_notify.glb_msix_idx = global_vector_id; + + nbl_hw_write_regs(priv, NBL_PCOMPLETER_MSIX_NOTIRY_OFFSET, + (u8 *)&msix_notify, sizeof(msix_notify)); +} + +static u8 *nbl_phy_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_msix_notify msix_notify = { 0 }; + + msix_notify.glb_msix_idx = global_vector_id; + memcpy(irq_data, &msix_notify, sizeof(msix_notify)); + + return (phy_mgt->hw_addr + NBL_PCOMPLETER_MSIX_NOTIRY_OFFSET); +} + +static void nbl_phy_configure_msix_map(void *priv, u16 func_id, bool valid, + dma_addr_t dma_addr, u8 bus, u8 devid, u8 function) +{ + struct nbl_function_msix_map function_msix_map = { 0 }; + + if (valid) { +
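+		/*
+		 * Populate the entry only when mapping: on teardown
+		 * (valid == false) the zero-initialised structure above is
+		 * written out unchanged, which clears this function's entry.
+		 */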
function_msix_map.msix_map_base_addr = dma_addr; + /* use the AF's BDF, because the DMA memory is allocated by the AF */ + function_msix_map.function = function; + function_msix_map.devid = devid; + function_msix_map.bus = bus; + function_msix_map.valid = 1; + } + + nbl_hw_write_regs(priv, NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(func_id), + (u8 *)&function_msix_map, sizeof(function_msix_map)); +} + +static void nbl_phy_configure_msix_info(void *priv, u16 func_id, bool valid, u16 interrupt_id, + u8 bus, u8 devid, u8 function, bool msix_mask_en) +{ + struct nbl_pcompleter_host_msix_fid_table host_msix_fid_table = { 0 }; + struct nbl_host_msix_info msix_info = { 0 }; + + if (valid) { + host_msix_fid_table.vld = 1; + host_msix_fid_table.fid = func_id; + + msix_info.intrl_pnum = 0; + msix_info.intrl_rate = 0; + msix_info.function = function; + msix_info.devid = devid; + msix_info.bus = bus; + msix_info.valid = 1; + if (msix_mask_en) + msix_info.msix_mask_en = 1; + } + + nbl_hw_write_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); + nbl_hw_write_regs(priv, NBL_PCOMPLETER_HOST_MSIX_FID_TABLE(interrupt_id), + (u8 *)&host_msix_fid_table, sizeof(host_msix_fid_table)); +} + +static void nbl_phy_update_mailbox_queue_tail_ptr(void *priv, u16 tail_ptr, u8 txrx) +{ + /* local_qid 0 and 1 denote rx and tx queue respectively */ + u32 local_qid = txrx; + u32 value = ((u32)tail_ptr << 16) | local_qid; + + /* wmb for mbx notify */ + wmb(); + nbl_mbx_wr32(priv, NBL_MAILBOX_NOTIFY_ADDR, value); +} + +static void nbl_phy_config_mailbox_rxq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + qinfo_cfg_rx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); + + qinfo_cfg_rx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_rx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_rx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_rx_table.queue_rst = 0; + qinfo_cfg_rx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + +static void nbl_phy_config_mailbox_txq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + qinfo_cfg_tx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); + + qinfo_cfg_tx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_tx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_tx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_tx_table.queue_rst = 0; + qinfo_cfg_tx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static void nbl_phy_stop_mailbox_rxq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + +static void nbl_phy_stop_mailbox_txq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static u16 nbl_phy_get_mailbox_rx_tail_ptr(void *priv) +{ + struct
nbl_mailbox_qinfo_cfg_dbg_tbl cfg_dbg_tbl = { 0 }; + + nbl_hw_read_mbx_regs(priv, NBL_MAILBOX_QINFO_CFG_DBG_TABLE_ADDR, + (u8 *)&cfg_dbg_tbl, sizeof(cfg_dbg_tbl)); + return cfg_dbg_tbl.rx_tail_ptr; +} + +static bool nbl_phy_check_mailbox_dma_err(void *priv, bool tx) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tbl = { 0 }; + u64 addr; + + if (tx) + addr = NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR; + else + addr = NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR; + + nbl_hw_read_mbx_regs(priv, addr, (u8 *)&qinfo_cfg_tbl, sizeof(qinfo_cfg_tbl)); + return !!qinfo_cfg_tbl.dif_err; +} + +static u32 nbl_phy_get_host_pf_mask(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 data; + + nbl_hw_read_regs(phy_mgt, NBL_PCIE_HOST_K_PF_MASK_REG, (u8 *)&data, sizeof(data)); + return data; +} + +static u32 nbl_phy_get_host_pf_fid(void *priv, u8 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 data; + + nbl_hw_read_regs(phy_mgt, NBL_PCIE_HOST_K_PF_FID(func_id), (u8 *)&data, sizeof(data)); + return data; +} + +static void nbl_phy_cfg_mailbox_qinfo(void *priv, u16 func_id, u16 bus, u16 devid, u16 function) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mailbox_qinfo_map_table mb_qinfo_map; + + memset(&mb_qinfo_map, 0, sizeof(mb_qinfo_map)); + mb_qinfo_map.function = function; + mb_qinfo_map.devid = devid; + mb_qinfo_map.bus = bus; + mb_qinfo_map.msix_idx_valid = 0; + nbl_hw_write_regs(phy_mgt, NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id), + (u8 *)&mb_qinfo_map, sizeof(mb_qinfo_map)); +} + +static void nbl_phy_update_tail_ptr(void *priv, struct nbl_notify_param *param) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u8 __iomem *notify_addr = phy_mgt->hw_addr; + u32 local_qid = param->notify_qid; + u32 tail_ptr = param->tail_ptr; + + writel((((u32)tail_ptr << 16) | (u32)local_qid), notify_addr); +} + +static u8 *nbl_phy_get_tail_ptr(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return phy_mgt->hw_addr; +} + +static void nbl_phy_set_promisc_mode(void *priv, u16 vsi_id, u16 eth_id, u16 mode) +{ + struct nbl_ipro_upsport_tbl upsport; + + nbl_hw_read_regs(priv, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); + if (mode) { + upsport.set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + upsport.set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + upsport.set_dport.dport.up.port_id = vsi_id; + upsport.set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_NONE; + } else { + upsport.set_dport.data = 0xFFF; + } + nbl_hw_write_regs(priv, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); +} + +static void nbl_phy_get_coalesce(void *priv, u16 interrupt_id, u16 *pnum, u16 *rate) +{ + struct nbl_host_msix_info msix_info = { 0 }; + + nbl_hw_read_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); + + *pnum = msix_info.intrl_pnum; + *rate = msix_info.intrl_rate; +} + +static void nbl_phy_set_coalesce(void *priv, u16 interrupt_id, u16 pnum, u16 rate) +{ + struct nbl_host_msix_info msix_info = { 0 }; + + nbl_hw_read_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); + + msix_info.intrl_pnum = pnum; + msix_info.intrl_rate = rate; + nbl_hw_write_regs(priv, NBL_PADPT_HOST_MSIX_INFO_REG_ARR(interrupt_id), + (u8 *)&msix_info, sizeof(msix_info)); +} + +static int nbl_phy_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; 
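+	/*
+	 * Worked example for the packing below, assuming nbl_convert_mac()
+	 * reverses the byte order: mac = 00:11:22:33:44:55 gives
+	 * reverse_mac = 55:44:33:22:11:00, so smac_low becomes 0x4455 and
+	 * the four bytes 33:22:11:00 are copied into smac_high.
+	 */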
+ struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + u8 reverse_mac[ETH_ALEN]; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + nbl_convert_mac(mac, reverse_mac); + dpsport.smac_low = reverse_mac[0] | reverse_mac[1] << 8; + memcpy(&dpsport.smac_high, &reverse_mac[2], sizeof(u32)); + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + return 0; +} + +static int nbl_phy_set_spoof_check_enable(void *priv, u16 vsi_id, u8 enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_dn_src_port_tbl dpsport = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + dpsport.addr_check_en = enable; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TABLE(vsi_id), + (u8 *)&dpsport, sizeof(struct nbl_ipro_dn_src_port_tbl)); + + return 0; +} + +static void nbl_phy_config_adminq_rxq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + qinfo_cfg_rx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); + + qinfo_cfg_rx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_rx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_rx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_rx_table.queue_rst = 0; + qinfo_cfg_rx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + +static void nbl_phy_config_adminq_txq(void *priv, dma_addr_t dma_addr, int size_bwid) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + qinfo_cfg_tx_table.queue_rst = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); + + qinfo_cfg_tx_table.queue_base_addr_l = (u32)(dma_addr & 0xFFFFFFFF); + qinfo_cfg_tx_table.queue_base_addr_h = (u32)(dma_addr >> 32); + qinfo_cfg_tx_table.queue_size_bwind = (u32)size_bwid; + qinfo_cfg_tx_table.queue_rst = 0; + qinfo_cfg_tx_table.queue_en = 1; + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static void nbl_phy_stop_adminq_rxq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_rx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR, + (u8 *)&qinfo_cfg_rx_table, sizeof(qinfo_cfg_rx_table)); +} + +static void nbl_phy_stop_adminq_txq(void *priv) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tx_table = { 0 }; + + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR, + (u8 *)&qinfo_cfg_tx_table, sizeof(qinfo_cfg_tx_table)); +} + +static void nbl_phy_cfg_adminq_qinfo(void *priv, u16 bus, u16 devid, u16 function) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_adminq_qinfo_map_table adminq_qinfo_map = {0}; + + memset(&adminq_qinfo_map, 0, sizeof(adminq_qinfo_map)); + adminq_qinfo_map.function = function; + adminq_qinfo_map.devid = devid; + adminq_qinfo_map.bus = bus; + + nbl_hw_write_mbx_regs(phy_mgt, NBL_ADMINQ_MSIX_MAP_TABLE_ADDR, + (u8 *)&adminq_qinfo_map, sizeof(adminq_qinfo_map)); +} + +static void nbl_phy_enable_adminq_irq(void *priv, bool enable_msix, u16 global_vector_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct 
nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + struct nbl_adminq_qinfo_map_table adminq_qinfo_map = { 0 }; + + adminq_qinfo_map.bus = common->bus; + adminq_qinfo_map.devid = common->devid; + adminq_qinfo_map.function = NBL_COMMON_TO_PCI_FUNC_ID(common); + + if (enable_msix) { + adminq_qinfo_map.msix_idx = global_vector_id; + adminq_qinfo_map.msix_idx_valid = 1; + } else { + adminq_qinfo_map.msix_idx = 0; + adminq_qinfo_map.msix_idx_valid = 0; + } + + nbl_hw_write_mbx_regs(priv, NBL_ADMINQ_MSIX_MAP_TABLE_ADDR, + (u8 *)&adminq_qinfo_map, sizeof(adminq_qinfo_map)); +} + +static void nbl_phy_update_adminq_queue_tail_ptr(void *priv, u16 tail_ptr, u8 txrx) +{ + /* local_qid 0 and 1 denote rx and tx queue respectively */ + u32 local_qid = txrx; + u32 value = ((u32)tail_ptr << 16) | local_qid; + + /* wmb for adminq notify */ + wmb(); + nbl_mbx_wr32(priv, NBL_ADMINQ_NOTIFY_ADDR, value); +} + +static u16 nbl_phy_get_adminq_rx_tail_ptr(void *priv) +{ + struct nbl_adminq_qinfo_cfg_dbg_tbl cfg_dbg_tbl = { 0 }; + + nbl_hw_read_mbx_regs(priv, NBL_ADMINQ_QINFO_CFG_DBG_TABLE_ADDR, + (u8 *)&cfg_dbg_tbl, sizeof(cfg_dbg_tbl)); + return cfg_dbg_tbl.rx_tail_ptr; +} + +static bool nbl_phy_check_adminq_dma_err(void *priv, bool tx) +{ + struct nbl_mailbox_qinfo_cfg_table qinfo_cfg_tbl = { 0 }; + u64 addr; + + if (tx) + addr = NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR; + else + addr = NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR; + + nbl_hw_read_mbx_regs(priv, addr, (u8 *)&qinfo_cfg_tbl, sizeof(qinfo_cfg_tbl)); + + if (!qinfo_cfg_tbl.rsv1 && !qinfo_cfg_tbl.rsv2 && qinfo_cfg_tbl.dif_err) + return true; + + return false; +} + +static u8 __iomem *nbl_phy_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + if (size) + *size = (size_t)phy_mgt->hw_size; + return phy_mgt->hw_addr; +} + +static void nbl_phy_cfg_ktls_tx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ktls_keymat keymat; + u8 salt_len = 4; + int i; + + memset(&keymat, 0, sizeof(keymat)); + + keymat.ena = 1; + keymat.mode = mode; + + for (i = 0; i < salt_len; i++) + keymat.salt[salt_len - 1 - i] = salt[i]; + + for (i = 0; i < key_len; i++) + keymat.key[key_len - 1 - i] = key[i]; + + nbl_hw_write_regs(phy_mgt, NBL_DL4S_KEY_SALT(index), (u8 *)&keymat, sizeof(keymat)); +} + +static void nbl_phy_cfg_ktls_rx_keymat(void *priv, u32 index, u8 mode, + u8 *salt, u8 *key, u8 key_len) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ktls_keymat keymat; + u8 salt_len = 4; + int i; + + memset(&keymat, 0, sizeof(keymat)); + + keymat.ena = 1; + keymat.mode = mode; + + for (i = 0; i < salt_len; i++) + keymat.salt[salt_len - 1 - i] = salt[i]; + + for (i = 0; i < key_len; i++) + keymat.key[key_len - 1 - i] = key[i]; + + nbl_hw_write_regs(phy_mgt, NBL_UL4S_KEY_SALT(index), (u8 *)&keymat, sizeof(keymat)); +} + +static void nbl_phy_cfg_ktls_rx_record(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_ktls_sync_trig sync_trig = {0}; + + if (init) { + sync_trig.trig = 0; + sync_trig.init_sync = 0; + } else { + sync_trig.trig = 0; + sync_trig.init_sync = 1; + } + nbl_hw_wr32(phy_mgt, NBL_UL4S_SYNC_TRIG, sync_trig.data); + + nbl_hw_wr32(phy_mgt, NBL_UL4S_SYNC_SID, index); + nbl_hw_wr32(phy_mgt, NBL_UL4S_SYNC_TCP_SN, tcp_sn); + nbl_hw_write_regs(phy_mgt, NBL_UL4S_SYNC_REC_NUM, (u8 *)&rec_num, 
sizeof(u64)); + + if (init) { + sync_trig.trig = 1; + sync_trig.init_sync = 0; + } else { + sync_trig.trig = 1; + sync_trig.init_sync = 1; + } + nbl_hw_wr32(phy_mgt, NBL_UL4S_SYNC_TRIG, sync_trig.data); +} + +static void nbl_phy_cfg_dipsec_nat(void *priv, u16 sport) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_dprbac_nat dprbac_nat = {.data = 0}; + + dprbac_nat.sport = sport; + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_NAT, dprbac_nat.data); +} + +static void nbl_phy_cfg_dipsec_sad_iv(void *priv, u32 index, u64 iv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dprbac_sad_iv ipsec_iv = {0}; + + ipsec_iv.iv = iv; + nbl_hw_write_regs(phy_mgt, NBL_DPRBAC_SAD_IV(index), (u8 *)&ipsec_iv, sizeof(ipsec_iv)); +} + +static void nbl_phy_cfg_dipsec_sad_esn(void *priv, u32 index, u32 sn, + u32 esn, u8 wrap_en, u8 enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dprbac_sad_esn ipsec_esn = {0}; + + ipsec_esn.sn = sn; + ipsec_esn.esn = esn; + ipsec_esn.wrap_en = wrap_en; + ipsec_esn.enable = enable; + nbl_hw_write_regs(phy_mgt, NBL_DPRBAC_SAD_ESN(index), (u8 *)&ipsec_esn, sizeof(ipsec_esn)); +} + +static void nbl_phy_cfg_dipsec_sad_lifetime(void *priv, u32 index, u32 lft_cnt, + u32 lft_diff, u8 limit_enable, u8 limit_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dprbac_sad_lifetime lifetime = {0}; + + lifetime.cnt = lft_cnt; + lifetime.diff = lft_diff; + lifetime.enable = limit_enable; + lifetime.unit = limit_type; + nbl_hw_write_regs(phy_mgt, NBL_DPRBAC_SAD_LIFETIME(index), + (u8 *)&lifetime, sizeof(lifetime)); +} + +static void nbl_phy_cfg_dipsec_sad_crypto(void *priv, u32 index, u32 *key, u32 salt, + u32 crypto_type, u8 tunnel_mode, u8 icv_len) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dprbac_sad_crypto_info crypto_info; + + memset(&crypto_info, 0, sizeof(crypto_info)); + + memcpy(crypto_info.key, key, sizeof(crypto_info.key)); + crypto_info.salt = salt; + crypto_info.crypto_type = crypto_type; + crypto_info.tunnel_mode = tunnel_mode; + crypto_info.icv_len = icv_len; + nbl_hw_write_regs(phy_mgt, NBL_DPRBAC_SAD_CRYPTO_INFO(index), + (u8 *)&crypto_info, sizeof(crypto_info)); +} + +static void nbl_phy_cfg_dipsec_sad_encap(void *priv, u32 index, u8 nat_flag, + u16 dport, u32 spi, u32 *ip_data) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_dprbac_sad_encap_info encap_info; + + memset(&encap_info, 0, sizeof(encap_info)); + + encap_info.nat_flag = nat_flag; + encap_info.dport = dport; + encap_info.spi = spi; + memcpy(encap_info.dip_addr, ip_data, 16); + memcpy(encap_info.sip_addr, ip_data + 4, 16); + nbl_hw_write_regs(phy_mgt, NBL_DPRBAC_SAD_ENCAP_INFO(index), + (u8 *)&encap_info, sizeof(encap_info)); +} + +static u32 nbl_phy_read_dipsec_status(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return nbl_hw_rd32(phy_mgt, NBL_DPRBAC_INT_STATUS); +} + +static u32 nbl_phy_reset_dipsec_status(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 dipsec_status; + + dipsec_status = nbl_hw_rd32(phy_mgt, NBL_DPRBAC_INT_STATUS); + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_INT_STATUS, dipsec_status); + + return dipsec_status; +} + +static u32 nbl_phy_read_dipsec_lft_info(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return nbl_hw_rd32(phy_mgt, NBL_DPRBAC_LIFETIME_INFO); +} + +static void nbl_phy_cfg_dipsec_lft_info(void *priv, u32 index, u32 lifetime_diff, + 
u32 flag_wen, u32 msb_wen) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_ipsec_lifetime_diff modify_liftime; + + memset(&modify_liftime, 0, sizeof(modify_liftime)); + + modify_liftime.sad_index = index; + if (flag_wen) { + modify_liftime.lifetime_diff = lifetime_diff; + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_LIFETIME_DIFF, modify_liftime.data[1]); + modify_liftime.flag_wen = 1; + modify_liftime.flag_value = 1; + } + + if (msb_wen) { + modify_liftime.msb_wen = 1; + modify_liftime.msb_value = 1; + } + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_SAD_LIFEDIFF, modify_liftime.data[0]); +} + +static void nbl_phy_init_dprbac(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_dprbac_enable dprbac_enable = {.data = 0}; + union nbl_dprbac_dbg_cnt_en dbg_cnt_en = {.data = 0}; + + dprbac_enable.prbac = 1; + dprbac_enable.mf_fwd = 1; + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_ENABLE, dprbac_enable.data); + + dbg_cnt_en.total = 1; + dbg_cnt_en.in_right_bypass = 1; + dbg_cnt_en.in_drop_bypass = 1; + dbg_cnt_en.in_drop_prbac = 1; + dbg_cnt_en.out_drop_prbac = 1; + dbg_cnt_en.out_right_prbac = 1; + nbl_hw_wr32(phy_mgt, NBL_DPRBAC_DBG_CNT_EN, dbg_cnt_en.data); +} + +static void nbl_phy_cfg_uipsec_nat(void *priv, u8 nat_flag, u16 dport) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_nat uprbac_nat = {.data = 0}; + + uprbac_nat.enable = nat_flag; + uprbac_nat.dport = dport; + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_NAT, uprbac_nat.data); +} + +static void nbl_phy_cfg_uipsec_sad_esn(void *priv, u32 index, u32 sn, + u32 esn, u8 overlap, u8 enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_bottom ipsec_esn = {0}; + + ipsec_esn.sn = sn; + ipsec_esn.esn = esn; + ipsec_esn.overlap = overlap; + ipsec_esn.enable = enable; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_BOTTOM(index), + (u8 *)&ipsec_esn, sizeof(ipsec_esn)); +} + +static void nbl_phy_cfg_uipsec_sad_lifetime(void *priv, u32 index, u32 lft_cnt, + u32 lft_diff, u8 limit_enable, u8 limit_type) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_lifetime lifetime = {0}; + + lifetime.cnt = lft_cnt; + lifetime.diff = lft_diff; + lifetime.enable = limit_enable; + lifetime.unit = limit_type; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_LIFETIME(index), + (u8 *)&lifetime, sizeof(lifetime)); +} + +static void nbl_phy_cfg_uipsec_sad_crypto(void *priv, u32 index, u32 *key, u32 salt, + u32 crypto_type, u8 tunnel_mode, u8 icv_len) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_crypto_info crypto_info; + + memset(&crypto_info, 0, sizeof(crypto_info)); + + memcpy(crypto_info.key, key, sizeof(crypto_info.key)); + crypto_info.salt = salt; + crypto_info.crypto_type = crypto_type; + crypto_info.tunnel_mode = tunnel_mode; + crypto_info.icv_len = icv_len; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_CRYPTO_INFO(index), + (u8 *)&crypto_info, sizeof(crypto_info)); +} + +static void nbl_phy_cfg_uipsec_sad_window(void *priv, u32 index, u8 window_en, u8 option) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_sad_slide_window slide_window; + + memset(&slide_window, 0, sizeof(slide_window)); + slide_window.enable = window_en; + slide_window.option = option; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_SAD_SLIDE_WINDOW(index), + (u8 *)&slide_window, sizeof(slide_window)); +} + +static void nbl_phy_cfg_uipsec_em_tcam(void *priv, u16 tcam_index, u32 *data) +{ + struct nbl_phy_mgt 
*phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_em_tcam em_tcam = {0}; + + em_tcam.key_dat0 = data[0]; + em_tcam.key_dat1 = data[1]; + em_tcam.key_dat2 = data[2] >> 16; + em_tcam.key_vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index + 1), + (u8 *)&em_tcam, sizeof(em_tcam)); + + em_tcam.key_dat0 = (data[2] << 16) + (data[3] >> 16); + em_tcam.key_dat1 = (data[3] << 16) + (data[4] >> 16); + em_tcam.key_dat2 = data[4]; + em_tcam.key_vld = 1; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index), + (u8 *)&em_tcam, sizeof(em_tcam)); +} + +static void nbl_phy_cfg_uipsec_em_ad(void *priv, u16 tcam_index, u32 index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_em_ad em_ad = {0}; + + em_ad.sad_index = index; + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_EM_AD(2 * tcam_index), em_ad.data); +} + +static void nbl_phy_clear_uipsec_tcam_ad(void *priv, u16 tcam_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_em_tcam em_tcam = {0}; + union nbl_uprbac_em_ad em_ad = {0}; + + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index + 1), + (u8 *)&em_tcam, sizeof(em_tcam)); + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_EM_TCAM(2 * tcam_index), + (u8 *)&em_tcam, sizeof(em_tcam)); + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_EM_AD(2 * tcam_index), em_ad.data); +} + +static void nbl_phy_cfg_uipsec_em_ht(void *priv, u32 index, u16 ht_table, u16 ht_index, + u16 ht_other_index, u16 ht_bucket) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_ht uprbac_ht; + + memset(&uprbac_ht, 0, sizeof(uprbac_ht)); + + nbl_hw_read_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16); + if (ht_bucket == 0) { + uprbac_ht.vld0 = 1; + uprbac_ht.ht_other_index0 = ht_other_index; + uprbac_ht.kt_index0 = index; + } + if (ht_bucket == 1) { + uprbac_ht.vld1 = 1; + uprbac_ht.ht_other_index1 = ht_other_index; + uprbac_ht.kt_index1 = index; + } + if (ht_bucket == 2) { + uprbac_ht.vld2 = 1; + uprbac_ht.ht_other_index2 = ht_other_index; + uprbac_ht.kt_index2 = index; + } + if (ht_bucket == 3) { + uprbac_ht.vld3 = 1; + uprbac_ht.ht_other_index3 = ht_other_index; + uprbac_ht.kt_index3 = index; + } + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16); +} + +static void nbl_phy_cfg_uipsec_em_kt(void *priv, u32 index, u32 *data) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_uprbac_kt uprbac_kt; + + memset(&uprbac_kt, 0, sizeof(uprbac_kt)); + memcpy(uprbac_kt.key, data, 20); + uprbac_kt.sad_index = index; + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_KT(index), (u8 *)&uprbac_kt, sizeof(uprbac_kt)); +} + +static void nbl_phy_clear_uipsec_ht_kt(void *priv, u32 index, u16 ht_table, + u16 ht_index, u16 ht_bucket) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_ht uprbac_ht; + struct nbl_uprbac_kt uprbac_kt; + + memset(&uprbac_ht, 0, sizeof(uprbac_ht)); + memset(&uprbac_kt, 0, sizeof(uprbac_kt)); + nbl_hw_read_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16); + if (ht_bucket == 0) { + uprbac_ht.vld0 = 0; + uprbac_ht.ht_other_index0 = 0; + uprbac_ht.kt_index0 = 0; + } + if (ht_bucket == 1) { + uprbac_ht.vld1 = 0; + uprbac_ht.ht_other_index1 = 0; + uprbac_ht.kt_index1 = 0; + } + if (ht_bucket == 2) { + uprbac_ht.vld2 = 0; + uprbac_ht.ht_other_index2 = 0; + uprbac_ht.kt_index2 = 0; + } + if (ht_bucket == 3) { + uprbac_ht.vld3 = 0; + uprbac_ht.ht_other_index3 = 0; + uprbac_ht.kt_index3 = 0; + } + 
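+	/*
+	 * Each HT line packs four {vld, ht_other_index, kt_index} buckets;
+	 * the two ht_table instances appear to form a cuckoo-style pair,
+	 * with ht_other_index pointing at the entry's alternate slot (an
+	 * inference from the field names).  Only the selected bucket was
+	 * zeroed above; the whole line is written back here and the
+	 * key-table entry cleared just after.
+	 */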
nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_HT(ht_table, ht_index), uprbac_ht.data, 16); + + nbl_hw_write_regs(phy_mgt, NBL_UPRBAC_KT(index), (u8 *)&uprbac_kt, sizeof(uprbac_kt)); +} + +static u32 nbl_phy_read_uipsec_status(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return nbl_hw_rd32(phy_mgt, NBL_UPRBAC_INT_STATUS); +} + +static u32 nbl_phy_reset_uipsec_status(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 uipsec_status; + + uipsec_status = nbl_hw_rd32(phy_mgt, NBL_UPRBAC_INT_STATUS); + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_INT_STATUS, uipsec_status); + + return uipsec_status; +} + +static void nbl_phy_cfg_uipsec_lft_info(void *priv, u32 index, u32 lifetime_diff, + u32 flag_wen, u32 msb_wen) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_ipsec_lifetime_diff modify_liftime; + + memset(&modify_liftime, 0, sizeof(modify_liftime)); + + modify_liftime.sad_index = index; + if (flag_wen) { + modify_liftime.lifetime_diff = lifetime_diff; + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_LIFETIME_DIFF, modify_liftime.data[1]); + modify_liftime.flag_wen = 1; + modify_liftime.flag_value = 1; + } + + if (msb_wen) { + modify_liftime.msb_wen = 1; + modify_liftime.msb_value = 1; + } + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_SAD_LIFEDIFF, modify_liftime.data[0]); +} + +static u32 nbl_phy_read_uipsec_lft_info(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return nbl_hw_rd32(phy_mgt, NBL_UPRBAC_LIFETIME_INFO); +} + +static void nbl_phy_init_uprbac(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_uprbac_enable uprbac_enable = {0}; + union nbl_uprbac_dbg_cnt_en dbg_cnt_en = {0}; + struct nbl_uprbac_em_profile em_profile = {0}; + + uprbac_enable.prbac = 1; + uprbac_enable.padding_check = 1; + uprbac_enable.pad_err = 1; + uprbac_enable.icv_err = 1; + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_ENABLE, uprbac_enable.data); + + dbg_cnt_en.drop_prbac = 1; + dbg_cnt_en.right_prbac = 1; + dbg_cnt_en.replay = 1; + dbg_cnt_en.right_misc = 1; + dbg_cnt_en.error_misc = 1; + dbg_cnt_en.xoff_drop = 1; + dbg_cnt_en.intf_cell = 1; + dbg_cnt_en.sad_miss = 1; + nbl_hw_wr32(phy_mgt, NBL_UPRBAC_DBG_CNT_EN, dbg_cnt_en.data); + + em_profile.vld = 1; + em_profile.hash_sel0 = 0; + em_profile.hash_sel1 = 3; + nbl_hw_write_regs(phy_mgt, LEONIS_UPRBAC_EM_PROFILE, + (u8 *)&em_profile, sizeof(em_profile)); +} + +static u32 nbl_phy_get_fw_ping(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 ping; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_HEARTBEAT_PING, (u8 *)&ping, sizeof(ping)); + + return ping; +} + +static void nbl_phy_set_fw_ping(void *priv, u32 ping) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_write_mbx_regs(phy_mgt, NBL_FW_HEARTBEAT_PING, (u8 *)&ping, sizeof(ping)); +} + +static u32 nbl_phy_get_fw_pong(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 pong; + + nbl_hw_read_regs(phy_mgt, NBL_FW_HEARTBEAT_PONG, (u8 *)&pong, sizeof(pong)); + + return pong; +} + +static void nbl_phy_set_fw_pong(void *priv, u32 pong) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_write_regs(phy_mgt, NBL_FW_HEARTBEAT_PONG, (u8 *)&pong, sizeof(pong)); +} + +static void nbl_phy_load_p4(void *priv, u32 addr, u32 size, u8 *data) +{ + nbl_hw_write_be_regs(priv, addr, data, size); +} + +static int nbl_phy_init_offload_fwd(void *priv, u16 vsi_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union 
epro_no_dport_redirect_u epro_no_dport = {.info = {0}}; + union nbl_action_data set_dport = {.data = 0}; + union epro_vpt_u vpt; + + memset(&vpt, 0, sizeof(vpt)); + + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_UPCALL; + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.port_id = vsi_id; + + epro_no_dport.info.dport = set_dport.data; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_NO_DPORT_REDIRECT_ADDR, + (u8 *)epro_no_dport.data, sizeof(epro_no_dport)); + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data, + NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG); + vpt.info.rss_alg_sel = NBL_SYM_TOEPLITZ_INT; + vpt.info.rss_key_type_btm = NBL_KEY_IP4_L4_RSS_BIT | NBL_KEY_IP6_L4_RSS_BIT; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data, + NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG); + return 0; +} + +static int nbl_phy_cmdq_init(void *priv, void *param, u16 func_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_chan_cmdq_init_info *cmdq_param = + (struct nbl_chan_cmdq_init_info *)param; + union pcompleter_host_cfg_function_id_cmdq_u cfg_func_id = { + .info.dbg = func_id, + .info.vld = 1, + }; + u32 value = 0; + + /* disable the queue; this resets the queue head to 0 */ + nbl_warn(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "CMDQ start init: size %u %llu %u\n", + cmdq_param->len, cmdq_param->pa, cmdq_param->bdf_num); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value); + + /* write registers */ + value = 0; + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_TAIL_ADDR, value); + value = cmdq_param->len; + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_SIZE_ADDR, value); + value = NBL_CMDQ_HI_DWORD(cmdq_param->pa); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_H_ADDR, value); + value = NBL_CMDQ_LO_DWORD(cmdq_param->pa); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_L_ADDR, value); + + nbl_hw_wr32(phy_mgt, NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CMDQ_ADDR, + *(u32 *)&cfg_func_id); + + /* enable the queue */ + value = 1; + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value); + /* write dif registers (mode and bdf) for receive queue */ + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_DIF_BDF_ADDR, + cmdq_param->bdf_num); + value = NBL_CMDQ_DIF_MODE_VALUE; + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_DIF_MODE_ADDR, value); + value = 0x1fffff; + nbl_hw_wr32(phy_mgt, NBL_CMDQ_FLOW_EN_ADDR, value); + return 0; +} + +static int nbl_phy_cmdq_destroy(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 value = 0; + + nbl_hw_wr32(phy_mgt, NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_CMDQ_ADDR, + value); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_SIZE_ADDR, value); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_H_ADDR, value); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_BADDR_L_ADDR, value); + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_DIF_INT_ADDR, value); + + return NBL_OK; +} + +static int nbl_phy_cmdq_reset(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u32 value = 0; + u32 delay_count = 0; + u32 r_head = 0; + u32 r_tail = 0; + + /* disable the command queue */ + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value); + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "CMDQ resetting now...\n"); + + /* wait until tail equals head, then reset tail */ + while (true) { + usleep_range(NBL_CMDQ_DELAY_200US, NBL_CMDQ_DELAY_300US); + r_head = nbl_hw_rd32(phy_mgt, NBL_CMDQ_HOST_CMDQ_CURR_ADDR); + r_tail = nbl_hw_rd32(phy_mgt,
NBL_CMDQ_HOST_CMDQ_TAIL_ADDR); + if (r_head == r_tail) + break; + + delay_count++; + if (delay_count >= NBL_CMDQ_RESET_MAX_WAIT) + return -EBADRQC; + } + + /* enable the queue, and resend the command */ + value = 0; + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_TAIL_ADDR, value); + value = 1; + nbl_hw_wr32(phy_mgt, NBL_CMDQ_HOST_CMDQ_EN_ADDR, value); + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "CMDQ finished resetting!\n"); + return 0; +} + +static void nbl_phy_update_cmdq_tail(void *priv, u32 doorbell) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_hw_wr32(phy_mgt, NBL_CMD_NOTIFY_ADDR, doorbell); +} + +static int nbl_acl_set_act_pri(struct nbl_phy_mgt *phy_mgt) +{ + union acl_action_priority0_u act0_pri = { + .info.action_id9_pri = 3, + }; + + union acl_action_priority4_u act4_pri = { + .info.action_id9_pri = 3, + }; + + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_PRIORITY0_ADDR, + (u8 *)act0_pri.data, sizeof(act0_pri)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_PRIORITY4_ADDR, + (u8 *)act4_pri.data, sizeof(act4_pri)); + return NBL_OK; +} + +static int nbl_acl_check_init(struct nbl_phy_mgt *phy_mgt) +{ + int ret = NBL_OK; + union acl_init_done_u acl_init; + + nbl_hw_read_regs(phy_mgt, NBL_ACL_INIT_DONE_ADDR, (u8 *)acl_init.data, + sizeof(acl_init)); + if (!acl_init.info.done) + ret = NBL_FAIL; + if (ret == NBL_OK) + nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "NBL ACL init start success"); + else + nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "NBL ACL init start fail"); + + return ret; +} + +static int nbl_acl_flow_stat_on(struct nbl_phy_mgt *phy_mgt) +{ + union acl_flow_id_stat_act_u flow_id_act = { + .info.flow_id_en = 1, + }; + + union acl_stat_id_act_u stat_id_act = { + .info.act_en = 1, + .info.act_id = NBL_ACT_SET_SPECIAL_FLOW_STAT, + }; + + nbl_hw_write_regs(phy_mgt, NBL_ACL_FLOW_ID_STAT_ACT_ADDR, + (u8 *)flow_id_act.data, sizeof(flow_id_act)); + + nbl_hw_write_regs(phy_mgt, NBL_ACL_STAT_ID_ACT_ADDR, + (u8 *)stat_id_act.data, sizeof(stat_id_act)); + return NBL_OK; +} + +static int nbl_acl_set_tcam_info_regs(struct nbl_phy_mgt *phy_mgt, + struct nbl_acl_cfg_param *acl_param) +{ + u8 *acl_key_cfg_ptr = (u8 *)(acl_param->tcam_cfg); + u8 *act_cfg_ptr = (u8 *)(acl_param->action_cfg); + + nbl_hw_write_regs(phy_mgt, + NBL_ACL_TCAM_CFG_REG(acl_param->acl_stage), + acl_key_cfg_ptr, sizeof(union acl_tcam_cfg_u)); + nbl_hw_write_regs(phy_mgt, + NBL_ACL_ACTION_RAM_CFG_REG(acl_param->acl_stage), + act_cfg_ptr, sizeof(union acl_action_ram_cfg_u)); + + return NBL_OK; +} + +static int nbl_acl_set_tcam_info(struct nbl_phy_mgt *phy_mgt, + struct nbl_acl_cfg_param *acl_param) +{ + int ret = 0; + + ret = nbl_acl_set_tcam_info_regs(phy_mgt, acl_param); + ret = nbl_acl_set_tcam_info_regs(phy_mgt, acl_param + 1); + return ret; +} + +static int nbl_acl_flow_stat_clear(struct nbl_phy_mgt *phy_mgt) +{ + union acl_flow_id_stat_glb_clr_u flow_stat_clear = { + .info.glb_clr = 1, + }; + union acl_stat_id_stat_glb_clr_u stat_stat_clear = { + .info.glb_clr = 1, + }; + union acl_flow_id_stat_done_u flow_done_info = {.info = {0}}; + u32 rd_retry = 0; + + nbl_hw_write_regs(phy_mgt, NBL_ACL_FLOW_ID_STAT_GLB_CLR_ADDR, + (u8 *)flow_stat_clear.data, sizeof(flow_stat_clear)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_STAT_ID_STAT_GLB_CLR_ADDR, + (u8 *)stat_stat_clear.data, sizeof(stat_stat_clear)); + while (1) { + nbl_hw_read_regs(phy_mgt, NBL_ACL_FLOW_ID_STAT_DONE_ADDR, + (u8 *)flow_done_info.data, + sizeof(flow_done_info)); + if 
(flow_done_info.info.glb_clr_done) + break; + if (rd_retry++ == NBL_ACL_RD_RETRY) { + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "NBL ACL flow stat clear timeout"); + return NBL_FAIL; + } + usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US); + } + + return NBL_OK; +} + +static int nbl_acl_flow_tcam_clear(struct nbl_phy_mgt *phy_mgt, u16 tcam_btm, + u16 tcam_start_idx, u16 tcam_end_idx) +{ + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + union acl_indirect_ctrl_u indirect_ctrl = { + .info.tcam_addr = 0, + .info.cpu_acl_cfg_start = 1, + .info.acc_btm = tcam_btm, + .info.cpu_acl_cfg_rw = NBL_ACL_CPU_WRITE, + }; + union acl_indirect_access_ack_u indirect_ack = {.info = {0}}; + /* set invalid in each tcam */ + union acl_valid_bit_u tcam_data_valid = {.info = {0}}; + int try_time = NBL_ACL_RD_RETRY; + + for (; tcam_start_idx < tcam_end_idx; ++tcam_start_idx) { + nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR, + (u8 *)tcam_data_valid.data, + sizeof(tcam_data_valid)); + indirect_ctrl.info.tcam_addr = tcam_start_idx; + nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR, + (u8 *)indirect_ctrl.data, + sizeof(indirect_ctrl)); + + while (try_time--) { + nbl_hw_read_regs(phy_mgt, + NBL_ACL_INDIRECT_ACCESS_ACK_ADDR, + (u8 *)indirect_ack.data, + sizeof(indirect_ack)); + if (indirect_ack.info.done) + break; + usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US); + } + + if (!indirect_ack.info.done) { + nbl_info(common, NBL_DEBUG_FLOW, "indirect access failed(%u-%u), done: %u, status: %08x.", + tcam_start_idx, try_time + 1, 0, indirect_ack.info.status); + return NBL_FAIL; + } + + indirect_ack.info.done = 0; + try_time = NBL_ACL_RD_RETRY; + } + nbl_debug(common, NBL_DEBUG_FLOW, "-----clear acl flow:idx(depth):%d(%d)-----\n", + tcam_start_idx, tcam_end_idx); + return NBL_OK; +} + +static int nbl_acl_init_regs(struct nbl_phy_mgt *phy_mgt, + struct nbl_chan_flow_init_info *param) +{ + /* set act priority */ + nbl_acl_set_act_pri(phy_mgt); + + /* read acl init done */ + if (nbl_acl_check_init(phy_mgt)) + return NBL_FAIL; + + /* set flow-stat enable */ + nbl_acl_flow_stat_on(phy_mgt); + + /* set tcam info */ + nbl_acl_set_tcam_info(phy_mgt, param->acl_cfg); + + /* clear flow stat */ + if (nbl_acl_flow_stat_clear(phy_mgt)) + return NBL_FAIL; + + /* clear key/mask/act tcam tab */ + if (nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_FLOW_BTM, 0, NBL_ACL_TCAM_DEPTH)) + return NBL_FAIL; + return NBL_OK; +} + +static int nbl_phy_init_acl_stats(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + /* init acl stat */ + nbl_acl_flow_stat_on(phy_mgt); + /* clear flow stat */ + if (nbl_acl_flow_stat_clear(phy_mgt)) + return NBL_FAIL; + nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, "flow stat init: finished"); + return 0; +} + +static int nbl_phy_acl_unset_upcall_rule(void *priv, u8 idx) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + /* each PF's upcall rules occupy tcam entries 2*idx and 2*idx + 1 (see nbl_phy_acl_set_upcall_rule), so clear exactly that pair */ + return nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_UPCALL_BTM, idx * 2, idx * 2 + 2); +} + +static void nbl_phy_acl_set_dport(int *action, u16 vsi_id) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_UPCALL; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_NONE; + set_dport.dport.up.port_id = vsi_id; + + *action = set_dport.data + (NBL_ACT_SET_DPORT << NBL_16BIT); +} + +static int nbl_phy_acl_set_upcall_rule(void *priv, u8 idx, u16 vsi_id) +{ + int tcam_entry = idx << 1; + int fwd_act =
0; + int rd_retry = NBL_ACL_RD_RETRY; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + union acl_action_ram15_u action_ram; + union acl_indirect_ctrl_u indirect_ctrl = { + .info.tcam_addr = tcam_entry, + .info.cpu_acl_cfg_start = 1, + .info.cpu_acl_cfg_rw = NBL_ACL_INDIRECT_ACCESS_WRITE, + .info.acc_btm = NBL_ACL_FLUSH_UPCALL_BTM, + }; + union acl_indirect_access_ack_u indirect_ack; + union acl_valid_bit_u tcam_data_valid = { + .info.valid_bit = NBL_ACL_FLUSH_UPCALL_BTM, + }; + union nbl_acl_tcam_upcall_data_u eth_data = { + .eth_pt_id = NBL_ACL_ETH_PF_UPCALL, + }; + union nbl_acl_tcam_upcall_data_u eth_mask; + union nbl_acl_tcam_upcall_data_u vsi_data = { + .vsi_pt_id = NBL_ACL_VSI_PF_UPCALL, + }; + union nbl_acl_tcam_upcall_data_u vsi_mask; + + memset(&action_ram, 0, sizeof(action_ram)); + memset(&indirect_ack, 0, sizeof(indirect_ack)); + nbl_info(common, NBL_DEBUG_FLOW, "-----set acl tcam_cfg and act_cfg:%d-----\n", idx); + /* mask all fields default */ + memset(ð_mask, 0xff, sizeof(eth_mask)); + eth_mask.eth_pt_id = 0; + eth_mask.eth_id = 0; + + memset(&vsi_mask, 0xff, sizeof(vsi_mask)); + vsi_mask.sw_id = 0; + vsi_mask.vsi_pt_id = 0; + /* eth acl rule */ + nbl_phy_acl_set_dport(&fwd_act, NBL_GET_PF_VSI_ID(idx)); + NBL_ACL_GET_ACTION_DATA(fwd_act, action_ram.info.action0); + indirect_ctrl.info.tcam_addr = tcam_entry; + nbl_info(common, NBL_DEBUG_FLOW, "---addr:%d, size:%lu---\n", + tcam_entry, sizeof(action_ram)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_TBL(NBL_ACL_TCAM_UPCALL_IDX, tcam_entry), + (u8 *)action_ram.data, sizeof(action_ram)); + + eth_data.eth_id = NBL_GET_PF_ETH_ID(idx); + nbl_info(common, NBL_DEBUG_FLOW, "-----key(mask): %d(%d), %d(%d)\n", + eth_data.eth_pt_id, eth_mask.eth_pt_id, eth_data.eth_id, eth_mask.eth_id); + nbl_tcam_truth_value_convert(ð_data.tcam_data, ð_mask.tcam_data); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_X(NBL_ACL_TCAM_UPCALL_IDX), + eth_data.data, sizeof(eth_data)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_Y(NBL_ACL_TCAM_UPCALL_IDX), + eth_mask.data, sizeof(eth_mask)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR, + (u8 *)&tcam_data_valid, sizeof(tcam_data_valid)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR, + (u8 *)&indirect_ctrl, sizeof(indirect_ctrl)); + do { + nbl_hw_read_regs(phy_mgt, NBL_ACL_INDIRECT_ACCESS_ACK_ADDR, + (u8 *)&indirect_ack, sizeof(indirect_ack)); + if (!indirect_ack.info.done) { + rd_retry--; + usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US); + } else { + break; + } + } while (rd_retry); + + if (!indirect_ack.info.done) { + nbl_err(common, NBL_DEBUG_FLOW, "acl init flows error in pf%d\n", idx); + return -EIO; + } + memset(indirect_ack.data, 0, sizeof(indirect_ack)); + + /* vsi acl rule */ + nbl_phy_acl_set_dport(&fwd_act, vsi_id); + NBL_ACL_GET_ACTION_DATA(fwd_act, action_ram.info.action0); + indirect_ctrl.info.tcam_addr = ++tcam_entry; + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_TBL(NBL_ACL_TCAM_UPCALL_IDX, tcam_entry), + (u8 *)&action_ram, sizeof(action_ram)); + + vsi_data.sw_id = idx; + nbl_info(common, NBL_DEBUG_FLOW, "-----key(mask):%d(%d), %d(%d)\n", + vsi_data.vsi_pt_id, vsi_mask.vsi_pt_id, vsi_data.sw_id, vsi_mask.sw_id); + nbl_tcam_truth_value_convert(&vsi_data.tcam_data, &vsi_mask.tcam_data); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_X(NBL_ACL_TCAM_UPCALL_IDX), + vsi_data.data, sizeof(vsi_data)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_Y(NBL_ACL_TCAM_UPCALL_IDX), + 
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR,
+			  (u8 *)&tcam_data_valid, sizeof(tcam_data_valid));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR,
+			  (u8 *)&indirect_ctrl, sizeof(indirect_ctrl));
+	/* the first poll may have consumed part of the retry budget; reset it */
+	rd_retry = NBL_ACL_RD_RETRY;
+	do {
+		nbl_hw_read_regs(phy_mgt, NBL_ACL_INDIRECT_ACCESS_ACK_ADDR,
+				 (u8 *)&indirect_ack, sizeof(indirect_ack));
+		if (!indirect_ack.info.done) {
+			rd_retry--;
+			usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US);
+		} else {
+			break;
+		}
+	} while (rd_retry);
+
+	if (!indirect_ack.info.done) {
+		nbl_err(common, NBL_DEBUG_FLOW, "acl init flows error in pf%d\n", idx);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void nbl_phy_uninit_acl(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union acl_tcam_cfg_u acl_key_cfg;
+	union acl_action_ram_cfg_u acl_act_cfg;
+	union acl_loop_back_en_u loop_en;
+
+	memset(&acl_key_cfg, 0, sizeof(acl_key_cfg));
+	memset(&acl_act_cfg, 0, sizeof(acl_act_cfg));
+	memset(&loop_en, 0, sizeof(loop_en));
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_LOOP_BACK_EN_ADDR, (u8 *)&loop_en,
+			  sizeof(loop_en));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_info(common, NBL_DEBUG_FLOW, "nbl uninit acl done\n");
+}
+
+static void nbl_phy_init_acl(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt);
+	union acl_tcam_cfg_u acl_key_cfg = {
+		.info.startcompare15 = 1,
+		.info.startset15 = 1,
+		.info.tcam15_enable = 1,
+		.info.key_id15 = 0,
+	};
+	union acl_action_ram_cfg_u acl_act_cfg = {
+		.info.action_ram15_enable = 1,
+		.info.action_ram15_alloc_id = NBL_ACL_TCAM_UPCALL_IDX,
+	};
+	union acl_loop_back_en_u loop_en = {
+		.info.loop_back_en = 1,
+	};
+
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_LOOP_BACK_EN_ADDR, (u8 *)&loop_en,
+			  sizeof(loop_en));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_VSI_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_key_cfg, sizeof(union acl_tcam_cfg_u));
+	nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_ACL_ETH_PF_UPCALL),
+			  (u8 *)&acl_act_cfg, sizeof(union acl_action_ram_cfg_u));
+	nbl_info(common, NBL_DEBUG_FLOW, "nbl init acl done\n");
+}
+
+static int nbl_ipro_init_regs(struct nbl_phy_mgt *phy_mgt)
+{
+	/* write error code for smac-spoof and vlan check */
+	union ipro_anti_fake_addr_errcode_u errcode_def = {
+		.info.num = NBL_ERROR_CODE_DN_SMAC,
+		.info.rsv = 0,
+	};
+	union ipro_anti_fake_addr_action_u default_drop = {
+		.info.dqueue = 0,
+		.info.dqueue_en = 0,
+		.info.proc_done = 1,
+		.info.set_dport_en = 1,
+		.info.set_dport = NBL_SET_DPORT(AUX_FWD_TYPE_UPCALL,
+						NEXT_STG_SEL_BYPASS,
+						SET_DPORT_TYPE_SP_PORT,
+						PORT_TYPE_SP_DROP),
+		.info.rsv = 0,
+	};
+
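+	/*
+	 * errcode_def is reused below with .info.num rewritten for the VLAN
+	 * check, and default_drop is a set-dport action that steers packets
+	 * failing the underlay filter, smac-spoof or VLAN checks to the
+	 * drop port.
+	 */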
nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: ipro errcode & actions"); + nbl_hw_write_regs(phy_mgt, NBL_IPRO_ANTI_FAKE_ADDR_ERRCODE_ADDR, + (u8 *)errcode_def.data, sizeof(errcode_def)); + errcode_def.info.num = NBL_ERROR_CODE_VLAN; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_VLAN_NUM_CHK_ERRCODE_ADDR, + (u8 *)errcode_def.data, sizeof(errcode_def)); + + /* default drop for underlay pkt flt, smac-spoof and vlan check */ + nbl_hw_write_regs(phy_mgt, NBL_IPRO_UDL_PKT_FLT_ACTION_ADDR, + (u8 *)default_drop.data, sizeof(default_drop)); + nbl_hw_write_regs(phy_mgt, NBL_IPRO_ANTI_FAKE_ADDR_ACTION_ADDR, + (u8 *)default_drop.data, sizeof(default_drop)); + nbl_hw_write_regs(phy_mgt, NBL_IPRO_VLAN_NUM_CHK_ACTION_ADDR, + (u8 *)default_drop.data, sizeof(default_drop)); + + return NBL_OK; +} + +static int nbl_pp_init_regs(struct nbl_phy_mgt *phy_mgt) +{ + u32 action_dport_pri = 0x3000; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: pp action priority"); + nbl_hw_write_regs(phy_mgt, NBL_PP0_ACTION_PRIORITY0_ADDR, + (u8 *)&action_dport_pri, sizeof(action_dport_pri)); + nbl_hw_write_regs(phy_mgt, NBL_PP0_ACTION_PRIORITY4_ADDR, + (u8 *)&action_dport_pri, sizeof(action_dport_pri)); + + nbl_hw_write_regs(phy_mgt, NBL_PP1_ACTION_PRIORITY0_ADDR, + (u8 *)&action_dport_pri, sizeof(action_dport_pri)); + nbl_hw_write_regs(phy_mgt, NBL_PP1_ACTION_PRIORITY4_ADDR, + (u8 *)&action_dport_pri, sizeof(action_dport_pri)); + + nbl_hw_write_regs(phy_mgt, NBL_PP2_ACTION_PRIORITY0_ADDR, + (u8 *)&action_dport_pri, sizeof(action_dport_pri)); + nbl_hw_write_regs(phy_mgt, NBL_PP2_ACTION_PRIORITY4_ADDR, + (u8 *)&action_dport_pri, sizeof(action_dport_pri)); + return NBL_OK; +} + +static void nbl_fem_profile_table_action_set(struct nbl_phy_mgt *phy_mgt, u32 pp_id, + u32 pt_idx, u16 vsi_id, bool is_set_upcall) +{ + union fem_em0_profile_table_u em_pt_tbl; + union fem_em0_profile_table_u em_pt_tbl_tmp; + union nbl_action_data set_dport = {.data = 0}; + + memset(&em_pt_tbl, 0, sizeof(em_pt_tbl)); + memset(&em_pt_tbl_tmp, 0, sizeof(em_pt_tbl_tmp)); + if (is_set_upcall) { + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_ACL_S0; + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_UPCALL; + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.port_id = vsi_id; + em_pt_tbl_tmp.info.action0 = set_dport.data + + (NBL_ACT_SET_DPORT << NBL_ACT_DATA_BITS); + } + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: pt upcall: %u %u %u", pp_id, pt_idx, vsi_id); + /* read profile table configured with P4 ELF, set the upcall action */ + switch (pp_id) { + case NBL_PP_TYPE_0: + nbl_hw_read_regs(phy_mgt, NBL_FEM_EM0_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0; + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM0_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + break; + case NBL_PP_TYPE_1: + nbl_hw_read_regs(phy_mgt, NBL_FEM_EM1_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0; + nbl_hw_write_regs(phy_mgt, NBL_FEM_EM1_PROFILE_TABLE_REG(pt_idx), + (u8 *)em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + NBL_BYTES_IN_REG); + break; + case NBL_PP_TYPE_2: + nbl_hw_read_regs(phy_mgt, NBL_FEM_EM2_PROFILE_TABLE_REG(pt_idx), + (u8 *)&em_pt_tbl.data, + NBL_FEM_EM0_PROFILE_TABLE_DWLEN * + 
NBL_BYTES_IN_REG);
+		em_pt_tbl.info.action0 = em_pt_tbl_tmp.info.action0;
+		nbl_hw_write_regs(phy_mgt, NBL_FEM_EM2_PROFILE_TABLE_REG(pt_idx),
+				  (u8 *)em_pt_tbl.data,
+				  NBL_FEM_EM0_PROFILE_TABLE_DWLEN *
+				  NBL_BYTES_IN_REG);
+		break;
+	default:
+		nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			  "wrong pp id for this profile");
+	}
+}
+
+static int nbl_fem_init_regs(struct nbl_phy_mgt *phy_mgt,
+			     struct nbl_chan_flow_init_info *param)
+{
+	u8 i = 0;
+	u32 bank_sel = 0;
+	struct nbl_flow_prf_data *prf_data;
+	union fem_ht_bank_sel_btm_u ht_bank_sel = {.info = {0}};
+
+	/* HT bank sel */
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: fem bank selection");
+	bank_sel = HT_PORT0_BANK_SEL | HT_PORT1_BANK_SEL << NBL_8BIT |
+		   HT_PORT2_BANK_SEL << NBL_16BIT;
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_BANK_SEL_BITMAP,
+			  (u8 *)&bank_sel, sizeof(bank_sel));
+
+	/* KT bank sel */
+	bank_sel = KT_PORT0_BANK_SEL | KT_PORT1_BANK_SEL << NBL_8BIT |
+		   KT_PORT2_BANK_SEL << NBL_16BIT;
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_KT_BANK_SEL_BITMAP,
+			  (u8 *)&bank_sel, sizeof(bank_sel));
+
+	/* AT bank sel */
+	bank_sel = AT_PORT0_BANK_SEL | AT_PORT1_BANK_SEL << NBL_16BIT;
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP,
+			  (u8 *)&bank_sel, sizeof(bank_sel));
+	bank_sel = AT_PORT2_BANK_SEL;
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_AT_BANK_SEL_BITMAP2,
+			  (u8 *)&bank_sel, sizeof(bank_sel));
+
+	ht_bank_sel.info.port0_ht_depth = HT_PORT0_BTM;
+	ht_bank_sel.info.port1_ht_depth = HT_PORT1_BTM;
+	ht_bank_sel.info.port2_ht_depth = HT_PORT2_BTM;
+	nbl_hw_write_regs(phy_mgt, NBL_FEM_HT_BANK_SEL_BTM_ADDR,
+			  (u8 *)ht_bank_sel.data, sizeof(ht_bank_sel));
+
+	for (i = 0; i < param->flow_cfg.item_cnt; i++) {
+		prf_data = &param->flow_cfg.prf_data[i];
+		nbl_fem_profile_table_action_set(phy_mgt, prf_data->pp_id,
+						 prf_data->prf_id, param->vsi_id, true);
+	}
+
+	return NBL_OK;
+}
+
+static int nbl_mcc_init_regs(struct nbl_phy_mgt *phy_mgt)
+{
+	union mcc_action_priority_u act_pri = {
+		.info.dport_act_pri = 3,
+		.info.statidx_act_pri = 3,
+		.info.dqueue_act_pri = 3,
+	};
+
+	nbl_hw_write_regs(phy_mgt, NBL_MCC_ACTION_PRIORITY_ADDR,
+			  (u8 *)act_pri.data, sizeof(act_pri));
+	return NBL_OK;
+}
+
+static void nbl_ped_vlan_type_init(struct nbl_phy_mgt *phy_mgt)
+{
+	union dped_vlan_type0_u vlan_type = {.info = {0}};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "flow init: vlan type init");
+	vlan_type.info.vau = RTE_ETHER_TYPE_VLAN;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE0_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE0_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	vlan_type.info.vau = RTE_ETHER_TYPE_QINQ;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE1_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE1_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	vlan_type.info.vau = RTE_ETHER_TYPE_QINQ1;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE2_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE2_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	vlan_type.info.vau = RTE_ETHER_TYPE_QINQ2;
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_VLAN_TYPE3_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_VLAN_TYPE3_ADDR,
+			  (u8 *)vlan_type.data, sizeof(vlan_type));
+}
+
+static void nbl_ped_csum_cmd_init(struct nbl_phy_mgt *phy_mgt)
+{
+	union uped_l4_ck_cmd_50_u l4_ck_cmd_50 = {.info = {0}};
+	union
uped_l4_ck_cmd_51_u l4_ck_cmd_51 = {.info = {0}}; + union uped_l4_ck_cmd_60_u l4_ck_cmd_60 = {.info = {0}}; + union uped_l4_ck_cmd_61_u l4_ck_cmd_61 = {.info = {0}}; + + l4_ck_cmd_50.info.len_in_oft = 0x2; + l4_ck_cmd_50.info.len_phid = 0x2; + l4_ck_cmd_50.info.data_vld = 0x1; + l4_ck_cmd_50.info.in_oft = 0x2; + l4_ck_cmd_50.info.phid = 0x3; + l4_ck_cmd_50.info.en = 0x1; + + l4_ck_cmd_51.info.ck_start0 = 0xc; + l4_ck_cmd_51.info.ck_phid0 = 0x2; + l4_ck_cmd_51.info.ck_len0 = 0x8; + l4_ck_cmd_51.info.ck_phid1 = 0x3; + l4_ck_cmd_51.info.ck_vld1 = 0x1; + + l4_ck_cmd_60.info.value = 0x62; + l4_ck_cmd_60.info.len_in_oft = 0x4; + l4_ck_cmd_60.info.len_phid = 0x2; + l4_ck_cmd_60.info.len_vld = 0x1; + l4_ck_cmd_60.info.data_vld = 0x1; + l4_ck_cmd_60.info.in_oft = 0x2; + l4_ck_cmd_60.info.phid = 0x3; + l4_ck_cmd_60.info.en = 0x1; + + l4_ck_cmd_61.info.ck_start0 = 0x8; + l4_ck_cmd_61.info.ck_phid0 = 0x2; + l4_ck_cmd_61.info.ck_len0 = 0x20; + l4_ck_cmd_61.info.ck_vld0 = 0x1; + l4_ck_cmd_61.info.ck_phid1 = 0x3; + l4_ck_cmd_61.info.ck_vld1 = 0x1; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: ped checksum commands"); + nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_50_ADDR, + (u8 *)l4_ck_cmd_50.data, sizeof(l4_ck_cmd_50)); + nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_51_ADDR, + (u8 *)l4_ck_cmd_51.data, sizeof(l4_ck_cmd_51)); + nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_60_ADDR, + (u8 *)l4_ck_cmd_60.data, sizeof(l4_ck_cmd_60)); + nbl_hw_write_regs(phy_mgt, NBL_UPED_L4_CK_CMD_61_ADDR, + (u8 *)l4_ck_cmd_61.data, sizeof(l4_ck_cmd_61)); + nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_50_ADDR, + (u8 *)l4_ck_cmd_50.data, sizeof(l4_ck_cmd_50)); + nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_51_ADDR, + (u8 *)l4_ck_cmd_51.data, sizeof(l4_ck_cmd_51)); + nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_60_ADDR, + (u8 *)l4_ck_cmd_60.data, sizeof(l4_ck_cmd_60)); + nbl_hw_write_regs(phy_mgt, NBL_DPED_L4_CK_CMD_61_ADDR, + (u8 *)l4_ck_cmd_61.data, sizeof(l4_ck_cmd_61)); +} + +static int nbl_ped_init_regs(struct nbl_phy_mgt *phy_mgt) +{ + nbl_ped_vlan_type_init(phy_mgt); + nbl_ped_csum_cmd_init(phy_mgt); + return NBL_OK; +} + +static void nbl_flow_clear_tcam_ad(struct nbl_phy_mgt *phy_mgt) +{ + union fem_em0_tcam_table_u tcam_table; + union fem_em0_ad_table_u ad_table; + u8 *tcam_ptr = (u8 *)tcam_table.data; + u8 *ad_ptr = (u8 *)ad_table.data; + u16 i = 0; + + memset(&tcam_table, 0, sizeof(tcam_table)); + memset(&ad_table, 0, sizeof(ad_table)); + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: to clear flow pp tcam"); + for (; i < NBL_FEM_TCAM_MAX_NUM; i++) { + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM0_TCAM_TABLE_REG(i), + tcam_ptr, sizeof(tcam_table)); + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM0_AD_TABLE_REG(i), + ad_ptr, sizeof(ad_table)); + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM1_TCAM_TABLE_REG(i), + tcam_ptr, sizeof(tcam_table)); + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM1_AD_TABLE_REG(i), + ad_ptr, sizeof(ad_table)); + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM2_TCAM_TABLE_REG(i), + tcam_ptr, sizeof(tcam_table)); + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM2_AD_TABLE_REG(i), + ad_ptr, sizeof(ad_table)); + nbl_hw_rd32(phy_mgt, NBL_FEM_EM2_AD_TABLE_REG(i)); + } +} + +static union __maybe_unused epro_aft_u aft_def[NBL_FWD_TYPE_MAX] = { + [NBL_FWD_TYPE_NORMAL] = { + .data = BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_TAB_INDEX) | + BIT(NBL_ACT_SET_MIRROR), + }, + [NBL_FWD_TYPE_CPU_ASSIGNED] = { + .data = BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_TAB_INDEX) | + 
BIT(NBL_ACT_SET_MIRROR), + }, + [NBL_FWD_TYPE_UPCALL] = { + .data = BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_TAB_INDEX) | + BIT(NBL_ACT_SET_MIRROR) | BIT(NBL_ACT_SET_VNI0) | + BIT(NBL_ACT_SET_VNI1) | BIT(NBL_ACT_REP_IPV4_SIP) | + BIT(NBL_ACT_REP_IPV4_DIP) | BIT(NBL_ACT_REP_IPV6_SIP) | + BIT(NBL_ACT_REP_IPV6_DIP) | BIT(NBL_ACT_REP_DPORT) | + BIT(NBL_ACT_REP_SPORT) | BIT(NBL_ACT_REP_DMAC) | + BIT(NBL_ACT_REP_SMAC) | BIT(NBL_ACT_REP_IPV4_DSCP) | + BIT(NBL_ACT_REP_IPV6_DSCP) | BIT(NBL_ACT_REP_IPV4_TTL) | + BIT(NBL_ACT_REP_IPV6_TTL) | BIT(NBL_ACT_DEL_SVLAN) | + BIT(NBL_ACT_DEL_CVLAN) | BIT(NBL_ACT_REP_SVLAN) | + BIT(NBL_ACT_REP_CVLAN) | BIT(NBL_ACT_ADD_CVLAN) | + BIT(NBL_ACT_ADD_SVLAN) | BIT(NBL_ACT_TNL_ENCAP) | + BIT(NBL_ACT_TNL_DECAP) | BIT(NBL_ACT_REP_OUTER_SPORT) | + BIT(NBL_ACT_SET_PRI_MDF0), + }, + [NBL_FWD_TYPE_SRC_MIRROR] = { + .data = BIT(NBL_ACT_SET_FLOW_STAT0) | BIT(NBL_ACT_SET_FLOW_STAT1) | + BIT(NBL_ACT_SET_RSS) | BIT(NBL_ACT_SET_TAB_INDEX) | + BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_VNI0) | + BIT(NBL_ACT_SET_VNI1) | BIT(NBL_ACT_SET_PRBAC) | + BIT(NBL_ACT_SET_DP_HASH0) | BIT(NBL_ACT_SET_DP_HASH1) | + BIT(NBL_ACT_SET_PRI_MDF0) | BIT(NBL_ACT_SET_FLOW_CAR) | + ((u64)0xffffffff << 32), + }, + [NBL_FWD_TYPE_OTHER_MIRROR] = { + .data = BIT(NBL_ACT_SET_FLOW_STAT0) | BIT(NBL_ACT_SET_FLOW_STAT1) | + BIT(NBL_ACT_SET_RSS) | BIT(NBL_ACT_SET_TAB_INDEX) | + BIT(NBL_ACT_SET_MCC) | BIT(NBL_ACT_SET_VNI0) | + BIT(NBL_ACT_SET_VNI1) | BIT(NBL_ACT_SET_PRBAC) | + BIT(NBL_ACT_SET_DP_HASH0) | BIT(NBL_ACT_SET_DP_HASH1) | + BIT(NBL_ACT_SET_PRI_MDF0), + }, + [NBL_FWD_TYPE_MNG] = {.data = 0,}, + [NBL_FWD_TYPE_GLB_LB] = {.data = 0,}, + [NBL_FWD_TYPE_DROP] = {.data = 0,}, +}; + +static void nbl_epro_act_pri_cfg(struct nbl_phy_mgt *phy_mgt) +{ + union epro_action_priority_u act_pri = { + .info.mirroridx = EPRO_ACT_MIRRORIDX_PRI, + .info.car = EPRO_ACT_CARIDX_PRI, + .info.dqueue = EPRO_ACT_DQUEUE_PRI, + .info.dport = EPRO_ACT_DPORT_PRI, + .info.pop_8021q = EPRO_ACT_POP_IVLAN_PRI, + .info.pop_qinq = EPRO_ACT_POP_OVLAN_PRI, + .info.replace_inner_vlan = EPRO_ACT_REPLACE_IVLAN_PRI, + .info.replace_outer_vlan = EPRO_ACT_REPLACE_OVLAN_PRI, + .info.push_inner_vlan = EPRO_ACT_PUSH_IVLAN_PRI, + .info.push_outer_vlan = EPRO_ACT_PUSH_OVLAN_PRI, + .info.outer_sport_mdf = EPRO_ACT_OUTER_SPORT_MDF_PRI, + .info.pri_mdf = EPRO_ACT_PRI_MDF_PRI, + .info.dp_hash0 = EPRO_ACT_DP_HASH0_PRI, + .info.dp_hash1 = EPRO_ACT_DP_HASH1_PRI, + .info.rsv = 0, + }; + union epro_mirror_action_priority_u mir_act_pri = { + .info.car = EPRO_MIRROR_ACT_CARIDX_PRI, + .info.dqueue = EPRO_MIRROR_ACT_DQUEUE_PRI, + .info.dport = EPRO_MIRROR_ACT_DPORT_PRI, + .info.rsv = 0, + }; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: epro action priority"); + nbl_hw_write_regs(phy_mgt, NBL_EPRO_ACTION_PRIORITY_ADDR, + (u8 *)act_pri.data, sizeof(act_pri)); + + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MIRROR_ACTION_PRIORITY_ADDR, + (u8 *)mir_act_pri.data, sizeof(mir_act_pri)); +} + +static void nbl_epro_act_sel_en_cfg(struct nbl_phy_mgt *phy_mgt) +{ + union epro_act_sel_en_u act_sel_en = { + .info.rssidx_en = 1, + .info.dport_en = 1, + .info.mirroridx_en = 1, + .info.dqueue_en = 1, + .info.encap_en = 1, + .info.pop_8021q_en = 1, + .info.pop_qinq_en = 1, + .info.push_cvlan_en = 1, + .info.push_svlan_en = 1, + .info.replace_cvlan_en = 1, + .info.replace_svlan_en = 1, + .info.rsv = 0, + }; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: epro action enable"); + nbl_hw_write_regs(phy_mgt, NBL_EPRO_ACT_SEL_EN_ADDR, + (u8 
*)act_sel_en.data, sizeof(act_sel_en)); +} + +static void nbl_epro_act_cfg_init(struct nbl_phy_mgt *phy_mgt) +{ + union epro_am_act_id0_u am_act_id0 = {.info = {0}}; + union epro_am_act_id1_u am_act_id1 = {.info = {0}}; + union epro_am_act_id2_u am_act_id2 = {.info = {0}}; + union epro_am_act_id3_u am_act_id3 = {.info = {0}}; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: epro action id"); + am_act_id0.info.replace_cvlan = NBL_ACT_REP_CVLAN; + am_act_id0.info.replace_svlan = NBL_ACT_REP_SVLAN; + am_act_id0.info.push_cvlan = NBL_ACT_ADD_CVLAN; + am_act_id0.info.push_svlan = NBL_ACT_ADD_SVLAN; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID0_ADDR, + (u8 *)am_act_id0.data, sizeof(am_act_id0)); + am_act_id1.info.pop_qinq = NBL_ACT_DEL_CVLAN; + am_act_id1.info.pop_8021q = NBL_ACT_DEL_SVLAN; + am_act_id1.info.dport = NBL_ACT_SET_DPORT; + am_act_id1.info.dqueue = NBL_ACT_SET_QUE_IDX; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID1_ADDR, + (u8 *)am_act_id1.data, sizeof(am_act_id1)); + am_act_id2.info.rssidx = NBL_ACT_SET_RSS; + am_act_id2.info.mirroridx = NBL_ACT_SET_MIRROR; + am_act_id2.info.car = NBL_ACT_SET_CAR; + am_act_id2.info.encap = NBL_ACT_TNL_ENCAP; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID2_ADDR, + (u8 *)am_act_id2.data, sizeof(am_act_id2)); + am_act_id3.info.outer_sport_mdf = NBL_ACT_REP_OUTER_SPORT; + am_act_id3.info.pri_mdf = NBL_ACT_SET_PRI_MDF0; + am_act_id3.info.dp_hash0 = NBL_ACT_SET_DP_HASH0; + am_act_id3.info.dp_hash1 = NBL_ACT_SET_DP_HASH1; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_AM_ACT_ID3_ADDR, + (u8 *)am_act_id3.data, sizeof(am_act_id3)); + + nbl_epro_act_pri_cfg(phy_mgt); + nbl_epro_act_sel_en_cfg(phy_mgt); +} + +static int nbl_epro_init_regs(struct nbl_phy_mgt *phy_mgt) +{ + u32 fwd_type = 0; + union epro_rss_sk_u rss_sk_def; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: epro rss"); + /* init default rss toeplitz hash key */ + rss_sk_def.info.sk_arr[0] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[1] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[2] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[3] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[4] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[5] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[6] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[7] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[8] = NBL_EPRO_RSS_KEY_32; + rss_sk_def.info.sk_arr[9] = NBL_EPRO_RSS_KEY_32; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_RSS_SK_ADDR, (u8 *)rss_sk_def.data, + sizeof(rss_sk_def)); + + nbl_epro_act_cfg_init(phy_mgt); + + for (fwd_type = 0; fwd_type < NBL_FWD_TYPE_MAX; fwd_type++) + nbl_hw_write_regs(phy_mgt, NBL_EPRO_AFT_REG(fwd_type), + (u8 *)&aft_def[fwd_type].data, sizeof(union epro_aft_u)); + + return NBL_OK; +} + +static int nbl_phy_flow_init(void *priv, void *param) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_chan_flow_init_info *info = + (struct nbl_chan_flow_init_info *)param; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: start"); + nbl_hw_wr32(phy_mgt, NBL_FEM_INIT_START_ADDR, NBL_FEM_INIT_START_VALUE); + nbl_flow_clear_tcam_ad(phy_mgt); + nbl_ipro_init_regs(phy_mgt); + nbl_pp_init_regs(phy_mgt); + nbl_fem_init_regs(phy_mgt, info); + nbl_mcc_init_regs(phy_mgt); + nbl_acl_init_regs(phy_mgt, info); + nbl_epro_init_regs(phy_mgt); + nbl_ped_init_regs(phy_mgt); + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow init: finished"); + + return NBL_OK; +} + +static void 
nbl_phy_clear_profile_table_action(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + u8 i = 0; + u8 pp_id = 0; + u8 prf_id = 0; + + for (i = NBL_PP1_PROFILE_ID_MIN; i <= NBL_PP2_PROFILE_ID_MAX; i++) { + pp_id = i / NBL_PP_PROFILE_NUM; + prf_id = i % NBL_PP_PROFILE_NUM; + nbl_fem_profile_table_action_set(phy_mgt, pp_id, + prf_id, 0, false); + } +} + +static int nbl_phy_flow_deinit(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow deinit: start"); + + nbl_phy_clear_profile_table_action(phy_mgt); + // clear FEM & ACL tcams + nbl_flow_clear_tcam_ad(phy_mgt); + nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_FLOW_BTM, 0, NBL_ACL_TCAM_DEPTH); + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "flow deinit: finished"); + return NBL_OK; +} + +static int nbl_phy_flow_get_acl_switch(void *priv, u8 *acl_enable) +{ + union acl_init_done_u init_done; + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + init_done.data[0] = nbl_hw_rd32(phy_mgt, NBL_ACL_INIT_DONE_ADDR); + *acl_enable = init_done.info.done; + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "acl switch: %u", *acl_enable); + return 0; +} + +static void nbl_phy_get_line_rate_info(void *priv, void *data, void *result) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_rep_line_rate_info *req = (struct nbl_rep_line_rate_info *)data; + struct nbl_rep_line_rate_info *resp = (struct nbl_rep_line_rate_info *)result; + u16 table_id = req->func_id; + union epro_vpt_u *vpt = (union epro_vpt_u *)resp->data; + + struct dsch_vn_sha2net_map_tbl *sha2net = + (struct dsch_vn_sha2net_map_tbl *)(resp->data + NBL_EPRO_VPT_DWLEN); + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(req->vsi_id), + (u8 *)vpt->data, + NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG); + nbl_hw_read_regs(phy_mgt, NBL_DSCH_VN_SHA2NET_MAP_TBL_REG(table_id), + (u8 *)sha2net, + NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN * NBL_BYTES_IN_REG); +} + +static void nbl_and_parsed_reg(u32 *ptr, u32 *value, u32 reg_len) +{ + u32 idx = 0; + + for (idx = 0; idx < reg_len; idx++) { + *value = (*value) & (*ptr); + value++; + ptr++; + } +} + +static void nbl_or_parsed_reg(u32 *ptr, u32 *value, u32 reg_len) +{ + u32 idx = 0; + + for (idx = 0; idx < reg_len; idx++) { + *value = (*value) | (*ptr); + value++; + ptr++; + } +} + +static void nbl_write_parsed_reg(struct nbl_phy_mgt *phy_mgt, + struct nbl_chan_regs_info *reg_info, u32 *value) +{ + u32 *ptr = (u32 *)reg_info->data; + u32 reg_len = reg_info->data_len; + + if (reg_info->mode == NBL_FLOW_READ_OR_WRITE_MODE) { + nbl_or_parsed_reg(ptr, value, reg_len); + } else if (reg_info->mode == NBL_FLOW_READ_AND_WRITE_MODE) { + nbl_and_parsed_reg(ptr, value, reg_len); + } else if (reg_info->mode == NBL_FLOW_READ_OR_AND_WRITE_MODE) { + reg_len = reg_len / 2; + nbl_or_parsed_reg(ptr, value, reg_len); + nbl_and_parsed_reg(ptr + reg_len, value, reg_len); + } else { + // point the value to mailbox received data + value = reg_info->data; + } + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "send regs to write(%u): size %u, depth %u, data %u", + reg_info->tbl_name, reg_len, reg_info->depth, reg_info->data[0]); + + switch (reg_info->tbl_name) { + case NBL_FLOW_EPRO_ECPVPT_REG: + nbl_hw_write_regs(phy_mgt, NBL_EPRO_ECPVPT_REG(reg_info->depth), + (u8 *)value, NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EPRO_ECPIPT_REG: + nbl_hw_write_regs(phy_mgt, NBL_EPRO_ECPIPT_REG(reg_info->depth), + (u8 
*)value, NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DPED_TAB_TNL_REG: + nbl_hw_write_regs(phy_mgt, + NBL_DPED_TAB_TNL_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DPED_REPLACE: + nbl_hw_write_regs(phy_mgt, + NBL_DPED_TAB_REPLACE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UPED_REPLACE: + nbl_hw_write_regs(phy_mgt, + NBL_UPED_TAB_REPLACE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DPED_MIRROR_TABLE: + nbl_hw_write_regs(phy_mgt, + NBL_DPED_TAB_MIR_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DPED_MIR_CMD_0_TABLE: + nbl_hw_write_regs(phy_mgt, + NBL_DPED_MIR_CMD_0_TABLE(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EPRO_MT_REG: + nbl_hw_write_regs(phy_mgt, NBL_EPRO_MT_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EM0_TCAM_TABLE_REG: + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM0_TCAM_TABLE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EM1_TCAM_TABLE_REG: + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM1_TCAM_TABLE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EM2_TCAM_TABLE_REG: + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM2_TCAM_TABLE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EM0_AD_TABLE_REG: + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM0_AD_TABLE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EM1_AD_TABLE_REG: + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM1_AD_TABLE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EM2_AD_TABLE_REG: + nbl_hw_write_regs(phy_mgt, + NBL_FEM_EM2_AD_TABLE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_IPRO_UDL_PKT_FLT_DMAC_REG: + nbl_hw_write_regs(phy_mgt, + NBL_IPRO_UDL_PKT_FLT_DMAC_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_IPRO_UDL_PKT_FLT_CTRL_REG: + nbl_hw_write_regs(phy_mgt, + NBL_IPRO_UDL_PKT_FLT_CTRL_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_ACTION_RAM_TBL: + nbl_hw_write_regs(phy_mgt, + NBL_ACL_ACTION_RAM_TBL(reg_info->ram_id, + reg_info->s_depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_MCC_TBL_REG: + nbl_hw_write_regs(phy_mgt, NBL_MCC_TBL_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EPRO_EPT_REG: + nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_IPRO_UP_SRC_PORT_TBL_REG: + nbl_hw_write_regs(phy_mgt, + NBL_IPRO_UP_SRC_PORT_TBL_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UCAR_FLOW_REG: + nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EPRO_VPT_REG: + nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UCAR_FLOW_TIMMING_ADD_ADDR: + nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_TIMMING_ADD_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_SHAPING_GRP_TIMMING_ADD_ADDR: + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_GRP_TIMMING_ADD_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case 
NBL_FLOW_SHAPING_GRP_REG: + nbl_hw_write_regs(phy_mgt, + NBL_SHAPING_GRP_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DSCH_VN_SHA2GRP_MAP_TBL_REG: + nbl_hw_write_regs(phy_mgt, + NBL_DSCH_VN_SHA2GRP_MAP_TBL_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DSCH_VN_GRP2SHA_MAP_TBL_REG: + nbl_hw_write_regs(phy_mgt, + NBL_DSCH_VN_GRP2SHA_MAP_TBL_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_SHAPING_DPORT_TIMMING_ADD_ADDR: + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_TIMMING_ADD_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_SHAPING_DPORT_REG: + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DSCH_PSHA_EN_ADDR: + nbl_hw_write_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UCAR_FLOW_4K_REG: + nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_4K_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UCAR_FLOW_4K_TIMMING_ADD_ADDR: + nbl_hw_write_regs(phy_mgt, NBL_UCAR_FLOW_4K_TIMMING_ADD_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_SHAPING_NET_TIMMING_ADD_ADDR: + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_NET_TIMMING_ADD_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_SHAPING_NET_REG: + case NBL_FLOW_DSCH_VN_NET2SHA_MAP_TBL_REG: + case NBL_FLOW_DSCH_VN_SHA2NET_MAP_TBL_REG: + nbl_phy_set_offload_shaping(phy_mgt, reg_info, value); + break; + case NBL_FLOW_UCAR_CAR_CTRL_ADDR: + nbl_hw_write_regs(phy_mgt, NBL_UCAR_CAR_CTRL_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UPED_VSI_TYPE_REG: + nbl_hw_write_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DPED_VSI_TYPE_REG: + nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + default: + nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "send regs: unrecognized register(%u) to write, will not handle", + reg_info->tbl_name); + break; + } +} + +static void nbl_read_parsed_reg(struct nbl_phy_mgt *phy_mgt, + struct nbl_chan_regs_info *reg_info, u32 *value) +{ + u32 reg_len = reg_info->data_len; + + if (reg_info->mode == NBL_FLOW_READ_OR_AND_WRITE_MODE) + reg_len = reg_len / 2; + + if (reg_len > NBL_CHAN_REG_MAX_LEN) { + nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "send regs: read length longer than data allocated"); + return; + } + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "send regs to read(%u): size %u, depth %u, data %u", + reg_info->tbl_name, reg_len, reg_info->depth, reg_info->data[0]); + + switch (reg_info->tbl_name) { + case NBL_FLOW_EPRO_ECPVPT_REG: + nbl_hw_read_regs(phy_mgt, NBL_EPRO_ECPVPT_REG(reg_info->depth), + (u8 *)value, NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EPRO_ECPIPT_REG: + nbl_hw_read_regs(phy_mgt, NBL_EPRO_ECPIPT_REG(reg_info->depth), + (u8 *)value, NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EPRO_EPT_REG: + nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_IPRO_UP_SRC_PORT_TBL_REG: + nbl_hw_read_regs(phy_mgt, + NBL_IPRO_UP_SRC_PORT_TBL_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_EPRO_VPT_REG: + 
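+		/*
+		 * The read path mirrors the write path above: reg_info->depth
+		 * selects the table entry and reg_info->data_len gives the
+		 * entry width in 32-bit words, so the cases differ only in
+		 * the register base macro.
+		 */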
nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_DSCH_PSHA_EN_ADDR: + nbl_hw_read_regs(phy_mgt, NBL_DSCH_PSHA_EN_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UCAR_CAR_CTRL_ADDR: + nbl_hw_read_regs(phy_mgt, NBL_UCAR_CAR_CTRL_ADDR, + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UCAR_GREEN_CELL_ADDR: + nbl_hw_read_regs(phy_mgt, + (NBL_UCAR_GREEN_CELL_ADDR + reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + case NBL_FLOW_UCAR_GREEN_PKT_ADDR: + nbl_hw_read_regs(phy_mgt, + (NBL_UCAR_GREEN_PKT_ADDR + reg_info->depth), + (u8 *)value, reg_len * NBL_BYTES_IN_REG); + break; + default: + nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "send regs: unrecognized register(%u) to read, will not handle", + reg_info->tbl_name); + break; + } +} + +static int nbl_phy_offload_flow_rule(void *priv, void *param) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_chan_bulk_regs_info *hdr_info = + (struct nbl_chan_bulk_regs_info *)param; + struct nbl_chan_regs_info *reg_info = + (struct nbl_chan_regs_info *)(hdr_info + 1); + u8 regs_count = hdr_info->item_cnt; + u32 value[NBL_CHAN_REG_MAX_LEN] = { 0 }; + u8 i; + + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "send regs: flow regs received: to parse and read/write: " + "regs info: count %u, total size %u, " + "1st reg: table %u, mode %u, size %u, depth %u, data %u", + hdr_info->item_cnt, hdr_info->data_len, + reg_info->tbl_name, reg_info->mode, reg_info->data_len, + reg_info->depth, reg_info->data[0]); + + if (reg_info->data_len == 0 || regs_count == 0) { + nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "send regs: reg count or data length invalid"); + return -1; + } + + for (i = 0; i < regs_count; i++) { + if (reg_info->mode == NBL_FLOW_READ_MODE) { + nbl_read_parsed_reg(phy_mgt, reg_info, value); + } else if (reg_info->mode == NBL_FLOW_WRITE_MODE) { + nbl_write_parsed_reg(phy_mgt, reg_info, value); + } else if (reg_info->mode == NBL_FLOW_READ_OR_WRITE_MODE || + reg_info->mode == NBL_FLOW_READ_AND_WRITE_MODE || + reg_info->mode == NBL_FLOW_READ_OR_AND_WRITE_MODE) { + nbl_read_parsed_reg(phy_mgt, reg_info, value); + nbl_write_parsed_reg(phy_mgt, reg_info, value); + } else { + nbl_err(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "failed parsing reg info: unrecognized mode: " + "tab %u, mode %u, size %u, ", reg_info->tbl_name, + reg_info->mode, reg_info->data_len); + } + + reg_info = (struct nbl_chan_regs_info *) + (reg_info + reg_info->data_len + 1); + } + + return NBL_OK; +} + +static void +nbl_repr_eth_dev_ipro_dn_init(struct nbl_phy_mgt *phy_mgt, u16 vsi_id) +{ + union ipro_dn_src_port_tbl_u dn_src_port = {.info = {0}}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)dn_src_port.data, + NBL_IPRO_DN_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG); + dn_src_port.info.phy_flow = 0; + dn_src_port.info.set_dport_en = 0; + nbl_hw_write_regs(phy_mgt, NBL_IPRO_DN_SRC_PORT_TBL_REG(vsi_id), + (u8 *)dn_src_port.data, + NBL_IPRO_DN_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG); + nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW, + "init rep: ipro dn src written"); +} + +static void +nbl_repr_eth_dev_ipro_up_src_init(struct nbl_phy_mgt *phy_mgt, u16 eth_id) +{ + union ipro_up_src_port_tbl_u up_src_port = {.info = {0}}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SRC_PORT_TBL_REG(eth_id), + (u8 *)up_src_port.data, + 
NBL_IPRO_UP_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG);
+	up_src_port.info.phy_flow = 0;
+	up_src_port.info.set_dport_en = 0;
+	nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SRC_PORT_TBL_REG(eth_id),
+			  (u8 *)up_src_port.data,
+			  NBL_IPRO_UP_SRC_PORT_TBL_DWLEN * NBL_BYTES_IN_REG);
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "init rep: ipro up src written");
+}
+
+static void
+nbl_ped_port_vlan_type_cfg(struct nbl_phy_mgt *phy_mgt, u32 port_id,
+			   enum nbl_ped_vlan_type_e type,
+			   enum nbl_ped_vlan_tpid_e tpid)
+{
+	union nbl_ped_port_vlan_type_u cfg = {.info = {0}};
+
+	if (port_id >= NBL_DPED_VLAN_TYPE_PORT_NUM || tpid >= PED_VLAN_TYPE_NUM) {
+		nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			  "port_id %u exceeds the max num %u, or tpid %u is invalid.",
+			  port_id, NBL_DPED_VLAN_TYPE_PORT_NUM, tpid);
+		return;
+	}
+
+	nbl_hw_read_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(port_id),
+			 (u8 *)cfg.data, NBL_BYTES_IN_REG);
+	switch (type) {
+	case INNER_VLAN_TYPE:
+		cfg.info.i_vlan_sel = tpid & 0b11;
+		break;
+	case OUTER_VLAN_TYPE:
+		cfg.info.o_vlan_sel = tpid & 0b11;
+		break;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(port_id),
+			  (u8 *)cfg.data, NBL_BYTES_IN_REG);
+
+	nbl_hw_read_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(port_id),
+			 (u8 *)cfg.data, NBL_BYTES_IN_REG);
+	switch (type) {
+	case INNER_VLAN_TYPE:
+		cfg.info.i_vlan_sel = tpid & 0b11;
+		break;
+	case OUTER_VLAN_TYPE:
+		cfg.info.o_vlan_sel = tpid & 0b11;
+		break;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(port_id),
+			  (u8 *)cfg.data, NBL_BYTES_IN_REG);
+}
+
+static int nbl_phy_init_rep(void *priv, u16 vsi_id, u8 inner_type,
+			    u8 outer_type, u8 rep_type)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union epro_vpt_u vpt = {.info = {0}};
+
+	nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+		  "init rep: vsi id %u, rep type %u", vsi_id, rep_type);
+	if (rep_type == NBL_ETHDEV_PF_REP ||
+	    rep_type == NBL_ETHDEV_VIRTIO_REP) {
+		nbl_repr_eth_dev_ipro_dn_init(phy_mgt, vsi_id);
+		/* configure vlan tpid type for vsi */
+		nbl_ped_port_vlan_type_cfg(phy_mgt, vsi_id, INNER_VLAN_TYPE,
+					   inner_type);
+		nbl_ped_port_vlan_type_cfg(phy_mgt, vsi_id, OUTER_VLAN_TYPE,
+					   outer_type);
+	} else if (rep_type == NBL_ETHDEV_ETH_REP) {
+		vsi_id = vsi_id - NBL_ETH_REP_INFO_BASE;
+		nbl_repr_eth_dev_ipro_up_src_init(phy_mgt, vsi_id);
+		/* configure vlan tpid type for eth */
+		nbl_ped_port_vlan_type_cfg(phy_mgt,
+					   (vsi_id + NBL_PED_VSI_TYPE_ETH_BASE),
+					   INNER_VLAN_TYPE, inner_type);
+		nbl_ped_port_vlan_type_cfg(phy_mgt,
+					   (vsi_id + NBL_PED_VSI_TYPE_ETH_BASE),
+					   OUTER_VLAN_TYPE, outer_type);
+	}
+
+	/* init rss l4 */
+	if (rep_type == NBL_ETHDEV_PF_REP || rep_type == NBL_ETHDEV_VIRTIO_REP) {
+		nbl_hw_read_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data,
+				 NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG);
+		vpt.info.rss_alg_sel = NBL_SYM_TOEPLITZ_INT;
+		vpt.info.rss_key_type_btm = NBL_KEY_IP4_L4_RSS_BIT | NBL_KEY_IP6_L4_RSS_BIT;
+		nbl_hw_write_regs(phy_mgt, NBL_EPRO_VPT_REG(vsi_id), (u8 *)vpt.data,
+				  NBL_EPRO_VPT_DWLEN * NBL_BYTES_IN_REG);
+		nbl_debug(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_FLOW,
+			  "init rep: epro rss written");
+	}
+
+	return NBL_OK;
+}
+
+static int nbl_phy_init_vdpaq(void *priv, u16 func_id, u16 bdf, u64 pa, u32 size)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	union pcompleter_host_cfg_function_id_vdpa_net_u cfg_func_id = {
+		.info.dbg = func_id,
+		.info.vld = 1,
+	};
+
+	/* disable vdpa queue */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_EN_ADDR, 0);
+
+	/* cfg vdpa queue base */
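+	/* the 64-bit ring base is split across the low/high 32-bit
+	 * registers below; the size - 1 mask write presumes a
+	 * power-of-two ring size
+	 */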
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_L_ADDR, (u32)pa);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_H_ADDR, (u32)(pa >> 32));
+
+	/* cfg vdpa queue size */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_SIZE_MASK_ADDR, size - 1);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_TPNTR_ADDR, size);
+
+	/* reset vdpa queue head */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_HPNTR_RST_ADDR, 1);
+
+	/* cfg vdpa queue bdf */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_DIF_BDF_ADDR, bdf);
+
+	nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_HOST_CFG_FUNCTION_ID_VDPA_NET_ADDR,
+			  (u8 *)&cfg_func_id, sizeof(cfg_func_id));
+
+	/* all registers set, enable vdpa queue again */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_EN_ADDR, 1);
+
+	return 0;
+}
+
+static void nbl_phy_destroy_vdpaq(void *priv)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_SIZE_MASK_ADDR, 0);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_L_ADDR, 0);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_BASE_ADDR_H_ADDR, 0);
+
+	/* reset the head */
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_RING_HPNTR_RST_ADDR, 1);
+	nbl_hw_wr32(phy_mgt, NBL_VDPA_EN_ADDR, 0);
+}
+
+static const u32 nbl_phy_reg_dump_list[] = {
+	NBL_TOP_CTRL_VERSION_INFO,
+	NBL_TOP_CTRL_VERSION_DATE,
+};
+
+static void nbl_phy_get_reg_dump(void *priv, u32 *data, u32 len)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nbl_phy_reg_dump_list) && i < len; i++)
+		nbl_hw_read_regs(phy_mgt, nbl_phy_reg_dump_list[i],
+				 (u8 *)&data[i], sizeof(data[i]));
+}
+
+static int nbl_phy_get_reg_dump_len(void *priv)
+{
+	return ARRAY_SIZE(nbl_phy_reg_dump_list) * sizeof(u32);
+}
+
+/* the return value needs to be converted to millidegrees Celsius (1/1000) */
+static u32 nbl_phy_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 sensor_id)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	u32 temp = 0;
+
+	switch (type) {
+	case NBL_HWMON_TEMP_INPUT:
+		temp = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_TVSENSOR0);
+		temp = (temp & 0x1ff) * 1000;
+		break;
+	case NBL_HWMON_TEMP_MAX:
+		temp = NBL_LEONIS_TEMP_MAX * 1000;
+		break;
+	case NBL_HWMON_TEMP_CRIT:
+		temp = NBL_LEONIS_TEMP_CRIT * 1000;
+		break;
+	case NBL_HWMON_TEMP_HIGHEST:
+		temp = nbl_hw_rd32(phy_mgt, NBL_TOP_CTRL_TVSENSOR0);
+		temp = (temp >> 16) * 1000;
+		break;
+	default:
+		break;
+	}
+	return temp;
+}
+
+static int nbl_phy_process_abnormal_queue(struct nbl_phy_mgt *phy_mgt, u16 queue_id, int type,
+					  struct nbl_abnormal_details *detail)
+{
+	struct nbl_ipro_queue_tbl ipro_queue_tbl = {0};
+	struct nbl_host_vnet_qinfo host_vnet_qinfo = {0};
+	u32 qinfo_id = type == NBL_ABNORMAL_EVENT_DVN ?
+		       NBL_PAIR_ID_GET_TX(queue_id) :
+		       NBL_PAIR_ID_GET_RX(queue_id);
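+	/* a DVN (tx-side) event maps to the TX half of the queue pair,
+	 * anything else to the RX half
+	 */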
+
+	if (type >= NBL_ABNORMAL_EVENT_MAX)
+		return -EINVAL;
+
+	nbl_hw_read_regs(phy_mgt, NBL_IPRO_QUEUE_TBL(queue_id),
+			 (u8 *)&ipro_queue_tbl, sizeof(ipro_queue_tbl));
+
+	detail->abnormal = true;
+	detail->qid = queue_id;
+	detail->vsi_id = ipro_queue_tbl.vsi_id;
+
+	nbl_hw_read_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id),
+			 (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo));
+	host_vnet_qinfo.valid = 1;
+	nbl_hw_write_regs(phy_mgt, NBL_PADPT_HOST_VNET_QINFO_REG_ARR(qinfo_id),
+			  (u8 *)&host_vnet_qinfo, sizeof(host_vnet_qinfo));
+
+	return 0;
+}
+
+static int nbl_phy_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnormal_info)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct device *dev = NBL_PHY_MGT_TO_DEV(phy_mgt);
+	struct dvn_desc_dif_err_info desc_dif_err_info = {0};
+	struct dvn_pkt_dif_err_info pkt_dif_err_info = {0};
+	struct dvn_err_queue_id_get err_queue_id_get = {0};
+	struct uvn_queue_err_info queue_err_info = {0};
+	struct nbl_abnormal_details *detail;
+	u32 int_status = 0, rdma_other_abn = 0, tlp_out_drop_cnt = 0;
+	u32 desc_dif_err_cnt = 0, pkt_dif_err_cnt = 0;
+	u32 queue_err_cnt;
+	int ret = 0;
+
+	nbl_hw_read_regs(phy_mgt, NBL_DVN_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "dvn int_status:0x%x", int_status);
+
+	if (int_status && int_status != U32_MAX) {
+		if (int_status & BIT(NBL_DVN_INT_DESC_DIF_ERR)) {
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_CNT,
+					 (u8 *)&desc_dif_err_cnt, sizeof(u32));
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_DESC_DIF_ERR_INFO,
+					 (u8 *)&desc_dif_err_info,
+					 sizeof(struct dvn_desc_dif_err_info));
+			dev_info(dev, "dvn int_status:0x%x, desc_dif_mf_cnt:%d, queue_id:%d\n",
+				 int_status, desc_dif_err_cnt, desc_dif_err_info.queue_id);
+			detail = &abnormal_info->details[NBL_ABNORMAL_EVENT_DVN];
+			nbl_phy_process_abnormal_queue(phy_mgt, desc_dif_err_info.queue_id,
+						       NBL_ABNORMAL_EVENT_DVN, detail);
+
+			ret |= BIT(NBL_ABNORMAL_EVENT_DVN);
+		}
+
+		if (int_status & BIT(NBL_DVN_INT_PKT_DIF_ERR)) {
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_CNT,
+					 (u8 *)&pkt_dif_err_cnt, sizeof(u32));
+			nbl_hw_read_regs(phy_mgt, NBL_DVN_PKT_DIF_ERR_INFO,
+					 (u8 *)&pkt_dif_err_info,
+					 sizeof(struct dvn_pkt_dif_err_info));
+			dev_info(dev, "dvn int_status:0x%x, pkt_dif_mf_cnt:%d, queue_id:%d\n",
+				 int_status, pkt_dif_err_cnt, pkt_dif_err_info.queue_id);
+		}
+
+		/* clear dvn abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_DVN_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+
+		/* enable new queue error irq */
+		err_queue_id_get.desc_flag = 1;
+		err_queue_id_get.pkt_flag = 1;
+		nbl_hw_write_regs(phy_mgt, NBL_DVN_ERR_QUEUE_ID_GET,
+				  (u8 *)&err_queue_id_get, sizeof(err_queue_id_get));
+	}
+
+	int_status = 0;
+	nbl_hw_read_regs(phy_mgt, NBL_UVN_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "uvn int_status:0x%x", int_status);
+	if (int_status && int_status != U32_MAX) {
+		nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_CNT,
+				 (u8 *)&queue_err_cnt, sizeof(u32));
+		nbl_hw_read_regs(phy_mgt, NBL_UVN_QUEUE_ERR_INFO,
+				 (u8 *)&queue_err_info, sizeof(struct uvn_queue_err_info));
+		dev_info(dev, "uvn int_status:%x queue_err_cnt: 0x%x qid 0x%x\n",
+			 int_status, queue_err_cnt, queue_err_info.queue_id);
+
+		if (int_status & BIT(NBL_UVN_INT_QUEUE_ERR)) {
+			detail = &abnormal_info->details[NBL_ABNORMAL_EVENT_UVN];
+			nbl_phy_process_abnormal_queue(phy_mgt, queue_err_info.queue_id,
+						       NBL_ABNORMAL_EVENT_UVN, detail);
+
+			ret |= BIT(NBL_ABNORMAL_EVENT_UVN);
+		}
+
+		/* clear uvn abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_UVN_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+	}
+
+	int_status = 0;
+	nbl_hw_read_regs(phy_mgt, NBL_DSCH_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	nbl_hw_read_regs(phy_mgt, NBL_DSCH_RDMA_OTHER_ABN, (u8 *)&rdma_other_abn, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "dsch int_status:0x%x", int_status);
+	if (int_status && int_status != U32_MAX &&
+	    (int_status != NBL_DSCH_RDMA_OTHER_ABN_BIT ||
+	     rdma_other_abn != NBL_DSCH_RDMA_DPQM_DB_LOST)) {
+		dev_info(dev, "dsch int_status:%x\n", int_status);
+
+		/* clear dsch abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_DSCH_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+	}
+
+	int_status = 0;
+	nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS, (u8 *)&int_status, sizeof(u32));
+	if (int_status == U32_MAX)
+		dev_info(dev, "pcompleter int_status:0x%x", int_status);
+	if (int_status && int_status != U32_MAX) {
+		nbl_hw_read_regs(phy_mgt, NBL_PCOMPLETER_TLP_OUT_DROP_CNT,
+				 (u8 *)&tlp_out_drop_cnt, sizeof(u32));
+		dev_info(dev, "pcompleter int_status:0x%x tlp_out_drop_cnt 0x%x\n",
+			 int_status, tlp_out_drop_cnt);
+
+		/* clear pcompleter abnormal irq */
+		nbl_hw_write_regs(phy_mgt, NBL_PCOMPLETER_INT_STATUS,
+				  (u8 *)&int_status, sizeof(int_status));
+	}
+
+	return ret;
+}
+
+static u32 nbl_phy_get_uvn_desc_entry_stats(void *priv)
+{
+	return nbl_hw_rd32(priv, NBL_UVN_DESC_RD_ENTRY);
+}
+
+static void nbl_phy_set_uvn_desc_wr_timeout(void *priv, u16 timeout)
+{
+	struct uvn_desc_wr_timeout wr_timeout = {0};
+
+	wr_timeout.num = timeout;
+	nbl_hw_write_regs(priv, NBL_UVN_DESC_WR_TIMEOUT, (u8 *)&wr_timeout, sizeof(wr_timeout));
+}
+
+static int nbl_phy_cfg_lag_algorithm(void *priv, u16 eth_id, u16 lag_id,
+				     enum netdev_lag_hash hash_type)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_epro_ept_tbl ept_tbl = {0};
+	u8 hw_hash_type = NBL_EPRO_LAG_ALG_L2_HASH;
+
+	switch (hash_type) {
+	case NETDEV_LAG_HASH_L23:
+	case NETDEV_LAG_HASH_E23:
+		hw_hash_type = NBL_EPRO_LAG_ALG_L23_HASH;
+		break;
+	case NETDEV_LAG_HASH_L34:
+	case NETDEV_LAG_HASH_E34:
+		hw_hash_type = NBL_EPRO_LAG_ALG_LINUX_L34_HASH;
+		break;
+	default:
+		break;
+	}
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			 (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+	ept_tbl.lag_alg_sel = hw_hash_type;
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			  (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+
+	nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY,
+		 "Nbl phy set lag hash type %d", hw_hash_type);
+	return 0;
+}
+
+static int nbl_phy_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+	struct nbl_epro_ept_tbl ept_tbl = {0};
+
+	nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(param->lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			 (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+	if (param->lag_num) {
+		ept_tbl.fwd = 1;
+		ept_tbl.vld = 1;
+	} else {
+		ept_tbl.fwd = 0;
+		ept_tbl.vld = 0;
+	}
+	nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(param->lag_id + NBL_EPRO_EPT_LAG_OFFSET),
+			  (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl));
+
+	nbl_info(NBL_PHY_MGT_TO_COMMON(phy_mgt), NBL_DEBUG_PHY,
+		 "Nbl phy set port lag member list done, lag_id:%d, port0:%d, port1:%d\n",
+		 param->lag_id, param->port_list[0], param->port_list[1]);
+
+	return 0;
+}
+
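+/*
+ * Illustrative usage (not from the original patch): enabling forwarding for
+ * eth 1 as a member of lag 0 sets bit 1 of the EPT entry's lag_port_btm and
+ * points the eth's up-sport entry at that lag:
+ *
+ *	nbl_phy_cfg_lag_member_fwd(phy_mgt, 1, 0, 1);
+ *
+ * fwd = 0 clears the bitmap bit and drops the lag binding again.
+ */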
+static int nbl_phy_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_epro_ept_tbl ept_tbl = {0}; + struct nbl_ipro_upsport_tbl upsport = {0}; + u8 lag_btm = 0, lag_btm_new = 0; + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET), + (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl)); + lag_btm = ept_tbl.lag_port_btm; + lag_btm_new = fwd ? lag_btm | (1 << eth_id) : lag_btm & ~(1 << eth_id); + + nbl_hw_read_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET), + (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl)); + ept_tbl.lag_port_btm = lag_btm_new; + nbl_hw_write_regs(phy_mgt, NBL_EPRO_EPT_TABLE(lag_id + NBL_EPRO_EPT_LAG_OFFSET), + (u8 *)&ept_tbl, sizeof(struct nbl_epro_ept_tbl)); + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), (u8 *)&upsport, sizeof(upsport)); + + upsport.lag_id = fwd ? lag_id : 0; + upsport.lag_vld = fwd; + + nbl_hw_write_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), + (u8 *)&upsport, sizeof(upsport)); + + return 0; +} + +static bool nbl_phy_get_lag_fwd(void *priv, u16 eth_id) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_ipro_upsport_tbl upsport = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_IPRO_UP_SPORT_TABLE(eth_id), (u8 *)&upsport, sizeof(upsport)); + return upsport.lag_vld; +} + +static int nbl_phy_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable) +{ + return 0; +} + +static int nbl_phy_cfg_lag_mcc(void *priv, u16 mcc_id, u16 action) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_mcc_tbl node = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); + + node.dport_act = action; + + nbl_hw_write_regs(phy_mgt, NBL_MCC_LEAF_NODE_TABLE(mcc_id), (u8 *)&node, sizeof(node)); + + return 0; +} + +static void nbl_phy_get_board_info(void *priv, struct nbl_board_port_info *board_info) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}}; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3)); + board_info->eth_num = dw3.info.port_num; + board_info->eth_speed = dw3.info.port_speed; + board_info->p4_version = dw3.info.p4_version; +} + +static u32 nbl_phy_get_fw_eth_num(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fw_board_cfg_dw3 dw3 = {.info = {0}}; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW3_OFFSET, (u8 *)&dw3, sizeof(dw3)); + return dw3.info.port_num; +} + +static u32 nbl_phy_get_fw_eth_map(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union nbl_fw_board_cfg_dw6 dw6 = {.info = {0}}; + + nbl_hw_read_mbx_regs(phy_mgt, NBL_FW_BOARD_DW6_OFFSET, (u8 *)&dw6, sizeof(dw6)); + return dw6.info.eth_bitmap; +} + +static int nbl_phy_cfg_bond_shaping(void *priv, u8 eth_id, u8 speed, bool enable) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_shaping_dport dport = {0}; + struct nbl_shaping_dvn_dport dvn_dport = {0}; + struct nbl_shaping_rdma_dport rdma_dport = {0}; + u32 rate, dvn_rate, rdma_rate; + + if (!enable) { + nbl_shaping_eth_init(phy_mgt, eth_id, speed); + return 0; + } + + if (speed == NBL_FW_PORT_SPEED_100G) { + rate = NBL_SHAPING_DPORT_100G_RATE * 2; + dvn_rate = NBL_SHAPING_DPORT_HALF_100G_RATE; + rdma_rate = NBL_SHAPING_DPORT_100G_RATE; + } else { + rate = NBL_SHAPING_DPORT_25G_RATE * 2; + dvn_rate = 
NBL_SHAPING_DPORT_HALF_25G_RATE; + rdma_rate = NBL_SHAPING_DPORT_25G_RATE; + } + + dport.cir = rate; + dport.pir = rate; + dport.depth = max(dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + dport.cbs = dport.depth; + dport.pbs = dport.depth; + dport.valid = 1; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DPORT_REG(eth_id), (u8 *)&dport, sizeof(dport)); + + dvn_dport.cir = dvn_rate; + dvn_dport.pir = dvn_rate; + dvn_dport.depth = max(dvn_dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + dvn_dport.cbs = dvn_dport.depth; + dvn_dport.pbs = dvn_dport.depth; + dvn_dport.valid = 1; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_DVN_DPORT_REG(eth_id), + (u8 *)&dvn_dport, sizeof(dvn_dport)); + + rdma_dport.cir = rdma_rate; + rdma_dport.pir = rdma_rate; + rdma_dport.depth = max(rdma_dport.cir * 2, NBL_LR_LEONIS_NET_BUCKET_DEPTH); + rdma_dport.cbs = rdma_dport.depth; + rdma_dport.pbs = rdma_dport.depth; + rdma_dport.valid = 1; + nbl_hw_write_regs(phy_mgt, NBL_SHAPING_RDMA_DPORT_REG(eth_id), + (u8 *)&rdma_dport, sizeof(rdma_dport)); + return 0; +} + +static void nbl_phy_cfg_dvn_bp_mask(struct dvn_back_pressure_mask *mask, u8 eth_id, bool enable) +{ + switch (eth_id) { + case 0: + mask->dstore_port0_flag = enable; + break; + case 1: + mask->dstore_port1_flag = enable; + break; + case 2: + mask->dstore_port2_flag = enable; + break; + case 3: + mask->dstore_port3_flag = enable; + break; + default: + return; + } +} + +static void nbl_phy_set_bond_fc_th(struct nbl_phy_mgt *phy_mgt, + u8 main_eth_id, u8 other_eth_id, u8 speed) +{ + struct dstore_d_dport_fc_th fc_th = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + if (speed == NBL_FW_PORT_SPEED_100G) { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G_BOND_MAIN; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G_BOND_MAIN; + } else { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_BOND_MAIN; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_BOND_MAIN; + } + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + if (speed == NBL_FW_PORT_SPEED_100G) { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G_BOND_OTHER; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G_BOND_OTHER; + } else { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_BOND_OTHER; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_BOND_OTHER; + } + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); +} + +static void nbl_phy_remove_bond_fc_th(struct nbl_phy_mgt *phy_mgt, + u8 main_eth_id, u8 other_eth_id, u8 speed) +{ + struct dstore_d_dport_fc_th fc_th = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + if (speed == NBL_FW_PORT_SPEED_100G) { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G; + } else { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH; + } + nbl_hw_write_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(main_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + + nbl_hw_read_regs(phy_mgt, NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); + if (speed == NBL_FW_PORT_SPEED_100G) { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH_100G; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH_100G; + } else { + fc_th.xoff_th = NBL_DSTORE_DROP_XOFF_TH; + fc_th.xon_th = NBL_DSTORE_DROP_XON_TH; + } + nbl_hw_write_regs(phy_mgt, 
NBL_DSTORE_D_DPORT_FC_TH_REG(other_eth_id), + (u8 *)&fc_th, sizeof(fc_th)); +} + +static void nbl_phy_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, u8 other_eth_id, + bool enable, u8 speed) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct dvn_back_pressure_mask mask = {0}; + + nbl_hw_read_regs(phy_mgt, NBL_DVN_BACK_PRESSURE_MASK, (u8 *)&mask, sizeof(mask)); + nbl_phy_cfg_dvn_bp_mask(&mask, main_eth_id, enable); + nbl_phy_cfg_dvn_bp_mask(&mask, other_eth_id, enable); + nbl_hw_write_regs(phy_mgt, NBL_DVN_BACK_PRESSURE_MASK, (u8 *)&mask, sizeof(mask)); + + if (enable) + nbl_phy_set_bond_fc_th(phy_mgt, main_eth_id, other_eth_id, speed); + else + nbl_phy_remove_bond_fc_th(phy_mgt, main_eth_id, other_eth_id, speed); +} + +static void nbl_phy_set_tc_kgen_cvlan_zero(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union pp1_kgen_key_prf_u kgen_key_prf = {.info = {0}}; + + nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); + kgen_key_prf.info.ext16_2_src = 0x19; + nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); + + nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); + kgen_key_prf.info.ext16_2_src = 0x19; + nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); +} + +static void nbl_phy_unset_tc_kgen_cvlan(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union pp1_kgen_key_prf_u kgen_key_prf = {.info = {0}}; + + nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); + kgen_key_prf.info.ext16_2_src = 0x99; + nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(2), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); + + nbl_hw_read_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); + kgen_key_prf.info.ext16_2_src = 0x99; + nbl_hw_write_regs(phy_mgt, NBL_PP1_KGEN_KEY_PRF_REG(3), (u8 *)&kgen_key_prf, + sizeof(kgen_key_prf)); +} + +static void nbl_phy_set_ped_tab_vsi_type(void *priv, u32 port_id, u16 eth_proto) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union dped_tab_vsi_type_u dped_vsi_type = {.info = {0}}; + union uped_tab_vsi_type_u uped_vsi_type = {.info = {0}}; + + dped_vsi_type.info.sel = eth_proto; + nbl_hw_write_regs(phy_mgt, NBL_DPED_TAB_VSI_TYPE_REG(port_id), (u8 *)&dped_vsi_type, + sizeof(dped_vsi_type)); + + uped_vsi_type.info.sel = eth_proto; + nbl_hw_write_regs(phy_mgt, NBL_UPED_TAB_VSI_TYPE_REG(port_id), (u8 *)&uped_vsi_type, + sizeof(uped_vsi_type)); +} + +static void nbl_phy_clear_acl(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_acl_flow_tcam_clear(phy_mgt, NBL_ACL_FLUSH_FLOW_BTM, 0, NBL_ACL_TCAM_DEPTH); +} + +static int nbl_phy_clr_fd_udf_l2(struct nbl_phy_mgt *phy_mgt) +{ + union upa_ext_conf_table_u clear = {{0}}; + u8 index[] = {0, 1, 2, 3, 4, 5}; + u8 entry[] = {2, 3, 12}; + u8 i = 0; + u8 j = 0; + + for (i = 0; i < ARRAY_SIZE(index); i++) + for (j = 0; j < ARRAY_SIZE(entry); j++) + nbl_hw_write_regs(phy_mgt, + NBL_UPA_EXT_CONF_TABLE_REG(16 * index[i] + entry[j]), + (u8 *)&clear, sizeof(clear)); + + return 0; +} + +static int nbl_phy_clr_fd_udf_l3(struct nbl_phy_mgt *phy_mgt) +{ + union upa_ext_conf_table_u clear = {{0}}; + u8 index0[] = {8, 10}; + u8 entry0[] = {9, 10, 13}; + u8 index1[] = {9, 11, 12}; + u8 entry1[] = {9, 10, 11}; + u8 i = 0; + u8 j = 0; + + for (i = 
0; i < ARRAY_SIZE(index0); i++) + for (j = 0; j < ARRAY_SIZE(entry0); j++) + nbl_hw_write_regs(phy_mgt, + NBL_UPA_EXT_CONF_TABLE_REG(16 * index0[i] + entry0[j]), + (u8 *)&clear, sizeof(clear)); + + for (i = 0; i < ARRAY_SIZE(index1); i++) + for (j = 0; j < ARRAY_SIZE(entry1); j++) + nbl_hw_write_regs(phy_mgt, + NBL_UPA_EXT_CONF_TABLE_REG(16 * index1[i] + entry1[j]), + (u8 *)&clear, sizeof(clear)); + + return 0; +} + +static int nbl_phy_clr_fd_udf_l4(struct nbl_phy_mgt *phy_mgt) +{ + union upa_ext_conf_table_u clear = {{0}}; + u8 index[] = {16, 17, 18, 19, 21, 22, 24}; + u8 entry[] = {2, 10, 11, 13}; + u8 entry1[] = {2, 10, 11, 7, 8, 9}; /* for index = 20 */ + u8 entry2[] = {2, 10, 11, 14, 15}; /* for index = 25 */ + u8 i = 0; + u8 j = 0; + + for (i = 0; i < ARRAY_SIZE(index); i++) + for (j = 0; j < ARRAY_SIZE(entry); j++) + nbl_hw_write_regs(phy_mgt, + NBL_UPA_EXT_CONF_TABLE_REG(16 * index[i] + entry[j]), + (u8 *)&clear, sizeof(clear)); + + i = 20; + for (j = 0; j < ARRAY_SIZE(entry1); j++) + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * i + entry1[j]), + (u8 *)&clear, sizeof(clear)); + + i = 25; + for (j = 0; j < ARRAY_SIZE(entry2); j++) + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * i + entry2[j]), + (u8 *)&clear, sizeof(clear)); + + return 0; +} + +static int nbl_phy_clr_fd_udf(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + nbl_phy_clr_fd_udf_l2(phy_mgt); + nbl_phy_clr_fd_udf_l3(phy_mgt); + nbl_phy_clr_fd_udf_l4(phy_mgt); + + return 0; +} + +static int nbl_phy_set_fd_udf_l2(struct nbl_phy_mgt *phy_mgt, u8 offset) +{ + union upa_ext_conf_table_u ext32 = {{0}}; + union upa_ext_conf_table_u ext32_0 = {{0}}; /* used for half length extraction */ + union upa_ext_conf_table_u ext32_1 = {{0}}; /* used for half length extraction */ + union upa_ext_conf_table_u ext8 = {{0}}; + u8 index = 0; /* extractors profile index */ + u8 entry = 0; /* extractor index */ + + if (offset % 4 == 0) { + /* use 4B extractor */ + ext32.info.dst_offset = 40; + ext32.info.source_offset = offset / 4; + ext32.info.mode_sel = 0; + ext32.info.mode_start_off = 0; + ext32.info.lx_sel = 1; + ext32.info.op_en = 1; + + index = 0; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 1; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 2; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 3; + entry = 2; + ext32.info.dst_offset = 44; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 4; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 5; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + } else if (offset % 4 == 2) { + /* use 2 * 4B extractor, all use half length extraction mode */ + ext32_0.info.dst_offset = 40; + ext32_0.info.source_offset = offset / 4; + ext32_0.info.mode_sel = 1; + ext32_0.info.mode_start_off = 0b10; /* low-2-high */ + ext32_0.info.lx_sel = 1; + ext32_0.info.op_en = 1; + + ext32_1.info.dst_offset = 40; + ext32_1.info.source_offset = offset / 4 + 1; + ext32_1.info.mode_sel = 1; + ext32_1.info.mode_start_off = 0b01; /* high-2-low */ + ext32_1.info.lx_sel = 1; + ext32_1.info.op_en = 1; + + /* 
tunnel cases */ + index = 0; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_0, sizeof(ext32_0)); + entry = 3; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_1, sizeof(ext32_1)); + index = 1; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_0, sizeof(ext32_0)); + entry = 3; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_1, sizeof(ext32_1)); + index = 2; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_0, sizeof(ext32_0)); + entry = 3; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_1, sizeof(ext32_1)); + + /* non-tunnel cases */ + ext32_0.info.dst_offset = 44; + ext32_1.info.dst_offset = 44; + index = 3; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_0, sizeof(ext32_0)); + entry = 3; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_1, sizeof(ext32_1)); + index = 4; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_0, sizeof(ext32_0)); + entry = 3; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_1, sizeof(ext32_1)); + index = 5; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_0, sizeof(ext32_0)); + entry = 3; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32_1, sizeof(ext32_1)); + } else if (offset % 4 == 1 || offset % 4 == 3) { + /* use 4B extractor & 1B extractor for overwritten */ + /* tunnel cases */ + ext32.info.dst_offset = 40; + ext32.info.source_offset = (offset + 2) / 4; + ext32.info.mode_sel = 0; + ext32.info.mode_start_off = 0; + ext32.info.lx_sel = 1; + ext32.info.op_en = 1; + + ext8.info.dst_offset = (offset % 4 == 3) ? 43 : 40; + ext8.info.source_offset = (offset % 4 == 3) ? offset : offset + 3; + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 1; + ext8.info.op_en = 1; + + index = 0; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 1; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 2; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* non-tunnel cases */ + ext32.info.dst_offset = 44; + ext8.info.dst_offset = (offset % 4 == 3) ? 
47 : 44; + index = 3; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 4; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + index = 5; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 12; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + } + + return 0; +} + +static int nbl_phy_set_fd_udf_l3(struct nbl_phy_mgt *phy_mgt, u8 offset) +{ + union upa_ext_conf_table_u ext16_0 = {{0}}; + union upa_ext_conf_table_u ext16_1 = {{0}}; + union upa_ext_conf_table_u ext16_2 = {{0}}; /* used in half extraction mode */ + union upa_ext_conf_table_u ext8 = {{0}}; /* for overwritten 4B extraction */ + u8 index = 0; /* extractors profile index */ + u8 entry = 0; /* extractor index */ + + if (offset % 4 == 0 || offset % 4 == 2) { + /* tunnel cases */ + /* use 2 * 2B extractor */ + ext16_0.info.dst_offset = 40; + ext16_0.info.source_offset = offset / 2; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + ext16_1.info.dst_offset = 42; + ext16_1.info.source_offset = offset / 2 + 1; + ext16_1.info.mode_sel = 0; + ext16_1.info.mode_start_off = 0; + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + index = 8; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 9; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + /* non-tunnel cases */ + ext16_0.info.dst_offset = 44; + ext16_1.info.dst_offset = 46; + index = 10; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 11; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 12; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + } else if (offset % 4 == 1) { + /* tunnel cases */ + /* use 2*2B extractors & 1B extractor for overwritten */ + ext16_0.info.dst_offset = 42; + ext16_0.info.source_offset = offset / 2 + 1; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + + /* half mode extractor */ + ext16_1.info.dst_offset = 40; + ext16_1.info.source_offset = offset / 2; + 
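/* offset is odd here, so this 2B source word presumably straddles the UDF start; half-length mode (low-2-low, selected just below) keeps only the in-range byte, and the 1B extractor patches the byte it cannot cover */ +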
ext16_1.info.mode_sel = 1; + ext16_1.info.mode_start_off = 0b11; /* low-2-low */ + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + ext8.info.dst_offset = 40; + ext8.info.source_offset = offset + 3; + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 2; + ext8.info.op_en = 1; + + /* half mode extractor */ + ext16_2.info.dst_offset = 40; + ext16_2.info.source_offset = offset / 2 + 2; + ext16_2.info.mode_sel = 1; + ext16_2.info.mode_start_off = 0; /* high-2-high */ + ext16_2.info.lx_sel = 2; + ext16_2.info.op_en = 1; + + index = 8; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 9; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + /* for non-tunnel cases */ + ext16_0.info.dst_offset = 46; + ext16_1.info.dst_offset = 44; + ext16_2.info.dst_offset = 44; + ext8.info.dst_offset = 44; + + index = 10; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 11; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + index = 12; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + } else if (offset % 4 == 3) { + /* tunnel cases */ + /* use 2*2B extractors & 1B extractor for overwritten */ + ext16_0.info.dst_offset = 40; + ext16_0.info.source_offset = offset / 2 + 1; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + + ext16_1.info.dst_offset = 42; + ext16_1.info.source_offset = offset / 2; + ext16_1.info.mode_sel = 1; + ext16_1.info.mode_start_off = 0b11; /* low-2-low */ + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + ext8.info.dst_offset = 42; + ext8.info.source_offset = offset + 3; + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 2; + ext8.info.op_en = 1; + + /* half mode extractor */ + ext16_2.info.dst_offset = 42; + ext16_2.info.source_offset = offset / 2 + 2; + ext16_2.info.mode_sel = 1; + ext16_2.info.mode_start_off = 0b00; /* 
high-2-high */ + ext16_2.info.lx_sel = 2; + ext16_2.info.op_en = 1; + + index = 8; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 9; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + /* for non-tunnel cases */ + ext16_0.info.dst_offset = 44; + ext16_1.info.dst_offset = 46; + ext16_2.info.dst_offset = 46; + ext8.info.dst_offset = 46; + + index = 10; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 11; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + + index = 12; + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + } + + return 0; +} + +static int nbl_phy_set_fd_udf_l4(struct nbl_phy_mgt *phy_mgt, u8 offset) +{ + union upa_ext_conf_table_u ext32 = {{0}}; /* entry = 2 */ + union upa_ext_conf_table_u ext16_0 = {{0}}; /* entry = 10 */ + union upa_ext_conf_table_u ext16_1 = {{0}}; /* entry = 11 */ + union upa_ext_conf_table_u ext16_2 = {{0}}; /* entry for 2B = 7 8 9 */ + union upa_ext_conf_table_u ext8 = {{0}}; /* entry = 12 */ + union upa_ext_conf_table_u ext4_0 = {{0}}; /* entry = 14 */ + union upa_ext_conf_table_u ext4_1 = {{0}}; /* entry = 15 */ + u8 index = 0; /* extractors profile index */ + u8 entry = 0; /* extractor index */ + + if (offset % 4 == 0) { + /* use 1 * 4B extractor */ + ext32.info.dst_offset = 40; + ext32.info.source_offset = offset / 4 + 2; + ext32.info.mode_sel = 0; + ext32.info.mode_start_off = 0; + ext32.info.lx_sel = 2; + ext32.info.op_en = 1; + + /* tunnel vxlan & geneve case: plus UDP length 8B */ + index = 16; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + + /* tunnel geneve-ovn case: plus UDP length 8B */ + index = 17; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + + /* non-tunnel case */ + /* use 1 * 4B extractor */ + ext32.info.source_offset = offset / 4; + 
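/* no 8B tunnel UDP header to skip here, so the +2 word bias used in the tunnel cases above is dropped; dst_offset and lx_sel are rewritten just below */ +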
ext32.info.dst_offset = 44; + ext32.info.lx_sel = 3; + index = 20; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 21; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 22; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 24; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + index = 25; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + } else if (offset % 4 == 2) { + /* use 2 * 2B extractors */ + ext16_0.info.dst_offset = 40; + ext16_0.info.source_offset = offset / 2 + 4; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 2; + ext16_0.info.op_en = 1; + + ext16_1.info.dst_offset = 42; + ext16_1.info.source_offset = offset / 2 + 5; + ext16_1.info.mode_sel = 0; + ext16_1.info.mode_start_off = 0; + ext16_1.info.lx_sel = 2; + ext16_1.info.op_en = 1; + + /* tunnel vxlan & geneve case: plus UDP length 8B */ + index = 16; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + /* tunnel geneve-ovn case: plus UDP length 8B */ + index = 17; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + /* non-tunnel case */ + ext16_0.info.source_offset = offset / 2; + ext16_1.info.source_offset = offset / 2 + 1; + ext16_0.info.dst_offset = 44; + ext16_1.info.dst_offset = 46; + ext16_0.info.lx_sel = 3; + ext16_1.info.lx_sel = 3; + index = 20; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 21; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 22; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 24; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + + index = 25; + entry = 10; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 11; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + } else if (offset % 4 == 1 || offset % 4 == 3) { + /* use 4B extractor & 1B extractor for overwritten */ + ext32.info.dst_offset = 40; 
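+ /* unaligned offset: fetch the aligned 4B word overlapping the UDF (the +2 below presumably skips the 8B tunnel UDP header, counted in words); the 1B extractor then supplies the one byte the aligned fetch misses */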
+ ext32.info.source_offset = 2 + (offset + 2) / 4; + ext32.info.mode_sel = 0; + ext32.info.mode_start_off = 0; + ext32.info.lx_sel = 2; + ext32.info.op_en = 1; + + ext8.info.dst_offset = (offset % 4 == 1) ? 40 : 43; + ext8.info.source_offset = 8 + ((offset % 4 == 1) ? offset + 3 : offset); + ext8.info.mode_sel = 0; + ext8.info.mode_start_off = 0; + ext8.info.lx_sel = 2; + ext8.info.op_en = 1; + + /* tunnel vxlan & geneve case: plus UDP length 8B */ + index = 16; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* tunnel geneve-ovn case: plus UDP length 8B */ + index = 17; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* for non-tunnel cases */ + ext32.info.source_offset = (offset + 2) / 4; + ext8.info.source_offset = (offset % 4 == 1) ? offset + 3 : offset; + ext32.info.dst_offset = 44; + ext8.info.dst_offset = (offset % 4 == 1) ? 44 : 47; + ext32.info.lx_sel = 3; + ext8.info.lx_sel = 3; + + index = 21; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 22; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + index = 24; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 13; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext8, sizeof(ext8)); + + /* non-tunnel for icmp: use 32bit & 4bit & 4bit extractors */ + /* currently disabled! */ + ext32.info.op_en = 0; + ext4_0.info.dst_offset = (offset % 4 == 1) ? 44 : 47; + ext4_0.info.source_offset = (offset % 4 == 1) ? offset + 3 : offset; + ext4_0.info.mode_sel = 1; + ext4_0.info.mode_start_off = 0b00; + ext4_0.info.lx_sel = 3; + ext4_0.info.op_en = 0; + + ext4_1.info.dst_offset = (offset % 4 == 1) ? 44 : 47; + ext4_1.info.source_offset = (offset % 4 == 1) ? offset + 3 : offset; + ext4_1.info.mode_sel = 1; + ext4_1.info.mode_start_off = 0b11; + ext4_1.info.lx_sel = 3; + ext4_1.info.op_en = 0; + + index = 25; + entry = 2; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext32, sizeof(ext32)); + entry = 14; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext4_0, sizeof(ext4_0)); + entry = 15; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext4_1, sizeof(ext4_1)); + + /* non-tunnel for icmpv6: use 32bit & 4bit & 4bit extractors */ + /* currently disabled! */ + ext16_0.info.dst_offset = (offset % 4 == 1) ? 42 : 40; + ext16_0.info.source_offset = offset / 2 + 1; + ext16_0.info.mode_sel = 0; + ext16_0.info.mode_start_off = 0; + ext16_0.info.lx_sel = 3; + ext16_0.info.op_en = 0; + + ext16_1.info.dst_offset = (offset % 4 == 1) ? 
40 : 42; + ext16_1.info.source_offset = offset / 2; + ext16_1.info.mode_sel = 1; + ext16_1.info.mode_start_off = 0b00; + ext16_1.info.lx_sel = 3; + ext16_1.info.op_en = 0; + + ext16_2.info.dst_offset = (offset % 4 == 1) ? 40 : 42; + ext16_2.info.source_offset = offset / 2 + 2; + ext16_2.info.mode_sel = 1; + ext16_2.info.mode_start_off = 0b11; + ext16_2.info.lx_sel = 3; + ext16_2.info.op_en = 0; + + index = 20; + entry = 7; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_0, sizeof(ext16_0)); + entry = 8; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_1, sizeof(ext16_1)); + entry = 9; + nbl_hw_write_regs(phy_mgt, NBL_UPA_EXT_CONF_TABLE_REG(16 * index + entry), + (u8 *)&ext16_2, sizeof(ext16_2)); + } + + return 0; +} + +static int nbl_phy_set_fd_udf(void *priv, u8 lxmode, u8 offset) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + int ret = 0; + + switch (lxmode) { + case 0: + ret = nbl_phy_set_fd_udf_l2(phy_mgt, offset); + break; + case 1: + ret = nbl_phy_set_fd_udf_l3(phy_mgt, offset); + break; + case 2: + ret = nbl_phy_set_fd_udf_l4(phy_mgt, offset); + break; + default: + break; + } + + return ret; +} + +static int nbl_phy_set_fd_tcam_cfg_default(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union acl_tcam_cfg_u acl_key_cfg = {{0}}; + union acl_action_ram_cfg_u acl_action_cfg = {{0}}; + union acl_kgen_tcam_u acl_kgen_tcam = {{0}}; + int i; + + nbl_hw_read_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_action_cfg, sizeof(acl_action_cfg)); + + nbl_hw_read_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_key_cfg, sizeof(acl_key_cfg)); + + acl_key_cfg.info.startcompare0 = 1; + acl_key_cfg.info.startset0 = 1; + acl_key_cfg.info.key_id0 = 11; + acl_key_cfg.info.tcam0_enable = 1; + + acl_key_cfg.info.startcompare1 = 0; + acl_key_cfg.info.startset1 = 0; + acl_key_cfg.info.key_id1 = 10; + acl_key_cfg.info.tcam1_enable = 1; + + acl_key_cfg.info.startcompare2 = 0; + acl_key_cfg.info.startset2 = 0; + acl_key_cfg.info.key_id2 = 9; + acl_key_cfg.info.tcam2_enable = 1; + + acl_key_cfg.info.startcompare3 = 0; + acl_key_cfg.info.startset3 = 0; + acl_key_cfg.info.key_id3 = 8; + acl_key_cfg.info.tcam3_enable = 1; + + acl_key_cfg.info.startcompare4 = 0; + acl_key_cfg.info.startset4 = 0; + acl_key_cfg.info.key_id4 = 7; + acl_key_cfg.info.tcam4_enable = 1; + + acl_key_cfg.info.startcompare5 = 0; + acl_key_cfg.info.startset5 = 0; + acl_key_cfg.info.key_id5 = 6; + acl_key_cfg.info.tcam5_enable = 1; + + acl_key_cfg.info.startcompare6 = 0; + acl_key_cfg.info.startset6 = 0; + acl_key_cfg.info.key_id6 = 5; + acl_key_cfg.info.tcam6_enable = 1; + + acl_key_cfg.info.startcompare7 = 0; + acl_key_cfg.info.startset7 = 0; + acl_key_cfg.info.key_id7 = 4; + acl_key_cfg.info.tcam7_enable = 1; + + acl_key_cfg.info.startcompare8 = 0; + acl_key_cfg.info.startset8 = 0; + acl_key_cfg.info.key_id8 = 3; + acl_key_cfg.info.tcam8_enable = 1; + + acl_key_cfg.info.startcompare9 = 0; + acl_key_cfg.info.startset9 = 0; + acl_key_cfg.info.key_id9 = 2; + acl_key_cfg.info.tcam9_enable = 1; + + acl_key_cfg.info.startcompare10 = 0; + acl_key_cfg.info.startset10 = 0; + acl_key_cfg.info.key_id10 = 1; + acl_key_cfg.info.tcam10_enable = 1; + + acl_key_cfg.info.startcompare11 = 0; + acl_key_cfg.info.startset11 = 0; + acl_key_cfg.info.key_id11 = 0; + acl_key_cfg.info.tcam11_enable = 1; + + /* Although we don't use it, startcompare and startset must be 1, to 
identify the end. */ + acl_key_cfg.info.startcompare12 = 1; + acl_key_cfg.info.startset12 = 1; + acl_key_cfg.info.key_id12 = 0; + acl_key_cfg.info.tcam12_enable = 0; + + acl_key_cfg.info.startcompare13 = 0; + acl_key_cfg.info.startset13 = 0; + acl_key_cfg.info.key_id13 = 0; + acl_key_cfg.info.tcam13_enable = 0; + + acl_key_cfg.info.startcompare14 = 0; + acl_key_cfg.info.startset14 = 0; + acl_key_cfg.info.key_id14 = 0; + acl_key_cfg.info.tcam14_enable = 0; + + /* For ovs-tc upcall */ + acl_key_cfg.info.startcompare15 = 1; + acl_key_cfg.info.startset15 = 1; + acl_key_cfg.info.key_id15 = 0; + acl_key_cfg.info.tcam15_enable = 1; + + acl_action_cfg.info.action_ram0_enable = 1; + acl_action_cfg.info.action_ram0_alloc_id = 11; + + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_action_cfg, sizeof(acl_action_cfg)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_CFG_REG(NBL_FD_PROFILE_DEFAULT + 1), + (u8 *)&acl_action_cfg, sizeof(acl_action_cfg)); + + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_FD_PROFILE_DEFAULT), + (u8 *)&acl_key_cfg, sizeof(acl_key_cfg)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_CFG_REG(NBL_FD_PROFILE_DEFAULT + 1), + (u8 *)&acl_key_cfg, sizeof(acl_key_cfg)); + + for (i = NBL_FD_PROFILE_IPV4; i < NBL_FD_PROFILE_DEFAULT; i++) { + nbl_hw_read_regs(phy_mgt, NBL_ACL_KGEN_TCAM_REG(i), + (u8 *)&acl_kgen_tcam, sizeof(acl_kgen_tcam)); + acl_kgen_tcam.info.valid_bit = 0; + nbl_hw_write_regs(phy_mgt, NBL_ACL_KGEN_TCAM_REG(i), + (u8 *)&acl_kgen_tcam, sizeof(acl_kgen_tcam)); + } + + return 0; +} + +static int nbl_phy_set_fd_tcam_cfg_lite(void *priv) +{ + return 0; +} + +static int nbl_phy_set_fd_tcam_cfg_full(void *priv) +{ + return 0; +} + +static int nbl_phy_set_fd_tcam_ram(void *priv, struct nbl_acl_tcam_param *data, + struct nbl_acl_tcam_param *mask, u16 ram_index, u32 depth_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + struct nbl_common_info *common = NBL_PHY_MGT_TO_COMMON(phy_mgt); + union acl_indirect_ctrl_u indirect_ctrl = { + .info.cpu_acl_cfg_start = 1, + .info.cpu_acl_cfg_rw = NBL_ACL_INDIRECT_ACCESS_WRITE, + }; + union acl_valid_bit_u tcam_data_valid = {{0}}; + union acl_indirect_access_ack_u indirect_ack = {{0}}; + struct nbl_acl_tcam_common_data_u tcam_data = {{0}}, tcam_mask = {{0}}; + int i, rd_retry = NBL_ACL_RD_RETRY; + + for (i = 0; i < data->len / NBL_ACL_TCAM_KEY_LEN; i++) { + memset(&tcam_data, 0, sizeof(tcam_data)); + memset(&tcam_mask, 0, sizeof(tcam_mask)); + + memcpy(&tcam_data.data, &data->info.key[i], sizeof(tcam_data.data)); + memcpy(&tcam_mask.data, &mask->info.key[i], sizeof(tcam_mask.data)); + + *(u64 *)(&tcam_mask) = ~(*(u64 *)(&tcam_mask)); + + nbl_tcam_truth_value_convert((u64 *)&tcam_data, (u64 *)&tcam_mask); + + indirect_ctrl.info.acc_btm |= 1 << (ram_index + i); + tcam_data_valid.info.valid_bit |= 1 << (ram_index + i); + + nbl_debug(common, NBL_DEBUG_FLOW, "Set key tcam %d: 0x%02x%02x%02x%02x%02x", + ram_index + i, tcam_data.data[4], tcam_data.data[3], tcam_data.data[2], + tcam_data.data[1], tcam_data.data[0]); + nbl_debug(common, NBL_DEBUG_FLOW, "Set key tcam mask %d: 0x%02x%02x%02x%02x%02x", + ram_index + i, tcam_mask.data[4], tcam_mask.data[3], tcam_mask.data[2], + tcam_mask.data[1], tcam_mask.data[0]); + + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_X(ram_index + i), + (u8 *)&tcam_data, sizeof(tcam_data)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_TCAM_DATA_Y(ram_index + i), + (u8 *)&tcam_mask, sizeof(tcam_mask)); + } + + indirect_ctrl.info.tcam_addr = depth_index; + + 
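/* commit the staged X/Y entries: publish the valid bitmap, kick the indirect-access control word, then poll the ack with a bounded retry */ +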
nbl_debug(common, NBL_DEBUG_FLOW, "Set valid bit %08x", *(u32 *)&tcam_data_valid); + nbl_debug(common, NBL_DEBUG_FLOW, "Set ctrl %08x", *(u32 *)&indirect_ctrl); + + nbl_hw_write_regs(phy_mgt, NBL_ACL_VALID_BIT_ADDR, + (u8 *)&tcam_data_valid, sizeof(tcam_data_valid)); + nbl_hw_write_regs(phy_mgt, NBL_ACL_INDIRECT_CTRL_ADDR, + (u8 *)&indirect_ctrl, sizeof(indirect_ctrl)); + do { + nbl_hw_read_regs(phy_mgt, NBL_ACL_INDIRECT_ACCESS_ACK_ADDR, + (u8 *)&indirect_ack, sizeof(indirect_ack)); + if (!indirect_ack.info.done) { + rd_retry--; + usleep_range(NBL_ACL_RD_WAIT_100US, NBL_ACL_RD_WAIT_200US); + } else { + break; + } + } while (rd_retry); + + if (!indirect_ack.info.done) { + nbl_err(common, NBL_DEBUG_FLOW, "Set fd acl tcam fail\n"); + return -EIO; + } + + return 0; +} + +static int nbl_phy_set_fd_action_ram(void *priv, u32 action, u16 ram_index, u32 depth_index) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + union acl_action_ram15_u acl_action_ram = {{0}}; + + acl_action_ram.info.action0 = action; + + nbl_hw_write_regs(phy_mgt, NBL_ACL_ACTION_RAM_TBL(ram_index, depth_index), + (u8 *)&acl_action_ram, sizeof(acl_action_ram)); + + return 0; +} + +static void nbl_phy_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + phy_mgt->hw_status = hw_status; +}; + +static enum nbl_hw_status nbl_phy_get_hw_status(void *priv) +{ + struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv; + + return phy_mgt->hw_status; +}; + +static struct nbl_phy_ops phy_ops = { + .init_chip_module = nbl_phy_init_chip_module, + .init_qid_map_table = nbl_phy_init_qid_map_table, + .set_qid_map_table = nbl_phy_set_qid_map_table, + .set_qid_map_ready = nbl_phy_set_qid_map_ready, + .cfg_ipro_queue_tbl = nbl_phy_cfg_ipro_queue_tbl, + .cfg_ipro_dn_sport_tbl = nbl_phy_cfg_ipro_dn_sport_tbl, + .set_vnet_queue_info = nbl_phy_set_vnet_queue_info, + .clear_vnet_queue_info = nbl_phy_clear_vnet_queue_info, + .cfg_vnet_qinfo_log = nbl_phy_cfg_vnet_qinfo_log, + .reset_dvn_cfg = nbl_phy_reset_dvn_cfg, + .reset_uvn_cfg = nbl_phy_reset_uvn_cfg, + .restore_dvn_context = nbl_phy_restore_dvn_context, + .restore_uvn_context = nbl_phy_restore_uvn_context, + .get_tx_queue_cfg = nbl_phy_get_tx_queue_cfg, + .get_rx_queue_cfg = nbl_phy_get_rx_queue_cfg, + .cfg_tx_queue = nbl_phy_cfg_tx_queue, + .cfg_rx_queue = nbl_phy_cfg_rx_queue, + .check_q2tc = nbl_phy_check_q2tc, + .cfg_q2tc_netid = nbl_phy_cfg_q2tc_netid, + .cfg_q2tc_tcid = nbl_phy_cfg_q2tc_tcid, + .set_tc_wgt = nbl_phy_set_tc_wgt, + .active_shaping = nbl_phy_active_shaping, + .deactive_shaping = nbl_phy_deactive_shaping, + .set_shaping = nbl_phy_set_shaping, + .cfg_dsch_net_to_group = nbl_phy_cfg_dsch_net_to_group, + .init_epro_rss_key = nbl_phy_init_epro_rss_key, + .read_rss_key = nbl_phy_read_epro_rss_key, + .read_rss_indir = nbl_phy_read_rss_indir, + .get_rss_alg_sel = nbl_phy_get_rss_alg_sel, + .init_epro_vpt_tbl = nbl_phy_init_epro_vpt_tbl, + .set_epro_rss_default = nbl_phy_set_epro_rss_default, + .cfg_epro_rss_ret = nbl_phy_cfg_epro_rss_ret, + .set_epro_rss_pt = nbl_phy_set_epro_rss_pt, + .clear_epro_rss_pt = nbl_phy_clear_epro_rss_pt, + .set_promisc_mode = nbl_phy_set_promisc_mode, + .disable_dvn = nbl_phy_disable_dvn, + .disable_uvn = nbl_phy_disable_uvn, + .lso_dsch_drain = nbl_phy_lso_dsch_drain, + .rsc_cache_drain = nbl_phy_rsc_cache_drain, + .save_dvn_ctx = nbl_phy_save_dvn_ctx, + .save_uvn_ctx = nbl_phy_save_uvn_ctx, + .get_rx_queue_err_stats = nbl_phy_get_rx_queue_err_stats, + 
.get_tx_queue_err_stats = nbl_phy_get_tx_queue_err_stats, + .setup_queue_switch = nbl_phy_setup_queue_switch, + .init_pfc = nbl_phy_init_pfc, + .cfg_phy_flow = nbl_phy_cfg_phy_flow, + .cfg_eth_port_priority_replace = nbl_phy_cfg_eth_port_priority_replace, + .get_chip_temperature = nbl_phy_get_chip_temperature, + + .configure_msix_map = nbl_phy_configure_msix_map, + .configure_msix_info = nbl_phy_configure_msix_info, + .get_coalesce = nbl_phy_get_coalesce, + .set_coalesce = nbl_phy_set_coalesce, + + .set_ht = nbl_phy_set_ht, + .set_kt = nbl_phy_set_kt, + .search_key = nbl_phy_search_key, + .add_tcam = nbl_phy_add_tcam, + .del_tcam = nbl_phy_del_tcam, + .add_mcc = nbl_phy_add_mcc, + .del_mcc = nbl_phy_del_mcc, + .add_tnl_encap = nbl_phy_add_tnl_encap, + .del_tnl_encap = nbl_phy_del_tnl_encap, + .init_fem = nbl_phy_init_fem, + .init_acl = nbl_phy_init_acl, + .uninit_acl = nbl_phy_uninit_acl, + .set_upcall_rule = nbl_phy_acl_set_upcall_rule, + .unset_upcall_rule = nbl_phy_acl_unset_upcall_rule, + .set_shaping_dport_vld = nbl_phy_set_shaping_dport_vld, + .set_dport_fc_th_vld = nbl_phy_set_dport_fc_th_vld, + .init_acl_stats = nbl_phy_init_acl_stats, + + .update_mailbox_queue_tail_ptr = nbl_phy_update_mailbox_queue_tail_ptr, + .config_mailbox_rxq = nbl_phy_config_mailbox_rxq, + .config_mailbox_txq = nbl_phy_config_mailbox_txq, + .stop_mailbox_rxq = nbl_phy_stop_mailbox_rxq, + .stop_mailbox_txq = nbl_phy_stop_mailbox_txq, + .get_mailbox_rx_tail_ptr = nbl_phy_get_mailbox_rx_tail_ptr, + .check_mailbox_dma_err = nbl_phy_check_mailbox_dma_err, + .get_host_pf_mask = nbl_phy_get_host_pf_mask, + .get_host_pf_fid = nbl_phy_get_host_pf_fid, + .cfg_mailbox_qinfo = nbl_phy_cfg_mailbox_qinfo, + .enable_mailbox_irq = nbl_phy_enable_mailbox_irq, + .enable_abnormal_irq = nbl_phy_enable_abnormal_irq, + .enable_msix_irq = nbl_phy_enable_msix_irq, + .get_msix_irq_enable_info = nbl_phy_get_msix_irq_enable_info, + + .config_adminq_rxq = nbl_phy_config_adminq_rxq, + .config_adminq_txq = nbl_phy_config_adminq_txq, + .stop_adminq_rxq = nbl_phy_stop_adminq_rxq, + .stop_adminq_txq = nbl_phy_stop_adminq_txq, + .cfg_adminq_qinfo = nbl_phy_cfg_adminq_qinfo, + .enable_adminq_irq = nbl_phy_enable_adminq_irq, + .update_adminq_queue_tail_ptr = nbl_phy_update_adminq_queue_tail_ptr, + .get_adminq_rx_tail_ptr = nbl_phy_get_adminq_rx_tail_ptr, + .check_adminq_dma_err = nbl_phy_check_adminq_dma_err, + + .update_tail_ptr = nbl_phy_update_tail_ptr, + .get_tail_ptr = nbl_phy_get_tail_ptr, + .set_spoof_check_addr = nbl_phy_set_spoof_check_addr, + .set_spoof_check_enable = nbl_phy_set_spoof_check_enable, + + .get_hw_addr = nbl_phy_get_hw_addr, + + .cfg_ktls_tx_keymat = nbl_phy_cfg_ktls_tx_keymat, + .cfg_ktls_rx_keymat = nbl_phy_cfg_ktls_rx_keymat, + .cfg_ktls_rx_record = nbl_phy_cfg_ktls_rx_record, + + .cfg_dipsec_nat = nbl_phy_cfg_dipsec_nat, + .cfg_dipsec_sad_iv = nbl_phy_cfg_dipsec_sad_iv, + .cfg_dipsec_sad_esn = nbl_phy_cfg_dipsec_sad_esn, + .cfg_dipsec_sad_lifetime = nbl_phy_cfg_dipsec_sad_lifetime, + .cfg_dipsec_sad_crypto = nbl_phy_cfg_dipsec_sad_crypto, + .cfg_dipsec_sad_encap = nbl_phy_cfg_dipsec_sad_encap, + .read_dipsec_status = nbl_phy_read_dipsec_status, + .reset_dipsec_status = nbl_phy_reset_dipsec_status, + .read_dipsec_lft_info = nbl_phy_read_dipsec_lft_info, + .cfg_dipsec_lft_info = nbl_phy_cfg_dipsec_lft_info, + .init_dprbac = nbl_phy_init_dprbac, + .cfg_uipsec_nat = nbl_phy_cfg_uipsec_nat, + .cfg_uipsec_sad_esn = nbl_phy_cfg_uipsec_sad_esn, + .cfg_uipsec_sad_lifetime = nbl_phy_cfg_uipsec_sad_lifetime, + 
.cfg_uipsec_sad_crypto = nbl_phy_cfg_uipsec_sad_crypto, + .cfg_uipsec_sad_window = nbl_phy_cfg_uipsec_sad_window, + .cfg_uipsec_em_tcam = nbl_phy_cfg_uipsec_em_tcam, + .cfg_uipsec_em_ad = nbl_phy_cfg_uipsec_em_ad, + .clear_uipsec_tcam_ad = nbl_phy_clear_uipsec_tcam_ad, + .cfg_uipsec_em_ht = nbl_phy_cfg_uipsec_em_ht, + .cfg_uipsec_em_kt = nbl_phy_cfg_uipsec_em_kt, + .clear_uipsec_ht_kt = nbl_phy_clear_uipsec_ht_kt, + .read_uipsec_status = nbl_phy_read_uipsec_status, + .reset_uipsec_status = nbl_phy_reset_uipsec_status, + .read_uipsec_lft_info = nbl_phy_read_uipsec_lft_info, + .cfg_uipsec_lft_info = nbl_phy_cfg_uipsec_lft_info, + .init_uprbac = nbl_phy_init_uprbac, + + .get_fw_ping = nbl_phy_get_fw_ping, + .set_fw_ping = nbl_phy_set_fw_ping, + .get_fw_pong = nbl_phy_get_fw_pong, + .set_fw_pong = nbl_phy_set_fw_pong, + + .load_p4 = nbl_phy_load_p4, + + .configure_qos = nbl_phy_configure_qos, + .set_pfc_buffer_size = nbl_phy_set_pfc_buffer_size, + .get_pfc_buffer_size = nbl_phy_get_pfc_buffer_size, + + .init_offload_fwd = nbl_phy_init_offload_fwd, + .init_cmdq = nbl_phy_cmdq_init, + .reset_cmdq = nbl_phy_cmdq_reset, + .destroy_cmdq = nbl_phy_cmdq_destroy, + .update_cmdq_tail = nbl_phy_update_cmdq_tail, + .init_flow = nbl_phy_flow_init, + .deinit_flow = nbl_phy_flow_deinit, + .get_flow_acl_switch = nbl_phy_flow_get_acl_switch, + .get_line_rate_info = nbl_phy_get_line_rate_info, + .offload_flow_rule = nbl_phy_offload_flow_rule, + .init_rep = nbl_phy_init_rep, + .clear_profile_table_action = nbl_phy_clear_profile_table_action, + + .init_vdpaq = nbl_phy_init_vdpaq, + .destroy_vdpaq = nbl_phy_destroy_vdpaq, + + .get_reg_dump = nbl_phy_get_reg_dump, + .get_reg_dump_len = nbl_phy_get_reg_dump_len, + .process_abnormal_event = nbl_phy_process_abnormal_event, + .get_uvn_desc_entry_stats = nbl_phy_get_uvn_desc_entry_stats, + .set_uvn_desc_wr_timeout = nbl_phy_set_uvn_desc_wr_timeout, + + .cfg_lag_hash_algorithm = nbl_phy_cfg_lag_algorithm, + .cfg_lag_member_fwd = nbl_phy_cfg_lag_member_fwd, + .cfg_lag_member_list = nbl_phy_cfg_lag_member_list, + .cfg_lag_member_up_attr = nbl_phy_cfg_lag_member_up_attr, + .cfg_lag_mcc = nbl_phy_cfg_lag_mcc, + .get_lag_fwd = nbl_phy_get_lag_fwd, + .cfg_bond_shaping = nbl_phy_cfg_bond_shaping, + .cfg_bgid_back_pressure = nbl_phy_cfg_bgid_back_pressure, + + .get_fw_eth_num = nbl_phy_get_fw_eth_num, + .get_fw_eth_map = nbl_phy_get_fw_eth_map, + .get_board_info = nbl_phy_get_board_info, + .get_quirks = nbl_phy_get_quirks, + .set_tc_kgen_cvlan_zero = nbl_phy_set_tc_kgen_cvlan_zero, + .unset_tc_kgen_cvlan = nbl_phy_unset_tc_kgen_cvlan, + .set_ped_tab_vsi_type = nbl_phy_set_ped_tab_vsi_type, + + .clear_acl = nbl_phy_clear_acl, + .set_fd_udf = nbl_phy_set_fd_udf, + .clear_fd_udf = nbl_phy_clr_fd_udf, + .set_fd_tcam_cfg_default = nbl_phy_set_fd_tcam_cfg_default, + .set_fd_tcam_cfg_lite = nbl_phy_set_fd_tcam_cfg_lite, + .set_fd_tcam_cfg_full = nbl_phy_set_fd_tcam_cfg_full, + .set_fd_tcam_ram = nbl_phy_set_fd_tcam_ram, + .set_fd_action_ram = nbl_phy_set_fd_action_ram, + .set_hw_status = nbl_phy_set_hw_status, + .get_hw_status = nbl_phy_get_hw_status, +}; + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_phy_setup_phy_mgt(struct nbl_common_info *common, + struct nbl_phy_mgt_leonis **phy_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + *phy_mgt_leonis = devm_kzalloc(dev, sizeof(struct nbl_phy_mgt_leonis), GFP_KERNEL); + if (!*phy_mgt_leonis) + return -ENOMEM; + + NBL_PHY_MGT_TO_COMMON(&(*phy_mgt_leonis)->phy_mgt) = 
common; + + return 0; +} + +static void nbl_phy_remove_phy_mgt(struct nbl_common_info *common, + struct nbl_phy_mgt_leonis **phy_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, *phy_mgt_leonis); + *phy_mgt_leonis = NULL; +} + +static int nbl_phy_setup_ops(struct nbl_common_info *common, struct nbl_phy_ops_tbl **phy_ops_tbl, + struct nbl_phy_mgt_leonis *phy_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + *phy_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_phy_ops_tbl), GFP_KERNEL); + if (!*phy_ops_tbl) + return -ENOMEM; + + NBL_PHY_OPS_TBL_TO_OPS(*phy_ops_tbl) = &phy_ops; + NBL_PHY_OPS_TBL_TO_PRIV(*phy_ops_tbl) = phy_mgt_leonis; + + return 0; +} + +static void nbl_phy_remove_ops(struct nbl_common_info *common, struct nbl_phy_ops_tbl **phy_ops_tbl) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, *phy_ops_tbl); + *phy_ops_tbl = NULL; +} + +static void nbl_phy_disable_rx_err_report(struct pci_dev *pdev) +{ +#define NBL_RX_ERR_BIT 0 +#define NBL_BAD_TLP_BIT 6 +#define NBL_BAD_DLLP_BIT 7 + u8 mask = 0; + int aer_cap = 0; + + aer_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!aer_cap) + return; + + pci_read_config_byte(pdev, aer_cap + PCI_ERR_COR_MASK, &mask); + mask |= BIT(NBL_RX_ERR_BIT) | BIT(NBL_BAD_TLP_BIT) | BIT(NBL_BAD_DLLP_BIT); + pci_write_config_byte(pdev, aer_cap + PCI_ERR_COR_MASK, mask); +} + +int nbl_phy_init_leonis(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_common_info *common; + struct pci_dev *pdev; + struct nbl_phy_mgt_leonis **phy_mgt_leonis; + struct nbl_phy_mgt *phy_mgt; + struct nbl_phy_ops_tbl **phy_ops_tbl; + int bar_mask; + int ret = 0; + + common = NBL_ADAPTER_TO_COMMON(adapter); + phy_mgt_leonis = (struct nbl_phy_mgt_leonis **)&NBL_ADAPTER_TO_PHY_MGT(adapter); + phy_ops_tbl = &NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + pdev = NBL_COMMON_TO_PDEV(common); + + ret = nbl_phy_setup_phy_mgt(common, phy_mgt_leonis); + if (ret) + goto setup_mgt_fail; + + phy_mgt = &(*phy_mgt_leonis)->phy_mgt; + bar_mask = BIT(NBL_MEMORY_BAR) | BIT(NBL_MAILBOX_BAR); + ret = pci_request_selected_regions(pdev, bar_mask, NBL_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "Request memory bar and mailbox bar failed, err = %d\n", ret); + goto request_bar_region_fail; + } + + if (param->caps.has_ctrl || param->caps.has_factory_ctrl) { + phy_mgt->hw_addr = ioremap(pci_resource_start(pdev, NBL_MEMORY_BAR), + pci_resource_len(pdev, NBL_MEMORY_BAR) - + NBL_RDMA_NOTIFY_OFF); + if (!phy_mgt->hw_addr) { + dev_err(&pdev->dev, "Memory bar ioremap failed\n"); + ret = -EIO; + goto ioremap_err; + } + phy_mgt->hw_size = pci_resource_len(pdev, NBL_MEMORY_BAR) - NBL_RDMA_NOTIFY_OFF; + } else { + phy_mgt->hw_addr = ioremap(pci_resource_start(pdev, NBL_MEMORY_BAR), + NBL_RDMA_NOTIFY_OFF); + if (!phy_mgt->hw_addr) { + dev_err(&pdev->dev, "Memory bar ioremap failed\n"); + ret = -EIO; + goto ioremap_err; + } + phy_mgt->hw_size = NBL_RDMA_NOTIFY_OFF; + } + + phy_mgt->notify_offset = 0; + phy_mgt->mailbox_bar_hw_addr = pci_ioremap_bar(pdev, NBL_MAILBOX_BAR); + if (!phy_mgt->mailbox_bar_hw_addr) { + dev_err(&pdev->dev, "Mailbox bar ioremap failed\n"); + ret = -EIO; + goto mailbox_ioremap_err; + } + + spin_lock_init(&phy_mgt->reg_lock); + phy_mgt->should_lock = true; + + ret = nbl_phy_setup_ops(common, phy_ops_tbl, *phy_mgt_leonis); + if (ret) + goto setup_ops_fail; + + nbl_phy_disable_rx_err_report(pdev); + + (*phy_mgt_leonis)->ro_enable = 
pcie_relaxed_ordering_enabled(pdev); + + return 0; + +setup_ops_fail: + iounmap(phy_mgt->mailbox_bar_hw_addr); +mailbox_ioremap_err: + iounmap(phy_mgt->hw_addr); +ioremap_err: + pci_release_selected_regions(pdev, bar_mask); +request_bar_region_fail: + nbl_phy_remove_phy_mgt(common, phy_mgt_leonis); +setup_mgt_fail: + return ret; +} + +void nbl_phy_remove_leonis(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct nbl_common_info *common; + struct nbl_phy_mgt_leonis **phy_mgt_leonis; + struct nbl_phy_ops_tbl **phy_ops_tbl; + struct pci_dev *pdev; + u8 __iomem *hw_addr; + u8 __iomem *mailbox_bar_hw_addr; + int bar_mask = BIT(NBL_MEMORY_BAR) | BIT(NBL_MAILBOX_BAR); + + common = NBL_ADAPTER_TO_COMMON(adapter); + phy_mgt_leonis = (struct nbl_phy_mgt_leonis **)&NBL_ADAPTER_TO_PHY_MGT(adapter); + phy_ops_tbl = &NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + pdev = NBL_COMMON_TO_PDEV(common); + + hw_addr = (*phy_mgt_leonis)->phy_mgt.hw_addr; + mailbox_bar_hw_addr = (*phy_mgt_leonis)->phy_mgt.mailbox_bar_hw_addr; + + iounmap(mailbox_bar_hw_addr); + iounmap(hw_addr); + pci_release_selected_regions(pdev, bar_mask); + nbl_phy_remove_phy_mgt(common, phy_mgt_leonis); + + nbl_phy_remove_ops(common, phy_ops_tbl); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..c1f26c636773cd0437884ed61e594fc2a299ac17 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_phy_leonis.h @@ -0,0 +1,2165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_PHY_LEONIS_H_ +#define _NBL_PHY_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_phy.h" + +#define NBL_NOTIFY_DELAY_MAX_TIME_FOR_REGS 300 /* 300us for palladium, 5us for s2c */ + +#define NBL_DRAIN_WAIT_TIMES (30000) + +/* ---------- FEM ---------- */ +#define NBL_FEM_INT_STATUS (NBL_PPE_FEM_BASE + 0x00000000) +#define NBL_FEM_INT_MASK (NBL_PPE_FEM_BASE + 0x00000004) +#define NBL_FEM_INIT_START (NBL_PPE_FEM_BASE + 0x00000180) +#define NBL_FEM_KT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000348) +#define NBL_FEM_INSERT_SEARCH0_CTRL (NBL_PPE_FEM_BASE + 0x00000500) +#define NBL_FEM_INSERT_SEARCH0_ACK (NBL_PPE_FEM_BASE + 0x00000504) +#define NBL_FEM_INSERT_SEARCH0_DATA (NBL_PPE_FEM_BASE + 0x00000508) +#define KT_MASK_LEN32_ACTION_INFO (0x0) +#define KT_MASK_LEN12_ACTION_INFO (0xFFFFF000) +#define NBL_FEM_SEARCH_KEY_LEN 44 + +#define HT_PORT0_BANK_SEL (0b01000000) +#define HT_PORT1_BANK_SEL (0b00110000) +#define HT_PORT2_BANK_SEL (0b00000111) +#define KT_PORT0_BANK_SEL (0b11000000) +#define KT_PORT1_BANK_SEL (0b00110000) +#define KT_PORT2_BANK_SEL (0b00001111) +#define AT_PORT0_BANK_SEL (0b000000000000) +#define AT_PORT1_BANK_SEL (0b111000000000) +#define AT_PORT2_BANK_SEL (0b000111111111) +#define HT_PORT0_BTM 1 +#define HT_PORT1_BTM 3 +#define HT_PORT2_BTM 16 + +#define NBL_1BIT 1 +#define NBL_8BIT 8 +#define NBL_16BIT 16 + +#define NBL_FEM_HT_BANK_SEL_BITMAP (NBL_PPE_FEM_BASE + 0x00000200) +#define NBL_FEM_KT_BANK_SEL_BITMAP (NBL_PPE_FEM_BASE + 0x00000204) +#define NBL_FEM_AT_BANK_SEL_BITMAP (NBL_PPE_FEM_BASE + 0x00000208) +#define NBL_FEM_AT_BANK_SEL_BITMAP2 (NBL_PPE_FEM_BASE + 0x0000020C) + +#define NBL_EM_PT_MASK_LEN_0 (0xFFFFFFFF) +#define NBL_EM_PT_MASK_LEN_64 (0x0000FFFF) +#define NBL_EM_PT_MASK_LEN_96 (0x000000FF) +#define NBL_EM_PT_MASK1_LEN_0 
(0xFFFFFFFF) +#define NBL_EM_PT_MASK1_LEN_4 (0x7FFFFFFF) +#define NBL_EM_PT_MASK1_LEN_12 (0x1FFFFFFF) +#define NBL_EM_PT_MASK1_LEN_20 (0x07FFFFFF) +#define NBL_EM_PT_MASK1_LEN_28 (0x01FFFFFF) +#define NBL_EM_PT_MASK1_LEN_32 (0x00FFFFFF) +#define NBL_EM_PT_MASK1_LEN_76 (0x00001FFF) +#define NBL_EM_PT_MASK1_LEN_112 (0x0000000F) +#define NBL_EM_PT_MASK1_LEN_116 (0x00000007) +#define NBL_EM_PT_MASK1_LEN_124 (0x00000001) +#define NBL_EM_PT_MASK1_LEN_128 (0x0) +#define NBL_EM_PT_MASK2_LEN_28 (0x000007FF) +#define NBL_EM_PT_MASK2_LEN_36 (0x000001FF) +#define NBL_EM_PT_MASK2_LEN_44 (0x0000007F) +#define NBL_EM_PT_MASK2_LEN_52 (0x0000001F) +#define NBL_EM_PT_MASK2_LEN_60 (0x00000007) +#define NBL_EM_PT_MASK2_LEN_68 (0x00000001) +#define NBL_EM_PT_MASK2_LEN_72 (0x00000010) +#define NBL_EM_PT_MASK2_SEC_72 (0x00000000) + +#define NBL_KT_PHY_L2_DW_LEN 40 + +#define NBL_ACL_VSI_PF_UPCALL 9 +#define NBL_ACL_ETH_PF_UPCALL 8 +#define NBL_ACL_INDIRECT_ACCESS_WRITE (0) +#define NBL_ACL_INDIRECT_ACCESS_READ (1) +#define NBL_ETH_BASE_IDX 8 +#define NBL_VSI_BASE_IDX 0 +#define NBL_PF_MAX_NUM 4 +#define NBL_ACL_TCAM_UPCALL_IDX 15 + +#define NBL_GET_PF_ETH_ID(idx) ((idx) + NBL_ETH_BASE_IDX) +#define NBL_GET_PF_VSI_ID(idx) ((idx) * 256) +#define NBL_ACL_GET_ACTION_DATA(act_buf, act_data) (act_data = (act_buf) & 0x3fffff) +#define NBL_ACL_FLUSH_FLOW_BTM 0x7fff +#define NBL_ACL_FLUSH_UPCALL_BTM 0x8000 + +#define NBL_ACL_TCAM_DATA_X(t) (NBL_PPE_ACL_BASE + 0x00000904 + ((t) * 8)) +#define NBL_ACL_TCAM_DATA_Y(t) (NBL_PPE_ACL_BASE + 0x00000990 + ((t) * 8)) + +struct nbl_acl_tcam_common_data_u { + u8 data[5]; + u8 rsv[3]; +}; + +/* ---------- MCC ---------- */ +#define NBL_MCC_MODULE (0x00B44000) +#define NBL_MCC_LEAF_NODE_TABLE(i) \ + (NBL_MCC_MODULE + 0x00010000 + (i) * sizeof(struct nbl_mcc_tbl)) + +union nbl_acl_tcam_upcall_data_u { + struct { + u64 rsv1:26; + u64 vsi_id:8; + u64 sw_id:2; + u64 vsi_pt_id:4; + u64 vsi_rsv_h:24; + }; + struct { + u64 rsv2:32; + u64 eth_id:4; + u64 eth_pt_id:4; + u64 eth_rsv_h:24; + }; + u8 data[8]; + u64 tcam_data; +}; + +#pragma pack(1) + +struct nbl_fem_int_mask { + u32 rsv0:2; + u32 fifo_ovf_err:1; + u32 fifo_udf_err:1; + u32 cif_err:1; + u32 rsv1:1; + u32 cfg_err:1; + u32 data_ucor_err:1; + u32 bank_cflt_err:1; + u32 rsv2:23; +}; + +union nbl_fem_ht_acc_ctrl_u { + struct nbl_fem_ht_acc_ctrl { + u32 bucket_id:2; /* used for choose entry's hash-bucket */ + u32 entry_id:14; /* used for choose hash-bucket's entry */ + u32 ht_id:1; /* 0:HT0, 1:HT1 */ +#define NBL_ACC_HT0 (0) +#define NBL_ACC_HT1 (1) + u32 port:2; /* 0:pp0 1:pp1 2:pp2 */ + u32 rsv:10; + u32 access_size:1; /* 0:32bit 1:128bit,read support 128 */ +#define NBL_ACC_SIZE_32B (0) +#define NBL_ACC_SIZE_128B (1) + u32 rw:1; /* 1:read 0:write */ +#define NBL_ACC_MODE_READ (1) +#define NBL_ACC_MODE_WRITE (0) + u32 start:1; /* enable indirect access */ + } info; +#define NBL_FEM_HT_ACC_CTRL_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_ctrl)) + u8 data[NBL_FEM_HT_ACC_CTRL_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_ACC_CTRL (NBL_PPE_FEM_BASE + 0x00000300) + +union nbl_fem_ht_acc_data_u { + struct nbl_fem_ht_acc_data { + u32 kt_index:17; + u32 hash:14; + u32 vld:1; + } info; +#define NBL_FEM_HT_ACC_DATA_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_data)) + u8 data[NBL_FEM_HT_ACC_DATA_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000308) + +union nbl_fem_ht_acc_ack_u { + struct nbl_fem_ht_acc_ack { + u32 done:1; /* indirect access is finished */ + u32 status:1; /* indirect access is error */ + u32 rsv:30; + } info; +#define 
NBL_FEM_HT_ACC_ACK_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_ack)) + u8 data[NBL_FEM_HT_ACC_ACK_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_ACC_ACK (NBL_PPE_FEM_BASE + 0x00000304) + +union nbl_fem_kt_acc_ctrl_u { + struct nbl_fem_kt_acc_ctrl { + u32 addr:17; /* kt-index */ + u32 rsv:12; + u32 access_size:1; +#define NBL_ACC_SIZE_160B (0) +#define NBL_ACC_SIZE_320B (1) + u32 rw:1; /* 1:read 0:write */ + u32 start:1; /* enable ,indirect access */ + } info; +#define NBL_FEM_KT_ACC_CTRL_TBL_WIDTH (sizeof(struct nbl_fem_kt_acc_ctrl)) + u8 data[NBL_FEM_KT_ACC_CTRL_TBL_WIDTH]; +}; + +#define NBL_FEM_KT_ACC_CTRL (NBL_PPE_FEM_BASE + 0x00000340) + +union nbl_fem_kt_acc_ack_u { + struct nbl_fem_kt_acc_ack { + u32 done:1; /* indirect access is finished */ + u32 status:1; /* indirect access is error */ + u32 rsv:30; + } info; +#define NBL_FEM_KT_ACC_ACK_TBL_WIDTH (sizeof(struct nbl_fem_kt_acc_ack)) + u8 data[NBL_FEM_KT_ACC_ACK_TBL_WIDTH]; +}; + +#define NBL_FEM_KT_ACC_ACK (NBL_PPE_FEM_BASE + 0x00000344) + +union nbl_search_ctrl_u { + struct nbl_search_ctrl { + u32 rsv:31; + u32 start:1; + } info; +#define NBL_SEARCH_CTRL_WIDTH (sizeof(struct nbl_search_ctrl)) + u8 data[NBL_SEARCH_CTRL_WIDTH]; +}; + +union nbl_search_ack_u { + struct nbl_search_ack { + u32 done:1; + u32 status:1; + u32 rsv:30; + } info; +#define NBL_SEARCH_ACK_WIDTH (sizeof(struct nbl_search_ack)) + u8 data[NBL_SEARCH_ACK_WIDTH]; +}; + +#define NBL_FEM_EM0_TCAM_TABLE_ADDR (0xa0b000) +#define NBL_FEM_EM_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM_TCAM_TABLE_WIDTH (256) + +union fem_em_tcam_table_u { + struct fem_em_tcam_table { + u32 key[5]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [191:162] Default:0x0 RO */ + u32 rsv1[2]; /* [255:192] Default:0x0 RO */ + } info; + u32 data[NBL_FEM_EM_TCAM_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_tcam_table)]; +}; + +#define NBL_FEM_EM_TCAM_TABLE_REG(r, t) (NBL_FEM_EM0_TCAM_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_TCAM_TABLE_WIDTH / 8) * (t)) + +#define NBL_FEM_EM0_AD_TABLE_ADDR (0xa08000) +#define NBL_FEM_EM_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM_AD_TABLE_WIDTH (512) + +union fem_em_ad_table_u { + struct fem_em_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv[5]; /* [511:352] Default:0x0 RO */ + } info; + u32 data[NBL_FEM_EM_AD_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_ad_table)]; +}; + +#define NBL_FEM_EM_AD_TABLE_REG(r, t) (NBL_FEM_EM0_AD_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_AD_TABLE_WIDTH / 8) * (t)) + +#define NBL_FLOW_TCAM_TOTAL_LEN 32 +#define NBL_FLOW_AD_TOTAL_LEN 64 + +struct nbl_mcc_tbl { + u32 dport_act:16; + u32 dqueue_act:11; + u32 dqueue_en:1; + u32 dqueue_rsv:4; + u32 stateid_act:11; + u32 stateid_filter:1; + u32 flowid_filter:1; + 
u32 stateid_rsv:3; + u32 next_pntr:13; + u32 tail:1; + u32 vld:1; + u32 rsv:1; +}; + +union nbl_fem_ht_size_table_u { + struct nbl_fem_ht_size_table { + u32 pp0_size:5; + u32 rsv0:3; + u32 pp1_size:5; + u32 rsv1:3; + u32 pp2_size:5; + u32 rsv2:11; + } info; +#define NBL_FEM_HT_SIZE_TBL_WIDTH (sizeof(struct nbl_fem_ht_size_table)) + u8 data[NBL_FEM_HT_SIZE_TBL_WIDTH]; +}; + +#define NBL_FEM_HT_SIZE_REG (NBL_PPE_FEM_BASE + 0x0000011c) + +union nbl_fem_profile_tbl_u { + struct fem_profile_tbl { + u32 pt_cmd:1; + u32 pt_key_size:1; + u32 pt_mask_bmap0:30; + u32 pt_mask_bmap1; + u32 pt_mask_bmap2:18; + u32 pt_hash_sel0:2; + u32 pt_hash_sel1:2; + u32 pt_action0:16; + u32 pt_action0_id:6; + u32 fwd_queue:16; + u32 pt_action1_id:6; + u32 pt_action2:22; + u32 pt_action3:22; + u32 pt_action4:22; + u32 pt_action5:22; + u32 pt_action6:22; + u32 pt_action7:22; + u32 pt_act_num:4; + u32 pt_vld:1; + u32 rsv0:21; + u32 rsv1[7]; + } info; +#define NBL_FEM_PROFILE_TBL_WIDTH (sizeof(struct fem_profile_tbl)) + u8 data[NBL_FEM_PROFILE_TBL_WIDTH]; +}; + +#define NBL_FEM0_PROFILE_TABLE(t) (NBL_PPE_FEM_BASE + 0x00001000 + \ + (NBL_FEM_PROFILE_TBL_WIDTH) * (t)) + +/* ---------- REG BASE ADDR ---------- */ +#define NBL_LB_PCIEX16_TOP_BASE (0x01500000) +/* PPE modules base addr */ +#define NBL_PPE_FEM_BASE (0x00a04000) +#define NBL_PPE_IPRO_BASE (0x00b04000) +#define NBL_PPE_PP0_BASE (0x00b14000) +#define NBL_PPE_PP1_BASE (0x00b24000) +#define NBL_PPE_PP2_BASE (0x00b34000) +#define NBL_PPE_MCC_BASE (0x00b44000) +#define NBL_PPE_ACL_BASE (0x00b64000) +#define NBL_PPE_CAP_BASE (0x00e64000) +#define NBL_PPE_EPRO_BASE (0x00e74000) +#define NBL_PPE_DPRBAC_BASE (0x00904000) +#define NBL_PPE_UPRBAC_BASE (0x0000C000) +/* Interface modules base addr */ +#define NBL_INTF_HOST_PCOMPLETER_BASE (0x00f08000) +#define NBL_INTF_HOST_PADPT_BASE (0x00f4c000) +#define NBL_INTF_HOST_CTRLQ_BASE (0x00f8c000) +#define NBL_INTF_HOST_VDPA_NET_BASE (0x00f98000) +#define NBL_INTF_HOST_CMDQ_BASE (0x00fa0000) +#define NBL_INTF_HOST_MAILBOX_BASE (0x00fb0000) +#define NBL_INTF_HOST_PCIE_BASE (0X01504000) +#define NBL_INTF_HOST_PCAP_BASE (0X015a4000) +/* DP modules base addr */ +#define NBL_DP_URMUX_BASE (0x00008000) +#define NBL_DP_UPRBAC_BASE (0x0000C000) +#define NBL_DP_UPA_BASE (0x0008C000) +#define NBL_DP_USTORE_BASE (0x00104000) +#define NBL_DP_UPMEM_BASE (0x00108000) +#define NBL_DP_UBM_BASE (0x0010c000) +#define NBL_DP_UQM_BASE (0x00114000) +#define NBL_DP_USTAT_BASE (0x0011c000) +#define NBL_DP_UPED_BASE (0x0015c000) +#define NBL_DP_UCAR_BASE (0x00e84000) +#define NBL_DP_UL4S_BASE (0x00204000) +#define NBL_DP_UVN_BASE (0x00244000) +#define NBL_DP_DSCH_BASE (0x00404000) +#define NBL_DP_SHAPING_BASE (0x00504000) +#define NBL_DP_DVN_BASE (0x00514000) +#define NBL_DP_DL4S_BASE (0x00614000) +#define NBL_DP_DRMUX_BASE (0x00654000) +#define NBL_DP_DSTORE_BASE (0x00704000) +#define NBL_DP_DPMEM_BASE (0x00708000) +#define NBL_DP_DBM_BASE (0x0070c000) +#define NBL_DP_DQM_BASE (0x00714000) +#define NBL_DP_DSTAT_BASE (0x0071c000) +#define NBL_DP_DPED_BASE (0x0075c000) +#define NBL_DP_DPA_BASE (0x0085c000) +#define NBL_DP_DPRBAC_BASE (0x00904000) +#define NBL_DP_DDMUX_BASE (0x00984000) +#define NBL_DP_LB_DDP_BUF_BASE (0x00000000) +#define NBL_DP_LB_DDP_OUT_BASE (0x00000000) +#define NBL_DP_LB_DDP_DIST_BASE (0x00000000) +#define NBL_DP_LB_DDP_IN_BASE (0x00000000) +#define NBL_DP_LB_UDP_BUF_BASE (0x00000000) +#define NBL_DP_LB_UDP_OUT_BASE (0x00000000) +#define NBL_DP_LB_UDP_DIST_BASE (0x00000000) +#define NBL_DP_LB_UDP_IN_BASE (0x00000000) +#define 
NBL_DP_DL4S_BASE (0x00614000) +#define NBL_DP_UL4S_BASE (0x00204000) + +/* -------- LB -------- */ +#define NBL_LB_PF_CONFIGSPACE_SELECT_OFFSET (0x81100000) +#define NBL_LB_PF_CONFIGSPACE_SELECT_STRIDE (0x00100000) +#define NBL_LB_PF_CONFIGSPACE_BASE_ADDR (NBL_LB_PCIEX16_TOP_BASE + 0x00024000) +#define NBL_LB_PCIEX16_TOP_AHB (NBL_LB_PCIEX16_TOP_BASE + 0x00000020) + +/* -------- MAILBOX BAR2 ----- */ +#define NBL_MAILBOX_NOTIFY_ADDR (0x00000000) +#define NBL_MAILBOX_BAR_REG (0x00000000) +#define NBL_MAILBOX_QINFO_CFG_RX_TABLE_ADDR (0x10) +#define NBL_MAILBOX_QINFO_CFG_TX_TABLE_ADDR (0x20) +#define NBL_MAILBOX_QINFO_CFG_DBG_TABLE_ADDR (0x30) + +/* -------- ADMINQ BAR2 ----- */ +#define NBL_ADMINQ_NOTIFY_ADDR (0x40) +#define NBL_ADMINQ_QINFO_CFG_RX_TABLE_ADDR (0x50) +#define NBL_ADMINQ_QINFO_CFG_TX_TABLE_ADDR (0x60) +#define NBL_ADMINQ_QINFO_CFG_DBG_TABLE_ADDR (0x78) +#define NBL_ADMINQ_MSIX_MAP_TABLE_ADDR (0x80) + +/* -------- MAILBOX -------- */ + +/* mailbox BAR qinfo_cfg_dbg_table */ +struct nbl_mailbox_qinfo_cfg_dbg_tbl { + u16 rx_drop; + u16 rx_get; + u16 tx_drop; + u16 tx_out; + u16 rx_hd_ptr; + u16 tx_hd_ptr; + u16 rx_tail_ptr; + u16 tx_tail_ptr; +}; + +/* mailbox BAR qinfo_cfg_table */ +struct nbl_mailbox_qinfo_cfg_table { + u32 queue_base_addr_l; + u32 queue_base_addr_h; + u32 queue_size_bwind:4; + u32 rsv1:28; + u32 queue_rst:1; + u32 queue_en:1; + u32 dif_err:1; + u32 ptr_err:1; + u32 rsv2:28; +}; + +/* -------- ADMINQ -------- */ + +struct nbl_adminq_qinfo_map_table { + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 msix_idx:13; + u32 msix_idx_valid:1; + u32 rsv:2; +}; + +/* adminq BAR qinfo_cfg_dbg_table */ +struct nbl_adminq_qinfo_cfg_dbg_tbl { + u16 rx_hd_ptr; + u16 tx_hd_ptr; + u16 rx_tail_ptr; + u16 tx_tail_ptr; +}; + +/* -------- MAILBOX BAR0 ----- */ +/* mailbox qinfo_map_table */ +#define NBL_MAILBOX_QINFO_MAP_REG_ARR(func_id) \ + (NBL_INTF_HOST_MAILBOX_BASE + 0x00001000 + \ + (func_id) * sizeof(struct nbl_mailbox_qinfo_map_table)) + +/* MAILBOX qinfo_map_table */ +struct nbl_mailbox_qinfo_map_table { + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 msix_idx:13; + u32 msix_idx_valid:1; + u32 rsv:2; +}; + +/* -------- HOST_PCIE -------- */ +#define NBL_PCIE_HOST_K_PF_MASK_REG (NBL_INTF_HOST_PCIE_BASE + 0x00001004) +#define NBL_PCIE_HOST_K_PF_FID(pf_id) \ + (NBL_INTF_HOST_PCIE_BASE + 0x0000106C + 4 * (pf_id)) + +/* -------- HOST_PADPT -------- */ +#define NBL_HOST_PADPT_HOST_CFG_FC_PD_DN (NBL_INTF_HOST_PADPT_BASE + 0x00000160) +#define NBL_HOST_PADPT_HOST_CFG_FC_PH_DN (NBL_INTF_HOST_PADPT_BASE + 0x00000164) +#define NBL_HOST_PADPT_HOST_CFG_FC_NPH_DN (NBL_INTF_HOST_PADPT_BASE + 0x0000016C) +#define NBL_HOST_PADPT_HOST_CFG_FC_CPLH_UP (NBL_INTF_HOST_PADPT_BASE + 0x00000170) +/* host_padpt host_msix_info */ +#define NBL_PADPT_ABNORMAL_MSIX_VEC (NBL_INTF_HOST_PADPT_BASE + 0x00000200) +#define NBL_PADPT_ABNORMAL_TIMEOUT (NBL_INTF_HOST_PADPT_BASE + 0x00000204) +#define NBL_PADPT_HOST_MSIX_INFO_REG_ARR(vector_id) \ + (NBL_INTF_HOST_PADPT_BASE + 0x00010000 + (vector_id) * sizeof(struct nbl_host_msix_info)) +/* host_padpt host_vnet_qinfo */ +#define NBL_PADPT_HOST_VNET_QINFO_REG_ARR(queue_id) \ + (NBL_INTF_HOST_PADPT_BASE + 0x00008000 + (queue_id) * sizeof(struct nbl_host_vnet_qinfo)) + +struct nbl_host_msix_info { + u32 intrl_pnum:16; + u32 intrl_rate:16; + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 valid:1; + u32 msix_mask_en:1; + u32 rsv:14; +}; + +struct nbl_abnormal_msix_vector { + u32 idx:16; + u32 vld:1; + u32 rsv:15; +}; + +/* host_padpt host_vnet_qinfo */ 
+struct nbl_host_vnet_qinfo { + u32 function_id:3; + u32 device_id:5; + u32 bus_id:8; + u32 msix_idx:13; + u32 msix_idx_valid:1; + u32 log_en:1; + u32 valid:1; + u32 tph_en:1; + u32 ido_en:1; + u32 rlo_en:1; + u32 rsv0:29; +}; + +struct nbl_msix_notify { + u32 glb_msix_idx:13; + u32 rsv1:3; + u32 mask:1; + u32 rsv2:15; +}; + +/* -------- HOST_PCOMPLETER -------- */ +/* pcompleter_host pcompleter_host_virtio_qid_map_table */ +#define NBL_PCOMPLETER_QID_MAP_REG_ARR(select, i) \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00010000 + \ + (select) * NBL_QID_MAP_TABLE_ENTRIES * sizeof(struct nbl_virtio_qid_map_table) + \ + (i) * sizeof(struct nbl_virtio_qid_map_table)) +#define NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(i) \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00004000 + (i) * sizeof(struct nbl_function_msix_map)) +#define NBL_PCOMPLETER_HOST_MSIX_FID_TABLE(i) \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x0003a000 + \ + (i) * sizeof(struct nbl_pcompleter_host_msix_fid_table)) +#define NBL_PCOMPLETER_INT_STATUS (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00000000) +#define NBL_PCOMPLETER_TLP_OUT_DROP_CNT (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00002430) + +/* pcompleter_host pcompleter_host_virtio_table_ready */ +#define NBL_PCOMPLETER_QUEUE_TABLE_READY_REG \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x0000110C) +/* pcompleter_host pcompleter_host_virtio_table_select */ +#define NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG \ + (NBL_INTF_HOST_PCOMPLETER_BASE + 0x00001110) + +#define NBL_PCOMPLETER_MSIX_NOTIRY_OFFSET (0x1020) + +#define NBL_REG_WRITE_MAX_TRY_TIMES 2 + +/* pcompleter_host virtio_qid_map_table */ +struct nbl_virtio_qid_map_table { + u32 local_qid:9; + u32 notify_addr_l:23; + u32 notify_addr_h; + u32 global_qid:12; + u32 ctrlq_flag:1; + u32 rsv1:19; + u32 rsv2; +}; + +struct nbl_pcompleter_host_msix_fid_table { + u32 fid:10; + u32 vld:1; + u32 rsv:21; +}; + +struct nbl_function_msix_map { + u64 msix_map_base_addr; + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 valid:1; + u32 rsv0:15; + u32 rsv1; +}; + +struct nbl_queue_table_select { + u32 select:1; + u32 rsv:31; +}; + +struct nbl_queue_table_ready { + u32 ready:1; + u32 rsv:31; +}; + +/* IPRO ipro_queue_tbl */ +struct nbl_ipro_queue_tbl { + u32 vsi_id:10; + u32 vsi_en:1; + u32 rsv:21; +}; + +/* -------- HOST_PCAP -------- */ +#define NBL_HOST_PCAP_TX_CAP_EN (NBL_INTF_HOST_PCAP_BASE + 0x00000200) +#define NBL_HOST_PCAP_TX_CAP_STORE (NBL_INTF_HOST_PCAP_BASE + 0x00000204) +#define NBL_HOST_PCAP_TX_CAP_STALL (NBL_INTF_HOST_PCAP_BASE + 0x00000208) +#define NBL_HOST_PCAP_RX_CAP_EN (NBL_INTF_HOST_PCAP_BASE + 0x00000800) +#define NBL_HOST_PCAP_RX_CAP_STORE (NBL_INTF_HOST_PCAP_BASE + 0x00000804) +#define NBL_HOST_PCAP_RX_CAP_STALL (NBL_INTF_HOST_PCAP_BASE + 0x00000808) + +/* ---------- DPED ---------- */ +#define NBL_DPED_VLAN_OFFSET (NBL_DP_DPED_BASE + 0x000003F4) +#define NBL_DPED_DSCP_OFFSET_0 (NBL_DP_DPED_BASE + 0x000003F8) +#define NBL_DPED_DSCP_OFFSET_1 (NBL_DP_DPED_BASE + 0x000003FC) + +/* DPED dped_hw_edt_prof */ +#define NBL_DPED_HW_EDT_PROF_TABLE(i) \ + (NBL_DP_DPED_BASE + 0x00001000 + (i) * sizeof(struct ped_hw_edit_profile)) +/* DPED dped_l4_ck_cmd_40 */ + +/* DPED hw_edt_prof/ UPED hw_edt_prof */ +struct ped_hw_edit_profile { + u32 l4_len:2; +#define NBL_PED_L4_LEN_MDY_CMD_0 (0) +#define NBL_PED_L4_LEN_MDY_CMD_1 (1) +#define NBL_PED_L4_LEN_MDY_DISABLE (2) + u32 l3_len:2; +#define NBL_PED_L3_LEN_MDY_CMD_0 (0) +#define NBL_PED_L3_LEN_MDY_CMD_1 (1) +#define NBL_PED_L3_LEN_MDY_DISABLE (2) + u32 l4_ck:3; +#define NBL_PED_L4_CKSUM_CMD_0 (0) +#define NBL_PED_L4_CKSUM_CMD_1 
(1) +#define NBL_PED_L4_CKSUM_CMD_2 (2) +#define NBL_PED_L4_CKSUM_CMD_3 (3) +#define NBL_PED_L4_CKSUM_CMD_4 (4) +#define NBL_PED_L4_CKSUM_CMD_5 (5) +#define NBL_PED_L4_CKSUM_CMD_6 (6) +#define NBL_PED_L4_CKSUM_DISABLE (7) + u32 l3_ck:1; +#define NBL_PED_L3_CKSUM_ENABLE (1) +#define NBL_PED_L3_CKSUM_DISABLE (0) + u32 l4_ck_zero_free:1; +#define NBL_PED_L4_CKSUM_ZERO_FREE_ENABLE (1) +#define NBL_PED_L4_CKSUM_ZERO_FREE_DISABLE (0) + u32 rsv:23; +}; + +struct nbl_ped_hw_edit_profile_cfg { + u32 table_id; + struct ped_hw_edit_profile edit_prf; +}; + +/* ---------- UPED ---------- */ +/* UPED uped_hw_edt_prof */ +#define NBL_UPED_HW_EDT_PROF_TABLE(i) \ + (NBL_DP_UPED_BASE + 0x00001000 + (i) * sizeof(struct ped_hw_edit_profile)) + +/* --------- SHAPING --------- */ +#define NBL_SHAPING_NET_TIMMING_ADD_ADDR (NBL_DP_SHAPING_BASE + 0x00000300) +#define NBL_SHAPING_NET(i) \ + (NBL_DP_SHAPING_BASE + 0x00001800 + (i) * sizeof(struct nbl_shaping_net)) + +/* cir 1, bandwidth 1kB/s in protol environment */ +/* cir 1, bandwidth 1Mb/s */ +#define NBL_LR_LEONIS_SYS_CLK 15000.0 /* 0105tag Khz */ +#define NBL_LR_LEONIS_NET_SHAPING_CYCLE_MAX 25 +#define NBL_LR_LEONIS_NET_SHAPING_DPETH 600 +#define NBL_LR_LEONIS_NET_BUCKET_DEPTH 9600 + +#define NBL_SHAPING_DPORT_25G_RATE 0x61A8 +#define NBL_SHAPING_DPORT_HALF_25G_RATE 0x30D4 + +#define NBL_SHAPING_DPORT_100G_RATE 0x1A400 +#define NBL_SHAPING_DPORT_HALF_100G_RATE 0xD200 + +#define NBL_DSTORE_DROP_XOFF_TH 0xC8 +#define NBL_DSTORE_DROP_XON_TH 0x64 + +#define NBL_DSTORE_DROP_XOFF_TH_100G 0x1F4 +#define NBL_DSTORE_DROP_XON_TH_100G 0x12C + +#define NBL_DSTORE_DROP_XOFF_TH_BOND_MAIN 0x180 +#define NBL_DSTORE_DROP_XON_TH_BOND_MAIN 0x180 + +#define NBL_DSTORE_DROP_XOFF_TH_BOND_OTHER 0x64 +#define NBL_DSTORE_DROP_XON_TH_BOND_OTHER 0x64 + +#define NBL_DSTORE_DROP_XOFF_TH_100G_BOND_MAIN 0x2D5 +#define NBL_DSTORE_DROP_XON_TH_100G_BOND_MAIN 0x2BC + +#define NBL_DSTORE_DROP_XOFF_TH_100G_BOND_OTHER 0x145 +#define NBL_DSTORE_DROP_XON_TH_100G_BOND_OTHER 0x12C + +#define NBL_DSTORE_DISC_BP_TH (NBL_DP_DSTORE_BASE + 0x00000630) + +struct dstore_disc_bp_th { + u32 xoff_th:10; + u32 rsv1:6; + u32 xon_th:10; + u32 rsv:5; + u32 en:1; +}; + +struct nbl_shaping_net_timming_add { + u32 cycle_max:12; /* [11:0] Default:0x8 RW */ + u32 rsv1:4; /* [15:12] Default:0x0 RO */ + u32 depth:12; /* [27:16] Default:0x258 RW */ + u32 rsv:4; /* [31:28] Default:0x0 RO */ +}; + +/* DSCH dsch_vn_sha2net_map_tbl */ +struct dsch_vn_sha2net_map_tbl { + u32 vld:1; + u32 reserve:31; +}; + +/* DSCH dsch_vn_net2sha_map_tbl */ +struct dsch_vn_net2sha_map_tbl { + u32 vld:1; + u32 reserve:31; +}; + +struct dsch_psha_en { + u32 en:4; + u32 rsv:28; +}; + +/* SHAPING shaping_net */ +struct nbl_shaping_net { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +struct nbl_shaping_dport { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +struct nbl_shaping_dvn_dport { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +struct nbl_shaping_rdma_dport { + u32 valid:1; + u32 depth:19; + u32 cir:19; + u32 pir:19; + u32 cbs:21; + u32 pbs:21; + u32 rsv:28; +}; + +/* ---------- DSCH ---------- */ +/* DSCH vn_host_qid_max */ +#define NBL_DSCH_NOTIFY_BITMAP_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00003000 + (i) * BYTES_PER_DWORD) +#define NBL_DSCH_FLY_BITMAP_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00004000 + (i) * BYTES_PER_DWORD) +#define NBL_DSCH_PORT_MAP_REG_ARR(i) \ + 
(NBL_DP_DSCH_BASE + 0x00005000 + (i) * sizeof(struct nbl_port_map)) +/* DSCH dsch_vn_q2tc_cfg_tbl */ +#define NBL_DSCH_VN_Q2TC_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00010000 + (i) * sizeof(struct dsch_vn_q2tc_cfg_tbl)) +/* DSCH dsch_vn_n2g_cfg_tbl */ +#define NBL_DSCH_VN_N2G_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00060000 + (i) * sizeof(struct dsch_vn_n2g_cfg_tbl)) +/* DSCH dsch_vn_g2p_cfg_tbl */ +#define NBL_DSCH_VN_G2P_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00064000 + (i) * sizeof(struct dsch_vn_g2p_cfg_tbl)) +/* DSCH dsch_vn_tc_wgt_cfg_tbl */ +#define NBL_DSCH_VN_TC_WGT_CFG_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00068000 + (i) * sizeof(union dsch_vn_tc_wgt_cfg_tbl_u)) +/* DSCH dsch_vn_sha2net_map_tbl */ +#define NBL_DSCH_VN_SHA2NET_MAP_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00070000 + (i) * sizeof(struct dsch_vn_sha2net_map_tbl)) +/* DSCH dsch_vn_net2sha_map_tbl */ +#define NBL_DSCH_VN_NET2SHA_MAP_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00074000 + (i) * sizeof(struct dsch_vn_net2sha_map_tbl)) +/* DSCH dsch_vn_tc_q_list_tbl */ +#define NBL_DSCH_VN_TC_Q_LIST_TABLE_REG_ARR(i) \ + (NBL_DP_DSCH_BASE + 0x00040000 + (i) * sizeof(struct dsch_vn_tc_q_list_tbl)) +/* DSCH dsch maxqid */ +#define NBL_DSCH_HOST_QID_MAX (NBL_DP_DSCH_BASE + 0x00000118) +#define NBL_DSCH_VN_QUANTA_ADDR (NBL_DP_DSCH_BASE + 0x00000134) +#define NBL_DSCH_INT_STATUS (NBL_DP_DSCH_BASE + 0x00000000) +#define NBL_DSCH_RDMA_OTHER_ABN (NBL_DP_DSCH_BASE + 0x00000080) +#define NBL_DSCH_RDMA_OTHER_ABN_BIT (0x4000) +#define NBL_DSCH_RDMA_DPQM_DB_LOST (2) + +#define NBL_MAX_QUEUE_ID (0x7ff) +#define NBL_HOST_QUANTA (0x8000) +#define NBL_ECPU_QUANTA (0x1000) + +/* DSCH dsch_vn_q2tc_cfg_tbl */ +struct dsch_vn_q2tc_cfg_tbl { + u32 tcid:13; + u32 rsv:18; + u32 vld:1; +}; + +/* DSCH dsch_vn_n2g_cfg_tbl */ +struct dsch_vn_n2g_cfg_tbl { + u32 grpid:8; + u32 rsv:23; + u32 vld:1; +}; + +/* DSCH dsch_vn_tc_qlist_tbl */ +struct dsch_vn_tc_q_list_tbl { + u32 nxt:11; + u32 reserve:18; + u32 regi:1; + u32 fly:1; + u32 vld:1; +}; + +/* DSCH dsch_vn_g2p_cfg_tbl */ +struct dsch_vn_g2p_cfg_tbl { + u32 port:3; + u32 rsv:28; + u32 vld:1; +}; + +/* DSCH dsch_vn_tc_wgt_cfg_tbl */ +union dsch_vn_tc_wgt_cfg_tbl_u { + struct dsch_vn_tc_wgt_cfg_tbl { + u8 tc0_wgt; + u8 tc1_wgt; + u8 tc2_wgt; + u8 tc3_wgt; + u8 tc4_wgt; + u8 tc5_wgt; + u8 tc6_wgt; + u8 tc7_wgt; + } info; +#define NBL_DSCH_VN_TC_WGT_CFG_TBL_WIDTH (sizeof(struct dsch_vn_tc_wgt_cfg_tbl)) + u8 data[NBL_DSCH_VN_TC_WGT_CFG_TBL_WIDTH]; +}; + +struct dsch_vn_quanta { + u32 h_qua:16; + u32 e_qua:16; +}; + +/* ---------- DVN ---------- */ + +struct nbl_dvn_stat_cnt { + u32 dvn_desc_fwd_cnt:16; + u32 rsv0:16; + u32 dvn_desc_drop_cnt:16; + u32 rsv1:16; + u32 dvn_pkt_fwd_cnt:16; + u32 rsv2:16; + u32 dvn_pkt_drop_cnt:16; + u32 rsv3:16; + u32 rsv4[4]; +}; + +/* DVN dvn_queue_table */ +#define NBL_DVN_QUEUE_TABLE_ARR(i) \ + (NBL_DP_DVN_BASE + 0x00020000 + (i) * sizeof(struct dvn_queue_table)) +#define NBL_DVN_QUEUE_CXT_TABLE_ARR(i) \ + (NBL_DP_DVN_BASE + 0x00030000 + (i) * sizeof(struct dvn_queue_context)) +#define NBL_DVN_STAT_CNT(i) (NBL_DP_DVN_BASE + 0x00040000 + (i) * sizeof(struct nbl_dvn_stat_cnt)) +/* DVN dvn_queue_reset */ +#define NBL_DVN_QUEUE_RESET_REG (NBL_DP_DVN_BASE + 0x00000400) +/* DVN dvn_queue_reset_done */ +#define NBL_DVN_QUEUE_RESET_DONE_REG (NBL_DP_DVN_BASE + 0x00000404) +#define NBL_DVN_ECPU_QUEUE_NUM (NBL_DP_DVN_BASE + 0x0000041C) +#define NBL_DVN_DESCREQ_NUM_CFG (NBL_DP_DVN_BASE + 0x00000430) +#define NBL_DVN_DESC_WR_MERGE_TIMEOUT (NBL_DP_DVN_BASE 
+ 0x00000480)
+#define NBL_DVN_DIF_REQ_RD_RO_FLAG (NBL_DP_DVN_BASE + 0x0000045C)
+#define NBL_DVN_INT_STATUS (NBL_DP_DVN_BASE + 0x00000000)
+#define NBL_DVN_DESC_DIF_ERR_CNT (NBL_DP_DVN_BASE + 0x0000003C)
+#define NBL_DVN_DESC_DIF_ERR_INFO (NBL_DP_DVN_BASE + 0x00000038)
+#define NBL_DVN_PKT_DIF_ERR_INFO (NBL_DP_DVN_BASE + 0x00000030)
+#define NBL_DVN_PKT_DIF_ERR_CNT (NBL_DP_DVN_BASE + 0x00000034)
+#define NBL_DVN_ERR_QUEUE_ID_GET (NBL_DP_DVN_BASE + 0x0000040C)
+#define NBL_DVN_BACK_PRESSURE_MASK (NBL_DP_DVN_BASE + 0x00000464)
+
+#define DEFAULT_DVN_DESCREQ_NUMCFG (0x00080014)
+#define DEFAULT_DVN_100G_DESCREQ_NUMCFG (0x00080020)
+
+#define NBL_DVN_INT_PKT_DIF_ERR (4)
+#define DEFAULT_DVN_DESC_WR_MERGE_TIMEOUT_MAX (0x3FF)
+
+#define NBL_DVN_INT_DESC_DIF_ERR (5)
+
+struct nbl_dvn_descreq_num_cfg {
+	u32 avring_cfg_num:1; /* split ring descreq_num 0:8, 1:16 */
+	u32 rsv0:3;
+	u32 packed_l1_num:3; /* packed ring descreq_num 0:8, 1:12, 2:16, 3:20, 4:24, 5:26, 6:32, 7:32 */
+	u32 rsv1:25;
+};
+
+struct nbl_dvn_desc_wr_merge_timeout {
+	u32 cfg_cycle:10;
+	u32 rsv:22;
+};
+
+struct nbl_dvn_dif_req_rd_ro_flag {
+	u32 rd_desc_ro_en:1;
+	u32 rd_data_ro_en:1;
+	u32 rd_avring_ro_en:1;
+	u32 rsv:29;
+};
+
+/* DVN dvn_queue_table */
+struct dvn_queue_table {
+	u64 dvn_used_baddr;
+	u64 dvn_avail_baddr;
+	u64 dvn_queue_baddr;
+	u32 dvn_queue_size:4;
+	u32 dvn_queue_type:1;
+	u32 dvn_queue_en:1;
+	u32 dvn_extend_header_en:1;
+	u32 dvn_interleave_seg_disable:1;
+	u32 dvn_seg_disable:1;
+	u32 rsv0:23;
+	u32 rsv1:32;
+};
+
+/* DVN dvn_queue_context */
+struct dvn_queue_context {
+	u32 dvn_descrd_num:3;
+	u32 dvn_firstdescid:16;
+	u32 dvn_firstdesc:16;
+	u32 dvn_indirect_len:6;
+	u64 dvn_indirect_addr:64;
+	u32 dvn_indirect_next:5;
+	u32 dvn_l1_ring_read:16;
+	u32 dvn_avail_ring_read:16;
+	u32 dvn_ring_wrap_counter:1;
+	u32 dvn_lso_id:10;
+	u32 dvn_avail_ring_idx:16;
+	u32 dvn_used_ring_idx:16;
+	u32 dvn_indirect_left:1;
+	u32 dvn_desc_left:1;
+	u32 dvn_lso_flag:1;
+	u32 dvn_descrd_disable:1;
+	u32 dvn_queue_err:1;
+	u32 dvn_lso_drop:1;
+	u32 dvn_protected_bit:1;
+	u64 reserve;
+};
+
+/* DVN dvn_queue_reset */
+struct nbl_dvn_queue_reset {
+	u32 dvn_queue_index:11;
+	u32 vld:1;
+	u32 rsv:20;
+};
+
+/* DVN dvn_queue_reset_done */
+struct nbl_dvn_queue_reset_done {
+	u32 flag:1;
+	u32 rsv:31;
+};
+
+/* DVN dvn_desc_dif_err_info */
+struct dvn_desc_dif_err_info {
+	u32 queue_id:11;
+	u32 rsv:21;
+};
+
+struct dvn_pkt_dif_err_info {
+	u32 queue_id:11;
+	u32 rsv:21;
+};
+
+struct dvn_err_queue_id_get {
+	u32 pkt_flag:1;
+	u32 desc_flag:1;
+	u32 rsv:30;
+};
+
+struct dvn_back_pressure_mask {
+	u32 l4s_flag:1;
+	u32 dsch_flag:1;
+	u32 dstore_port0_flag:1;
+	u32 dstore_port1_flag:1;
+	u32 dstore_port2_flag:1;
+	u32 dstore_port3_flag:1;
+	u32 rsv:26;
+};
+
+/* ---------- UVN ---------- */
+/* UVN uvn_queue_table */
+#define NBL_UVN_QUEUE_TABLE_ARR(i) \
+	(NBL_DP_UVN_BASE + 0x00010000 + (i) * sizeof(struct uvn_queue_table))
+/* UVN uvn_queue_cxt */
+#define NBL_UVN_QUEUE_CXT_TABLE_ARR(i) \
+	(NBL_DP_UVN_BASE + 0x00020000 + (i) * sizeof(struct uvn_queue_cxt))
+/* UVN uvn_desc_cxt */
+#define NBL_UVN_DESC_CXT_TABLE_ARR(i) \
+	(NBL_DP_UVN_BASE + 0x00028000 + (i) * sizeof(struct uvn_desc_cxt))
+/* UVN uvn_queue_reset */
+#define NBL_UVN_QUEUE_RESET_REG (NBL_DP_UVN_BASE + 0x00000200)
+/* UVN uvn_queue_reset_done */
+#define NBL_UVN_QUEUE_RESET_DONE_REG (NBL_DP_UVN_BASE + 0x00000408)
+#define NBL_UVN_STATIS_PKT_DROP(i) (NBL_DP_UVN_BASE + 0x00038000 + (i) * sizeof(u32))
+#define NBL_UVN_INT_STATUS (NBL_DP_UVN_BASE + 0x00000000)
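+
+/* Illustrative sketch only, not part of the register map: the queue reset
+ * handshake implied by the DVN/UVN reset registers above. Software writes
+ * the queue index with vld set, then polls the done flag. The writel()/readl()
+ * accessors, the BAR pointer and the poll bound of 1000 are assumptions made
+ * for the example; the driver performs such accesses through its phy ops layer.
+ *
+ *	static int nbl_dvn_reset_queue(void __iomem *bar, u16 qid)
+ *	{
+ *		struct nbl_dvn_queue_reset rst = { .dvn_queue_index = qid, .vld = 1 };
+ *		struct nbl_dvn_queue_reset_done done = { 0 };
+ *		int retry = 1000;
+ *
+ *		writel(*(u32 *)&rst, bar + NBL_DVN_QUEUE_RESET_REG);
+ *		do {
+ *			*(u32 *)&done = readl(bar + NBL_DVN_QUEUE_RESET_DONE_REG);
+ *		} while (!done.flag && --retry);
+ *
+ *		return done.flag ? 0 : -ETIMEDOUT;
+ *	}
+ */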
+#define NBL_UVN_QUEUE_ERR_INFO (NBL_DP_UVN_BASE + 0x00000034) +#define NBL_UVN_QUEUE_ERR_CNT (NBL_DP_UVN_BASE + 0x00000038) +#define NBL_UVN_DESC_RD_WAIT (NBL_DP_UVN_BASE + 0x0000020C) +#define NBL_UVN_QUEUE_ERR_MASK (NBL_DP_UVN_BASE + 0x00000224) +#define NBL_UVN_ECPU_QUEUE_NUM (NBL_DP_UVN_BASE + 0x0000023C) +#define NBL_UVN_DESC_WR_TIMEOUT (NBL_DP_UVN_BASE + 0x00000214) +#define NBL_UVN_DESC_RD_ENTRY (NBL_DP_UVN_BASE + 0x000012D0) +#define NBL_UVN_DIF_REQ_RO_FLAG (NBL_DP_UVN_BASE + 0x00000250) +#define NBL_UVN_DESC_PREFETCH_INIT (NBL_DP_UVN_BASE + 0x00000204) +#define NBL_UVN_DESC_WR_TIMEOUT_4US (0x960) +#define NBL_UVN_DESC_PREFETCH_NUM (4) + +#define NBL_UVN_INT_QUEUE_ERR (5) + +struct uvn_dif_req_ro_flag { + u32 avail_rd:1; + u32 desc_rd:1; + u32 pkt_wr:1; + u32 desc_wr:1; + u32 rsv:28; +}; + +/* UVN uvn_queue_table */ +struct uvn_queue_table { + u64 used_baddr; + u64 avail_baddr; + u64 queue_baddr; + u32 queue_size_mask_pow:4; + u32 queue_type:1; + u32 queue_enable:1; + u32 extend_header_en:1; + u32 guest_csum_en:1; + u32 half_offload_en:1; + u32 rsv0:23; + u32 rsv1:32; +}; + +/* uvn uvn_queue_cxt */ +struct uvn_queue_cxt { + u32 queue_head:16; + u32 wrap_count:1; + u32 queue_err:1; + u32 prefetch_null_cnt:2; + u32 ntf_finish:1; + u32 spnd_flag:1; + u32 reserve0:10; + u32 avail_idx:16; + u32 avail_idx_spnd_flag:1; + u32 reserve1:15; + u32 reserve2[2]; +}; + +/* uvn uvn_queue_reset */ +struct nbl_uvn_queue_reset { + u32 index:11; + u32 rsv0:5; + u32 vld:1; + u32 rsv1:15; +}; + +/* uvn uvn_queue_reset_done */ +struct nbl_uvn_queue_reset_done { + u32 flag:1; + u32 rsv:31; +}; + +/* uvn uvn_desc_cxt */ +struct uvn_desc_cxt { + u32 cache_head:9; + u32 reserve0:7; + u32 cache_tail:9; + u32 reserve1:7; + u32 cache_pref_num_prev:9; + u32 reserve2:7; + u32 cache_pref_num_post:9; + u32 reserve3:7; + u32 cache_head_byte:30; + u32 reserve4:2; + u32 cache_tail_byte:30; + u32 reserve5:2; +}; + +struct uvn_desc_wr_timeout { + u32 num:15; + u32 mask:1; + u32 rsv:16; +}; + +struct uvn_queue_err_info { + u32 queue_id:11; + u32 type:5; + u32 rsv:16; +}; + +struct uvn_queue_err_mask { + u32 rsv0:1; + u32 buffer_len_err:1; + u32 next_err:1; + u32 indirect_err:1; + u32 split_err:1; + u32 dif_err:1; + u32 rsv1:26; +}; + +struct uvn_desc_prefetch_init { + u32 num:8; + u32 rsv1:8; + u32 sel:1; + u32 rsv:15; +}; + +/* -------- USTORE -------- */ +#define NBL_USTORE_PKT_LEN_ADDR (NBL_DP_USTORE_BASE + 0x00000108) +#define NBL_USTORE_PORT_FC_TH_REG_ARR(port_id) \ + (NBL_DP_USTORE_BASE + 0x00000134 + (port_id) * sizeof(struct nbl_ustore_port_fc_th)) + +#define NBL_USTORE_COS_FC_TH_REG_ARR(cos_id) \ + (NBL_DP_USTORE_BASE + 0x00000200 + (cos_id) * sizeof(struct nbl_ustore_cos_fc_th)) + +#define NBL_USTORE_PORT_DROP_TH_REG_ARR(port_id) \ + (NBL_DP_USTORE_BASE + 0x00000150 + (port_id) * sizeof(struct nbl_ustore_port_drop_th)) + +#define NBL_USTORE_SIGNLE_ETH_DROP_TH 0xC80 +#define NBL_USTORE_DUAL_ETH_DROP_TH 0x640 +#define NBL_USTORE_QUAD_ETH_DROP_TH 0x320 + +/* USTORE pkt_len */ +struct ustore_pkt_len { + u32 min:7; + u32 rsv:8; + u32 min_chk_en:1; + u32 max:14; + u32 rsv2:1; + u32 max_chk_len:1; +}; + +/* USTORE port_fc_th */ +struct nbl_ustore_port_fc_th { + u32 xoff_th:12; + u32 rsv1:4; + u32 xon_th:12; + u32 rsv2:2; + u32 fc_set:1; + u32 fc_en:1; +}; + +/* USTORE cos_fc_th */ +struct nbl_ustore_cos_fc_th { + u32 xoff_th:12; + u32 rsv1:4; + u32 xon_th:12; + u32 rsv2:2; + u32 fc_set:1; + u32 fc_en:1; +}; + +#define NBL_MAX_USTORE_COS_FC_TH (4080) + +/* USTORE port_drop_th */ +struct nbl_ustore_port_drop_th { + 
u32 disc_th:12; + u32 rsv:19; + u32 en:1; +}; + +/* ---------- UL4S ---------- */ +#define NBL_UL4S_SCH_PAD_ADDR (NBL_DP_UL4S_BASE + 0x000006c4) + +/* UL4S ul4s_sch_pad */ +struct ul4s_sch_pad { + u32 en:1; + u32 clr:1; + u32 rsv:30; +}; + +/* ---------- IPRO ---------- */ +/* ipro module related macros */ +#define NBL_IPRO_MODULE (0xB04000) +/* ipro queue tbl */ +#define NBL_IPRO_QUEUE_TBL(i) \ + (NBL_IPRO_MODULE + 0x00004000 + (i) * sizeof(struct nbl_ipro_queue_tbl)) +#define NBL_IPRO_UP_SPORT_TABLE(i) \ + (NBL_IPRO_MODULE + 0x00007000 + (i) * sizeof(struct nbl_ipro_upsport_tbl)) +#define NBL_IPRO_DN_SRC_PORT_TABLE(i) \ + (NBL_PPE_IPRO_BASE + 0x00008000 + (i) * sizeof(struct nbl_ipro_dn_src_port_tbl)) + +enum nbl_fwd_type_e { + NBL_FWD_TYPE_NORMAL = 0, + NBL_FWD_TYPE_CPU_ASSIGNED = 1, + NBL_FWD_TYPE_UPCALL = 2, + NBL_FWD_TYPE_SRC_MIRROR = 3, + NBL_FWD_TYPE_OTHER_MIRROR = 4, + NBL_FWD_TYPE_MNG = 5, + NBL_FWD_TYPE_GLB_LB = 6, + NBL_FWD_TYPE_DROP = 7, + NBL_FWD_TYPE_MAX = 8, +}; + +/* IPRO dn_src_port_tbl */ +struct nbl_ipro_dn_src_port_tbl { + u32 entry_vld:1; + u32 mirror_en:1; + u32 mirror_pr:2; + u32 mirror_id:4; + u32 vlan_layer_num_1:2; + u32 phy_flow:1; + u32 not_used_0:4; + u32 addr_check_en:1; + u32 smac_low:16; + u32 smac_high; + u32 dqueue:11; + u32 dqueue_en:1; + u32 dqueue_pri:2; + u32 set_dport_pri:2; + union nbl_action_data set_dport; + u32 set_dport_en:1; + u32 proc_done:1; + u32 not_used_1:6; + u32 rsv:24; +}; + +/* IPRO up sport tab */ +struct nbl_ipro_upsport_tbl { + u32 entry_vld:1; + u32 vlan_layer_num_0:2; + u32 vlan_layer_num_1:2; + u32 lag_vld:1; + u32 lag_id:2; + u32 phy_flow:1; + u32 mirror_en:1; + u32 mirror_pr:2; + u32 mirror_id:4; + u32 dqueue_pri:2; + u32 set_dport_pri:2; + u32 dqueue:11; + u32 dqueue_en:1; + union nbl_action_data set_dport; + u32 set_dport_en:1; + u32 proc_done:1; + u32 car_en:1; + u32 car_pr:2; + u32 car_id:10; + u32 rsv:1; +}; + +/* ---------- EPRO ---------- */ +#define NBL_EPRO_INT_STATUS (NBL_PPE_EPRO_BASE + 0x00000000) +#define NBL_EPRO_INT_MASK (NBL_PPE_EPRO_BASE + 0x00000004) +#define NBL_EPRO_RSS_KEY_REG (NBL_PPE_EPRO_BASE + 0x00000400) +#define NBL_EPRO_MIRROR_ACT_PRI_REG (NBL_PPE_EPRO_BASE + 0x00000234) +#define NBL_EPRO_ACTION_FILTER_TABLE(i) (NBL_PPE_EPRO_BASE + 0x00001900 + \ + sizeof(struct nbl_epro_action_filter_tbl) * (i)) +/* epro epro_ept table */ +#define NBL_EPRO_EPT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00001800 + (i) * sizeof(struct nbl_epro_ept_tbl)) +/* epro epro_vpt table */ +#define NBL_EPRO_VPT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00004000 + (i) * sizeof(struct nbl_epro_vpt_tbl)) +/* epro epro_rss_pt table */ +#define NBL_EPRO_RSS_PT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00002000 + (i) * sizeof(struct nbl_epro_rss_pt_tbl)) +/* epro epro_rss_ret table */ +#define NBL_EPRO_RSS_RET_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00008000 + (i) * sizeof(struct nbl_epro_rss_ret_tbl)) +/* epro epro_sch_cos_map table */ +#define NBL_EPRO_SCH_COS_MAP_TABLE(i, j) \ + (NBL_PPE_EPRO_BASE + 0x00000640 + ((i) * 0x20) + (j) * sizeof(struct nbl_epro_cos_map)) +/* epro epro_port_pri_mdf_en */ +#define NBL_EPRO_PORT_PRI_MDF_EN (NBL_PPE_EPRO_BASE + 0x000006E0) +/* epro epro_act_sel_en */ +#define NBL_EPRO_ACT_SEL_EN_REG \ + (NBL_PPE_EPRO_BASE + 0x00000214) +/* epro epro_kgen_ft table */ +#define NBL_EPRO_KGEN_FT_TABLE(i) \ + (NBL_PPE_EPRO_BASE + 0x00001980 + (i) * sizeof(struct nbl_epro_kgen_ft_tbl)) + +struct nbl_epro_int_mask { + u32 fatal_err:1; + u32 fifo_uflw_err:1; + u32 fifo_dflw_err:1; + u32 cif_err:1; + u32 input_err:1; + u32 cfg_err:1; + 
u32 data_ucor_err:1; + u32 bank_cor_err:1; + u32 rsv2:24; +}; + +struct nbl_epro_rss_key { + u64 key0; + u64 key1; + u64 key2; + u64 key3; + u64 key4; +}; + +struct nbl_epro_mirror_act_pri { + u32 car_idx_pri:2; + u32 dqueue_pri:2; + u32 dport_pri:2; + u32 rsv:26; +}; + +/* EPRO epro_rss_ret table */ +struct nbl_epro_rss_ret_tbl { + u32 dqueue0:11; + u32 vld0:1; + u32 rsv0:4; + u32 dqueue1:11; + u32 vld1:1; + u32 rsv1:4; +}; + +/* EPRO epro_rss_pt table */ +struct nbl_epro_rss_pt_tbl { + u32 entry_size:3; +#define NBL_EPRO_RSS_ENTRY_SIZE_16 (0) +#define NBL_EPRO_RSS_ENTRY_SIZE_32 (1) +#define NBL_EPRO_RSS_ENTRY_SIZE_64 (2) +#define NBL_EPRO_RSS_ENTRY_SIZE_128 (3) +#define NBL_EPRO_RSS_ENTRY_SIZE_256 (4) + u32 offset1:14; + u32 offset1_vld:1; + u32 offset0:14; + u32 offset0_vld:1; + u32 vld:1; + u32 rsv:30; +}; + +/*EPRO sch cos map*/ +struct nbl_epro_cos_map { + u32 pkt_cos:3; + u32 dscp:6; + u32 rsv:23; +}; + +/* EPRO epro_port_pri_mdf_en */ +struct nbl_epro_port_pri_mdf_en_cfg { + u32 eth0:1; + u32 eth1:1; + u32 eth2:1; + u32 eth3:1; + u32 loop:1; + u32 rsv:27; +}; + +enum nbl_md_action_id_e { + NBL_MD_ACTION_NONE = 0, + NBL_MD_ACTION_CLEAR_FLAG = 1, + NBL_MD_ACTION_SET_FLAG = NBL_MD_ACTION_CLEAR_FLAG, + NBL_MD_ACTION_SET_FWD = NBL_MD_ACTION_CLEAR_FLAG, + NBL_MD_ACTION_FLOWID0 = 2, + NBL_MD_ACTION_FLOWID1 = 3, + NBL_MD_ACTION_RSSIDX = 4, + NBL_MD_ACTION_PORT_CARIDX = 5, + NBL_MD_ACTION_FLOW_CARIDX = 6, + NBL_MD_ACTION_TABLE_INDEX = 7, + NBL_MD_ACTION_MIRRIDX = 8, + NBL_MD_ACTION_DPORT = 9, + NBL_MD_ACTION_SET_DPORT = NBL_MD_ACTION_DPORT, + NBL_MD_ACTION_DQUEUE = 10, + NBL_MD_ACTION_MCIDX = 13, + NBL_MD_ACTION_VNI0 = 14, + NBL_MD_ACTION_VNI1 = 15, + NBL_MD_ACTION_STAT_IDX = 16, + NBL_MD_ACTION_PRBAC_IDX = 17, + NBL_MD_ACTION_L4S_IDX = NBL_MD_ACTION_PRBAC_IDX, + NBL_MD_ACTION_DP_HASH0 = 19, + NBL_MD_ACTION_DP_HASH1 = 20, + NBL_MD_ACTION_MDF_PRI = 21, + + NBL_MD_ACTION_MDF_V4_SIP = 32, + NBL_MD_ACTION_MDF_V4_DIP = 33, + NBL_MD_ACTION_MDF_V6_SIP = 34, + NBL_MD_ACTION_MDF_V6_DIP = 35, + NBL_MD_ACTION_MDF_DPORT = 36, + NBL_MD_ACTION_MDF_SPORT = 37, + NBL_MD_ACTION_MDF_DMAC = 38, + NBL_MD_ACTION_MDF_SMAC = 39, + NBL_MD_ACTION_MDF_V4_DSCP_ECN = 40, + NBL_MD_ACTION_MDF_V6_DSCP_ECN = 41, + NBL_MD_ACTION_MDF_V4_TTL = 42, + NBL_MD_ACTION_MDF_V6_HOPLIMIT = 43, + NBL_MD_ACTION_DEL_O_VLAN = 44, + NBL_MD_ACTION_DEL_I_VLAN = 45, + NBL_MD_ACTION_MDF_O_VLAN = 46, + NBL_MD_ACTION_MDF_I_VLAN = 47, + NBL_MD_ACTION_ADD_O_VLAN = 48, + NBL_MD_ACTION_ADD_I_VLAN = 49, + NBL_MD_ACTION_ENCAP_TNL = 50, + NBL_MD_ACTION_DECAP_TNL = 51, + NBL_MD_ACTION_MDF_TNL_SPORT = 52, +}; + +/* EPRO action filter table */ +struct nbl_epro_action_filter_tbl { + u64 filter_mask; +}; + +#define NBL_EPRO_LAG_MAX (4) +#define NBL_EPRO_EPT_LAG_OFFSET (4) + +/* EPRO epr_ept table */ +struct nbl_epro_ept_tbl { + u32 cvlan:16; + u32 svlan:16; + u32 fwd:1; +#define NBL_EPRO_FWD_TYPE_DROP (0) +#define NBL_EPRO_FWD_TYPE_NORMAL (1) + u32 mirror_en:1; + u32 mirror_id:4; + u32 pop_i_vlan:1; + u32 pop_o_vlan:1; + u32 push_i_vlan:1; + u32 push_o_vlan:1; + u32 replace_i_vlan:1; + u32 replace_o_vlan:1; + u32 lag_alg_sel:2; +#define NBL_EPRO_LAG_ALG_L2_HASH (0) +#define NBL_EPRO_LAG_ALG_L23_HASH (1) +#define NBL_EPRO_LAG_ALG_LINUX_L34_HASH (2) +#define NBL_EPRO_LAG_ALG_DPDK_L34_HASH (3) + u32 lag_port_btm:4; + u32 lag_l2_protect_en:1; + u32 pfc_sch_cos_default:3; + u32 pfc_mode:1; + u32 vld:1; + u32 rsv:8; +}; + +/* EPRO epro_vpt table */ +struct nbl_epro_vpt_tbl { + u32 cvlan:16; + u32 svlan:16; + u32 fwd:1; +#define NBL_EPRO_FWD_TYPE_DROP (0) 
+#define NBL_EPRO_FWD_TYPE_NORMAL (1) + u32 mirror_en:1; + u32 mirror_id:4; + u32 car_en:1; + u32 car_id:10; + u32 pop_i_vlan:1; + u32 pop_o_vlan:1; + u32 push_i_vlan:1; + u32 push_o_vlan:1; + u32 replace_i_vlan:1; + u32 replace_o_vlan:1; + u32 rss_alg_sel:1; +#define NBL_EPRO_RSS_ALG_TOEPLITZ_HASH (0) +#define NBL_EPRO_RSS_ALG_CRC32 (1) + u32 rss_key_type_ipv4:1; +#define NBL_EPRO_RSS_KEY_TYPE_IPV4_L3 (0) +#define NBL_EPRO_RSS_KEY_TYPE_IPV4_L4 (1) + u32 rss_key_type_ipv6:1; +#define NBL_EPRO_RSS_KEY_TYPE_IPV6_L3 (0) +#define NBL_EPRO_RSS_KEY_TYPE_IPV6_L4 (1) + u32 vld:1; + u32 rsv:5; +}; + +/* UPA upa_pri_sel_conf */ +#define NBL_UPA_PRI_SEL_CONF_TABLE(id) (NBL_DP_UPA_BASE + 0x00000230 + \ + ((id) * sizeof(struct nbl_upa_pri_sel_conf))) +#define NBL_UPA_PRI_CONF_TABLE(id) (NBL_DP_UPA_BASE + 0x00002000 + \ + ((id) * sizeof(struct nbl_upa_pri_conf))) + +/* UPA pri_sel_conf */ +struct nbl_upa_pri_sel_conf { + u32 pri_sel:5; + u32 pri_default:3; + u32 pri_disen:1; + u32 rsv:23; +}; + +/* UPA pri_conf_table */ +struct nbl_upa_pri_conf { + u32 pri0:4; + u32 pri1:4; + u32 pri2:4; + u32 pri3:4; + u32 pri4:4; + u32 pri5:4; + u32 pri6:4; + u32 pri7:4; +}; + +#define NBL_DQM_RXMAC_TX_PORT_BP_EN (NBL_DP_DQM_BASE + 0x00000660) +#define NBL_DQM_RXMAC_TX_COS_BP_EN (NBL_DP_DQM_BASE + 0x00000664) +#define NBL_DQM_RXMAC_RX_PORT_BP_EN (NBL_DP_DQM_BASE + 0x00000670) +#define NBL_DQM_RX_PORT_BP_EN (NBL_DP_DQM_BASE + 0x00000610) +#define NBL_DQM_RX_COS_BP_EN (NBL_DP_DQM_BASE + 0x00000614) + +/* DQM rxmac_tx_port_bp_en */ +struct nbl_dqm_rxmac_tx_port_bp_en_cfg { + u32 eth0:1; + u32 eth1:1; + u32 eth2:1; + u32 eth3:1; + u32 rsv:28; +}; + +/* DQM rxmac_tx_cos_bp_en */ +struct nbl_dqm_rxmac_tx_cos_bp_en_cfg { + u32 eth0:8; + u32 eth1:8; + u32 eth2:8; + u32 eth3:8; +}; + +#define NBL_UQM_RX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000614) +#define NBL_UQM_TX_COS_BP_EN (NBL_DP_UQM_BASE + 0x00000604) + +#define NBL_UQM_DROP_PKT_CNT (NBL_DP_UQM_BASE + 0x000009C0) +#define NBL_UQM_DROP_PKT_SLICE_CNT (NBL_DP_UQM_BASE + 0x000009C4) +#define NBL_UQM_DROP_PKT_LEN_ADD_CNT (NBL_DP_UQM_BASE + 0x000009C8) +#define NBL_UQM_DROP_HEAD_PNTR_ADD_CNT (NBL_DP_UQM_BASE + 0x000009CC) +#define NBL_UQM_DROP_WEIGHT_ADD_CNT (NBL_DP_UQM_BASE + 0x000009D0) +#define NBL_UQM_PORT_DROP_PKT_CNT (NBL_DP_UQM_BASE + 0x000009D4) +#define NBL_UQM_PORT_DROP_PKT_SLICE_CNT (NBL_DP_UQM_BASE + 0x000009F4) +#define NBL_UQM_PORT_DROP_PKT_LEN_ADD_CNT (NBL_DP_UQM_BASE + 0x00000A14) +#define NBL_UQM_PORT_DROP_HEAD_PNTR_ADD_CNT (NBL_DP_UQM_BASE + 0x00000A34) +#define NBL_UQM_PORT_DROP_WEIGHT_ADD_CNT (NBL_DP_UQM_BASE + 0x00000A54) +#define NBL_UQM_FWD_DROP_CNT (NBL_DP_UQM_BASE + 0x00000A80) +#define NBL_UQM_DPORT_DROP_CNT (NBL_DP_UQM_BASE + 0x00000B74) + +#define NBL_UQM_PORT_DROP_DEPTH 6 +#define NBL_UQM_DPORT_DROP_DEPTH 16 + +/* UQM rx_cos_bp_en */ +struct nbl_uqm_rx_cos_bp_en_cfg { + u32 vld_l; + u32 vld_h:16; +}; + +/* UQM rx_port_bp_en */ +struct nbl_uqm_rx_port_bp_en_cfg { + u32 l4s_h:1; + u32 l4s_e:1; + u32 rdma_h:1; + u32 rdma_e:1; + u32 emp:1; + u32 loopback:1; + u32 rsv:26; +}; + +/* UQM tx_cos_bp_en */ +struct nbl_uqm_tx_cos_bp_en_cfg { + u32 vld_l; + u32 vld_h:8; +}; + +/* UQM tx_port_bp_en */ +struct nbl_uqm_tx_port_bp_en_cfg { + u32 l4s_h:1; + u32 l4s_e:1; + u32 rdma_h:1; + u32 rdma_e:1; + u32 emp:1; + u32 rsv:27; +}; + +/* dl4s */ +#define NBL_DL4S_KEY_SALT(_i) (NBL_DP_DL4S_BASE + 0x00010000 + (_i) * 64) +/* ul4s */ +#define NBL_UL4S_SYNC_TRIG (NBL_DP_UL4S_BASE + 0x00000700) +#define NBL_UL4S_SYNC_SID (NBL_DP_UL4S_BASE + 0x00000704) +#define 
NBL_UL4S_SYNC_TCP_SN (NBL_DP_UL4S_BASE + 0x00000710) +#define NBL_UL4S_SYNC_REC_NUM (NBL_DP_UL4S_BASE + 0x00000714) +#define NBL_UL4S_KEY_SALT(_i) (NBL_DP_UL4S_BASE + 0x00010000 + (_i) * 64) + +struct nbl_ktls_keymat { + u8 key[32]; + u8 salt[4]; + u32 mode:2; + u32 ena:1; + u32 rsv:29; +}; + +union nbl_ktls_sync_trig { + u32 data; + struct { + u32 rsv1 : 1; + u32 trig : 1; + u32 init_sync : 1; + u32 rsv2 : 29; + }; +}; + +/* dprbac */ +#define NBL_DPRBAC_INT_STATUS (NBL_PPE_DPRBAC_BASE + 0x00000000) +#define NBL_DPRBAC_LIFETIME_INFO (NBL_PPE_DPRBAC_BASE + 0x00000014) +#define NBL_DPRBAC_ENABLE (NBL_PPE_DPRBAC_BASE + 0x00000114) +#define NBL_DPRBAC_NAT (NBL_PPE_DPRBAC_BASE + 0x0000012C) +#define NBL_DPRBAC_SAD_LIFEDIFF (NBL_PPE_DPRBAC_BASE + 0x00000204) +#define NBL_DPRBAC_LIFETIME_DIFF (NBL_PPE_DPRBAC_BASE + 0x00000208) +#define NBL_DPRBAC_DBG_CNT_EN (NBL_PPE_DPRBAC_BASE + 0x00000680) + +#define NBL_DPRBAC_SAD_IV(_i) (NBL_PPE_DPRBAC_BASE + 0x000010000 + (_i) * 8) +#define NBL_DPRBAC_SAD_ESN(_i) (NBL_PPE_DPRBAC_BASE + 0x000020000 + (_i) * 16) +#define NBL_DPRBAC_SAD_LIFETIME(_i) (NBL_PPE_DPRBAC_BASE + 0x000030000 + (_i) * 16) +#define NBL_DPRBAC_SAD_CRYPTO_INFO(_i) (NBL_PPE_DPRBAC_BASE + 0x000040000 + (_i) * 64) +#define NBL_DPRBAC_SAD_ENCAP_INFO(_i) (NBL_PPE_DPRBAC_BASE + 0x000060000 + (_i) * 64) + +union nbl_dprbac_enable { + u32 data; + struct { + u32 prbac : 1; + u32 mf_fwd : 1; + u32 ipv4_nat_csm : 1; + u32 ipv6_nat_csm : 1; + u32 rsv : 28; + }; +}; + +union nbl_dprbac_clk_gate { + u32 data; + struct { + u32 clk_en : 1; + u32 rsv : 31; + }; +}; + +union nbl_dprbac_init_start { + u32 data; + struct { + u32 start : 1; + u32 rsv : 31; + }; +}; + +union nbl_dprbac_nat { + u32 data; + struct { + u32 rsv : 16; + u32 sport : 16; + }; +}; + +union nbl_dprbac_dbg_cnt_en { + u32 data; + struct { + u32 total : 1; + u32 in_right_bypass : 1; + u32 in_drop_bypass : 1; + u32 in_drop_prbac : 1; + u32 out_drop_prbac : 1; + u32 out_right_prbac : 1; + u32 rsv : 26; + }; +}; + +struct nbl_dprbac_sad_iv { + u64 iv; +}; + +struct nbl_dprbac_sad_esn { + u32 sn; + u32 esn; + u32 wrap_en : 1; + u32 enable : 1; + u32 rsv1 : 30; + u32 rsv2; +}; + +struct nbl_dprbac_sad_lifetime { + u32 diff; + u32 cnt; + u32 flag : 1; + u32 unit : 1; + u32 enable : 1; + u32 rsv1 : 29; + u32 rsv2; +}; + +struct nbl_dprbac_sad_crypto_info { + u32 key[8]; + u32 salt; + u32 crypto_type : 3; + u32 tunnel_mode : 1; + u32 icv_len : 2; + u32 rsv1 : 26; + u32 rsv2[6]; +}; + +struct nbl_dprbac_sad_encap_info { + u32 dip_addr[4]; + u32 sip_addr[4]; + u32 spi; + u32 dport : 16; + u32 nat_flag : 1; + u32 rsv1 : 15; + u32 rsv2[6]; +}; + +/* uprbac */ +#define NBL_UPRBAC_INT_STATUS (NBL_PPE_UPRBAC_BASE + 0x00000000) +#define NBL_UPRBAC_LIFETIME_INFO (NBL_PPE_UPRBAC_BASE + 0x00000014) +#define NBL_UPRBAC_ENABLE (NBL_PPE_UPRBAC_BASE + 0x00000114) +#define NBL_UPRBAC_NAT (NBL_PPE_UPRBAC_BASE + 0x0000012C) +#define NBL_UPRBAC_SAD_LIFEDIFF (NBL_PPE_UPRBAC_BASE + 0x00000204) +#define NBL_UPRBAC_LIFETIME_DIFF (NBL_PPE_UPRBAC_BASE + 0x00000208) +#define NBL_UPRBAC_DBG_CNT_EN (NBL_PPE_UPRBAC_BASE + 0x00000680) +#define LEONIS_UPRBAC_EM_PROFILE (NBL_PPE_UPRBAC_BASE + 0x00002000) + +#define NBL_UPRBAC_SAD_BOTTOM(_i) (NBL_PPE_UPRBAC_BASE + 0x000020000 + (_i) * 16) +#define NBL_UPRBAC_SAD_LIFETIME(_i) (NBL_PPE_UPRBAC_BASE + 0x000030000 + (_i) * 16) +#define NBL_UPRBAC_SAD_CRYPTO_INFO(_i) (NBL_PPE_UPRBAC_BASE + 0x000040000 + (_i) * 64) +#define NBL_UPRBAC_SAD_SLIDE_WINDOW(_i) (NBL_PPE_UPRBAC_BASE + 0x000060000 + (_i) * 64) + +#define NBL_UPRBAC_EM_TCAM(_i) 
(NBL_PPE_UPRBAC_BASE + 0x00002800 + (_i) * 16) +#define NBL_UPRBAC_EM_AD(_i) (NBL_PPE_UPRBAC_BASE + 0x00003000 + (_i) * 4) +#define NBL_UPRBAC_HT(_i, _j) (NBL_PPE_UPRBAC_BASE + 0x00004000 + \ + (_i) * 0x00004000 + (_j) * 16) +#define NBL_UPRBAC_KT(_i) (NBL_PPE_UPRBAC_BASE + 0x00010000 + (_i) * 32) + +union nbl_uprbac_enable { + u32 data; + struct { + u32 prbac : 1; + u32 padding_check : 1; + u32 pa_am : 1; + u32 dm_am : 1; + u32 icv_err : 1; + u32 pad_err : 1; + u32 ipv6_nat_csm0 : 1; + u32 rsv : 25; + }; +}; + +union nbl_uprbac_clk_gate { + u32 data; + struct { + u32 clk_en : 1; + u32 rsv : 31; + }; +}; + +union nbl_uprbac_init_start { + u32 data; + struct { + u32 start : 1; + u32 rsv : 31; + }; +}; + +union nbl_uprbac_nat { + u32 data; + struct { + u32 enable : 1; + u32 rsv : 15; + u32 dport : 16; + }; +}; + +union nbl_uprbac_dbg_cnt_en { + u32 data; + struct { + u32 drop_prbac : 1; + u32 right_prbac : 1; + u32 replay : 1; + u32 right_misc : 1; + u32 error_misc : 1; + u32 xoff_drop : 1; + u32 intf_cell : 1; + u32 sad_miss : 1; + u32 rsv : 24; + }; +}; + +struct nbl_uprbac_em_profile { + u32 pp_cmd_type : 1; + u32 key_size : 1; + u32 mask_btm0 : 20; + u32 mask_btm1 : 20; + u32 hash_sel0 : 2; + u32 hash_sel1 : 2; + u32 action0 : 1; + u32 act_num : 4; + u32 vld : 1; + u32 rsv : 12; +}; + +struct nbl_uprbac_sad_bottom { + u32 sn; + u32 esn; + u32 overlap : 1; + u32 enable : 1; + u32 rsv1 : 30; + u32 rsv2; +}; + +struct nbl_uprbac_sad_lifetime { + u32 diff; + u32 cnt; + u32 flag : 1; + u32 unit : 1; + u32 enable : 1; + u32 rsv1 : 29; + u32 rsv2; +}; + +struct nbl_uprbac_sad_crypto_info { + u32 key[8]; + u32 salt; + u32 crypto_type : 3; + u32 tunnel_mode : 1; + u32 icv_len : 2; + u32 rsv1 : 26; + u32 rsv2[6]; +}; + +struct nbl_uprbac_sad_slide_window { + u32 bitmap[8]; + u32 option : 2; + u32 enable : 1; + u32 rsv1 : 29; + u32 rsv2[7]; +}; + +struct nbl_uprbac_em_tcam { + u32 key_dat0; + u32 key_dat1; + u32 key_dat2 : 16; + u32 key_vld : 1; + u32 key_size : 1; + u32 rsv1 : 14; + u32 rsv2; +}; + +union nbl_uprbac_em_ad { + u32 data; + struct { + u32 sad_index : 11; + u32 rsv : 21; + }; +}; + +union nbl_uprbac_ht { + u8 data[16]; + struct { + u32 kt_index0 : 11; + u32 ht_other_index0 : 9; + u32 vld0 : 1; + + u32 kt_index1 : 11; + u32 ht_other_index1 : 9; + u32 vld1 : 1; + + u32 kt_index2 : 11; + u32 ht_other_index2 : 9; + u32 vld2 : 1; + + u32 kt_index3 : 11; + u32 ht_other_index3 : 9; + u32 vld3 : 1; + + u32 rsv1 : 12; + u32 rsv2; + }; +}; + +struct nbl_uprbac_kt { + u32 key[5]; + u32 sad_index : 11; + u32 rsv1 : 21; + u32 rsv[2]; +}; + +union nbl_ipsec_lifetime_diff { + u32 data[2]; + struct { + u32 sad_index : 11; + u32 rsv1 : 5; + u32 msb_value : 1; + u32 flag_value : 1; + u32 rsv2 : 2; + u32 msb_wen : 1; + u32 flag_wen : 1; + u32 rsv3 : 10; + u32 lifetime_diff; + }; +}; + +#pragma pack() + +/* ---------- TOP ---------- */ +/* lb_top_ctrl_crg_cfg crg_cfg */ +#define NBL_TOP_CTRL_MODULE (0x01300000) +#define NBL_TOP_CTRL_INT_STATUS (NBL_TOP_CTRL_MODULE + 0X0000) +#define NBL_TOP_CTRL_INT_MASK (NBL_TOP_CTRL_MODULE + 0X0004) +#define NBL_TOP_CTRL_TVSENSOR0 (NBL_TOP_CTRL_MODULE + 0X0254) +#define NBL_TOP_CTRL_SOFT_DEF0 (NBL_TOP_CTRL_MODULE + 0x0430) +#define NBL_TOP_CTRL_SOFT_DEF1 (NBL_TOP_CTRL_MODULE + 0x0434) +#define NBL_TOP_CTRL_SOFT_DEF2 (NBL_TOP_CTRL_MODULE + 0x0438) +#define NBL_TOP_CTRL_SOFT_DEF3 (NBL_TOP_CTRL_MODULE + 0x043c) +#define NBL_TOP_CTRL_SOFT_DEF4 (NBL_TOP_CTRL_MODULE + 0x0440) +#define NBL_TOP_CTRL_SOFT_DEF5 (NBL_TOP_CTRL_MODULE + 0x0444) +#define NBL_TOP_CTRL_VERSION_INFO 
(NBL_TOP_CTRL_MODULE + 0X0900) +#define NBL_TOP_CTRL_VERSION_DATE (NBL_TOP_CTRL_MODULE + 0X0904) + +#define NBL_FW_HEARTBEAT_PONG NBL_TOP_CTRL_SOFT_DEF1 + +/* temperature threshold1 */ +#define NBL_LEONIS_TEMP_MAX (105) +/* temperature threshold2 */ +#define NBL_LEONIS_TEMP_CRIT (115) + +#define NBL_ACT_DATA_BITS (16) + +#define NBL_CMDQ_DIF_MODE_VALUE (2) +#define NBL_CMDQ_DELAY_200US (200) +#define NBL_CMDQ_DELAY_300US (300) +#define NBL_CMDQ_RESET_MAX_WAIT (30) +#define NBL_CMD_NOTIFY_ADDR (0x00001000) +#define NBL_ACL_RD_RETRY (50000) +#define NBL_ACL_RD_WAIT_100US (100) +#define NBL_ACL_RD_WAIT_200US (200) +#define NBL_ACL_CPU_WRITE (0) +#define NBL_ACL_CPU_READ (1) + +/* the capacity of storing acl-items in all tcams */ +#define NBL_ACL_ITEM_CAP (1536) +#define NBL_ACL_KEY_WIDTH (120) +#define NBL_ACL_ITEM6_CAP (512) +#define NBL_ACL_KEY6_WIDTH (240) +#define NBL_ACL_TCAM_DEPTH (512) +#define NBL_ACL_S1_PROFILE_ID (0) +#define NBL_ACL_S2_PROFILE_ID (1) +#define NBL_ACL_TCAM_CNT (16) +#define NBL_ACL_TCAM_HALF (8) +#define NBL_ACL_TCAM_DEPTH (512) +#define NBL_ACL_TCAM_BITS (40) +#define NBL_ACL_HALF_TCAMS_BITS (320) +#define NBL_ACL_HALF_TCAMS_BYTES (40) +#define NBL_ACL_ALL_TCAMS_BITS (640) +#define NBL_ACL_ALL_TCAMS_BYTES (80) +#define NBL_ACL_ACT_RAM_CNT (4) + +#define NBL_FEM_TCAM_MAX_NUM (64) + +#define RTE_ETHER_TYPE_VLAN 0x8100 +#define RTE_ETHER_TYPE_QINQ 0x88A8 +#define RTE_ETHER_TYPE_QINQ1 0x9100 +#define RTE_ETHER_TYPE_QINQ2 0x9200 +#define NBL_BYTES_IN_REG (4) +#define NBL_CMDQ_HI_DWORD(x) ((u32)(((x) >> 32) & 0xFFFFFFFF)) +#define NBL_CMDQ_LO_DWORD(x) ((u32)(x) & 0xFFFFFFFF) +#define NBL_FEM_INIT_START_KERN (0xFE) +#define NBL_FEM_INIT_START_VALUE (0x7E) +#define NBL_PED_VSI_TYPE_ETH_BASE (1027) +#define NBL_DPED_VLAN_TYPE_PORT_NUM (1031) +#define NBL_CHAN_REG_MAX_LEN (32) +#define NBL_EPRO_RSS_KEY_32 (0x6d5a6d5a) + +#define NBL_SHAPING_GRP_TIMMING_ADD_ADDR (0x504400) +#define NBL_SHAPING_GRP_ADDR (0x504800) +#define NBL_SHAPING_GRP_DWLEN (4) +#define NBL_SHAPING_GRP_REG(r) (NBL_SHAPING_GRP_ADDR + \ + (NBL_SHAPING_GRP_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_ADDR (0x47c000) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_SHA2GRP_MAP_TBL_REG(r) (NBL_DSCH_VN_SHA2GRP_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_SHA2GRP_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_ADDR (0x480000) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_GRP2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_GRP2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_GRP2SHA_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_SHAPING_DPORT_TIMMING_ADD_ADDR (0x504504) +#define NBL_SHAPING_DPORT_ADDR (0x504700) +#define NBL_SHAPING_DPORT_DWLEN (4) +#define NBL_SHAPING_DPORT_REG(r) (NBL_SHAPING_DPORT_ADDR + \ + (NBL_SHAPING_DPORT_DWLEN * 4) * (r)) +#define NBL_SHAPING_DVN_DPORT_ADDR (0x504750) +#define NBL_SHAPING_DVN_DPORT_DWLEN (4) +#define NBL_SHAPING_DVN_DPORT_REG(r) (NBL_SHAPING_DVN_DPORT_ADDR + \ + (NBL_SHAPING_DVN_DPORT_DWLEN * 4) * (r)) +#define NBL_SHAPING_RDMA_DPORT_ADDR (0x5047a0) +#define NBL_SHAPING_RDMA_DPORT_DWLEN (4) +#define NBL_SHAPING_RDMA_DPORT_REG(r) (NBL_SHAPING_RDMA_DPORT_ADDR + \ + (NBL_SHAPING_RDMA_DPORT_DWLEN * 4) * (r)) +#define NBL_DSCH_PSHA_EN_ADDR (0x404314) +#define NBL_SHAPING_NET_ADDR (0x505800) +#define NBL_SHAPING_NET_DWLEN (4) +#define NBL_SHAPING_NET_REG(r) (NBL_SHAPING_NET_ADDR + \ + (NBL_SHAPING_NET_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_ADDR (0x474000) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_SHA2NET_MAP_TBL_REG(r) 
(NBL_DSCH_VN_SHA2NET_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_SHA2NET_MAP_TBL_DWLEN * 4) * (r)) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR (0x478000) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN (1) +#define NBL_DSCH_VN_NET2SHA_MAP_TBL_REG(r) (NBL_DSCH_VN_NET2SHA_MAP_TBL_ADDR + \ + (NBL_DSCH_VN_NET2SHA_MAP_TBL_DWLEN * 4) * (r)) + +/* Mailbox bar phy register offset begin */ +#define NBL_FW_HEARTBEAT_PING 0x84 +#define NBL_FW_BOARD_CONFIG 0x200 +#define NBL_FW_BOARD_DW3_OFFSET (NBL_FW_BOARD_CONFIG + 12) +#define NBL_FW_BOARD_DW6_OFFSET (NBL_FW_BOARD_CONFIG + 24) +#define NBL_ETH_REP_INFO_BASE (1024) + +/* Mailbox bar phy register offset end */ + +enum nbl_ethdev_repr_flag { + NBL_ETHDEV_VIRTIO_REP = 0, + NBL_ETHDEV_ETH_REP, + NBL_ETHDEV_PF_REP, + NBL_ETHDEV_INVALID_REP, +}; + +enum nbl_ped_vlan_type_e { + INNER_VLAN_TYPE, + OUTER_VLAN_TYPE, +}; + +enum nbl_ped_vlan_tpid_e { + PED_VLAN_TYPE_8100 = 0, + PED_VLAN_TYPE_88A8 = 1, + PED_VLAN_TYPE_9100 = 2, + PED_VLAN_TYPE_9200 = 3, + PED_VLAN_TYPE_NUM = 4, +}; + +enum nbl_error_code_e { + NBL_ERROR_CODE_NONE = 0, + NBL_ERROR_CODE_VLAN = 1, + NBL_ERROR_CODE_L3_HEAD_LEN = 2, + NBL_ERROR_CODE_L3_PLD_LEN = 3, + NBL_ERROR_CODE_L3_CHKSUM = 4, + NBL_ERROR_CODE_L4_CHKSUM = 5, + NBL_ERROR_CODE_TTL_HOPLIMT = 6, + NBL_ERROR_CODE_ESP_AUTH_FAIL = 7, + NBL_ERROR_CODE_ESP_BAD_FAIL = 8, + NBL_ERROR_CODE_PA_RECG_FAIL = 9, + NBL_ERROR_CODE_DN_SMAC = 10, + NBL_ERROR_CODE_TOTAL_NUM = 16, +}; + +enum nbl_epro_act_pri_e { + EPRO_ACT_MIRRORIDX_PRI = 3, + EPRO_ACT_CARIDX_PRI = 3, + EPRO_ACT_DQUEUE_PRI = 3, + EPRO_ACT_DPORT_PRI = 3, + EPRO_ACT_POP_IVLAN_PRI = 3, + EPRO_ACT_POP_OVLAN_PRI = 3, + EPRO_ACT_REPLACE_IVLAN_PRI = 3, + EPRO_ACT_REPLACE_OVLAN_PRI = 3, + EPRO_ACT_PUSH_IVLAN_PRI = 3, + EPRO_ACT_PUSH_OVLAN_PRI = 3, + EPRO_ACT_OUTER_SPORT_MDF_PRI = 3, + EPRO_ACT_PRI_MDF_PRI = 3, + EPRO_ACT_DP_HASH0_PRI = 3, + EPRO_ACT_DP_HASH1_PRI = 3, +}; + +enum nbl_epro_mirror_act_pri_e { + EPRO_MIRROR_ACT_CARIDX_PRI = 3, + EPRO_MIRROR_ACT_DQUEUE_PRI = 3, + EPRO_MIRROR_ACT_DPORT_PRI = 3, +}; + +union nbl_ped_port_vlan_type_u { + struct ped_port_vlan_type { + u32 o_vlan_sel:2; + u32 i_vlan_sel:2; + u32 rsv:28; + } __packed info; +#define NBL_PED_PORT_VLAN_TYPE_TABLE_WIDTH (sizeof(struct ped_port_vlan_type) \ + / sizeof(u32)) + u32 data[NBL_PED_PORT_VLAN_TYPE_TABLE_WIDTH]; +}; + +#define NBL_ACL_ACTION_RAM_TBL(r, i) (NBL_ACL_BASE + 0x00002000 + 0x2000 * (r) + \ + (NBL_ACL_ACTION_RAM0_DWLEN * 4 * (i))) +#define NBL_DPED_MIR_CMD_0_TABLE(t) (NBL_DPED_MIR_CMD_00_ADDR + \ + (NBL_DPED_MIR_CMD_00_DWLEN * 2 * (t))) +#define NBL_SET_DPORT(upcall_flag, nxtstg_sel, port_type, port_id) \ + ((upcall_flag) << 14 | (nxtstg_sel) << 12 | (port_type) << 10 | (port_id)) + +#define MAX_RSS_LEN (100) +#define NBL_RSS_FUNC_TYPE "rss_func_type=" +enum rss_func_type { + NBL_SYM_TOEPLITZ_INT = 0, + NBL_XOR_INT, + NBL_INVALID_FUNC_TYPE +}; + +#define NBL_XOR "xor" +#define NBL_SYM_TOEPLITZ "sym_toeplitz" +#define NBL_RSS_KEY_TYPE "rss_key_type" + +enum rss_field_type { + NBL_KEY_IPV4_L3_INT = 0, + NBL_KEY_IPV4_L4_INT, + NBL_KEY_IPV6_L3_INT, + NBL_KEY_IPV6_L4_INT, + NBL_KEY_AUTO, +}; + +#define NBL_KEY_IPV4_L3 "ipv4" +#define NBL_KEY_IPV4_L4 "ipv4_l4" +#define NBL_KEY_IPV6_L3 "ipv6" +#define NBL_KEY_IPV6_L4 "ipv6_l4" + +#define RSS_SPLIT_STR_NUM 2 +#define NBL_KEY_IP4_L4_RSS_BIT 1 +#define NBL_KEY_IP6_L4_RSS_BIT 2 + +union nbl_fw_board_cfg_dw3 { + struct board_cfg_dw3 { + u32 port_type:1; + u32 port_num:7; + u32 port_speed:2; + u32 gpio_type:3; + u32 p4_version:1; /* 0: low version; 1: high version */ + u32 rsv:18; + } 
__packed info; + u32 data; +}; + +union nbl_fw_board_cfg_dw6 { + struct board_cfg_dw6 { + u8 lane_bitmap; + u8 eth_bitmap; + u16 rsv; + } __packed info; + u32 data; +}; + +#define NBL_LEONIS_QUIRKS_OFFSET (0x00000140) + +#define NBL_LEONIS_ILLEGAL_REG_VALUE (0xDEADBEEF) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..dbdfc1035139f9138f5f779e2f49a5ec1cb174c3 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.c @@ -0,0 +1,1491 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_queue_leonis.h" +#include "nbl_resource_leonis.h" + +static struct nbl_queue_vsi_info * +nbl_res_queue_get_vsi_info(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info; + u16 func_id; + int i; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + + for (i = 0; i < NBL_VSI_MAX; i++) + if (queue_info->vsi_info[i].vsi_id == vsi_id) + return &queue_info->vsi_info[i]; + + return NULL; +} + +static int nbl_res_queue_get_net_id(u16 func_id, u16 vsi_type) +{ + int net_id; + + switch (vsi_type) { + case NBL_VSI_DATA: + case NBL_VSI_XDP: + net_id = func_id; + break; + case NBL_VSI_USER: + case NBL_VSI_CTRL: + net_id = func_id + NBL_SPECIFIC_VSI_NET_ID_OFFSET; + break; + default: + net_id = func_id; + break; + } + + return net_id; +} + +static int nbl_res_queue_setup_queue_info(struct nbl_resource_mgt *res_mgt, u16 func_id, + u16 num_queues) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_event_queue_update_data event_data; + u16 *txrx_queues, *queues_context; + u16 queue_index; + int i, ret = 0; + + nbl_info(common, NBL_DEBUG_QUEUE, + "Setup qid map, func_id:%d, num_queues:%d", func_id, num_queues); + + txrx_queues = kcalloc(num_queues, sizeof(txrx_queues[0]), GFP_ATOMIC); + if (!txrx_queues) { + ret = -ENOMEM; + goto alloc_txrx_queues_fail; + } + + queues_context = kcalloc(num_queues * 2, sizeof(txrx_queues[0]), GFP_ATOMIC); + if (!queues_context) { + ret = -ENOMEM; + goto alloc_queue_contex_fail; + } + + queue_info->num_txrx_queues = num_queues; + queue_info->txrx_queues = txrx_queues; + queue_info->queues_context = queues_context; + + for (i = 0; i < num_queues; i++) { + queue_index = find_first_zero_bit(queue_mgt->txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); + if (queue_index == NBL_MAX_TXRX_QUEUE) { + nbl_err(common, NBL_DEBUG_QUEUE, "There is no available txrx queues left\n"); + ret = -ENOSPC; + goto get_txrx_queue_fail; + } + txrx_queues[i] = queue_index; + set_bit(queue_index, queue_mgt->txrx_queue_bitmap); + } + + event_data.func_id = func_id; + event_data.ring_num = num_queues; + event_data.map = txrx_queues; + nbl_event_notify(NBL_EVENT_QUEUE_ALLOC, &event_data, NBL_COMMON_TO_VSI_ID(common), + NBL_COMMON_TO_BOARD_ID(common)); + + return 0; + +get_txrx_queue_fail: + while (--i + 1) { + queue_index = txrx_queues[i]; + clear_bit(queue_index, queue_mgt->txrx_queue_bitmap); + } + queue_info->num_txrx_queues = 0; + queue_info->txrx_queues = NULL; +alloc_queue_contex_fail: 
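+	/* queues_context allocation failed; only the already-allocated txrx id array needs freeing */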
+	kfree(txrx_queues);
+alloc_txrx_queues_fail:
+	return ret;
+}
+
+static void nbl_res_queue_remove_queue_info(struct nbl_resource_mgt *res_mgt, u16 func_id)
+{
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	u16 i;
+
+	for (i = 0; i < queue_info->num_txrx_queues; i++)
+		clear_bit(queue_info->txrx_queues[i], queue_mgt->txrx_queue_bitmap);
+
+	kfree(queue_info->txrx_queues);
+	kfree(queue_info->queues_context);
+	queue_info->txrx_queues = NULL;
+	queue_info->queues_context = NULL;
+
+	queue_info->num_txrx_queues = 0;
+}
+
+static inline u64 nbl_res_queue_qid_map_key(struct nbl_qid_map_table qid_map)
+{
+	u64 notify_addr_l = qid_map.notify_addr_l;
+	u64 notify_addr_h = qid_map.notify_addr_h;
+
+	return (notify_addr_h << NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN) | notify_addr_l;
+}
+
+static void nbl_res_queue_set_qid_map_table(struct nbl_resource_mgt *res_mgt, u16 tail)
+{
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_qid_map_param param;
+	int i;
+
+	param.qid_map = kcalloc(tail, sizeof(param.qid_map[0]), GFP_ATOMIC);
+	if (!param.qid_map)
+		return;
+
+	for (i = 0; i < tail; i++)
+		param.qid_map[i] = queue_mgt->qid_map_table[i];
+
+	param.start = 0;
+	param.len = tail;
+
+	phy_ops->set_qid_map_table(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &param,
+				   queue_mgt->qid_map_select);
+	queue_mgt->qid_map_select = !queue_mgt->qid_map_select;
+
+	if (!queue_mgt->qid_map_ready) {
+		phy_ops->set_qid_map_ready(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), true);
+		queue_mgt->qid_map_ready = true;
+	}
+
+	kfree(param.qid_map);
+}
+
+int nbl_res_queue_setup_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id,
+					     u64 notify_addr)
+{
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_qid_map_table qid_map;
+	u64 key;
+	u16 *txrx_queues = queue_info->txrx_queues;
+	u16 qid_map_entries = queue_info->num_txrx_queues, qid_map_base, tail;
+	int i;
+
+	/* Get base location */
+	queue_info->notify_addr = notify_addr;
+	key = notify_addr >> NBL_QID_MAP_NOTIFY_ADDR_SHIFT;
+
+	for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) {
+		WARN_ON(key == nbl_res_queue_qid_map_key(queue_mgt->qid_map_table[i]));
+		if (key < nbl_res_queue_qid_map_key(queue_mgt->qid_map_table[i])) {
+			qid_map_base = i;
+			break;
+		}
+	}
+	if (i == NBL_QID_MAP_TABLE_ENTRIES) {
+		nbl_err(common, NBL_DEBUG_QUEUE, "No valid qid map key for func %d", func_id);
+		return -ENOSPC;
+	}
+
+	/* Calc tail: we will set the qid_map from 0 to tail.
+	 * We have to make sure that this range (0, tail) covers all the changes, which requires
+	 * considering both tables. Therefore, it is necessary to store each table's tail, and
+	 * always use the larger one between this table's tail and the added tail.
+	 *
+	 * The reason can be illustrated in the following example:
+	 * Step 1: del some entries, which happens on table 1, and each table could be
+	 *         Table 0: 0 - 31 used
+	 *         Table 1: 0 - 15 used
+	 *         SW     : queue_mgt->total_qid_map_entries = 16
+	 * Step 2: add 2 entries, which happens on table 0; if we use 16 + 2 as the tail, then
+	 *         Table 0: 0 - 17 correctly added, 18 - 31 garbage data
+	 *         Table 1: 0 - 15 used
+	 *         SW     : queue_mgt->total_qid_map_entries = 18
+	 * This is definitely wrong: it should use 32, table 0's original tail.
+	 */
+	queue_mgt->total_qid_map_entries += qid_map_entries;
+	tail = max(queue_mgt->total_qid_map_entries,
+		   queue_mgt->qid_map_tail[queue_mgt->qid_map_select]);
+	queue_mgt->qid_map_tail[queue_mgt->qid_map_select] = queue_mgt->total_qid_map_entries;
+
+	/* Update qid map */
+	for (i = NBL_QID_MAP_TABLE_ENTRIES - qid_map_entries; i > qid_map_base; i--)
+		queue_mgt->qid_map_table[i - 1 + qid_map_entries] = queue_mgt->qid_map_table[i - 1];
+
+	for (i = 0; i < queue_info->num_txrx_queues; i++) {
+		qid_map.local_qid = 2 * i + 1;
+		qid_map.notify_addr_l = key;
+		qid_map.notify_addr_h = key >> NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN;
+		qid_map.global_qid = txrx_queues[i];
+		qid_map.ctrlq_flag = 0;
+		queue_mgt->qid_map_table[qid_map_base + i] = qid_map;
+	}
+
+	nbl_res_queue_set_qid_map_table(res_mgt, tail);
+
+	return 0;
+}
+
+void nbl_res_queue_remove_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id)
+{
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_qid_map_table qid_map;
+	u64 key;
+	u16 qid_map_entries = queue_info->num_txrx_queues, qid_map_base, tail;
+	int i;
+
+	/* Get base location */
+	key = queue_info->notify_addr >> NBL_QID_MAP_NOTIFY_ADDR_SHIFT;
+
+	for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) {
+		if (key == nbl_res_queue_qid_map_key(queue_mgt->qid_map_table[i])) {
+			qid_map_base = i;
+			break;
+		}
+	}
+	if (i == NBL_QID_MAP_TABLE_ENTRIES) {
+		nbl_err(common, NBL_DEBUG_QUEUE, "No valid qid map key for func %d", func_id);
+		return;
+	}
+
+	/* Calc tail: we will set the qid_map from 0 to tail.
+	 * We have to make sure that this range (0, tail) covers all the changes, which requires
+	 * considering both tables. Therefore, it is necessary to store each table's tail, and
+	 * always use the larger one between this table's tail and the driver-stored tail.
+ * + * The reason can be illustrated in the following example: + * Step 1: del some entries, which happens on table 1, and each table could be + * Table 0: 0 - 31 used + * Table 1: 0 - 15 used + * SW : queue_mgt->total_qid_map_entries = 16 + * Step 2: del 2 entries, which happens on table 0, if we use 16 as the tail, then + * Table 0: 0 - 13 correct, 14 - 31 garbage data + * Table 1: 0 - 15 used + * SW : queue_mgt->total_qid_map_entries = 14 + * And this is definitely wrong, it should use 32, table 0's original tail + */ + tail = max(queue_mgt->total_qid_map_entries, + queue_mgt->qid_map_tail[queue_mgt->qid_map_select]); + queue_mgt->total_qid_map_entries -= qid_map_entries; + queue_mgt->qid_map_tail[queue_mgt->qid_map_select] = queue_mgt->total_qid_map_entries; + + /* Update qid map */ + memset(&qid_map, U8_MAX, sizeof(qid_map)); + + for (i = qid_map_base; i < NBL_QID_MAP_TABLE_ENTRIES - qid_map_entries; i++) + queue_mgt->qid_map_table[i] = queue_mgt->qid_map_table[i + qid_map_entries]; + for (; i < NBL_QID_MAP_TABLE_ENTRIES; i++) + queue_mgt->qid_map_table[i] = qid_map; + + nbl_res_queue_set_qid_map_table(res_mgt, tail); +} + +static int nbl_res_queue_get_rss_ret_base(struct nbl_resource_mgt *res_mgt, u16 count, u16 *result) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + u16 index, i, j, k; + int success = 1; + int ret = -EFAULT; + + for (i = 0; i < NBL_EPRO_RSS_RET_TBL_DEPTH;) { + index = find_next_zero_bit(queue_mgt->rss_ret_bitmap, + NBL_EPRO_RSS_RET_TBL_DEPTH, i); + if (index == NBL_EPRO_RSS_RET_TBL_DEPTH) { + nbl_err(common, NBL_DEBUG_QUEUE, "There is no available rss ret left"); + break; + } + + success = 1; + for (j = index + 1; j < (index + count); j++) { + if (j >= NBL_EPRO_RSS_RET_TBL_DEPTH) { + success = 0; + break; + } + + if (test_bit(j, queue_mgt->rss_ret_bitmap)) { + success = 0; + break; + } + } + if (success) { + for (k = index; k < (index + count); k++) + set_bit(k, queue_mgt->rss_ret_bitmap); + *result = index; + ret = 0; + break; + } + i = j; + } + + return ret; +} + +static int nbl_res_queue_setup_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_info *queue_info = NULL; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id; + int ret = 0, i; + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + queue_info = &queue_mgt->queue_info[func_id]; + vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id); + if (!vsi_info) + return -ENOENT; + + /* config ipro queue tbl */ + for (i = vsi_info->queue_offset; + i < vsi_info->queue_offset + vsi_info->queue_num && + i < queue_info->num_txrx_queues; i++) { + ret = phy_ops->cfg_ipro_queue_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], vsi_id, 1); + if (ret) { + while (--i + 1) + phy_ops->cfg_ipro_queue_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + queue_info->txrx_queues[i], 0, 0); + return ret; + } + } + + return 0; +} + +static void nbl_res_queue_remove_q2vsi(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_queue_info *queue_info = NULL; + struct nbl_queue_vsi_info *vsi_info = NULL; + u16 func_id; + int i; + + 
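+	/* Mirror of nbl_res_queue_setup_q2vsi: clear each queue's ipro queue-to-vsi mapping */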
+	func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	queue_info = &queue_mgt->queue_info[func_id];
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return;
+
+	/* config ipro queue tbl */
+	for (i = vsi_info->queue_offset;
+	     i < vsi_info->queue_offset + vsi_info->queue_num && i < queue_info->num_txrx_queues;
+	     i++)
+		phy_ops->cfg_ipro_queue_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					    queue_info->txrx_queues[i], 0, 0);
+}
+
+static int nbl_res_queue_setup_rss(void *priv, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+	u16 rss_entry_size, count;
+	int ret = 0;
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return -ENOENT;
+
+	rss_entry_size = (vsi_info->queue_num + NBL_EPRO_RSS_ENTRY_SIZE_UNIT - 1)
+			 / NBL_EPRO_RSS_ENTRY_SIZE_UNIT;
+	rss_entry_size = ilog2(roundup_pow_of_two(rss_entry_size));
+	count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size;
+
+	ret = nbl_res_queue_get_rss_ret_base(res_mgt, count, &vsi_info->rss_ret_base);
+	if (ret)
+		return -ENOSPC;
+
+	vsi_info->rss_entry_size = rss_entry_size;
+	vsi_info->rss_vld = true;
+
+	return 0;
+}
+
+static void nbl_res_queue_remove_rss(void *priv, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+	u16 rss_ret_base, rss_entry_size, count;
+	int i;
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return;
+
+	if (!vsi_info->rss_vld)
+		return;
+
+	rss_ret_base = vsi_info->rss_ret_base;
+	rss_entry_size = vsi_info->rss_entry_size;
+	count = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << rss_entry_size;
+
+	for (i = rss_ret_base; i < (rss_ret_base + count); i++)
+		clear_bit(i, queue_mgt->rss_ret_bitmap);
+
+	vsi_info->rss_vld = false;
+}
+
+static void nbl_res_queue_setup_queue_cfg(struct nbl_queue_mgt *queue_mgt,
+					  struct nbl_queue_cfg_param *cfg_param,
+					  struct nbl_txrx_queue_param *queue_param,
+					  bool is_tx, u16 func_id)
+{
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+
+	cfg_param->desc = queue_param->dma;
+	cfg_param->size = queue_param->desc_num;
+	cfg_param->global_vector = queue_param->global_vector_id;
+	cfg_param->global_queue_id = queue_info->txrx_queues[queue_param->local_queue_id];
+
+	cfg_param->avail = queue_param->avail;
+	cfg_param->used = queue_param->used;
+	cfg_param->extend_header = queue_param->extend_header;
+	cfg_param->split = queue_param->split;
+	cfg_param->last_avail_idx = queue_param->cxt;
+
+	cfg_param->intr_en = queue_param->intr_en;
+	cfg_param->intr_mask = queue_param->intr_mask;
+
+	cfg_param->tx = is_tx;
+	cfg_param->rxcsum = queue_param->rxcsum;
+	cfg_param->half_offload_en = queue_param->half_offload_en;
+}
+
+static void nbl_res_queue_setup_hw_dq(struct nbl_resource_mgt *res_mgt,
+				      struct nbl_queue_cfg_param *queue_cfg, u16 func_id)
+{
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_vnet_queue_info_param param = {0};
+	u16 global_queue_id = queue_cfg->global_queue_id;
+	u8 bus, dev, func;
+
+	nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func);
+	queue_info->split = queue_cfg->split;
+	queue_info->queue_size = queue_cfg->size;
+
+	param.function_id = func;
+	param.device_id = dev;
+	param.bus_id = bus;
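+	/* Mark the vnet queue entry valid for the BDF recorded above */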
+	param.valid = 1;
+
+	if (queue_cfg->intr_en) {
+		param.msix_idx = queue_cfg->global_vector;
+		param.msix_idx_valid = 1;
+	}
+
+	if (queue_cfg->tx) {
+		phy_ops->set_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &param,
+					     NBL_PAIR_ID_GET_TX(global_queue_id));
+		phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id);
+		if (!queue_cfg->extend_header)
+			phy_ops->restore_dvn_context(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						     global_queue_id, queue_cfg->split,
+						     queue_cfg->last_avail_idx);
+		phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				      queue_cfg, global_queue_id);
+
+	} else {
+		phy_ops->set_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &param,
+					     NBL_PAIR_ID_GET_RX(global_queue_id));
+		phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue_id);
+		if (!queue_cfg->extend_header)
+			phy_ops->restore_uvn_context(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						     global_queue_id, queue_cfg->split,
+						     queue_cfg->last_avail_idx);
+		phy_ops->cfg_rx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_cfg,
+				      global_queue_id);
+	}
+}
+
+static void nbl_res_queue_remove_all_hw_dq(struct nbl_resource_mgt *res_mgt, u16 func_id,
+					   struct nbl_queue_vsi_info *vsi_info)
+{
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	u16 start = vsi_info->queue_offset, end = vsi_info->queue_offset + vsi_info->queue_num;
+	u16 global_queue;
+	int i;
+
+	for (i = start; i < end; i++) {
+		global_queue = queue_info->txrx_queues[i];
+
+		phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+		phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+	}
+
+	for (i = start; i < end; i++) {
+		global_queue = queue_info->txrx_queues[i];
+
+		phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+		phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+	}
+
+	for (i = start; i < end; i++) {
+		global_queue = queue_info->txrx_queues[i];
+		queue_info->queues_context[NBL_PAIR_ID_GET_RX(i)] =
+			phy_ops->save_uvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					      global_queue, queue_info->split,
+					      queue_info->queue_size);
+		queue_info->queues_context[NBL_PAIR_ID_GET_TX(i)] =
+			phy_ops->save_dvn_ctx(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					      global_queue, queue_info->split);
+	}
+
+	for (i = start; i < end; i++) {
+		global_queue = queue_info->txrx_queues[i];
+		phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+		phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+	}
+
+	for (i = start; i < end; i++) {
+		global_queue = queue_info->txrx_queues[i];
+		phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					       NBL_PAIR_ID_GET_RX(global_queue));
+		phy_ops->clear_vnet_queue_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					       NBL_PAIR_ID_GET_TX(global_queue));
+	}
+}
+
+int nbl_res_queue_init_qid_map_table(struct nbl_resource_mgt *res_mgt,
+				     struct nbl_queue_mgt *queue_mgt,
+				     struct nbl_phy_ops *phy_ops)
+{
+	struct nbl_qid_map_table invalid_qid_map;
+	u16 i;
+
+	queue_mgt->qid_map_ready = 0;
+	queue_mgt->qid_map_select = NBL_MASTER_QID_MAP_TABLE;
+
+	memset(&invalid_qid_map, 0, sizeof(invalid_qid_map));
+	invalid_qid_map.local_qid = 0x1FF;
+	invalid_qid_map.notify_addr_l = 0x7FFFFF;
+	invalid_qid_map.notify_addr_h = 0xFFFFFFFF;
+	invalid_qid_map.global_qid = 0xFFF;
+	invalid_qid_map.ctrlq_flag = 0x1;
+
+	for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++)
+		queue_mgt->qid_map_table[i] = 
invalid_qid_map; + + phy_ops->init_qid_map_table(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + return 0; +} + +static int nbl_res_queue_init_epro_rss_key(struct nbl_resource_mgt *res_mgt, + struct nbl_phy_ops *phy_ops) +{ + int ret = 0; + + ret = phy_ops->init_epro_rss_key(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + return ret; +} + +static int nbl_res_queue_init_epro_vpt_table(struct nbl_resource_mgt *res_mgt, u16 func_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + int pfid, vfid; + u16 vsi_id, vf_vsi_id; + u16 i; + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + + if (sriov_info->bdf != 0) { + /* init pf vsi */ + for (i = 0; i < NBL_VSI_MAX; i++) { + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, i); + phy_ops->init_epro_vpt_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + } + + for (vfid = 0; vfid < sriov_info->num_vfs; vfid++) { + vf_vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + if (vf_vsi_id == 0xFFFF) + continue; + + phy_ops->init_epro_vpt_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vf_vsi_id); + } + } + + return 0; +} + +static int nbl_res_queue_init_ipro_dn_sport_tbl(struct nbl_resource_mgt *res_mgt, + u16 func_id, u16 bmode, bool binit) + +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + int pfid, vfid; + u16 eth_id, vsi_id, vf_vsi_id; + int i; + + vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA); + nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + + if (sriov_info->bdf != 0) { + eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); + + for (i = 0; i < NBL_VSI_MAX; i++) + phy_ops->cfg_ipro_dn_sport_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vsi_id + i, eth_id, bmode, binit); + + for (vfid = 0; vfid < sriov_info->num_vfs; vfid++) { + vf_vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + if (vf_vsi_id == 0xFFFF) + continue; + + phy_ops->cfg_ipro_dn_sport_tbl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + vf_vsi_id, eth_id, bmode, binit); + } + } + + return 0; +} + +static int nbl_res_queue_set_bridge_mode(void *priv, u16 func_id, u16 bmode) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_queue_init_ipro_dn_sport_tbl(res_mgt, func_id, bmode, false); +} + +static int nbl_res_queue_init_rss(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_mgt *queue_mgt, + struct nbl_phy_ops *phy_ops) +{ + return nbl_res_queue_init_epro_rss_key(res_mgt, phy_ops); +} + +static int nbl_res_queue_alloc_txrx_queues(void *priv, u16 vsi_id, u16 queue_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u64 notify_addr; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + int ret = 0; + + notify_addr = nbl_res_get_func_bar_base_addr(res_mgt, func_id); + + ret = nbl_res_queue_setup_queue_info(res_mgt, func_id, queue_num); + if (ret) + goto setup_queue_info_fail; + + ret = nbl_res_queue_setup_qid_map_table_leonis(res_mgt, func_id, notify_addr); + if (ret) + goto setup_qid_map_fail; + + return 0; + +setup_qid_map_fail: + nbl_res_queue_remove_queue_info(res_mgt, func_id); +setup_queue_info_fail: + return ret; +} + +static void nbl_res_queue_free_txrx_queues(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = 
nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	struct nbl_event_queue_update_data event_data;
+
+	nbl_res_queue_remove_qid_map_table_leonis(res_mgt, func_id);
+	nbl_res_queue_remove_queue_info(res_mgt, func_id);
+	event_data.func_id = func_id;
+	event_data.ring_num = 0;
+	event_data.map = NULL;
+	nbl_event_notify(NBL_EVENT_QUEUE_ALLOC, &event_data, NBL_COMMON_TO_VSI_ID(res_mgt->common),
+			 NBL_COMMON_TO_BOARD_ID(res_mgt->common));
+}
+
+static int nbl_res_queue_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_cfg_param cfg_param = {0};
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, param->vsi_id);
+
+	nbl_res_queue_setup_queue_cfg(NBL_RES_MGT_TO_QUEUE_MGT(res_mgt),
+				      &cfg_param, param, is_tx, func_id);
+	nbl_res_queue_setup_hw_dq(res_mgt, &cfg_param, func_id);
+	return 0;
+}
+
+static void nbl_res_queue_remove_all_queues(void *priv, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return;
+
+	nbl_res_queue_remove_all_hw_dq(res_mgt, func_id, vsi_info);
+}
+
+static int nbl_res_queue_register_vsi2q(void *priv, u16 vsi_index, u16 vsi_id,
+					u16 queue_offset, u16 queue_num)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = NULL;
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+	u16 func_id;
+
+	func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	queue_info = &queue_mgt->queue_info[func_id];
+	vsi_info = &queue_info->vsi_info[vsi_index];
+
+	memset(vsi_info, 0, sizeof(*vsi_info));
+	vsi_info->vld = 1;
+	vsi_info->vsi_index = vsi_index;
+	vsi_info->vsi_id = vsi_id;
+	vsi_info->queue_offset = queue_offset;
+	vsi_info->queue_num = queue_num;
+	vsi_info->net_id = nbl_res_queue_get_net_id(func_id, vsi_info->vsi_index);
+
+	return 0;
+}
+
+static void nbl_res_queue_update_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id, bool add)
+{
+	if (net_id >= NBL_MAX_NET_ID)
+		return;
+
+	if (add) {
+		queue_mgt->net_id_ref_vsinum[net_id]++;
+	} else {
+		/* The probe path calls clear_queue first, so only decrement a non-zero
+		 * refcount, to support disabling dsch more than once.
+		 */
+		if (queue_mgt->net_id_ref_vsinum[net_id])
+			queue_mgt->net_id_ref_vsinum[net_id]--;
+	}
+}
+
+static u16 nbl_res_queue_get_netid_refnum(struct nbl_queue_mgt *queue_mgt, u16 net_id)
+{
+	if (net_id >= NBL_MAX_NET_ID)
+		return 0;
+
+	return queue_mgt->net_id_ref_vsinum[net_id];
+}
+
+static int nbl_res_queue_cfg_dsch(void *priv, u16 vsi_id, bool vld)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_vsi_info *vsi_info;
+	u16 group_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); /* group_id is the same as eth_id */
+	u16 start = 0, end = 0;
+	int i, ret = 0;
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return -ENOENT;
+
+	start = vsi_info->queue_offset;
+	end = vsi_info->queue_num + vsi_info->queue_offset;
+
+	/* When setting up, g2p -> n2g -> q2tc; when down, q2tc -> n2g -> g2p */
+	if (!vld) {
+		phy_ops->deactive_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id);
+		for (i = start; i < end; i++)
+			phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						queue_info->txrx_queues[i], vsi_info->net_id, vld);
+		nbl_res_queue_update_netid_refnum(queue_mgt, vsi_info->net_id, false);
+	}
+
+	if (!nbl_res_queue_get_netid_refnum(queue_mgt, vsi_info->net_id)) {
+		ret = phy_ops->cfg_dsch_net_to_group(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						     vsi_info->net_id, group_id, vld);
+		if (ret)
+			return ret;
+	}
+
+	if (vld) {
+		for (i = start; i < end; i++)
+			phy_ops->cfg_q2tc_netid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						queue_info->txrx_queues[i], vsi_info->net_id, vld);
+		phy_ops->active_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id);
+		nbl_res_queue_update_netid_refnum(queue_mgt, vsi_info->net_id, true);
+	}
+
+	return 0;
+}
+
+static int nbl_res_queue_setup_cqs(void *priv, u16 vsi_id, u16 real_qps)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info;
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+	u16 func_id;
+
+	func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	queue_info = &queue_mgt->queue_info[func_id];
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return -ENOENT;
+
+	if (real_qps == vsi_info->curr_qps)
+		return 0;
+
+	if (real_qps)
+		phy_ops->cfg_epro_rss_ret(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					  vsi_info->rss_ret_base,
+					  vsi_info->rss_entry_size, real_qps,
+					  queue_info->txrx_queues + vsi_info->queue_offset);
+
+	if (!vsi_info->curr_qps)
+		phy_ops->set_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id,
+					 vsi_info->rss_ret_base, vsi_info->rss_entry_size);
+
+	vsi_info->curr_qps = real_qps;
+	vsi_info->curr_qps_static = real_qps;
+	return 0;
+}
+
+static void nbl_res_queue_remove_cqs(void *priv, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return;
+
+	phy_ops->clear_epro_rss_pt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id);
+
+	vsi_info->curr_qps = 0;
+}
+
+static int nbl_res_queue_init_switch(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	int i;
+
+	for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET)
+		phy_ops->setup_queue_switch(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), i);
+
+	return 0;
+}
+
+static int nbl_res_queue_init(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_mgt *queue_mgt;
+	struct nbl_phy_ops *phy_ops;
+	int i, ret = 0;
+
+	if (!res_mgt)
+		return -EINVAL;
+
+	queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	ret = nbl_res_queue_init_qid_map_table(res_mgt, queue_mgt, phy_ops);
+	if (ret)
+		goto init_queue_fail;
+
+	ret = nbl_res_queue_init_rss(res_mgt, queue_mgt, phy_ops);
+	if (ret)
+		goto init_queue_fail;
+
+	ret = nbl_res_queue_init_switch(res_mgt);
+	if (ret)
+		goto init_queue_fail;
+
+	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) {
+		nbl_res_queue_init_epro_vpt_table(res_mgt, i);
+		nbl_res_queue_init_ipro_dn_sport_tbl(res_mgt, i, BRIDGE_MODE_VEB, true);
+	}
+	phy_ops->init_pfc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), NBL_MAX_ETHERNET);
+
+	return 0;
+
+init_queue_fail:
+	return ret;
+}
+
+static int nbl_res_queue_get_queue_err_stats(void *priv, u16 func_id, u8 queue_id,
+					     struct nbl_queue_err_stats *queue_err_stats,
+					     bool is_tx)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_phy_ops *phy_ops;
+	u16 global_queue_id;
+
+	if (queue_id >= queue_info->num_txrx_queues)
+		return -EINVAL;
+
+	phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	global_queue_id = queue_info->txrx_queues[queue_id];
+
+	if (is_tx)
+		phy_ops->get_tx_queue_err_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						global_queue_id, queue_err_stats);
+	else
+		phy_ops->get_rx_queue_err_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						global_queue_id, queue_err_stats);
+
+	return 0;
+}
+
+static int nbl_res_queue_cfg_qdisc_mqprio(void *priv, struct nbl_tc_qidsc_param *param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info;
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+	u64 total_tx_rate = 0, max_rate = 0, max_tc_rate = 0;
+	u16 func_id, curr_qps = 0, queue_id = 0;
+	u8 *weight;
+	bool is_active = false;
+	int i, j, gravity;
+
+	func_id = nbl_res_vsi_id_to_func_id(res_mgt, param->vsi_id);
+	queue_info = &queue_mgt->queue_info[func_id];
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, param->vsi_id);
+
+	switch (res_info->board_info.eth_speed) {
+	case NBL_FW_PORT_SPEED_100G:
+		max_rate = NBL_RATE_MBPS_100G;
+		break;
+	case NBL_FW_PORT_SPEED_25G:
+		max_rate = NBL_RATE_MBPS_25G;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	for (i = 0; i < param->num_tc; i++)
+		total_tx_rate += param->info[i].max_tx_rate;
+
+	if (total_tx_rate > max_rate) {
+		nbl_err(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE,
+			"Invalid total_tx_rate: %llu mbps, should be within %llu mbps",
+			total_tx_rate, max_rate);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < vsi_info->curr_qps; i++) {
+		queue_id = queue_info->txrx_queues[i + vsi_info->queue_offset];
+		phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_id);
+		is_active |= phy_ops->check_q2tc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), queue_id);
+	}
+
+	/* Config tc */
+	for (i = 0; i < param->num_tc; i++)
+		for (j = 0; j < param->info[i].count; j++) {
+			phy_ops->cfg_q2tc_tcid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					       queue_info->txrx_queues[curr_qps +
+					       vsi_info->queue_offset], i);
+			curr_qps++;
+		}
+
+	for (i = curr_qps; i < param->origin_qps; i++)
+		phy_ops->cfg_q2tc_tcid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				       queue_info->txrx_queues[i + vsi_info->queue_offset], 0);
+
+	/* Config weight */
+	weight = kcalloc(param->num_tc, sizeof(*weight), GFP_KERNEL);
+	if (!weight)
+		return -ENOMEM;
+
+	for (i = 0; i < param->num_tc; i++)
+		if (param->info[i].max_tx_rate > max_tc_rate)
+			max_tc_rate = param->info[i].max_tx_rate;
+
+	gravity = max_tc_rate / NBL_SHAPING_WGT_MAX + 1;
+
+	for (i = 0; i < param->num_tc; i++)
+		weight[i] = param->info[i].max_tx_rate / gravity;
+	phy_ops->set_tc_wgt(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id,
+			    weight, param->num_tc);
+
+	/* Config shaping */
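+	/* Shaping is enabled only when param->enable is set and total_tx_rate is non-zero */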
+	phy_ops->set_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, total_tx_rate,
+			     param->enable && total_tx_rate, is_active);
+
+	kfree(weight);
+	return 0;
+}
+
+static void nbl_res_queue_get_rxfh_indir_size(void *priv, u16 vsi_id, u32 *rxfh_indir_size)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return;
+
+	*rxfh_indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size;
+}
+
+static void nbl_res_queue_get_rxfh_indir(void *priv, u16 vsi_id, u32 *indir)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+	int i, j;
+	u32 rxfh_indir_size;
+	u16 queue_num;
+
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+	if (!vsi_info)
+		return;
+
+	queue_num = vsi_info->curr_qps_static ? vsi_info->curr_qps_static : vsi_info->queue_num;
+	rxfh_indir_size = NBL_EPRO_RSS_ENTRY_SIZE_UNIT << vsi_info->rss_entry_size;
+
+	for (i = 0, j = 0; i < rxfh_indir_size; i++) {
+		indir[i] = j;
+		j++;
+		if (j == queue_num)
+			j = 0;
+	}
+}
+
+static void nbl_res_queue_get_rxfh_rss_key_size(void *priv, u32 *rxfh_rss_key_size)
+{
+	*rxfh_rss_key_size = NBL_EPRO_RSS_SK_SIZE;
+}
+
+static void nbl_res_rss_key_reverse_order(u8 *key)
+{
+	u8 temp;
+	int i;
+
+	for (i = 0; i < (NBL_EPRO_RSS_PER_KEY_SIZE / 2); i++) {
+		temp = key[i];
+		key[i] = key[NBL_EPRO_RSS_PER_KEY_SIZE - 1 - i];
+		key[NBL_EPRO_RSS_PER_KEY_SIZE - 1 - i] = temp;
+	}
+}
+
+static void nbl_res_queue_get_rss_key(void *priv, u8 *rss_key)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	int i;
+
+	phy_ops->read_rss_key(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), rss_key);
+
+	for (i = 0; i < NBL_EPRO_RSS_KEY_NUM; i++)
+		nbl_res_rss_key_reverse_order(rss_key + i * NBL_EPRO_RSS_PER_KEY_SIZE);
+}
+
+static void nbl_res_queue_get_rss_alg_sel(void *priv, u8 *alg_sel, u8 eth_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->get_rss_alg_sel(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, alg_sel);
+}
+
+static void nbl_res_queue_clear_queues(void *priv, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_tc_qidsc_param param;
+
+	nbl_res_queue_remove_rss(priv, vsi_id);
+	nbl_res_queue_remove_q2vsi(priv, vsi_id);
+	if (!queue_info->num_txrx_queues)
+		return;
+
+	memset(&param, 0, sizeof(param));
+	/* clear shaping */
+	param.vsi_id = vsi_id;
+	param.enable = false;
+	nbl_res_queue_cfg_qdisc_mqprio(priv, &param);
+	nbl_res_queue_remove_cqs(res_mgt, vsi_id);
+	nbl_res_queue_cfg_dsch(res_mgt, vsi_id, false);
+	nbl_res_queue_remove_all_queues(res_mgt, vsi_id);
+	nbl_res_queue_free_txrx_queues(res_mgt, vsi_id);
+}
+
+/* for pmd driver */
+static u16 nbl_res_queue_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+
+	if 
(!queue_info->num_txrx_queues) + return 0xffff; + + return queue_info->txrx_queues[local_qid]; +} + +static int nbl_res_queue_cfg_log(void *priv, u16 vsi_id, u16 qps, bool vld) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 global_queue, i; + + if (!queue_info->num_txrx_queues) + return 0; + + for (i = 0; i < qps; i++) { + global_queue = queue_info->txrx_queues[i]; + phy_ops->cfg_vnet_qinfo_log(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_RX(global_queue), vld); + phy_ops->cfg_vnet_qinfo_log(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + NBL_PAIR_ID_GET_TX(global_queue), vld); + } + + return 0; +} + +static u16 nbl_req_queue_get_ctx(void *priv, u16 vsi_id, u16 qid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id]; + + if (!queue_info->num_txrx_queues) + return 0xffff; + + return queue_info->queues_context[qid]; +} + +static u16 nbl_get_adapt_desc_gother_level(u16 last_level, u64 rates) +{ + switch (last_level) { + case NBL_ADAPT_DESC_GOTHER_LEVEL0: + if (rates > NBL_ADAPT_DESC_GOTHER_LEVEL1_TH) + return NBL_ADAPT_DESC_GOTHER_LEVEL1; + else + return NBL_ADAPT_DESC_GOTHER_LEVEL0; + case NBL_ADAPT_DESC_GOTHER_LEVEL1: + if (rates > NBL_ADAPT_DESC_GOTHER_LEVEL1_DOWNGRADE_TH) + return NBL_ADAPT_DESC_GOTHER_LEVEL1; + else + return NBL_ADAPT_DESC_GOTHER_LEVEL0; + default: + return NBL_ADAPT_DESC_GOTHER_LEVEL0; + } +} + +static u16 nbl_get_adapt_desc_gother_timeout(u16 level) +{ + switch (level) { + case NBL_ADAPT_DESC_GOTHER_LEVEL0: + return NBL_ADAPT_DESC_GOTHER_LEVEL0_TIMEOUT; + case NBL_ADAPT_DESC_GOTHER_LEVEL1: + return NBL_ADAPT_DESC_GOTHER_LEVEL1_TIMEOUT; + default: + return NBL_ADAPT_DESC_GOTHER_LEVEL0_TIMEOUT; + } +} + +static void nbl_res_queue_adapt_desc_gother(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + struct nbl_adapt_desc_gother *adapt_desc_gother = &queue_mgt->adapt_desc_gother; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u32 last_uvn_desc_rd_entry = adapt_desc_gother->uvn_desc_rd_entry; + u64 last_get_stats_jiffies = adapt_desc_gother->get_desc_stats_jiffies; + u64 time_diff; + u32 uvn_desc_rd_entry; + u32 rx_rate; + u16 level, last_level, timeout; + + last_level = adapt_desc_gother->level; + time_diff = jiffies - last_get_stats_jiffies; + uvn_desc_rd_entry = phy_ops->get_uvn_desc_entry_stats(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + rx_rate = (uvn_desc_rd_entry - last_uvn_desc_rd_entry) / time_diff * HZ; + adapt_desc_gother->get_desc_stats_jiffies = jiffies; + adapt_desc_gother->uvn_desc_rd_entry = uvn_desc_rd_entry; + + level = nbl_get_adapt_desc_gother_level(last_level, rx_rate); + if (level != last_level) { + timeout = nbl_get_adapt_desc_gother_timeout(level); + phy_ops->set_uvn_desc_wr_timeout(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), timeout); + adapt_desc_gother->level = level; + } +} + +static void nbl_res_flr_clear_queues(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id + 
NBL_MAX_PF;
+	u16 vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE);
+
+	if (nbl_res_vf_is_active(priv, func_id))
+		nbl_res_queue_clear_queues(priv, vsi_id);
+}
+
+static int nbl_res_queue_restore_tx_queue(struct nbl_resource_mgt *res_mgt, u16 vsi_id,
+					  u16 local_queue_id, dma_addr_t dma)
+{
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_info *queue_info;
+	struct nbl_queue_cfg_param queue_cfg = {0};
+	u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+
+	queue_info = &queue_mgt->queue_info[func_id];
+	global_queue = queue_info->txrx_queues[local_queue_id];
+
+	phy_ops->get_tx_queue_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue);
+	/* Rectify size; in the register it is log2(size) */
+	queue_cfg.size = queue_info->queue_size;
+	/* The DMA addr was reallocated, update it */
+	queue_cfg.desc = dma;
+
+	phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+	phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+
+	phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+
+	phy_ops->cfg_tx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue);
+
+	return 0;
+}
+
+static int nbl_res_queue_restore_rx_queue(struct nbl_resource_mgt *res_mgt, u16 vsi_id,
+					  u16 local_queue_id, dma_addr_t dma)
+{
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_info *queue_info;
+	struct nbl_queue_cfg_param queue_cfg = {0};
+	u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+
+	queue_info = &queue_mgt->queue_info[func_id];
+	global_queue = queue_info->txrx_queues[local_queue_id];
+
+	phy_ops->get_rx_queue_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue);
+	/* Rectify size; in the register it is log2(size) */
+	queue_cfg.size = queue_info->queue_size;
+	/* The DMA addr was reallocated, update it */
+	queue_cfg.desc = dma;
+
+	phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+	phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+
+	phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+
+	phy_ops->cfg_rx_queue(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &queue_cfg, global_queue);
+
+	return 0;
+}
+
+static int nbl_res_queue_restore_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id,
+					  dma_addr_t dma, int type)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+
+	switch (type) {
+	case NBL_TX:
+		return nbl_res_queue_restore_tx_queue(res_mgt, vsi_id, local_queue_id, dma);
+	case NBL_RX:
+		return nbl_res_queue_restore_rx_queue(res_mgt, vsi_id, local_queue_id, dma);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nbl_res_queue_stop_abnormal_hw_queue(void *priv, u16 vsi_id, u16 local_queue_id, int type)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_info *queue_info;
+	u16 global_queue, func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+
+	queue_info = &queue_mgt->queue_info[func_id];
+	global_queue = queue_info->txrx_queues[local_queue_id];
+	switch (type) {
+	case NBL_TX:
+		phy_ops->lso_dsch_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
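+		/* Same quiesce order as the restore path: drain, disable, then reset */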
+		phy_ops->disable_dvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+
+		phy_ops->reset_dvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+		return 0;
+	case NBL_RX:
+		phy_ops->disable_uvn(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+		phy_ops->rsc_cache_drain(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+
+		phy_ops->reset_uvn_cfg(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_queue);
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static u16 nbl_res_queue_get_local_queue_id(void *priv, u16 vsi_id, u16 global_queue_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info;
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+	int i;
+
+	queue_info = &queue_mgt->queue_info[func_id];
+
+	if (queue_info->txrx_queues)
+		for (i = 0; i < queue_info->num_txrx_queues; i++)
+			if (global_queue_id == queue_info->txrx_queues[i])
+				return i;
+
+	return U16_MAX;
+}
+
+static int nbl_res_queue_set_tx_rate(void *priv, u16 func_id, int tx_rate)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+	struct nbl_queue_info *queue_info = &queue_mgt->queue_info[func_id];
+	struct nbl_queue_vsi_info *vsi_info = NULL;
+	u16 vsi_id, queue_id;
+	bool is_active = false;
+	int max_rate = 0, i;
+
+	vsi_id = nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_DATA);
+	vsi_info = nbl_res_queue_get_vsi_info(res_mgt, vsi_id);
+
+	if (!vsi_info)
+		return 0;
+
+	switch (res_info->board_info.eth_speed) {
+	case NBL_FW_PORT_SPEED_100G:
+		max_rate = NBL_RATE_MBPS_100G;
+		break;
+	case NBL_FW_PORT_SPEED_25G:
+		max_rate = NBL_RATE_MBPS_25G;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (tx_rate > max_rate)
+		return -EINVAL;
+
+	if (queue_info->txrx_queues)
+		for (i = 0; i < vsi_info->curr_qps; i++) {
+			queue_id = queue_info->txrx_queues[i + vsi_info->queue_offset];
+			is_active |= phy_ops->check_q2tc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+							 queue_id);
+		}
+
+	/* Config shaping */
+	return phy_ops->set_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_info->net_id, tx_rate,
+				    !!(tx_rate), is_active);
+}
+
+static void nbl_res_queue_get_active_func_bitmaps(void *priv, unsigned long *bitmap, int max_func)
+{
+	int i;
+	int func_id_end;
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+
+	func_id_end = max_func > NBL_MAX_FUNC ? NBL_MAX_FUNC : max_func;
+	for (i = 0; i < func_id_end; i++) {
+		if (!nbl_res_check_func_active_by_queue(res_mgt, i))
+			continue;
+
+		set_bit(i, bitmap);
+	}
+}
+
+/* NBL_QUEUE_SET_OPS(ops_name, func)
+ *
+ * Use X macros to reduce duplication between the setup and remove code.
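+ * NBL_QUEUE_SET_OPS() is redefined before each expansion of NBL_QUEUE_OPS_TBL:
+ * once to install the handlers in setup, and once to clear them in remove.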
+ */
+#define NBL_QUEUE_OPS_TBL \
+do { \
+	NBL_QUEUE_SET_OPS(alloc_txrx_queues, nbl_res_queue_alloc_txrx_queues); \
+	NBL_QUEUE_SET_OPS(free_txrx_queues, nbl_res_queue_free_txrx_queues); \
+	NBL_QUEUE_SET_OPS(register_vsi2q, nbl_res_queue_register_vsi2q); \
+	NBL_QUEUE_SET_OPS(setup_q2vsi, nbl_res_queue_setup_q2vsi); \
+	NBL_QUEUE_SET_OPS(remove_q2vsi, nbl_res_queue_remove_q2vsi); \
+	NBL_QUEUE_SET_OPS(setup_rss, nbl_res_queue_setup_rss); \
+	NBL_QUEUE_SET_OPS(remove_rss, nbl_res_queue_remove_rss); \
+	NBL_QUEUE_SET_OPS(setup_queue, nbl_res_queue_setup_queue); \
+	NBL_QUEUE_SET_OPS(remove_all_queues, nbl_res_queue_remove_all_queues); \
+	NBL_QUEUE_SET_OPS(cfg_dsch, nbl_res_queue_cfg_dsch); \
+	NBL_QUEUE_SET_OPS(setup_cqs, nbl_res_queue_setup_cqs); \
+	NBL_QUEUE_SET_OPS(remove_cqs, nbl_res_queue_remove_cqs); \
+	NBL_QUEUE_SET_OPS(queue_init, nbl_res_queue_init); \
+	NBL_QUEUE_SET_OPS(get_queue_err_stats, nbl_res_queue_get_queue_err_stats); \
+	NBL_QUEUE_SET_OPS(cfg_qdisc_mqprio, nbl_res_queue_cfg_qdisc_mqprio); \
+	NBL_QUEUE_SET_OPS(get_rxfh_indir_size, nbl_res_queue_get_rxfh_indir_size); \
+	NBL_QUEUE_SET_OPS(get_rxfh_indir, nbl_res_queue_get_rxfh_indir); \
+	NBL_QUEUE_SET_OPS(get_rxfh_rss_key_size, nbl_res_queue_get_rxfh_rss_key_size); \
+	NBL_QUEUE_SET_OPS(get_rxfh_rss_key, nbl_res_queue_get_rss_key); \
+	NBL_QUEUE_SET_OPS(get_rss_alg_sel, nbl_res_queue_get_rss_alg_sel); \
+	NBL_QUEUE_SET_OPS(clear_queues, nbl_res_queue_clear_queues); \
+	NBL_QUEUE_SET_OPS(get_vsi_global_queue_id, nbl_res_queue_get_vsi_global_qid); \
+	NBL_QUEUE_SET_OPS(cfg_queue_log, nbl_res_queue_cfg_log); \
+	NBL_QUEUE_SET_OPS(get_queue_ctx, nbl_req_queue_get_ctx); \
+	NBL_QUEUE_SET_OPS(adapt_desc_gother, nbl_res_queue_adapt_desc_gother); \
+	NBL_QUEUE_SET_OPS(flr_clear_queues, nbl_res_flr_clear_queues); \
+	NBL_QUEUE_SET_OPS(restore_hw_queue, nbl_res_queue_restore_hw_queue); \
+	NBL_QUEUE_SET_OPS(get_local_queue_id, nbl_res_queue_get_local_queue_id); \
+	NBL_QUEUE_SET_OPS(set_bridge_mode, nbl_res_queue_set_bridge_mode); \
+	NBL_QUEUE_SET_OPS(set_tx_rate, nbl_res_queue_set_tx_rate); \
+	NBL_QUEUE_SET_OPS(stop_abnormal_hw_queue, nbl_res_queue_stop_abnormal_hw_queue); \
+	NBL_QUEUE_SET_OPS(get_active_func_bitmaps, nbl_res_queue_get_active_func_bitmaps); \
+} while (0)
+
+int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *res_ops)
+{
+#define NBL_QUEUE_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0)
+	NBL_QUEUE_OPS_TBL;
+#undef NBL_QUEUE_SET_OPS
+
+	return 0;
+}
+
+void nbl_queue_remove_ops_leonis(struct nbl_resource_ops *res_ops)
+{
+#define NBL_QUEUE_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = NULL; } while (0)
+	NBL_QUEUE_OPS_TBL;
+#undef NBL_QUEUE_SET_OPS
+}
+
+void nbl_queue_mgt_init_leonis(struct nbl_queue_mgt *queue_mgt)
+{
+	queue_mgt->qid_map_select = NBL_MASTER_QID_MAP_TABLE;
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e2620ca7836eb5c036de2eba5ed67881f30029d
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_queue_leonis.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ + +#ifndef _NBL_QUEUE_LEONIS_H_ +#define _NBL_QUEUE_LEONIS_H_ + +#include "nbl_resource.h" + +#define NBL_QID_MAP_NOTIFY_ADDR_SHIFT (9) +#define NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN (23) + +#define NBL_ADAPT_DESC_GOTHER_LEVEL1_TH (1000000) /* 1000k */ +#define NBL_ADAPT_DESC_GOTHER_LEVEL1_DOWNGRADE_TH (700000) /* 700k */ +#define NBL_ADAPT_DESC_GOTHER_LEVEL0 (0) +#define NBL_ADAPT_DESC_GOTHER_LEVEL1 (1) + +#define NBL_ADAPT_DESC_GOTHER_LEVEL0_TIMEOUT (0x12c) +#define NBL_ADAPT_DESC_GOTHER_LEVEL1_TIMEOUT (0x960) + +#define NBL_SHAPING_WGT_MAX (255) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..889edf2749b0fa7d2f1ffd50764ca6393eb01fa0 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.c @@ -0,0 +1,2366 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_resource_leonis.h" + +MODULE_VERSION(NBL_LEONIS_DRIVER_VERSION); + +static void nbl_res_setup_common_ops(struct nbl_resource_mgt *res_mgt) +{ +} + +static int nbl_res_pf_to_eth_id(struct nbl_resource_mgt *res_mgt, u16 pf_id) +{ + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + if (pf_id >= NBL_MAX_PF) + return 0; + + return eth_info->eth_id[pf_id]; +} + +static u32 nbl_res_get_pfvf_queue_num(struct nbl_resource_mgt *res_mgt, int pfid, int vfid) +{ + struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info; + u16 func_id = nbl_res_pfvfid_to_func_id(res_mgt, pfid, vfid); + u32 queue_num = 0; + + if (vfid >= 0) { + if (num_info->net_max_qp_num[func_id] != 0) + queue_num = num_info->net_max_qp_num[func_id]; + else + queue_num = num_info->vf_def_max_net_qp_num; + } else { + if (num_info->net_max_qp_num[func_id] != 0) + queue_num = num_info->net_max_qp_num[func_id]; + else + queue_num = num_info->pf_def_max_net_qp_num; + } + + if (queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) { + nbl_warn(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE, + "Invalid queue num %u for func %d, use default", queue_num, func_id); + queue_num = vfid >= 0 ? 
NBL_DEFAULT_VF_HW_QUEUE_NUM : NBL_DEFAULT_PF_HW_QUEUE_NUM;
+	}
+
+	return queue_num;
+}
+
+static void nbl_res_get_rep_queue_info(void *priv, u16 *queue_num, u16 *queue_size)
+{
+	*queue_size = NBL_DEFAULT_DESC_NUM;
+	*queue_num = NBL_DEFAULT_REP_HW_QUEUE_NUM;
+}
+
+static void nbl_res_get_user_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info;
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+
+	if (num_info->net_max_qp_num[func_id] != 0)
+		*queue_num = num_info->net_max_qp_num[func_id];
+	else
+		*queue_num = num_info->pf_def_max_net_qp_num;
+
+	*queue_size = NBL_DEFAULT_DESC_NUM;
+
+	if (*queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) {
+		nbl_warn(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE,
+			 "Invalid user queue num %d for func %d, use default", *queue_num, func_id);
+		*queue_num = NBL_DEFAULT_PF_HW_QUEUE_NUM;
+	}
+}
+
+static int __maybe_unused nbl_res_get_queue_num(struct nbl_resource_mgt *res_mgt,
+						u16 func_id, u16 *tx_queue_num, u16 *rx_queue_num)
+{
+	int pfid, vfid;
+
+	nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid);
+
+	*tx_queue_num = nbl_res_get_pfvf_queue_num(res_mgt, pfid, vfid);
+	*rx_queue_num = nbl_res_get_pfvf_queue_num(res_mgt, pfid, vfid);
+
+	return 0;
+}
+
+static int nbl_res_save_vf_bar_info(struct nbl_resource_mgt *res_mgt,
+				    u16 func_id, struct nbl_register_net_param *register_param)
+{
+	struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id];
+	u64 pf_bar_start;
+	u16 pf_bdf;
+	u64 vf_bar_start;
+	u64 vf_bar_size;
+	u16 total_vfs;
+	u16 offset;
+	u16 stride;
+
+	pf_bar_start = register_param->pf_bar_start;
+	if (pf_bar_start) {
+		sriov_info->pf_bar_start = pf_bar_start;
+		dev_info(dev, "sriov_info, pf_bar_start:%llx\n", sriov_info->pf_bar_start);
+	}
+
+	pf_bdf = register_param->pf_bdf;
+	vf_bar_start = register_param->vf_bar_start;
+	vf_bar_size = register_param->vf_bar_size;
+	total_vfs = register_param->total_vfs;
+	offset = register_param->offset;
+	stride = register_param->stride;
+
+	if (total_vfs) {
+		if (pf_bdf != sriov_info->bdf) {
+			dev_err(dev, "PF bdf does not match, AF recorded %u, real PF bdf: %u\n",
+				sriov_info->bdf, pf_bdf);
+			return -EIO;
+		}
+		sriov_info->offset = offset;
+		sriov_info->stride = stride;
+		sriov_info->vf_bar_start = vf_bar_start;
+		sriov_info->vf_bar_len = vf_bar_size / total_vfs;
+
+		dev_info(dev, "sriov_info, bdf:%x:%x.%x, num_vfs:%d, start_vf_func_id:%d, "
+			 "offset:%d, stride:%d",
+			 PCI_BUS_NUM(pf_bdf), PCI_SLOT(pf_bdf & 0xff), PCI_FUNC(pf_bdf & 0xff),
+			 sriov_info->num_vfs, sriov_info->start_vf_func_id, offset, stride);
+	}
+
+	return 0;
+}
+
+static int nbl_res_prepare_vf_chan(struct nbl_resource_mgt *res_mgt,
+				   u16 func_id, struct nbl_register_net_param *register_param)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	struct nbl_sriov_info *sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id];
+	u16 pf_bdf;
+	u16 total_vfs;
+	u16 offset;
+	u16 stride;
+	u8 pf_bus;
+	u8 pf_devfn;
+	u16 vf_id;
+	u8 bus;
+	u8 devfn;
+	u8 devid;
+	u8 function;
+	u16 vf_func_id;
+
+	pf_bdf = register_param->pf_bdf;
+	total_vfs = register_param->total_vfs;
+	offset = register_param->offset;
+	stride = register_param->stride;
+
+	if (total_vfs) {
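+		/* The registering PF's BDF must match the AF's recorded BDF */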
+		if (pf_bdf != sriov_info->bdf) {
+			dev_err(dev, "PF bdf does not match, AF recorded %u, real PF bdf: %u\n",
+				sriov_info->bdf, pf_bdf);
+			return -EIO;
+		}
+
+		/* Configure the mailbox qinfo_map_table for all of this PF's VFs,
+		 * so that each VF's mailbox is ready and the VF can use it.
+		 */
+		pf_bus = PCI_BUS_NUM(sriov_info->bdf);
+		pf_devfn = sriov_info->bdf & 0xff;
+		for (vf_id = 0; vf_id < sriov_info->num_vfs; vf_id++) {
+			vf_func_id = sriov_info->start_vf_func_id + vf_id;
+
+			bus = pf_bus + ((pf_devfn + offset + stride * vf_id) >> 8);
+			devfn = (pf_devfn + offset + stride * vf_id) & 0xff;
+			devid = PCI_SLOT(devfn);
+			function = PCI_FUNC(devfn);
+
+			phy_ops->cfg_mailbox_qinfo(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						   vf_func_id, bus, devid, function);
+		}
+	}
+
+	return 0;
+}
+
+static int nbl_res_update_active_vf_num(struct nbl_resource_mgt *res_mgt, u16 func_id,
+					bool add_flag)
+{
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_resource_info *resource_info = res_mgt->resource_info;
+	struct nbl_sriov_info *sriov_info = res_mgt->resource_info->sriov_info;
+	int pfid = 0;
+	int vfid = 0;
+	int ret;
+
+	ret = nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid);
+	if (ret) {
+		nbl_err(common, NBL_DEBUG_RESOURCE, "convert func id to pfvfid failed\n");
+		return ret;
+	}
+
+	if (vfid == U32_MAX)
+		return 0;
+
+	if (add_flag) {
+		if (!test_bit(func_id, resource_info->func_bitmap)) {
+			sriov_info[pfid].active_vf_num++;
+			set_bit(func_id, resource_info->func_bitmap);
+		}
+	} else if (sriov_info[pfid].active_vf_num) {
+		if (test_bit(func_id, resource_info->func_bitmap)) {
+			sriov_info[pfid].active_vf_num--;
+			clear_bit(func_id, resource_info->func_bitmap);
+		}
+	}
+
+	return 0;
+}
+
+static u32 nbl_res_get_quirks(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->get_quirks(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+}
+
+static int nbl_res_register_net(void *priv, u16 func_id,
+				struct nbl_register_net_param *register_param,
+				struct nbl_register_net_result *register_result)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt);
+	netdev_features_t csumo_features = 0;
+	netdev_features_t tso_features = 0;
+	netdev_features_t pf_features = 0;
+	u16 tx_queue_num, rx_queue_num;
+	u8 mac[ETH_ALEN] = {0};
+	u32 quirks;
+	int ret = 0;
+
+	if (func_id < NBL_MAX_PF) {
+		nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, func_id));
+		pf_features = NBL_FEATURE(NETIF_F_NTUPLE);
+	} else {
+		ether_addr_copy(mac, vsi_info->mac_info[func_id].mac);
+	}
+	ether_addr_copy(register_result->mac, mac);
+
+	quirks = nbl_res_get_quirks(res_mgt);
+	if (performance_mode & BIT(NBL_QUIRKS_NO_TOE) ||
+	    !(quirks & BIT(NBL_QUIRKS_NO_TOE))) {
+		csumo_features = NBL_FEATURE(NETIF_F_RXCSUM) |
+				 NBL_FEATURE(NETIF_F_IP_CSUM) |
+				 NBL_FEATURE(NETIF_F_IPV6_CSUM);
+		tso_features = NBL_FEATURE(NETIF_F_TSO) |
+			       NBL_FEATURE(NETIF_F_TSO6) |
+			       NBL_FEATURE(NETIF_F_GSO_UDP_L4);
+	}
+
+	register_result->hw_features |= pf_features |
+					csumo_features |
+					tso_features |
+					NBL_FEATURE(NETIF_F_SG) |
+					NBL_FEATURE(NETIF_F_HW_TC);
+	register_result->features |= register_result->hw_features |
+				     NBL_FEATURE(NETIF_F_HW_TC) |
+				     NBL_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER) |
+				     NBL_FEATURE(NETIF_F_HW_VLAN_STAG_FILTER);
+
+	register_result->max_mtu = NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD;
+
+	
register_result->vlan_proto = vsi_info->mac_info[func_id].vlan_proto; + register_result->vlan_tci = vsi_info->mac_info[func_id].vlan_tci; + register_result->rate = vsi_info->mac_info[func_id].rate; + + if (loongarch_low_version) { + if (common->is_vf) { + tx_queue_num = NBL_LOONGSON64_VF_MAX_QUEUE_NUM; + rx_queue_num = NBL_LOONGSON64_VF_MAX_QUEUE_NUM; + } else { + tx_queue_num = NBL_LOONGSON64_MAX_QUEUE_NUM; + rx_queue_num = NBL_LOONGSON64_MAX_QUEUE_NUM; + } + } else { + nbl_res_get_queue_num(res_mgt, func_id, &tx_queue_num, &rx_queue_num); + } + + register_result->tx_queue_num = tx_queue_num; + register_result->rx_queue_num = rx_queue_num; + register_result->queue_size = NBL_DEFAULT_DESC_NUM; + + ret = nbl_res_update_active_vf_num(res_mgt, func_id, 1); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "change active vf num failed with ret: %d\n", + ret); + goto update_active_vf_fail; + } + + if (func_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) + return 0; + + ret = nbl_res_save_vf_bar_info(res_mgt, func_id, register_param); + if (ret) + goto save_vf_bar_info_fail; + + ret = nbl_res_prepare_vf_chan(res_mgt, func_id, register_param); + if (ret) + goto prepare_vf_chan_fail; + + nbl_res_open_sfp(res_mgt, nbl_res_pf_to_eth_id(res_mgt, func_id)); + + return ret; + +prepare_vf_chan_fail: +save_vf_bar_info_fail: +update_active_vf_fail: + return -EIO; +} + +static int nbl_res_unregister_net(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_update_active_vf_num(res_mgt, func_id, 0); +} + +static u16 nbl_res_get_vsi_id(void *priv, u16 func_id, u16 type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_func_id_to_vsi_id(res_mgt, func_id, type); +} + +static void nbl_res_get_eth_id(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + u16 pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + *eth_mode = eth_info->eth_num; + if (pf_id < eth_info->eth_num) { + *eth_id = eth_info->eth_id[pf_id]; + *logic_eth_id = pf_id; + /* if pf_id > eth_num, use eth_id 0 */ + } else { + *eth_id = eth_info->eth_id[0]; + *logic_eth_id = 0; + } +} + +static DEFINE_IDA(nbl_adev_ida); + +static void nbl_res_setup_rdma_id(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id; + + for_each_set_bit(func_id, resource_info->rdma_info.func_cap, NBL_MAX_FUNC) + resource_info->rdma_info.rdma_id[func_id] = ida_alloc(&nbl_adev_ida, GFP_KERNEL); +} + +static void nbl_res_remove_rdma_id(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id; + + for_each_set_bit(func_id, resource_info->rdma_info.func_cap, NBL_MAX_FUNC) + ida_free(&nbl_adev_ida, resource_info->rdma_info.rdma_id[func_id]); +} + +static void nbl_res_register_rdma(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + /* Even if we don't have capability, we would still return mem_type */ + param->has_rdma = false; + param->mem_type = 
resource_info->rdma_info.mem_type; + + if (test_bit(func_id, resource_info->rdma_info.func_cap)) { + param->has_rdma = true; + param->intr_num = NBL_RES_RDMA_INTR_NUM; + + param->id = resource_info->rdma_info.rdma_id[func_id]; + } +} + +static void nbl_res_unregister_rdma(void *priv, u16 vsi_id) +{ +} + +static void nbl_res_register_rdma_bond(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = 0; + int i; + + register_param->has_rdma = false; + register_param->mem_type = resource_info->rdma_info.mem_type; + + /* Rdma bond can be created only if all members have rdma cap */ + for (i = 0; i < list_param->lag_num; i++) { + func_id = nbl_res_vsi_id_to_func_id(res_mgt, list_param->member_list[i].vsi_id); + + if (!test_bit(func_id, resource_info->rdma_info.func_cap)) + return; + } + + register_param->has_rdma = true; + register_param->intr_num = NBL_RES_RDMA_INTR_NUM; +} + +static void nbl_res_unregister_rdma_bond(void *priv, u16 lag_id) +{ +} + +static u8 __iomem *nbl_res_get_hw_addr(void *priv, size_t *size) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_hw_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), size); +} + +static u64 nbl_res_get_real_hw_addr(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + return nbl_res_get_func_bar_base_addr(res_mgt, func_id); +} + +static u16 nbl_res_get_function_id(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); +} + +static void nbl_res_get_real_bdf(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + nbl_res_func_id_to_bdf(res_mgt, func_id, bus, dev, function); +} + +static u32 nbl_res_check_active_vf(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_sriov_info *sriov_info = res_mgt->resource_info->sriov_info; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int pfid = 0; + int vfid = 0; + int ret; + + ret = nbl_res_func_id_to_pfvfid(res_mgt, func_id, &pfid, &vfid); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "convert func id to pfvfid failed\n"); + return ret; + } + + return sriov_info[pfid].active_vf_num; +} + +static void nbl_res_set_dport_fc_th_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_dport_fc_th_vld(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, vld); +} + +static void nbl_res_set_shaping_dport_vld(void *priv, u8 eth_id, bool vld) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_shaping_dport_vld(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, vld); +} + +static int nbl_res_set_phy_flow(struct nbl_resource_mgt *res_mgt, u8 eth_id, bool status) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_vsi_info *vsi_info = 
NBL_RES_MGT_TO_VSI_INFO(res_mgt);
+	u8 pf_id = nbl_res_eth_id_to_pf_id(res_mgt, eth_id);
+	int i, ret = 0;
+
+	for (i = 0; i < NBL_VSI_SERV_MAX_TYPE; i++) {
+		ret = phy_ops->cfg_phy_flow(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					    vsi_info->serv_info[pf_id][i].base_id,
+					    vsi_info->serv_info[pf_id][i].num, eth_id, status);
+		if (ret)
+			return ret;
+	}
+
+	nbl_res_set_dport_fc_th_vld(res_mgt, eth_id, !status);
+	nbl_res_set_shaping_dport_vld(res_mgt, eth_id, !status);
+	phy_ops->cfg_eth_port_priority_replace(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, status);
+
+	return 0;
+}
+
+static void nbl_res_get_base_mac_addr(void *priv, u8 *mac)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+
+	nbl_res_get_eth_mac(res_mgt, mac, nbl_res_pf_to_eth_id(res_mgt, 0));
+}
+
+static int nbl_res_update_offload_status(struct nbl_resource_mgt_leonis *res_mgt_leonis)
+{
+	struct nbl_rep_offload_status *rep_status =
+		&res_mgt_leonis->pmd_status.rep_status;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt);
+	struct nbl_event_acl_state_update_data event_data = {0};
+	struct nbl_sriov_info *sriov_info;
+	bool status;
+	int i, j, start, end, vsi_match, eth_id, eth_tmp, lag_id, ret = 0;
+
+	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) {
+		status = false;
+		eth_id = nbl_res_pf_to_eth_id(res_mgt, i);
+
+		start = nbl_res_pfvfid_to_vsi_id(res_mgt, i, U32_MAX, NBL_VSI_DATA);
+		sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + i;
+		end = nbl_res_pfvfid_to_vsi_id(res_mgt, i, sriov_info->num_vfs, NBL_VSI_DATA);
+		vsi_match = find_next_bit(rep_status->rep_vsi_bitmap,
+					  NBL_OFFLOAD_STATUS_MAX_VSI, start);
+		if (vsi_match <= end || test_bit(eth_id, rep_status->rep_eth_bitmap))
+			status = true;
+
+		if (rep_status->status[eth_id] != status) {
+			ret = nbl_res_set_phy_flow(res_mgt, eth_id, status);
+			if (ret)
+				return ret;
+			rep_status->status[eth_id] = status;
+		}
+	}
+
+	/* Update bond offload status.
+	 * For a bond, only one PF is bound to ovs-dpdk, but all PFs in the lag
+	 * should switch to offload.
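+	 * Hence each PF inherits the offload state of any offloaded lag member.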
+	 */
+	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) {
+		status = false;
+		eth_id = nbl_res_pf_to_eth_id(res_mgt, i);
+		lag_id = nbl_res_eth_id_to_lag_id(res_mgt, eth_id);
+
+		if (lag_id >= 0 && lag_id < NBL_LAG_MAX_NUM) {
+			for (j = 0; j < eth_bond_info->entry[lag_id].lag_num &&
+			     NBL_ETH_BOND_VALID_PORT(j); j++) {
+				/* If bonded, any port offloaded means all ports are offloaded */
+				eth_tmp = eth_bond_info->entry[lag_id].eth_id[j];
+				if (rep_status->status[eth_tmp]) {
+					status = true;
+					break;
+				}
+			}
+
+			if (rep_status->status[eth_id] != status) {
+				ret = nbl_res_set_phy_flow(res_mgt, eth_id, status);
+				if (ret)
+					return ret;
+				rep_status->status[eth_id] = status;
+			}
+		}
+	}
+
+	event_data.is_offload = false;
+
+	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) {
+		eth_id = nbl_res_pf_to_eth_id(res_mgt, i);
+		if (rep_status->status[eth_id])
+			event_data.is_offload = true;
+	}
+
+	nbl_event_notify(NBL_EVENT_ACL_STATE_UPDATE, &event_data, NBL_COMMON_TO_VSI_ID(common),
+			 NBL_COMMON_TO_BOARD_ID(common));
+
+	return 0;
+}
+
+static int nbl_res_set_pmd_debug(void *priv, bool pmd_debug)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_rep_offload_status *rep_status =
+		&res_mgt_leonis->pmd_status.rep_status;
+
+	rep_status->pmd_debug = pmd_debug;
+	return 0;
+}
+
+static void nbl_res_set_offload_status(void *priv, u16 func_id)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_upcall_port_info *upcall_port_info =
+		&res_mgt_leonis->pmd_status.upcall_port_info;
+	struct nbl_rep_offload_status *rep_status =
+		&res_mgt_leonis->pmd_status.rep_status;
+
+	if (!upcall_port_info->upcall_port_active ||
+	    upcall_port_info->func_id != func_id)
+		return;
+
+	rep_status->timestamp = jiffies;
+}
+
+static int nbl_res_check_offload_status(void *priv, bool *is_down)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_upcall_port_info *upcall_port_info =
+		&res_mgt_leonis->pmd_status.upcall_port_info;
+	struct nbl_rep_offload_status *rep_status =
+		&res_mgt_leonis->pmd_status.rep_status;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	int i;
+
+	if (!upcall_port_info->upcall_port_active)
+		return 0;
+
+	/* Check pmd debug mode; skip the timeout check while pmd_debug is on */
+	if (rep_status->pmd_debug) {
+		nbl_info(common, NBL_DEBUG_FLOW, "pmd is in debug mode now");
+		rep_status->timestamp = jiffies;
+		return 0;
+	}
+
+	if (rep_status->timestamp && time_after(jiffies, rep_status->timestamp + 30 * HZ)) {
+		for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_VSI; i++)
+			clear_bit(i, rep_status->rep_vsi_bitmap);
+
+		for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_ETH; i++)
+			clear_bit(i, rep_status->rep_eth_bitmap);
+
+		upcall_port_info->upcall_port_active = false;
+		nbl_err(common, NBL_DEBUG_FLOW, "offload found inactive!");
+		phy_ops->clear_profile_table_action(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+		nbl_res_update_offload_status(res_mgt_leonis);
+		*is_down = true;
+	}
+
+	return 0;
+}
+
+static void nbl_res_get_rep_feature(void *priv, struct nbl_register_net_result *register_result)
+{
+	netdev_features_t csumo_features;
+
+	csumo_features = NBL_FEATURE(NETIF_F_RXCSUM) |
+			 NBL_FEATURE(NETIF_F_IP_CSUM) |
+			 NBL_FEATURE(NETIF_F_IPV6_CSUM) |
+			 NBL_FEATURE(NETIF_F_SCTP_CRC);
+	register_result->hw_features = csumo_features | 
NBL_FEATURE(NETIF_F_HW_TC); + register_result->features |= csumo_features | NBL_FEATURE(NETIF_F_HW_TC); + register_result->max_mtu = NBL_MAX_JUMBO_FRAME_SIZE - NBL_PKT_HDR_PAD; +} + +static void nbl_res_set_eswitch_mode(void *priv, u16 switch_mode) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *tx_ring; + struct nbl_res_rx_ring *rx_ring; + int i; + + resource_info->eswitch_info->mode = switch_mode; + + /* set ring info switch_mode */ + for (i = 0; i < txrx_mgt->rx_ring_num; i++) { + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); + tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); + + tx_ring->mode = switch_mode; + rx_ring->mode = switch_mode; + } +} + +static u16 nbl_res_get_eswitch_mode(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + if (resource_info->eswitch_info) + return resource_info->eswitch_info->mode; + else + return NBL_ESWITCH_NONE; +} + +static int nbl_res_alloc_rep_data(void *priv, int num_vfs, u16 vf_base_vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + eswitch_info->rep_data = devm_kcalloc(dev, num_vfs, + sizeof(struct nbl_rep_data), GFP_KERNEL); + if (!eswitch_info->rep_data) + return -ENOMEM; + eswitch_info->num_vfs = num_vfs; + eswitch_info->vf_base_vsi_id = vf_base_vsi_id; + return 0; +} + +static void nbl_res_free_rep_data(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info **eswitch_info = &NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + if ((*eswitch_info)->rep_data) { + devm_kfree(dev, (*eswitch_info)->rep_data); + (*eswitch_info)->rep_data = NULL; + } + (*eswitch_info)->num_vfs = 0; +} + +static void nbl_res_set_rep_netdev_info(void *priv, void *rep_data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_data *rep = (struct nbl_rep_data *)rep_data; + u16 rep_data_index; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep->rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + eswitch_info->rep_data[rep_data_index].rep_vsi_id = rep->rep_vsi_id; + eswitch_info->rep_data[rep_data_index].netdev = rep->netdev; + nbl_info(common, NBL_DEBUG_RESOURCE, "nbl set rep netdev rep_vsi_id %d netdev %p\n", + rep->rep_vsi_id, rep->netdev); +} + +static void nbl_res_unset_rep_netdev_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + + memset(eswitch_info->rep_data, 0, + eswitch_info->num_vfs * sizeof(struct nbl_rep_data)); +} + +static struct net_device *nbl_res_get_rep_netdev_info(void *priv, u16 rep_data_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + + if (rep_data_index >= eswitch_info->num_vfs) + return NULL; + return eswitch_info->rep_data[rep_data_index].netdev; +} 
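+
+/* Usage sketch (illustrative only, not compiled into the driver): the
+ * switchdev service layer is expected to drive the rep management ops
+ * above roughly as follows; "priv", "vsi_id" and "netdev" are hypothetical
+ * caller-side names.
+ *
+ *	struct nbl_rep_data rep = { .rep_vsi_id = vsi_id, .netdev = netdev };
+ *
+ *	if (!nbl_res_alloc_rep_data(priv, num_vfs, vf_base_vsi_id)) {
+ *		nbl_res_set_rep_netdev_info(priv, &rep);
+ *		...
+ *		nbl_res_unset_rep_netdev_info(priv);
+ *		nbl_res_free_rep_data(priv);
+ *	}
+ */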
+ +static int nbl_res_disable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_set_phy_flow(res_mgt, eth_id, true); +} + +static int nbl_res_enable_phy_flow(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return nbl_res_set_phy_flow(res_mgt, eth_id, false); +} + +static void nbl_res_init_acl(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (!res_mgt->resource_info->init_acl_refcnt) + phy_ops->init_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + res_mgt->resource_info->init_acl_refcnt++; +} + +static void nbl_res_uninit_acl(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + res_mgt->resource_info->init_acl_refcnt--; + + if (!res_mgt->resource_info->init_acl_refcnt) + phy_ops->uninit_acl(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static int nbl_res_set_upcall_rule(void *priv, u8 eth_id, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->set_upcall_rule(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, vsi_id); +} + +static int nbl_res_unset_upcall_rule(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->unset_upcall_rule(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id); +} + +static void nbl_res_get_rep_stats(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct nbl_rep_data *rep_data; + unsigned int start; + u16 rep_data_index = 0; + + if (!eswitch_info || eswitch_info->mode != NBL_ESWITCH_OFFLOADS || + ((nbl_res_get_rep_idx(eswitch_info, rep_vsi_id)) == U32_MAX)) + return; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + rep_data = &eswitch_info->rep_data[rep_data_index]; + if (rep_data->rep_vsi_id != rep_vsi_id) + return; + + if (is_tx) { + do { + start = u64_stats_fetch_begin(&rep_data->rep_syncp); + rep_stats->packets = rep_data->tx_packets; + rep_stats->bytes = rep_data->tx_bytes; + } while (u64_stats_fetch_retry(&rep_data->rep_syncp, start)); + } else { + do { + start = u64_stats_fetch_begin(&rep_data->rep_syncp); + rep_stats->packets = rep_data->rx_packets; + rep_stats->bytes = rep_data->rx_bytes; + } while (u64_stats_fetch_retry(&rep_data->rep_syncp, start)); + } +} + +static u16 nbl_res_get_rep_index(void *priv, u16 rep_vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + + return nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); +} + +static void nbl_res_register_net_rep(void *priv, u16 pf_id, u16 vf_id, + struct nbl_register_net_rep_result *result) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + int 
pf_id_tmp, vf_id_tmp; + + pf_id_tmp = pf_id; + if (vf_id == U16_MAX) + vf_id_tmp = U32_MAX; + else + vf_id_tmp = vf_id; + + result->vsi_id = nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id_tmp, vf_id_tmp, NBL_VSI_DATA); + result->func_id = nbl_res_pfvfid_to_func_id(res_mgt, pf_id_tmp, vf_id_tmp); + + if (result->vsi_id >= NBL_OFFLOAD_STATUS_MAX_VSI) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "register_net_rep pf %d vf %d vsi_id %d err\n", + pf_id, vf_id, result->vsi_id); + return; + } + + set_bit(result->vsi_id, rep_status->rep_vsi_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static void nbl_res_unregister_net_rep(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (vsi_id >= NBL_OFFLOAD_STATUS_MAX_VSI) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "unregister_net_rep vsi_id %d err\n", vsi_id); + return; + } + + /* set rss to l4 */ + phy_ops->set_epro_rss_default(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id); + clear_bit(vsi_id, rep_status->rep_vsi_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static void nbl_res_register_eth_rep(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + + if (eth_id >= NBL_OFFLOAD_STATUS_MAX_ETH) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "register_eth_rep eth_id %d err\n", eth_id); + return; + } + set_bit(eth_id, rep_status->rep_eth_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static void nbl_res_unregister_eth_rep(void *priv, u8 eth_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + + if (eth_id >= NBL_OFFLOAD_STATUS_MAX_ETH) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "unregister_eth_rep eth_id %d err\n", eth_id); + return; + } + + clear_bit(eth_id, rep_status->rep_eth_bitmap); + nbl_res_update_offload_status(res_mgt_leonis); +} + +static int nbl_res_register_upcall_port(void *priv, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + u16 vsi_id = nbl_res_func_id_to_vsi_id(&res_mgt_leonis->res_mgt, func_id, NBL_VSI_DATA); + int i; + + rep_status->timestamp = jiffies; + + if (!upcall_port_info->upcall_port_active) { + upcall_port_info->func_id = func_id; + upcall_port_info->upcall_port_active = true; + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_VSI; i++) + clear_bit(i, rep_status->rep_vsi_bitmap); + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_ETH; i++) + clear_bit(i, rep_status->rep_eth_bitmap); + + set_bit(vsi_id, rep_status->rep_vsi_bitmap); + + nbl_res_update_offload_status(res_mgt_leonis); 
+ return 0; + } + + if (func_id != upcall_port_info->func_id) { + nbl_err(NBL_RES_MGT_TO_COMMON(&res_mgt_leonis->res_mgt), NBL_DEBUG_RESOURCE, + "can not add rep port with two pf port, register_upcall_port failed\n"); + return -EINVAL; + } + + return 0; +} + +static void nbl_res_unregister_upcall_port(void *priv, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + struct nbl_rep_offload_status *rep_status = + &res_mgt_leonis->pmd_status.rep_status; + int i; + + if (!upcall_port_info->upcall_port_active || + upcall_port_info->func_id != func_id) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "func_id %d unregister upcall failed\n", func_id); + return; + } + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_VSI; i++) + clear_bit(i, rep_status->rep_vsi_bitmap); + + for (i = 0; i < NBL_OFFLOAD_STATUS_MAX_ETH; i++) + clear_bit(i, rep_status->rep_eth_bitmap); + + nbl_res_update_offload_status(res_mgt_leonis); + upcall_port_info->upcall_port_active = false; +} + +static void nbl_res_init_offload_fwd(void *priv, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_offload_fwd(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id); +} + +static void nbl_res_init_cmdq(void *priv, void *data, u16 func_id) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_cmdq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, func_id); +} + +static void nbl_res_destroy_cmdq(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->destroy_cmdq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static void nbl_res_reset_cmdq(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->reset_cmdq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); +} + +static void nbl_res_init_rep(void *priv, u16 vsi_id, u8 inner_type, + u8 outer_type, u8 rep_type) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_rep(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, + inner_type, outer_type, rep_type); +} + +static void nbl_res_init_flow(void *priv, void *param) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct nbl_resource_mgt_leonis *)priv; + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->init_flow(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), param); +} + +static void nbl_res_deinit_flow(void *priv) +{ + struct nbl_resource_mgt_leonis *res_mgt_leonis = + (struct 
nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->deinit_flow(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+}
+
+static void nbl_res_offload_flow_rule(void *priv, void *data)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->offload_flow_rule(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data);
+}
+
+static void nbl_res_get_flow_acl_switch(void *priv, u8 *acl_enable)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->get_flow_acl_switch(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				     acl_enable);
+}
+
+static void nbl_res_get_line_rate_info(void *priv, void *data, void *result)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->get_line_rate_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, result);
+}
+
+/* The return value needs to be converted to millidegrees Celsius (1/1000 degree). */
+static u32 nbl_res_get_chip_temperature(void *priv, enum nbl_hwmon_type type, u32 sensor_id)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->get_chip_temperature(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), type, sensor_id);
+}
+
+static int nbl_res_init_vdpaq(void *priv, u16 func_id, u64 pa, u32 size)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	u8 bus, dev, func;
+	u16 bdf;
+
+	nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &dev, &func);
+	bdf = PCI_DEVID(bus, PCI_DEVFN(dev, func));
+
+	return phy_ops->init_vdpaq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, bdf, pa, size);
+}
+
+static void nbl_res_destroy_vdpaq(void *priv)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->destroy_vdpaq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+}
+
+static int nbl_res_get_upcall_port(void *priv, u16 *bdf)
+{
+	struct nbl_resource_mgt_leonis *res_mgt_leonis =
+		(struct nbl_resource_mgt_leonis *)priv;
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_upcall_port_info *upcall_port_info =
+		&res_mgt_leonis->pmd_status.upcall_port_info;
+	u8 bus, dev, func;
+
+	if (!upcall_port_info->upcall_port_active)
+		return U32_MAX;
+
+	nbl_res_func_id_to_bdf(res_mgt, upcall_port_info->func_id, &bus, &dev, &func);
+	*bdf = PCI_DEVID(bus, PCI_DEVFN(dev, func));
+	return 0;
+}
+
+static void nbl_res_get_reg_dump(void *priv, u32 *data, u32 len)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->get_reg_dump(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), data, len);
+}
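+
+/* Usage sketch (illustrative only, not compiled into the driver): callers
+ * are expected to size the buffer via the _len op below before dumping.
+ * Whether "len" counts bytes or u32 words is defined by the phy layer;
+ * this sketch assumes u32 words.
+ *
+ *	int len = nbl_res_get_reg_dump_len(priv);
+ *
+ *	u32 *buf = kcalloc(len, sizeof(*buf), GFP_KERNEL);
+ *	if (buf)
+ *		nbl_res_get_reg_dump(priv, buf, len);
+ */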
+
+static int nbl_res_get_reg_dump_len(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->get_reg_dump_len(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+}
+
+static int nbl_res_process_abnormal_event(void *priv, struct nbl_abnormal_event_info *abnormal_info)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->process_abnormal_event(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), abnormal_info);
+}
+
+static int nbl_res_cfg_lag_hash_algorithm(void *priv, u16 eth_id, u16 lag_id,
+					  enum netdev_lag_hash hash_type)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->cfg_lag_hash_algorithm(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					       eth_id, lag_id, hash_type);
+}
+
+static int nbl_res_cfg_lag_member_fwd(void *priv, u16 eth_id, u16 lag_id, u8 fwd)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->cfg_lag_member_fwd(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, lag_id, fwd);
+}
+
+static int nbl_res_cfg_lag_member_list(void *priv, struct nbl_lag_member_list_param *param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->cfg_lag_member_list(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), param);
+}
+
+static int nbl_res_cfg_lag_member_up_attr(void *priv, u16 eth_id, u16 lag_id, bool enable)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->cfg_lag_member_up_attr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+					       eth_id, lag_id, enable);
+}
+
+static int nbl_res_cfg_bond_shaping(void *priv, u8 eth_id, bool enable)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	return phy_ops->cfg_bond_shaping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id,
+					 res_mgt->resource_info->board_info.eth_speed, enable);
+}
+
+static void nbl_res_cfg_bgid_back_pressure(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->cfg_bgid_back_pressure(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), main_eth_id, other_eth_id,
+					enable, res_mgt->resource_info->board_info.eth_speed);
+}
+
+static int nbl_res_switchdev_init_cmdq(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_mgt *chan_mgt = NBL_RES_MGT_TO_CHAN_PRIV(res_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt));
+
+	return chan_ops->init_cmdq(dev, chan_mgt);
+}
+
+static int nbl_res_switchdev_deinit_cmdq(void *priv, u8 index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_channel_mgt *chan_mgt = NBL_RES_MGT_TO_CHAN_PRIV(res_mgt);
+	struct nbl_channel_ops *chan_ops = NBL_RES_MGT_TO_CHAN_OPS(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt));
+
+	return chan_ops->deinit_cmdq(dev, chan_mgt, index);
+}
+
+static int nbl_res_set_tc_flow_info(void *priv)
+{
+	struct
nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int i = 0; + + if (common->tc_inst_id >= NBL_TC_FLOW_INST_COUNT) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow set inst_id=%d is invalid.\n", + common->tc_inst_id); + return -EINVAL; + } + + if (!tc_flow_mgt->pf_set_tc_count) { + nbl_tc_set_flow_info(tc_flow_mgt, common->tc_inst_id); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set inst_id=%d success.\n", + common->tc_inst_id); + + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set kgen cvlan zero, set ped vsi type zero\n"); + phy_ops->set_tc_kgen_cvlan_zero(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + for (i = 0; i < NBL_TPID_PORT_NUM; i++) + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), i, 0); + } + + tc_flow_mgt->pf_set_tc_count++; + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set pf_set_tc_count++=%d\n", + tc_flow_mgt->pf_set_tc_count); + + return 0; +} + +static int nbl_res_unset_tc_flow_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int ret = 0; + int i = 0; + + if (common->tc_inst_id >= NBL_TC_FLOW_INST_COUNT) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow unset inst_id=%d is invalid.\n", + common->tc_inst_id); + return -EINVAL; + } + + tc_flow_mgt->pf_set_tc_count--; + nbl_info(common, NBL_DEBUG_FLOW, "tc flow set pf_set_tc_count--=%d\n", + tc_flow_mgt->pf_set_tc_count); + + if (!tc_flow_mgt->pf_set_tc_count) { + ret = nbl_tc_flow_flush_flow(res_mgt); + if (ret) + return -EINVAL; + + nbl_info(common, NBL_DEBUG_FLOW, "tc flow unset kgen cvlan, set ped vsi type zero\n"); + phy_ops->unset_tc_kgen_cvlan(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + + for (i = 0; i < NBL_TPID_PORT_NUM; i++) { + if (tc_flow_mgt->port_tpid_type[i] != 0) { + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + i, 0); + tc_flow_mgt->port_tpid_type[i] = 0; + } + } + + nbl_tc_unset_flow_info(common->tc_inst_id); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow unset inst_id=%d success.\n", + common->tc_inst_id); + } + + return 0; +} + +static int nbl_res_get_tc_flow_info(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (common->tc_inst_id >= NBL_TC_FLOW_INST_COUNT) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow get inst_id=%d is invalid.\n", + common->tc_inst_id); + return -EINVAL; + } + + if (NBL_COMMON_TO_PCI_FUNC_ID(common)) + NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt) = nbl_tc_get_flow_info(common->tc_inst_id); + nbl_info(common, NBL_DEBUG_FLOW, "tc flow get inst_id=%d success.\n", + common->tc_inst_id); + + return 0; +} + +static int nbl_res_get_driver_info(void *priv, struct nbl_driver_info *driver_info) +{ + strscpy(driver_info->driver_version, NBL_LEONIS_DRIVER_VERSION, + sizeof(driver_info->driver_version)); + return 1; +} + +static int nbl_res_get_p4_info(void *priv, char *verify_code) +{ + /* We actually only care about the snic-v3r1 part, won't check m181xx */ + strscpy(verify_code, "snic_v3r1_m181xx", NBL_P4_NAME_LEN); + + return NBL_P4_DEFAULT; +} + +static int nbl_res_get_p4_used(void *priv) +{ + struct nbl_resource_mgt *res_mgt 
= (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+
+	return resource_info->p4_used;
+}
+
+static int nbl_res_set_p4_used(void *priv, int p4_type)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+
+	resource_info->p4_used = p4_type;
+
+	return 0;
+}
+
+static u32 nbl_res_get_p4_version(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+
+	return res_mgt->resource_info->board_info.p4_version;
+}
+
+static int nbl_res_load_p4(void *priv, struct nbl_load_p4_param *p4_param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	if (p4_param->start || p4_param->end)
+		return 0;
+
+	phy_ops->load_p4(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), p4_param->addr,
+			 p4_param->size, p4_param->data);
+
+	return 0;
+}
+
+static void nbl_res_get_board_info(void *priv, struct nbl_board_port_info *board_info)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+
+	memcpy(board_info, &res_mgt->resource_info->board_info, sizeof(*board_info));
+}
+
+static u16 nbl_res_get_vf_base_vsi_id(void *priv, u16 pf_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+
+	return nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id, 0, NBL_VSI_DATA);
+}
+
+static void nbl_res_flr_clear_net(void *priv, u16 vf_id)
+{
+	u16 func_id = vf_id + NBL_MAX_PF;
+	u16 vsi_id;
+
+	vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_DATA);
+	nbl_res_unregister_rdma(priv, vsi_id);
+
+	if (nbl_res_vf_is_active(priv, func_id))
+		nbl_res_unregister_net(priv, func_id);
+}
+
+static void nbl_res_flr_clear_rdma(void *priv, u16 vf_id)
+{
+	u16 func_id = vf_id + NBL_MAX_PF;
+	u16 vsi_id;
+
+	vsi_id = nbl_res_func_id_to_vsi_id(priv, func_id, NBL_VSI_DATA);
+	nbl_res_unregister_rdma(priv, vsi_id);
+}
+
+static u16 nbl_res_covert_vfid_to_vsi_id(void *priv, u16 vf_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	u16 func_id = vf_id + NBL_MAX_PF;
+
+	return nbl_res_func_id_to_vsi_id(res_mgt, func_id, NBL_VSI_SERV_VF_DATA_TYPE);
+}
+
+static int nbl_res_get_board_id(void *priv)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	return NBL_COMMON_TO_BOARD_ID(common);
+}
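+
+/* Worked example (hypothetical values) for the VF helpers above: with
+ * NBL_MAX_PF == 8, VF 3 becomes func_id 11, which
+ * nbl_res_func_id_to_vsi_id() resolves to that VF's data VSI, e.g. VSI 7
+ * under the two-port layout documented in nbl_res_ctrl_dev_vsi_info_init()
+ * (pf0's VF data VSIs start at 4).
+ */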
+
+static int nbl_res_cfg_eth_bond_info(void *priv, struct nbl_lag_member_list_param *param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_eth_bond_entry origin_entry;
+	struct nbl_eth_bond_entry *entry = NULL;
+	struct nbl_event_link_status_update_data *event_data = NULL;
+	u8 eth_btm[NBL_MAX_ETHERNET] = {0};
+	int num = 0, i = 0, j = 0;
+
+	if (param->lag_id >= NBL_LAG_MAX_NUM)
+		return -EINVAL;
+
+	entry = &eth_bond_info->entry[param->lag_id];
+	memcpy(&origin_entry, entry, sizeof(origin_entry));
+
+	/* Always clear the entry first, in case the lag members changed. */
+	memset(entry, 0, sizeof(*entry));
+
+	if (param->lag_num > 1) {
+		for (i = 0; i < param->lag_num && NBL_ETH_BOND_VALID_PORT(i); i++) {
+			entry->eth_id[i] = param->member_list[i].eth_id;
+			eth_btm[param->member_list[i].eth_id] = 1;
+		}
+
+		entry->lag_id = param->lag_id;
+		entry->lag_num = param->lag_num;
+	}
+
+	/* If the lag members changed, notify both the original and the new
+	 * related vfs to update link_state.
+	 */
+	for (i = 0; i < origin_entry.lag_num && NBL_ETH_BOND_VALID_PORT(i); i++)
+		eth_btm[origin_entry.eth_id[i]] = 1;
+
+	for (i = 0; i < NBL_MAX_ETHERNET; i++)
+		if (eth_btm[i])
+			num++;
+
+	nbl_res_update_offload_status((struct nbl_resource_mgt_leonis *)res_mgt);
+
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+	if (!event_data)
+		return -ENOMEM;
+
+	for (i = 0; i < NBL_MAX_ETHERNET; i++)
+		if (eth_btm[i])
+			event_data->eth_id[j++] = i;
+
+	event_data->num = num;
+
+	nbl_event_notify(NBL_EVENT_LINK_STATE_UPDATE, event_data,
+			 NBL_COMMON_TO_VSI_ID(common), NBL_COMMON_TO_BOARD_ID(common));
+
+	kfree(event_data);
+	return 0;
+}
+
+static int nbl_res_get_eth_bond_info(void *priv, struct nbl_bond_param *param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt);
+	struct nbl_eth_bond_entry *entry = NULL;
+	int num = 0, i = 0, j = 0, pf_id = 0;
+
+	for (i = 0; i < NBL_LAG_MAX_NUM; i++) {
+		entry = &eth_bond_info->entry[i];
+
+		if (entry->lag_num < NBL_LAG_VALID_PORTS || entry->lag_num > NBL_LAG_MAX_PORTS)
+			continue;
+
+		for (j = 0; j < entry->lag_num; j++) {
+			pf_id = nbl_res_eth_id_to_pf_id(res_mgt, entry->eth_id[j]);
+
+			param->info[num].port[j].eth_id = entry->eth_id[j];
+			param->info[num].port[j].vsi_id =
+				nbl_res_pfvfid_to_vsi_id(res_mgt, pf_id, -1, NBL_VSI_DATA);
+			param->info[num].port[j].is_active =
+				phy_ops->get_lag_fwd(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+						     entry->eth_id[j]);
+		}
+
+		param->info[num].mem_num = entry->lag_num;
+		param->info[num].lag_id = entry->lag_id;
+
+		num++;
+	}
+
+	param->lag_num = num;
+
+	return 0;
+}
+
+static void nbl_res_get_driver_version(void *priv, char *ver, int len)
+{
+	strscpy(ver, NBL_LEONIS_DRIVER_VERSION, len);
+}
+
+static void nbl_res_get_xdp_queue_info(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_net_ring_num_info *num_info = &res_info->net_ring_num_info;
+	u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id);
+
+	if (num_info->net_max_qp_num[func_id] != 0)
+		*queue_num = num_info->net_max_qp_num[func_id];
+	else
+		*queue_num = num_info->pf_def_max_net_qp_num;
+
+	*queue_size = NBL_DEFAULT_DESC_NUM;
+
+	if (*queue_num > NBL_MAX_TXRX_QUEUE_PER_FUNC) {
+		nbl_warn(NBL_RES_MGT_TO_COMMON(res_mgt), NBL_DEBUG_QUEUE,
+			 "Invalid xdp queue num %d for func %d, use default", *queue_num, func_id);
+		*queue_num = NBL_DEFAULT_PF_HW_QUEUE_NUM;
+	}
+}
+
+static int nbl_res_get_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->get_pfc_buffer_size(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, prio, xoff, xon);
+
+	return 0;
+}
+
+static int nbl_res_set_pfc_buffer_size(void *priv, u8 eth_id, u8 prio, int xoff, int xon)
+{
+	struct nbl_resource_mgt *res_mgt =
(struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->set_pfc_buffer_size(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, + prio, xoff, xon); +} + +static int nbl_res_configure_qos(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->configure_qos(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), eth_id, pfc, trust, dscp2prio_map); + + return 0; +} + +static struct nbl_resource_ops res_ops = { + .register_net = nbl_res_register_net, + .unregister_net = nbl_res_unregister_net, + .check_active_vf = nbl_res_check_active_vf, + .get_base_mac_addr = nbl_res_get_base_mac_addr, + .get_vsi_id = nbl_res_get_vsi_id, + .get_eth_id = nbl_res_get_eth_id, + .get_rep_feature = nbl_res_get_rep_feature, + .get_rep_queue_info = nbl_res_get_rep_queue_info, + .get_user_queue_info = nbl_res_get_user_queue_info, + .set_eswitch_mode = nbl_res_set_eswitch_mode, + .get_eswitch_mode = nbl_res_get_eswitch_mode, + .alloc_rep_data = nbl_res_alloc_rep_data, + .free_rep_data = nbl_res_free_rep_data, + .set_rep_netdev_info = nbl_res_set_rep_netdev_info, + .unset_rep_netdev_info = nbl_res_unset_rep_netdev_info, + .get_rep_netdev_info = nbl_res_get_rep_netdev_info, + .disable_phy_flow = nbl_res_disable_phy_flow, + .enable_phy_flow = nbl_res_enable_phy_flow, + .init_acl = nbl_res_init_acl, + .uninit_acl = nbl_res_uninit_acl, + .set_upcall_rule = nbl_res_set_upcall_rule, + .unset_upcall_rule = nbl_res_unset_upcall_rule, + .set_shaping_dport_vld = nbl_res_set_shaping_dport_vld, + .set_dport_fc_th_vld = nbl_res_set_dport_fc_th_vld, + .get_rep_stats = nbl_res_get_rep_stats, + .get_rep_index = nbl_res_get_rep_index, + .setup_rdma_id = nbl_res_setup_rdma_id, + .remove_rdma_id = nbl_res_remove_rdma_id, + .register_rdma = nbl_res_register_rdma, + .unregister_rdma = nbl_res_unregister_rdma, + .register_rdma_bond = nbl_res_register_rdma_bond, + .unregister_rdma_bond = nbl_res_unregister_rdma_bond, + .get_hw_addr = nbl_res_get_hw_addr, + .get_real_hw_addr = nbl_res_get_real_hw_addr, + .get_function_id = nbl_res_get_function_id, + .get_real_bdf = nbl_res_get_real_bdf, + .get_product_flex_cap = nbl_res_get_flex_capability, + .get_product_fix_cap = nbl_res_get_fix_capability, + .register_net_rep = nbl_res_register_net_rep, + .unregister_net_rep = nbl_res_unregister_net_rep, + .register_eth_rep = nbl_res_register_eth_rep, + .unregister_eth_rep = nbl_res_unregister_eth_rep, + .register_upcall_port = nbl_res_register_upcall_port, + .unregister_upcall_port = nbl_res_unregister_upcall_port, + .check_offload_status = nbl_res_check_offload_status, + .set_offload_status = nbl_res_set_offload_status, + .init_offload_fwd = nbl_res_init_offload_fwd, + .init_cmdq = nbl_res_init_cmdq, + .destroy_cmdq = nbl_res_destroy_cmdq, + .reset_cmdq = nbl_res_reset_cmdq, + .init_rep = nbl_res_init_rep, + .init_flow = nbl_res_init_flow, + .deinit_flow = nbl_res_deinit_flow, + .offload_flow_rule = nbl_res_offload_flow_rule, + .get_flow_acl_switch = nbl_res_get_flow_acl_switch, + .get_line_rate_info = nbl_res_get_line_rate_info, + .get_chip_temperature = nbl_res_get_chip_temperature, + .get_driver_info = nbl_res_get_driver_info, + .get_board_info = nbl_res_get_board_info, + .flr_clear_net = nbl_res_flr_clear_net, + .flr_clear_rdma = nbl_res_flr_clear_rdma, + .covert_vfid_to_vsi_id = nbl_res_covert_vfid_to_vsi_id, + + .init_vdpaq = nbl_res_init_vdpaq, + .destroy_vdpaq = 
nbl_res_destroy_vdpaq, + .get_upcall_port = nbl_res_get_upcall_port, + + .get_reg_dump = nbl_res_get_reg_dump, + .get_reg_dump_len = nbl_res_get_reg_dump_len, + .process_abnormal_event = nbl_res_process_abnormal_event, + + .cfg_lag_hash_algorithm = nbl_res_cfg_lag_hash_algorithm, + .cfg_lag_member_fwd = nbl_res_cfg_lag_member_fwd, + .cfg_lag_member_list = nbl_res_cfg_lag_member_list, + .cfg_lag_member_up_attr = nbl_res_cfg_lag_member_up_attr, + .cfg_bond_shaping = nbl_res_cfg_bond_shaping, + .cfg_bgid_back_pressure = nbl_res_cfg_bgid_back_pressure, + + .cfg_eth_bond_info = nbl_res_cfg_eth_bond_info, + .get_eth_bond_info = nbl_res_get_eth_bond_info, + + .switchdev_init_cmdq = nbl_res_switchdev_init_cmdq, + .switchdev_deinit_cmdq = nbl_res_switchdev_deinit_cmdq, + .set_tc_flow_info = nbl_res_set_tc_flow_info, + .unset_tc_flow_info = nbl_res_unset_tc_flow_info, + .get_tc_flow_info = nbl_res_get_tc_flow_info, + + .get_p4_info = nbl_res_get_p4_info, + .get_p4_used = nbl_res_get_p4_used, + .set_p4_used = nbl_res_set_p4_used, + .get_vf_base_vsi_id = nbl_res_get_vf_base_vsi_id, + .load_p4 = nbl_res_load_p4, + .get_p4_version = nbl_res_get_p4_version, + + .get_board_id = nbl_res_get_board_id, + .set_pmd_debug = nbl_res_set_pmd_debug, + + .get_driver_version = nbl_res_get_driver_version, + .get_xdp_queue_info = nbl_res_get_xdp_queue_info, + .set_hw_status = nbl_res_set_hw_status, + + .configure_qos = nbl_res_configure_qos, + .set_pfc_buffer_size = nbl_res_set_pfc_buffer_size, + .get_pfc_buffer_size = nbl_res_get_pfc_buffer_size, +}; + +static struct nbl_res_product_ops product_ops = { + .queue_mgt_init = nbl_queue_mgt_init_leonis, + .setup_qid_map_table = nbl_res_queue_setup_qid_map_table_leonis, + .remove_qid_map_table = nbl_res_queue_remove_qid_map_table_leonis, + .init_qid_map_table = nbl_res_queue_init_qid_map_table, +}; + +static bool is_ops_inited; +static int nbl_res_setup_res_mgt(struct nbl_common_info *common, + struct nbl_resource_mgt_leonis **res_mgt_leonis) +{ + struct device *dev; + struct nbl_resource_info *resource_info; + + dev = NBL_COMMON_TO_DEV(common); + *res_mgt_leonis = devm_kzalloc(dev, sizeof(struct nbl_resource_mgt_leonis), GFP_KERNEL); + if (!*res_mgt_leonis) + return -ENOMEM; + NBL_RES_MGT_TO_COMMON(&(*res_mgt_leonis)->res_mgt) = common; + + resource_info = devm_kzalloc(dev, sizeof(struct nbl_resource_info), GFP_KERNEL); + if (!resource_info) + return -ENOMEM; + NBL_RES_MGT_TO_RES_INFO(&(*res_mgt_leonis)->res_mgt) = resource_info; + + return 0; +} + +static void nbl_res_remove_res_mgt(struct nbl_common_info *common, + struct nbl_resource_mgt_leonis **res_mgt_leonis) +{ + struct device *dev; + + dev = NBL_COMMON_TO_DEV(common); + devm_kfree(dev, NBL_RES_MGT_TO_RES_INFO(&(*res_mgt_leonis)->res_mgt)); + devm_kfree(dev, *res_mgt_leonis); + *res_mgt_leonis = NULL; +} + +static void nbl_res_remove_ops(struct device *dev, struct nbl_resource_ops_tbl **res_ops_tbl) +{ + devm_kfree(dev, *res_ops_tbl); + *res_ops_tbl = NULL; +} + +static int nbl_res_setup_ops(struct device *dev, struct nbl_resource_ops_tbl **res_ops_tbl, + struct nbl_resource_mgt_leonis *res_mgt_leonis) +{ + int ret = 0; + + *res_ops_tbl = devm_kzalloc(dev, sizeof(struct nbl_resource_ops_tbl), GFP_KERNEL); + if (!*res_ops_tbl) + return -ENOMEM; + + if (!is_ops_inited) { + ret = nbl_flow_setup_ops_leonis(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_tc_flow_setup_ops_leonis(&res_ops); + if (ret) + goto setup_fail; + + ret = nbl_queue_setup_ops_leonis(&res_ops); + if (ret) + goto setup_fail; + + ret = 
nbl_txrx_setup_ops(&res_ops);
+		if (ret)
+			goto setup_fail;
+
+		ret = nbl_intr_setup_ops(&res_ops);
+		if (ret)
+			goto setup_fail;
+
+		ret = nbl_vsi_setup_ops(&res_ops);
+		if (ret)
+			goto setup_fail;
+
+		ret = nbl_adminq_setup_ops(&res_ops);
+		if (ret)
+			goto setup_fail;
+
+		ret = nbl_accel_setup_ops(&res_ops);
+		if (ret)
+			goto setup_fail;
+
+		ret = nbl_fd_setup_ops(&res_ops);
+		if (ret)
+			goto setup_fail;
+
+		is_ops_inited = true;
+	}
+
+	NBL_RES_OPS_TBL_TO_OPS(*res_ops_tbl) = &res_ops;
+	NBL_RES_OPS_TBL_TO_PRIV(*res_ops_tbl) = res_mgt_leonis;
+
+	return 0;
+
+setup_fail:
+	nbl_res_remove_ops(dev, res_ops_tbl);
+	return -EAGAIN;
+}
+
+static int nbl_res_dev_setup_eswitch_info(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_eswitch_info *eswitch_info;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	eswitch_info = devm_kzalloc(dev, sizeof(struct nbl_eswitch_info), GFP_KERNEL);
+	if (!eswitch_info)
+		return -ENOMEM;
+	NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt) = eswitch_info;
+
+	return 0;
+}
+
+static void nbl_res_pf_dev_remove_eswitch_info(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	struct nbl_eswitch_info **eswitch_info = &NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt);
+
+	if (!(*eswitch_info))
+		return;
+	devm_kfree(dev, *eswitch_info);
+	*eswitch_info = NULL;
+}
+
+static int nbl_res_ctrl_dev_setup_eth_info(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	struct nbl_eth_info *eth_info;
+	struct nbl_eth_bond_info *eth_bond_info;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	u32 eth_num = 0;
+	u32 eth_bitmap, eth_id;
+	int i;
+
+	eth_info = devm_kzalloc(dev, sizeof(struct nbl_eth_info), GFP_KERNEL);
+	if (!eth_info)
+		return -ENOMEM;
+
+	NBL_RES_MGT_TO_ETH_INFO(res_mgt) = eth_info;
+
+	eth_bond_info = devm_kzalloc(dev, sizeof(struct nbl_eth_bond_info), GFP_KERNEL);
+	if (!eth_bond_info)
+		return -ENOMEM;
+
+	NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt) = eth_bond_info;
+
+	eth_info->eth_num = (u8)phy_ops->get_fw_eth_num(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+	eth_bitmap = phy_ops->get_fw_eth_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+	/* On a 2-port board, the eth_ids are 0 and 2. */
+	for (i = 0; i < NBL_MAX_ETHERNET; i++) {
+		if ((1 << i) & eth_bitmap) {
+			set_bit(i, eth_info->eth_bitmap);
+			eth_info->eth_id[eth_num] = i;
+			eth_info->logic_eth_id[i] = eth_num;
+			eth_num++;
+		}
+	}
+
+	for (i = 0; i < NBL_RES_MGT_TO_PF_NUM(res_mgt); i++) {
+		/* If pf_id < eth_num, the PF maps to its corresponding eth_id. */
+		if (i < eth_num) {
+			eth_id = eth_info->eth_id[i];
+			eth_info->pf_bitmap[eth_id] |= BIT(i);
+		}
+		/* If pf_id >= eth_num, the PF maps to eth 0. */
+		else
+			eth_info->pf_bitmap[0] |= BIT(i);
+	}
+
+	return 0;
+}
+
+static void nbl_res_ctrl_dev_remove_eth_info(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	struct nbl_eth_info **eth_info = &NBL_RES_MGT_TO_ETH_INFO(res_mgt);
+	struct nbl_eth_bond_info **eth_bond_info = &NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt);
+
+	if (*eth_bond_info) {
+		devm_kfree(dev, *eth_bond_info);
+		*eth_bond_info = NULL;
+	}
+
+	if (*eth_info) {
+		devm_kfree(dev, *eth_info);
+		*eth_info = NULL;
+	}
+}
+
+static int nbl_res_ctrl_dev_sriov_info_init(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_resource_info *res_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct device *dev = NBL_COMMON_TO_DEV(common);
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
struct nbl_sriov_info *sriov_info; + u32 vf_fid, vf_startid, vf_endid = NBL_MAX_VF; + u16 func_id; + u16 function; + + sriov_info = devm_kcalloc(dev, NBL_RES_MGT_TO_PF_NUM(res_mgt), + sizeof(struct nbl_sriov_info), GFP_KERNEL); + if (!sriov_info) + return -ENOMEM; + + NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) = sriov_info; + + for (func_id = 0; func_id < NBL_RES_MGT_TO_PF_NUM(res_mgt); func_id++) { + sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt)[func_id]; + function = NBL_COMMON_TO_PCI_FUNC_ID(common) + func_id; + + sriov_info->bdf = PCI_DEVID(common->bus, + PCI_DEVFN(common->devid, function)); + vf_fid = phy_ops->get_host_pf_fid(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + func_id); + vf_startid = vf_fid & 0xFFFF; + vf_endid = (vf_fid >> 16) & 0xFFFF; + sriov_info->start_vf_func_id = vf_startid + NBL_MAX_PF_LEONIS; + sriov_info->num_vfs = vf_endid - vf_startid; + } + + res_info->max_vf_num = vf_endid; + + return 0; +} + +static void nbl_res_ctrl_dev_sriov_info_remove(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_sriov_info **sriov_info = &NBL_RES_MGT_TO_SRIOV_INFO(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + + if (!(*sriov_info)) + return; + + devm_kfree(dev, *sriov_info); + *sriov_info = NULL; +} + +static int nbl_res_ctrl_dev_vsi_info_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_COMMON_TO_DEV(common); + struct nbl_vsi_info *vsi_info; + struct nbl_sriov_info *sriov_info; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i; + + vsi_info = devm_kcalloc(dev, NBL_RES_MGT_TO_PF_NUM(res_mgt), + sizeof(struct nbl_vsi_info), GFP_KERNEL); + if (!vsi_info) + return -ENOMEM; + + NBL_RES_MGT_TO_VSI_INFO(res_mgt) = vsi_info; + /** + * case 1 two port(2pf) + * pf0,pf1(NBL_VSI_SERV_PF_DATA_TYPE) vsi is 0,512 + * pf0,pf1(NBL_VSI_SERV_PF_CTLR_TYPE) vsi is 1,513 + * pf0,pf1(NBL_VSI_SERV_PF_USER_TYPE) vsi is 2,514 + * pf0,pf1(NBL_VSI_SERV_PF_XDP_TYPE) vsi is 3,515 + * pf0.vf0-pf0.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 4-259 + * pf1.vf0-pf1.vf255(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 516-771 + * pf2-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 260-265(if exist) + * case 2 four port(4pf) + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_DATA_TYPE) vsi is 0,256,512,768 + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_CTLR_TYPE) vsi is 1,257,513,769 + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_USER_TYPE) vsi is 2,258,514,770 + * pf0,pf1,pf2,pf3(NBL_VSI_SERV_PF_XDP_TYPE) vsi is 3,259,515,771 + * pf0.vf0-pf0.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 4-131 + * pf1.vf0-pf1.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 260-387 + * pf2.vf0-pf2.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 516-643 + * pf3.vf0-pf3.vf127(NBL_VSI_SERV_VF_DATA_TYPE) vsi is 772-899 + * pf4-pf7(NBL_VSI_SERV_PF_EXTRA_TYPE) vsi 132-135(if exist) + */ + + vsi_info->num = eth_info->eth_num; + for (i = 0; i < vsi_info->num; i++) { + vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].base_id = i + * NBL_VSI_ID_GAP(vsi_info->num); + vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].num = 1; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].base_id = + vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].base_id + + vsi_info->serv_info[i][NBL_VSI_SERV_PF_DATA_TYPE].num; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].num = 1; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].base_id = + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].base_id + + vsi_info->serv_info[i][NBL_VSI_SERV_PF_CTLR_TYPE].num; + vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].num = 1; + 
vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].base_id =
+			vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].base_id +
+			vsi_info->serv_info[i][NBL_VSI_SERV_PF_USER_TYPE].num;
+		vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].num = 1;
+		vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].base_id =
+			vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].base_id +
+			vsi_info->serv_info[i][NBL_VSI_SERV_PF_XDP_TYPE].num;
+		sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + i;
+		vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].num = sriov_info->num_vfs;
+	}
+
+	/* PFs with pf_id >= eth_num belong to pf0's switch. */
+	vsi_info->serv_info[0][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id =
+		vsi_info->serv_info[0][NBL_VSI_SERV_VF_DATA_TYPE].base_id +
+		vsi_info->serv_info[0][NBL_VSI_SERV_VF_DATA_TYPE].num;
+	vsi_info->serv_info[0][NBL_VSI_SERV_PF_EXTRA_TYPE].num =
+		NBL_RES_MGT_TO_PF_NUM(res_mgt) - vsi_info->num;
+
+	return 0;
+}
+
+static void nbl_res_ctrl_dev_remove_vsi_info(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	struct nbl_vsi_info **vsi_info = &NBL_RES_MGT_TO_VSI_INFO(res_mgt);
+
+	if (!(*vsi_info))
+		return;
+
+	devm_kfree(dev, *vsi_info);
+	*vsi_info = NULL;
+}
+
+static int nbl_res_ring_num_info_init(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt);
+	struct nbl_net_ring_num_info *num_info = &resource_info->net_ring_num_info;
+
+	num_info->pf_def_max_net_qp_num = NBL_DEFAULT_PF_HW_QUEUE_NUM;
+	num_info->vf_def_max_net_qp_num = NBL_DEFAULT_VF_HW_QUEUE_NUM;
+
+	return 0;
+}
+
+static int nbl_res_check_fw_working(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	unsigned long fw_pong_current;
+	unsigned long seconds_current = 0;
+
+	seconds_current = (unsigned long)ktime_get_real_seconds();
+	phy_ops->set_fw_pong(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current - 1);
+	phy_ops->set_fw_ping(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), seconds_current);
+
+	/* Wait for FW to ack the first heartbeat seq */
+	return nbl_read_poll_timeout(phy_ops->get_fw_pong, fw_pong_current,
+				     fw_pong_current == seconds_current,
+				     USEC_PER_MSEC, 100 * USEC_PER_MSEC,
+				     false, NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+}
+
+static int nbl_res_init_pf_num(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	u32 pf_mask;
+	u32 pf_num = 0;
+	int i;
+
+	/* Host PFs are expected to be enabled contiguously from bit 0;
+	 * counting stops at the first masked PF.
+	 */
+	pf_mask = phy_ops->get_host_pf_mask(NBL_RES_MGT_TO_PHY_PRIV(res_mgt));
+	for (i = 0; i < NBL_MAX_PF_LEONIS; i++) {
+		if (!(pf_mask & (1 << i)))
+			pf_num++;
+		else
+			break;
+	}
+
+	NBL_RES_MGT_TO_PF_NUM(res_mgt) = pf_num;
+
+	if (!pf_num)
+		return -1;
+
+	return 0;
+}
+
+static void nbl_res_init_board_info(struct nbl_resource_mgt *res_mgt)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+
+	phy_ops->get_board_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+				&res_mgt->resource_info->board_info);
+}
+
+static void nbl_res_stop(struct nbl_resource_mgt_leonis *res_mgt_leonis)
+{
+	struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+
+	nbl_fd_mgt_stop(res_mgt);
+	nbl_queue_mgt_stop(res_mgt);
+	nbl_txrx_mgt_stop(res_mgt);
+	nbl_intr_mgt_stop(res_mgt);
+	nbl_adminq_mgt_stop(res_mgt);
+	nbl_vsi_mgt_stop(res_mgt);
+	nbl_accel_mgt_stop(res_mgt);
+	nbl_flow_mgt_stop_leonis(res_mgt);
+	nbl_res_ctrl_dev_remove_vsi_info(res_mgt);
+	nbl_res_ctrl_dev_remove_eth_info(res_mgt);
+
nbl_res_ctrl_dev_sriov_info_remove(res_mgt); + nbl_res_pf_dev_remove_eswitch_info(res_mgt); + + /*only pf0 need tc_flow_mgt_stop*/ + if (!common->is_vf && !NBL_COMMON_TO_PCI_FUNC_ID(common)) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow stop tc flow mgt"); + nbl_tc_flow_mgt_stop_leonis(res_mgt); + } +} + +static int nbl_res_start(struct nbl_resource_mgt_leonis *res_mgt_leonis, + struct nbl_func_caps caps) +{ + struct nbl_resource_mgt *res_mgt = &res_mgt_leonis->res_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_upcall_port_info *upcall_port_info = + &res_mgt_leonis->pmd_status.upcall_port_info; + u32 quirks; + int ret = 0; + + if (caps.has_factory_ctrl) { + ret = nbl_res_check_fw_working(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "fw is not working"); + return ret; + } + + ret = nbl_res_init_pf_num(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "pf number is illegal"); + return ret; + } + + nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_HB_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_RESET_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_ADMINDQ_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_RESTOOL_CAP); + + ret = nbl_res_ctrl_dev_sriov_info_init(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Failed to init sr_iov info"); + return ret; + } + + ret = nbl_intr_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_adminq_mgt_start(res_mgt); + if (ret) + goto start_fail; + + return 0; + } + + if (caps.has_ctrl) { + ret = nbl_res_check_fw_working(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "fw is not working"); + return ret; + } + + nbl_res_init_board_info(res_mgt); + + ret = nbl_res_init_pf_num(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "pf number is illegal"); + return ret; + } + + ret = nbl_res_ctrl_dev_sriov_info_init(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Failed to init sr_iov info"); + return ret; + } + + ret = nbl_res_ctrl_dev_setup_eth_info(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_res_ctrl_dev_vsi_info_init(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_res_ring_num_info_init(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_flow_mgt_start_leonis(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_tc_flow_mgt_start_leonis(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_queue_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_vsi_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_adminq_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_intr_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_accel_mgt_start(res_mgt); + if (ret) + goto start_fail; + + ret = nbl_fd_mgt_start(res_mgt); + if (ret) + goto start_fail; + + nbl_res_set_flex_capability(res_mgt, NBL_DUMP_FLOW_CAP); + nbl_res_set_flex_capability(res_mgt, NBL_DUMP_FD_CAP); + nbl_res_set_flex_capability(res_mgt, NBL_SECURITY_ACCEL_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_OFFLOAD_NETWORK_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_HB_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_FW_RESET_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_ADMINDQ_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_RESTOOL_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_ADAPT_DESC_GOTHER); + nbl_res_set_fix_capability(res_mgt, NBL_PROCESS_FLR_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_RESET_CTRL_CAP); + /* leonis af need a pmd_debug for dpdk gdb 
debug */ + nbl_res_set_fix_capability(res_mgt, NBL_PMD_DEBUG); + nbl_res_set_flex_capability(res_mgt, NBL_SECURITY_ACCEL_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_IPSEC_AGE_CAP); + + upcall_port_info->upcall_port_active = false; + } + + if (caps.has_net) { + ret = nbl_txrx_mgt_start(res_mgt); + if (ret) + goto start_fail; + + if (!caps.is_vf) { + ret = nbl_res_dev_setup_eswitch_info(res_mgt); + if (ret) + goto start_fail; + } + } + + nbl_res_set_fix_capability(res_mgt, NBL_HWMON_TEMP_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_CLEAN_MAILBOX_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_ITR_DYNAMIC); + nbl_res_set_fix_capability(res_mgt, NBL_P4_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_TASK_RESET_CAP); + nbl_res_set_fix_capability(res_mgt, NBL_QOS_SYSFS_CAP); + + quirks = nbl_res_get_quirks(res_mgt); + if (quirks & BIT(NBL_QUIRKS_NO_TOE)) { + nbl_res_set_fix_capability(res_mgt, NBL_TASK_KEEP_ALIVE); + if (caps.has_ctrl) + nbl_res_set_fix_capability(res_mgt, NBL_RECOVERY_ABNORMAL_STATUS); + } + + return 0; + +start_fail: + nbl_res_stop(res_mgt_leonis); + return ret; +} + +int nbl_res_init_leonis(void *p, struct nbl_init_param *param) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_resource_mgt_leonis **res_mgt_leonis; + struct nbl_resource_ops_tbl **res_ops_tbl; + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_channel_ops_tbl *chan_ops_tbl; + int ret = 0; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + res_mgt_leonis = (struct nbl_resource_mgt_leonis **)&NBL_ADAPTER_TO_RES_MGT(adapter); + res_ops_tbl = &NBL_ADAPTER_TO_RES_OPS_TBL(adapter); + phy_ops_tbl = NBL_ADAPTER_TO_PHY_OPS_TBL(adapter); + chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter); + + ret = nbl_res_setup_res_mgt(common, res_mgt_leonis); + if (ret) + goto setup_mgt_fail; + + nbl_res_setup_common_ops(&(*res_mgt_leonis)->res_mgt); + NBL_RES_MGT_TO_CHAN_OPS_TBL(&(*res_mgt_leonis)->res_mgt) = chan_ops_tbl; + NBL_RES_MGT_TO_PHY_OPS_TBL(&(*res_mgt_leonis)->res_mgt) = phy_ops_tbl; + + NBL_RES_MGT_TO_PROD_OPS(&(*res_mgt_leonis)->res_mgt) = &product_ops; + + ret = nbl_res_start(*res_mgt_leonis, param->caps); + if (ret) + goto start_fail; + + ret = nbl_res_setup_ops(dev, res_ops_tbl, *res_mgt_leonis); + if (ret) + goto setup_ops_fail; + + return 0; + +setup_ops_fail: + nbl_res_stop(*res_mgt_leonis); +start_fail: + nbl_res_remove_res_mgt(common, res_mgt_leonis); +setup_mgt_fail: + return ret; +} + +void nbl_res_remove_leonis(void *p) +{ + struct nbl_adapter *adapter = (struct nbl_adapter *)p; + struct device *dev; + struct nbl_common_info *common; + struct nbl_resource_mgt_leonis **res_mgt; + struct nbl_resource_ops_tbl **res_ops_tbl; + + dev = NBL_ADAPTER_TO_DEV(adapter); + common = NBL_ADAPTER_TO_COMMON(adapter); + res_mgt = (struct nbl_resource_mgt_leonis **)&NBL_ADAPTER_TO_RES_MGT(adapter); + res_ops_tbl = &NBL_ADAPTER_TO_RES_OPS_TBL(adapter); + + nbl_res_remove_ops(dev, res_ops_tbl); + nbl_res_stop(*res_mgt); + nbl_res_remove_res_mgt(common, res_mgt); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..2d43449119e88069f05a220644f6d142657d50b1 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_resource_leonis.h @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_RESOURCE_LEONIS_H_ +#define _NBL_RESOURCE_LEONIS_H_ + +#include "nbl_resource.h" + +#define NBL_MAX_PF_LEONIS 8 +/* product NO(ASIC SNIC as 3)-V NO.R NO.B NO.SP NO */ +#define NBL_LEONIS_DRIVER_VERSION "3-3.1.312.1" + +int nbl_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt); +void nbl_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt); +int nbl_flow_setup_ops_leonis(struct nbl_resource_ops *resource_ops); +void nbl_flow_remove_ops_leonis(struct nbl_resource_ops *resource_ops); +int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *resource_ops); +void nbl_queue_remove_ops_leonis(struct nbl_resource_ops *resource_ops); +int nbl_tc_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt); +void nbl_tc_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt); +int nbl_tc_flow_setup_ops_leonis(struct nbl_resource_ops *resource_ops); +void nbl_tc_flow_remove_ops_leonis(struct nbl_resource_ops *resource_ops); +int nbl_tc_flow_flush_flow(struct nbl_resource_mgt *res_mgt); + +void nbl_queue_mgt_init_leonis(struct nbl_queue_mgt *queue_mgt); +int nbl_res_queue_setup_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id, + u64 notify_addr); +void nbl_res_queue_remove_qid_map_table_leonis(struct nbl_resource_mgt *res_mgt, u16 func_id); +int nbl_res_queue_init_qid_map_table(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_mgt *queue_mgt, struct nbl_phy_ops *phy_ops); + +void nbl_intr_mgt_init_leonis(struct nbl_resource_mgt *res_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..a7eb5dd7bfb20814348eee44b84c4c0457473191 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.c @@ -0,0 +1,1792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#include "nbl_tc_flow_filter_leonis.h" +#include "nbl_p4_actions.h" +#include "nbl_tc_tun_leonis.h" +#include "nbl_tc_flow_leonis.h" + +#define NBL_ACT_OFT 16 +#define NBL_GET_ACT_INFO(data, idx) (*(u16 *)&(data) + ((idx) << NBL_ACT_OFT)) + +static const struct nbl_cmd_hdr g_cmd_hdr[] = { + [NBL_FEM_KTAT_WRITE] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, + NBL_TABLE_FEM_KTAT, NBL_CMD_OP_WRITE }, + [NBL_FEM_KTAT_READ] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, + NBL_TABLE_FEM_KTAT, NBL_CMD_OP_READ }, + [NBL_FEM_KTAT_SEARCH] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, + NBL_TABLE_FEM_KTAT, NBL_CMD_OP_SEARCH }, + [NBL_FEM_HT_WRITE] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, NBL_TABLE_FEM_HT, + NBL_CMD_OP_WRITE }, + [NBL_FEM_HT_READ] = { NBL_BLOCK_PPE, NBL_MODULE_FEM, NBL_TABLE_FEM_HT, + NBL_CMD_OP_READ }, +}; + +static int nbl_set_tcam_process(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item, + struct nbl_flow_tcam_ad_item *ad_item, + u16 *index, bool *is_new) +{ + int ret; + + if (!nbl_tcam_key_lookup(tcam_pp_key_mng, tcam_item, index)) { + tcam_pp_key_mng[*index].ref_cnt++; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:ref_cnt++ pp%d index=%d, ref_cnt=%d", + tcam_item->pp_type, *index, + tcam_pp_key_mng[*index].ref_cnt); + if (tcam_item->key_mode == NBL_TC_KT_FULL_MODE) { + tcam_pp_key_mng[*index + 1].ref_cnt++; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:ref_cnt++ pp%d index=%d, ref_cnt=%d", + tcam_item->pp_type, *index + 1, + tcam_pp_key_mng[*index + 1].ref_cnt); + } + } else { + ret = nbl_insert_tcam_key_ad(common, tcam_pp_key_mng, tcam_pp_ad_mng, + tcam_item, ad_item, index); + *is_new = true; + if (ret) + return ret; + } + + return 0; +} + +static int nbl_flow_ht_assign_proc(struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, + struct nbl_flow_pp_ht_mng *pp_ht0_mng, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, + struct nbl_tc_ht_item *ht_item, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + u16 i = 0; + u16 ht0_hash = 0; + u16 ht1_hash = 0; + struct nbl_flow_pp_ht_tbl *pp_ht0_node = NULL; + struct nbl_flow_pp_ht_tbl *pp_ht1_node = NULL; + u32 num = 0; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + ht0_hash = NBL_CRC16_CCITT(mt_input->key, NBL_KT_BYTE_LEN); + ht1_hash = NBL_CRC16_IBM(mt_input->key, NBL_KT_BYTE_LEN); + + ht0_hash = + nbl_hash_transfer(ht0_hash, mt_input->power, mt_input->depth); + ht1_hash = + nbl_hash_transfer(ht1_hash, mt_input->power, mt_input->depth); + + pp_ht0_node = pp_ht0_mng->hash_map[ht0_hash]; + pp_ht1_node = pp_ht1_mng->hash_map[ht1_hash]; + + ht_item->ht0_hash = ht0_hash; + ht_item->ht1_hash = ht1_hash; + ht_item->tbl_id = mt_input->tbl_id; + + /* 2 flow has the same ht0 ht1,put it to tcam*/ + if (nbl_pp_ht0_ht1_search(pp_ht0_mng, ht0_hash, pp_ht1_mng, ht1_hash)) { + if ((*tcam_item->pp_tcam_count < NBL_FEM_TCAM_MAX_NUM - num - 1) || + (*tcam_item->pp_tcam_count == NBL_FEM_TCAM_MAX_NUM - num - 1 && + tcam_item->key_mode == NBL_TC_KT_HALF_MODE)) { + tcam_item->tcam_flag = true; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:pp%d has the same ht0=%x,ht1=%x,put it to tcam.\n", + mt_input->pp_type, ht0_hash, ht1_hash); + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow tcam:pp%d has the same ht0=%x,ht1=%x,exceed max num.\n", + mt_input->pp_type, ht0_hash, ht1_hash); + ret = -ENOSPC; + } + return ret; + } + + if (!pp_ht0_node && !pp_ht1_node) { + ret = nbl_insert_pp_ht(res_mgt, pp_ht0_mng, 
ht0_hash, ht1_hash, + mt_input->tbl_id); + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = 0; + + } else if (pp_ht0_node && !pp_ht1_node) { + if (pp_ht0_node->ref_cnt >= NBL_HASH_CFT_AVL) { + ret = nbl_insert_pp_ht(res_mgt, pp_ht1_mng, ht1_hash, ht0_hash, + mt_input->tbl_id); + ht_item->ht_entry = NBL_HASH1; + ht_item->hash_bucket = 0; + } else { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht0_node->key[i].vid == 0) { + pp_ht0_node->key[i].vid = 1; + pp_ht0_node->key[i].ht_other_index = + ht1_hash; + pp_ht0_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht0_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = i; + break; + } + } + } + } else if (!pp_ht0_node && pp_ht1_node) { + if (pp_ht1_node->ref_cnt >= NBL_HASH_CFT_AVL) { + ret = nbl_insert_pp_ht(res_mgt, pp_ht0_mng, ht0_hash, ht1_hash, + mt_input->tbl_id); + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = 0; + } else { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht1_node->key[i].vid == 0) { + pp_ht1_node->key[i].vid = 1; + pp_ht1_node->key[i].ht_other_index = + ht0_hash; + pp_ht1_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht1_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH1; + ht_item->hash_bucket = i; + break; + } + } + } + } else { + if (pp_ht0_node->ref_cnt <= NBL_HASH_CFT_AVL || + (pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht0_node->ref_cnt < NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL)) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht0_node->key[i].vid == 0) { + pp_ht0_node->key[i].vid = 1; + pp_ht0_node->key[i].ht_other_index = + ht1_hash; + pp_ht0_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht0_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH0; + ht_item->hash_bucket = i; + break; + } + } + } else if ((pp_ht0_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt <= NBL_HASH_CFT_AVL) || + (pp_ht0_node->ref_cnt == NBL_HASH_CFT_MAX && + pp_ht1_node->ref_cnt > NBL_HASH_CFT_AVL && + pp_ht1_node->ref_cnt < NBL_HASH_CFT_MAX)) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (pp_ht1_node->key[i].vid == 0) { + pp_ht1_node->key[i].vid = 1; + pp_ht1_node->key[i].ht_other_index = + ht0_hash; + pp_ht1_node->key[i].kt_index = + mt_input->tbl_id; + pp_ht1_node->ref_cnt++; + ht_item->ht_entry = NBL_HASH1; + ht_item->hash_bucket = i; + break; + } + } + } else { + if ((*tcam_item->pp_tcam_count < + NBL_FEM_TCAM_MAX_NUM - num - 1) || + (*tcam_item->pp_tcam_count == + NBL_FEM_TCAM_MAX_NUM - num - 1 && + tcam_item->key_mode == NBL_TC_KT_HALF_MODE)) { + tcam_item->tcam_flag = true; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow tcam:pp%d ht0=%x,cnt=%d,ht1=%x,cnt=%d, " + "put it to tcam.\n", + mt_input->pp_type, ht0_hash, + pp_ht0_node->ref_cnt, ht1_hash, + pp_ht1_node->ref_cnt); + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow tcam: pp%d ht0=%x,ht1=%x,exceed max tcam num.\n", + mt_input->pp_type, ht0_hash, ht1_hash); + ret = -ENOSPC; + } + } + } + + return ret; +} + +static inline u8 nbl_flow_act_num(struct nbl_mt_input *input, + u16 count) +{ + if (count <= input->kt_left_num) + return NBL_FEM_AT_NO_ENTRY; + else if (count <= input->kt_left_num + NBL_MAX_ACTION_NUM - 1) + return NBL_FEM_AT_ONE_ENTRY; + else if (count <= input->kt_left_num + 2 * (NBL_MAX_ACTION_NUM - 1)) + return NBL_FEM_AT_TWO_ENTRY; + + return NBL_FEM_AT_TWO_ENTRY; +} + +static int +nbl_flow_port_id_action_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_action_data set_dport = 
{.data = 0}; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_txrx_bond_info *bond_info = &txrx_mgt->bond_info; + u16 port_id = 0; + u16 act_idx = *item; + u16 cur_eth_proto = 0; + u32 slave1_port_id = 0; + u32 slave2_port_id = 0; + + if (!action || !buf) + return -EINVAL; + + set_dport.dport.up.port_type = action->port_type; + set_dport.dport.up.port_id = action->port_id; + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.next_stg_sel = action->next_stg_sel; + + memcpy(&port_id, &set_dport, 2); + buf[act_idx] = port_id + (NBL_ACT_SET_DPORT << 16); + + if (!(action->flag & NBL_FLOW_ACTION_PUSH_OUTER_VLAN)) + goto ret_info; + + if (action->vlan.eth_proto == NBL_QINQ_TPID_VALUE) + cur_eth_proto = NBL_QINQ_TPYE; + else if (action->vlan.eth_proto == NBL_VLAN_TPID_VALUE) + cur_eth_proto = NBL_VLAN_TPYE; + else + goto ret_info; + + if ((action->vlan.port_type == NBL_TC_PORT_TYPE_VSI || + action->vlan.port_type == NBL_TC_PORT_TYPE_ETH) && + cur_eth_proto != tc_flow_mgt->port_tpid_type[action->vlan.port_id]) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow port_id=%d,eth_proto=%d.\n", + action->vlan.port_id, cur_eth_proto); + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + action->vlan.port_id, cur_eth_proto); + tc_flow_mgt->port_tpid_type[action->vlan.port_id] = cur_eth_proto; + goto ret_info; + } + + slave1_port_id = bond_info->eth_id[0] + NBL_VLAN_TYPE_ETH_BASE; + slave2_port_id = bond_info->eth_id[1] + NBL_VLAN_TYPE_ETH_BASE; + + if (action->vlan.port_type == NBL_TC_PORT_TYPE_BOND && bond_info->bond_enable && + action->vlan.port_id == bond_info->lag_id && + (cur_eth_proto != tc_flow_mgt->port_tpid_type[slave1_port_id] || + cur_eth_proto != tc_flow_mgt->port_tpid_type[slave2_port_id])) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow lag_id = %d, port1_id=%d, eth_proto=%d, port2_id=%d, eth_proto=%d.\n", + bond_info->lag_id, slave1_port_id, cur_eth_proto, + slave2_port_id, cur_eth_proto); + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + slave1_port_id, cur_eth_proto); + tc_flow_mgt->port_tpid_type[slave1_port_id] = cur_eth_proto; + phy_ops->set_ped_tab_vsi_type(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + slave2_port_id, cur_eth_proto); + tc_flow_mgt->port_tpid_type[slave2_port_id] = cur_eth_proto; + } + +ret_info: + return 0; +} + +static int nbl_flow_drop_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_action_data set_dport = {.data = 0}; + u16 port_id = 0; + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + set_dport.dport.up.port_type = SET_DPORT_TYPE_SP_PORT; + set_dport.dport.up.port_id = 0x3FF; + set_dport.dport.up.upcall_flag = AUX_KEEP_FWD_TYPE; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + + memcpy(&port_id, &set_dport, 2); + buf[act_idx] = port_id + (NBL_ACT_SET_DPORT << 16); + + return 0; +} + +static int +nbl_flow_counter_action_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = + (action->counter_id & 0x1FFFF) + (NBL_ACT_SET_FLOW_STAT0 << 16); + return 0; +} + 
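+/* + * MCC (multicast copy) action: link one leaf node per destination port + * into the mcc list, emit NBL_ACT_SET_MCC with the index of the last + * node plus an NBL_ACT_SET_AUX_FIELD word that forces the next stage to + * NBL_NEXT_STG_MCC, then program the hardware MCC tables. + */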
+static int +nbl_flow_mcc_action_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + int i; + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + u16 mcc_port = 0; + union nbl_action_data mcc_dport = {.data = 0}; + + if (!action || !buf) + return -EINVAL; + + for (i = 0; i < action->mcc_cnt; i++) { + ret = nbl_tc_mcc_add_leaf_node(&tc_flow_mgt->tc_mcc_mgt, + action->port_mcc[i].dport_id, + action->port_mcc[i].port_type); + if (ret < 0) { + nbl_tc_mcc_free_list(&tc_flow_mgt->tc_mcc_mgt); + return ret; + } + + if (i == action->mcc_cnt - 1) { + edit_item->mcc_idx = ret; + edit_item->is_mir = true; + } + } + + buf[act_idx] = edit_item->mcc_idx + (NBL_ACT_SET_MCC << 16); + ++act_idx; + + mcc_dport.set_fwd_type.identify = NBL_SET_FWD_TYPE_IDENTIFY; + mcc_dport.set_fwd_type.next_stg_vld = 1; + mcc_dport.set_fwd_type.next_stg = NBL_NEXT_STG_MCC; + memcpy(&mcc_port, &mcc_dport, 2); + buf[act_idx] = mcc_port + (NBL_ACT_SET_AUX_FIELD << 16); + *item = act_idx; + + nbl_tc_mcc_add_hw_tbl(tc_flow_mgt->res_mgt, &tc_flow_mgt->tc_mcc_mgt); + + nbl_tc_mcc_get_list(&tc_flow_mgt->tc_mcc_mgt, &edit_item->tc_mcc_list); + + return 0; +} + +static int +nbl_flow_push_outer_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = action->vlan.vlan_tag + (NBL_ACT_ADD_SVLAN << 16); + return 0; +} + +static int +nbl_flow_push_inner_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = action->vlan.vlan_tag + (NBL_ACT_ADD_CVLAN << 16); + return 0; +} + +static int +nbl_flow_pop_outer_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = NBL_ACT_DEL_SVLAN << 16; + return 0; +} + +static int +nbl_flow_pop_inner_vlan_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = NBL_ACT_DEL_CVLAN << 16; + return 0; +} + +static int +nbl_flow_tunnel_encap_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 vni_h; + u16 vni_l; + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + vni_l = (u16)(action->vni & 0x0000ffff); + vni_h = (u16)(action->vni >> 16); + buf[act_idx] = (action->encap_idx & 0x1FFFF) + (NBL_ACT_TNL_ENCAP << 16); + act_idx++; + buf[act_idx] = vni_h + (NBL_ACT_SET_VNI1 << 16); + act_idx++; + buf[act_idx] = vni_l + (NBL_ACT_SET_VNI0 << 16); + *item = act_idx; + + return 0; +} + +static int +nbl_flow_tunnel_decap_act_2hw(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, + struct nbl_resource_mgt *res_mgt) +{ + u16 act_idx = *item; + + if (!action || !buf) + return -EINVAL; + + buf[act_idx] = NBL_ACT_TNL_DECAP << 16; + + return 0; +} + +static struct nbl_flow_action_2hw acts_2hw[] = { + { NBL_FLOW_ACTION_PORT_ID, nbl_flow_port_id_action_2hw }, + { NBL_FLOW_ACTION_DROP, nbl_flow_drop_2hw }, + 
{ NBL_FLOW_ACTION_COUNTER, nbl_flow_counter_action_2hw }, + { NBL_FLOW_ACTION_MCC, nbl_flow_mcc_action_2hw }, + { NBL_FLOW_ACTION_PUSH_OUTER_VLAN, nbl_flow_push_outer_vlan_2hw }, + { NBL_FLOW_ACTION_PUSH_INNER_VLAN, nbl_flow_push_inner_vlan_2hw }, + { NBL_FLOW_ACTION_POP_OUTER_VLAN, nbl_flow_pop_outer_vlan_2hw }, + { NBL_FLOW_ACTION_POP_INNER_VLAN, nbl_flow_pop_inner_vlan_2hw }, + { NBL_FLOW_ACTION_TUNNEL_ENCAP, nbl_flow_tunnel_encap_act_2hw }, + { NBL_FLOW_ACTION_TUNNEL_DECAP, nbl_flow_tunnel_decap_act_2hw }, +}; + +static int nbl_flow_at_num_proc(struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, + u16 action_cnt, u32 *buf, + struct nbl_tc_at_item *at_item) +{ + u16 idx = 0; + u16 act_idx = 0; + u16 act1_idx = 0; + u16 act2_idx = 0; + u32 act_node_idx[2]; + u32 i; + int ret = 0; + struct nbl_flow_pp_at_key at_key[2]; + struct nbl_flow_at_tbl *node = NULL; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&at_key, 0, sizeof(at_key)); + + if (mt_input->at_num == 0) { + for (idx = 0; idx < action_cnt; idx++) + at_item->act_buf[idx] = buf[idx]; + + at_item->act_num = action_cnt; + } else if (mt_input->at_num == 1) { + while (idx < mt_input->kt_left_num - 1) { + at_item->act_buf[idx + 1] = buf[idx]; + idx++; + } + at_item->act_num = mt_input->kt_left_num; + + while (idx < action_cnt) { + at_item->act1_buf[act1_idx] = buf[idx]; + at_key[0].act[act1_idx] = buf[idx]; + idx++; + act1_idx++; + } + + at_item->act1_num = action_cnt - mt_input->kt_left_num + 1; + act_node_idx[0] = nbl_pp_at_lookup(res_mgt, mt_input->pp_type, NBL_AT_TYPE_1, + &at_key[0], &node); + if (act_node_idx[0] != U32_MAX) { + node->ref_cnt++; + } else { + act_node_idx[0] = nbl_insert_pp_at(res_mgt, mt_input->pp_type, + NBL_AT_TYPE_1, &at_key[0], &node); + if (act_node_idx[0] == U32_MAX) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow nbl_insert_pp_at error.\n"); + return -1; + } + + memcpy(&at_item->act_collect.act_key, &at_key[0], + sizeof(struct nbl_flow_pp_at_key)); + } + + at_item->act_collect.act_vld = 1; + at_item->act_collect.act_hw_index = act_node_idx[0] + + at_item->act_collect.act_offset; + at_item->act_buf[0] = at_item->act_collect.act_hw_index + + (NBL_ACT_NEXT_AT_FULL0 << 16); + } else if (mt_input->at_num == 2) { + while (idx < mt_input->kt_left_num - 2) { + at_item->act_buf[idx + 2] = buf[idx]; + idx++; + } + at_item->act_num = mt_input->kt_left_num; + act_idx = idx; + + while (idx < NBL_AT_MAX_NUM + act_idx) { + at_item->act1_buf[act1_idx] = buf[idx]; + at_key[0].act[act1_idx] = buf[idx]; + idx++; + act1_idx++; + } + at_item->act1_num = NBL_AT_MAX_NUM; + + while (idx < action_cnt) { + at_item->act2_buf[act2_idx] = buf[idx]; + at_key[1].act[act2_idx] = buf[idx]; + idx++; + act2_idx++; + } + at_item->act2_num = + action_cnt - mt_input->kt_left_num + 2 - NBL_AT_MAX_NUM; + + for (i = 0; i < 2; i++) { + act_node_idx[i] = nbl_pp_at_lookup(res_mgt, mt_input->pp_type, + NBL_AT_TYPE_1 + i, &at_key[i], &node); + if (act_node_idx[i] != U32_MAX) { + node->ref_cnt++; + } else { + act_node_idx[i] = nbl_insert_pp_at(res_mgt, mt_input->pp_type, + NBL_AT_TYPE_1 + i, &at_key[i], &node); + if (act_node_idx[i] == U32_MAX) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow nbl_insert_pp_at error.\n"); + return -1; + } + memcpy(&at_item->act_collect.act_key[i], &at_key[i], + sizeof(struct nbl_flow_pp_at_key)); + } + } + + at_item->act_collect.act2_vld = 1; + at_item->act_collect.act_vld = 1; + at_item->act_collect.act2_hw_index = + act_node_idx[0] + + at_item->act_collect.act2_offset;
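+ /* + * The hw index of each chained AT entry is its allocated node index + * plus the per-PP AT base offset; act_buf[0]/act_buf[1] below become + * NEXT_AT jump words pointing at these external AT entries. + */ + 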
at_item->act_collect.act_hw_index = + act_node_idx[1] + + at_item->act_collect.act_offset; + at_item->act_buf[0] = at_item->act_collect.act2_hw_index + + (NBL_ACT_NEXT_AT_FULL0 << 16); + at_item->act_buf[1] = at_item->act_collect.act_hw_index + + (NBL_ACT_NEXT_AT_FULL0 << 16); + } + + return ret; +} + +static int nbl_flow_insert_at(struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, + struct nbl_rule_action *action, + struct nbl_tc_at_item *at_item, + struct nbl_edit_item *edit_item, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + u32 idx = 0; + u16 item = 0; + u32 list_num = ARRAY_SIZE(acts_2hw); + u32 buf[NBL_MAX_ACTION_NUM] = { 0 }; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + for (idx = 0; idx < list_num; idx++) { + if (action->flag & acts_2hw[idx].action_type) { + if (!acts_2hw[idx].act_2hw) + continue; + + ret = acts_2hw[idx].act_2hw(action, buf, &item, + edit_item, res_mgt); + if (ret) + return ret; + item++; + } + } + + if (tcam_item->tcam_flag) { + memcpy(tcam_item->tcam_action, buf, sizeof(tcam_item->tcam_action)); + return ret; + } + + mt_input->at_num = nbl_flow_act_num(mt_input, item); + spin_lock(&tc_flow_mgt->flow_lock); + + ret = nbl_flow_at_num_proc(res_mgt, mt_input, item, buf, at_item); + spin_unlock(&tc_flow_mgt->flow_lock); + + return ret; +} + +static void nbl_cmdq_show_ht_data(struct nbl_common_info *common, + union nbl_cmd_fem_ht_u *ht, bool read) +{ + u32 index = 0; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow HT bucket/entry/ht/em: %x-%04x-%x-%x\n", + ht->info.bucket_id, ht->info.entry_id, ht->info.ht_id, ht->info.em_id); + if (read) { + for (index = 0; index < 4; index++) { + if (index == 0) + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow HT four buckets kt_idx/hash/vld:%05x-%04x-%x\n", + ht->info.ht_data[index].info.kt_index, + ht->info.ht_data[index].info.hash, + ht->info.ht_data[index].info.vld); + else + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow HT four buckets kt_idx/hash/vld: %05x-%04x-%x", + ht->info.ht_data[index].info.kt_index, + ht->info.ht_data[index].info.hash, + ht->info.ht_data[index].info.vld); + } + + } else { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow HT kt_idx/hash/vld: %05x-%04x-%x\n", + ht->info.ht_data[index].info.kt_index, + ht->info.ht_data[index].info.hash, + ht->info.ht_data[index].info.vld); + } +} + +int nbl_cmdq_flow_ht_clear_2hw(struct nbl_tc_ht_item *ht_item, + u8 pp_type, struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ht_u ht; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_HT_WRITE]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ht, 0, sizeof(ht)); + + ht.info.ht_valid = 1; + if (ht_item->ht_entry == NBL_HASH0) { + ht.info.entry_id = ht_item->ht0_hash; + ht.info.ht_id = NBL_ACC_HT0; + } else if (ht_item->ht_entry == NBL_HASH1) { + ht.info.entry_id = ht_item->ht1_hash; + ht.info.ht_id = NBL_ACC_HT1; + } + + ht.info.bucket_id = ht_item->hash_bucket; + ht.info.em_id = pp_type; + /* prepare the command and command header */ + cmd.in_va = &ht; + cmd.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + nbl_cmdq_show_ht_data(common, &ht, false); + return nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); +} + +static int nbl_flow_del_ht_2hw(struct nbl_tc_ht_item *ht_item, u8 pp_type, + struct nbl_flow_pp_ht_mng *pp_ht0_mng, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, + struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + struct nbl_flow_pp_ht_key pp_ht_key = { 0 }; + struct nbl_flow_pp_ht_tbl 
*node = NULL; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (ht_item->ht_entry == NBL_HASH0) { + pp_ht_key.vid = 1; + pp_ht_key.ht_other_index = ht_item->ht1_hash; + pp_ht_key.kt_index = ht_item->tbl_id; + node = nbl_pp_ht_lookup(pp_ht0_mng, ht_item->ht0_hash, + &pp_ht_key); + + if (node) { + ret = nbl_cmdq_flow_ht_clear_2hw(ht_item, pp_type, res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht0_hash, + ht_item->ht1_hash, ht_item->tbl_id); + return ret; + } + + ret = nbl_delete_pp_ht(res_mgt, pp_ht0_mng, node, + ht_item->ht0_hash, + ht_item->ht1_hash, + ht_item->tbl_id); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del ht,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht0_hash, + ht_item->ht1_hash, ht_item->tbl_id); + return ret; + } + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow node = null, pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht0_hash, ht_item->ht1_hash, + ht_item->tbl_id); + return -EINVAL; + } + + } else if (ht_item->ht_entry == NBL_HASH1) { + pp_ht_key.vid = 1; + pp_ht_key.ht_other_index = ht_item->ht0_hash; + pp_ht_key.kt_index = ht_item->tbl_id; + node = nbl_pp_ht_lookup(pp_ht1_mng, ht_item->ht1_hash, + &pp_ht_key); + + if (node) { + ret = nbl_cmdq_flow_ht_clear_2hw(ht_item, pp_type, res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del cmdq ht 2hw,pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht0_hash, + ht_item->ht1_hash, ht_item->tbl_id); + return ret; + } + + ret = nbl_delete_pp_ht(res_mgt, pp_ht1_mng, node, + ht_item->ht1_hash, + ht_item->ht0_hash, + ht_item->tbl_id); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow failed to del ht, pp%d ht1_hash=%d, ht0_hash=%d, tbl_id=%d.\n", + pp_type, ht_item->ht1_hash, + ht_item->ht0_hash, ht_item->tbl_id); + return ret; + } + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow node = null, pp%d ht1_hash=%d,ht0_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht1_hash, ht_item->ht0_hash, + ht_item->tbl_id); + return -EINVAL; + } + } else { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow ht_entry error, pp%d ht0_hash=%d,ht1_hash=%d,tbl_id=%d.\n", + pp_type, ht_item->ht0_hash, ht_item->ht1_hash, + ht_item->tbl_id); + } + + return ret; +} + +static int nbl_flow_del_at_2hw(struct nbl_resource_mgt *res_mgt, + struct nbl_act_collect *act_collect, u8 pp_type) +{ + int ret = 0; + int idx; + struct nbl_flow_at_tbl *at_node = NULL; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + void *at1_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][NBL_AT_TYPE_1]; + void *at2_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][NBL_AT_TYPE_2]; + struct nbl_index_key_extra extra_key; + + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + if (act_collect->act_vld == 1) { + idx = nbl_common_get_index_with_data(at1_tbl, act_collect->act_key[0].act, + &extra_key, NULL, 0, (void **)&at_node); + if (idx != U32_MAX) { + at_node->ref_cnt--; + if (!at_node->ref_cnt) { + nbl_common_free_index(at1_tbl, act_collect->act_key[0].act); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow delete at node key:%d-%d-%d-%d-%d-%d-%d-%d.\n", + act_collect->act_key[0].act[0], + act_collect->act_key[0].act[1], + act_collect->act_key[0].act[2], + act_collect->act_key[0].act[3], + act_collect->act_key[0].act[4], + act_collect->act_key[0].act[5], + 
act_collect->act_key[0].act[6], + act_collect->act_key[0].act[7]); + } else { + ret = -1; + } + } + } + + if (act_collect->act2_vld == 1) { + idx = nbl_common_get_index_with_data(at2_tbl, act_collect->act_key[1].act, + &extra_key, NULL, 0, (void **)&at_node); + if (idx != U32_MAX) { + at_node->ref_cnt--; + if (!at_node->ref_cnt) { + nbl_common_free_index(at2_tbl, act_collect->act_key[1].act); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow delete at node key:%d-%d-%d-%d-%d-%d-%d-%d.\n", + act_collect->act_key[1].act[0], + act_collect->act_key[1].act[1], + act_collect->act_key[1].act[2], + act_collect->act_key[1].act[3], + act_collect->act_key[1].act[4], + act_collect->act_key[1].act[5], + act_collect->act_key[1].act[6], + act_collect->act_key[1].act[7]); + } else { + ret = -1; + } + } + } + + return ret; +} + +static int nbl_tc_flow_send_tcam_2hw(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + struct nbl_flow_tcam_ad_item ad_item; + u16 index = 0; + bool is_new = false; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u8 mode; + + if (!tcam_pp_key_mng || !tcam_pp_ad_mng || !tcam_item) + return -EINVAL; + + memset(&ad_item, 0, sizeof(ad_item)); + + memcpy(ad_item.action, tcam_item->tcam_action, sizeof(ad_item.action)); + ret = nbl_set_tcam_process(common, tcam_pp_key_mng, tcam_pp_ad_mng, + tcam_item, &ad_item, &index, &is_new); + if (ret) + return ret; + + if (is_new) { + tcam_item->tcam_index = index; + if (tcam_item->key_mode == NBL_TC_KT_HALF_MODE) { + mode = NBL_KT_HALF_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count + 1; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count+1 pp%d count=%d", + tcam_item->pp_type, *tcam_item->pp_tcam_count); + } else { + mode = NBL_KT_FULL_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count + 2; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count+2 pp%d count=%d", + tcam_item->pp_type, *tcam_item->pp_tcam_count); + } + + ret = phy_ops->add_tcam(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + tcam_item->tcam_index, tcam_item->kt_data.hash_key, + tcam_item->tcam_action, mode, tcam_item->pp_type); + } + + return ret; +} + +static void nbl_cmdq_show_ktat_header(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat) +{ + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show KT index: 0x%08x\n", ktat->info.kt_index); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show KT valid: 0x%0x\n", ktat->info.kt_valid); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show KT size: 0x%02x\n", ktat->info.kt_size); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show AT index: 0x%08x\n", ktat->info.at_index); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show AT valid: 0x%0x\n", ktat->info.at_valid); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow show AT size: 0x%02x\n", ktat->info.at_size); +} + +static void nbl_cmdq_show_kt_data(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat, bool second) +{ + u32 i = 0; + const unsigned char *p = (unsigned char *)&ktat->info.kt_data; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing KT data (320 bits):\n"); + + for (i = 0; i < NBL_PPE_KT_FULL_SIZE; i += 16) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow [%d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x 
%02x %02x\n", + i / 8, p[i], p[i + 1], p[i + 2], p[i + 3], p[i + 4], + p[i + 5], p[i + 6], p[i + 7], p[i + 8], p[i + 9], + p[i + 10], p[i + 11], p[i + 12], p[i + 13], p[i + 14], + p[i + 15]); + } + + if (second) { + const union nbl_fem_four_at_data_u *test = + (const union nbl_fem_four_at_data_u *)(p + 20); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing KT actions:\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow [actions]: %02x %02x %02x %02x\n", + test->info.at1, test->info.at2, test->info.at3, + test->info.at4); + } else { + const union nbl_fem_four_at_data_u *test = + (const union nbl_fem_four_at_data_u *)(p); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing KT actions:\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow [actions]: %02x %02x %02x %02x\n", + test->info.at1, test->info.at2, test->info.at3, + test->info.at4); + } +} + +static void __maybe_unused +nbl_cmdq_show_at_data(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat) +{ + /* AT 176 bit */ + const union nbl_fem_at_acc_data_u *at = (union nbl_fem_at_acc_data_u *)&ktat->info.at_data; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing AT data (176 bits):\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow check at data:0x%x-%x-%x-%x-%x-%x-%x-%x.\n", + at->info.at1, at->info.at2, at->info.at3, at->info.at4, + at->info.at5, at->info.at6, at->info.at7, at->info.at8); +} + +static void __maybe_unused +nbl_cmdq_show_searched_at_data(struct nbl_common_info *common, + union nbl_cmd_fem_ktat_u *ktat) +{ + const union nbl_fem_all_at_data_u *at = (union nbl_fem_all_at_data_u *)&ktat->info.kt_data; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow showing all action data (352 bits):\n"); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow check at data:0x%x-%x-%x-%x-%x-%x-%x-%x.\n", + at->info.at1, at->info.at2, at->info.at3, at->info.at4, + at->info.at5, at->info.at6, at->info.at7, at->info.at8); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow check act data:0x%x-%x-%x-%x-%x-%x-%x-%x.\n", + at->info.at9, at->info.at10, at->info.at11, at->info.at12, + at->info.at13, at->info.at14, at->info.at15, at->info.at16); +} + +static void __maybe_unused +nbl_cmdq_search_flow_ktat(const struct nbl_tc_kt_item *kt_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_SEARCH]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ktat, 0, sizeof(ktat)); + + ktat.info.kt_valid = 1; + ktat.info.kt_em = kt_item->pp_type; + if (kt_item->key_type == NBL_KEY_TYPE_160) + memcpy(&ktat.info.kt_data[5], &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data) / 2); + else + memcpy(&ktat.info.kt_data, &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data)); + + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_S_REQ_LEN; + cmd.out_va = &ktat; + nbl_cmdq_show_kt_data(common, &ktat, kt_item->key_type == NBL_KEY_TYPE_160); + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + /* the result AT will be stored in KT in ktat */ + nbl_cmdq_show_searched_at_data(common, &ktat); +} + +/* search a non-existant KT */ +static void __maybe_unused +nbl_cmdq_search_noflow_ktat(const struct nbl_tc_kt_item *kt_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_SEARCH]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ktat, 0, sizeof(ktat)); + + ktat.info.kt_valid = 1; 
+ ktat.info.kt_em = kt_item->pp_type; + if (kt_item->key_type == NBL_KEY_TYPE_160) + memcpy(&ktat.info.kt_data[5], &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data) / 2); + else + memcpy(&ktat.info.kt_data, &kt_item->kt_data.data, + sizeof(kt_item->kt_data.data)); + + /* clobber one word of the KT data so the lookup cannot match */ + ktat.info.kt_data[9] = 0; + + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_S_REQ_LEN; + cmd.out_va = &ktat; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow searching AT with non-existent KT data\n"); + nbl_cmdq_show_kt_data(common, &ktat, kt_item->key_type == NBL_KEY_TYPE_160); + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* the resulting AT is returned in ktat's kt_data field */ + nbl_cmdq_show_searched_at_data(common, &ktat); +} + +/* use cmdq to read the KT & AT written to MT */ +static int __maybe_unused nbl_cmdq_read_flow_ktat(struct nbl_tc_ht_item *ht_item, + struct nbl_tc_at_item *at_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + union nbl_cmd_fem_ktat_u extra_ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_READ]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + /* not necessary to check KT info */ + if (!ht_item || !at_item) + return -EINVAL; + + memset(&ktat, 0, sizeof(ktat)); + memset(&extra_ktat, 0, sizeof(extra_ktat)); + + /* read KT */ + ktat.info.kt_valid = 1; + ktat.info.kt_size = 1; /* can only read full table */ + ktat.info.kt_index = ht_item->tbl_id; + if (at_item->act1_num) { + ktat.info.at_valid = 1; + ktat.info.at_size = 1; /* can only read full table */ + ktat.info.at_index = at_item->act_collect.act_hw_index; + } + + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_R_REQ_LEN; + cmd.out_va = &ktat; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow sending read request of KT and AT table\n"); + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* print out read data */ + nbl_cmdq_show_ktat_header(common, &ktat); + nbl_cmdq_show_kt_data(common, &ktat, false); + nbl_cmdq_show_at_data(common, &ktat); + + /* read AT */ + if (at_item->act2_num) { + extra_ktat.info.at_index = at_item->act_collect.act2_hw_index; + extra_ktat.info.at_valid = 1; + extra_ktat.info.at_size = 1; + cmd.in_va = &extra_ktat; + cmd.in_length = NBL_CMDQ_FEM_R_REQ_LEN; + cmd.out_va = &extra_ktat; + cmd.out_length = 0; + cmd.out_params = 0; + cmd.in_params = 0; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow sending read request of AT table\n"); + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* print out AT data */ + nbl_cmdq_show_ktat_header(common, &extra_ktat); + nbl_cmdq_show_at_data(common, &extra_ktat); + } + + return 0; +} + +static void __maybe_unused +nbl_cmdq_read_hw_ht_entry(struct nbl_tc_ht_item *ht_item, + u8 pp_type, struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ht_u ht; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_HT_READ]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ht, 0, sizeof(ht)); + + ht.info.ht_valid = 1; + ht.info.ht_data[0].info.vld = 1; + if (ht_item->ht_entry == NBL_HASH0) { + ht.info.ht_data[0].info.hash = ht_item->ht1_hash; + ht.info.entry_id = ht_item->ht0_hash; + ht.info.ht_id = NBL_ACC_HT0; + } else if (ht_item->ht_entry == NBL_HASH1) { + ht.info.ht_data[0].info.hash = ht_item->ht0_hash; + ht.info.entry_id = ht_item->ht1_hash; + ht.info.ht_id = NBL_ACC_HT1; + } + + /* no need to fill in the bucket id */ + ht.info.ht_data[0].info.kt_index = 
ht_item->tbl_id; + ht.info.em_id = pp_type; + + cmd.in_va = &ht; + cmd.in_length = NBL_CMDQ_FEM_R_REQ_LEN; + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* show read result */ + nbl_cmdq_show_ht_data(common, &ht, true); +} + +/* write HT table using CMDQ */ +static void nbl_cmdq_send_flow_ht(struct nbl_tc_ht_item *ht_item, u8 pp_type, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ht_u ht; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_HT_WRITE]; + struct nbl_cmd_content cmd = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&ht, 0, sizeof(ht)); + + ht.info.ht_valid = 1; + ht.info.ht_data[0].info.vld = 1; + + if (ht_item->ht_entry == NBL_HASH0) { + ht.info.ht_data[0].info.hash = ht_item->ht1_hash; + ht.info.entry_id = ht_item->ht0_hash; + ht.info.ht_id = NBL_ACC_HT0; + } else if (ht_item->ht_entry == NBL_HASH1) { + ht.info.ht_data[0].info.hash = ht_item->ht0_hash; + ht.info.entry_id = ht_item->ht1_hash; + ht.info.ht_id = NBL_ACC_HT1; + } + + ht.info.bucket_id = ht_item->hash_bucket; + ht.info.ht_data[0].info.kt_index = ht_item->tbl_id; + ht.info.em_id = pp_type; + + /* sending the command */ + cmd.in_va = &ht; + cmd.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); +} + +/* write KT and AT table, KT index is stored in ht_item */ +__maybe_unused static int +nbl_cmdq_send_flow_ktat(struct nbl_tc_ht_item *ht_item, + struct nbl_tc_kt_item *kt_item, + struct nbl_tc_at_item *at_item, + struct nbl_resource_mgt *res_mgt) +{ + union nbl_cmd_fem_ktat_u ktat; + struct nbl_cmd_hdr hdr = g_cmd_hdr[NBL_FEM_KTAT_WRITE]; + struct nbl_cmd_content cmd = { 0 }; + union nbl_fem_at_acc_data_u at1; + union nbl_fem_at_acc_data_u at2; + struct nbl_cmd_content cmd_addition; + union nbl_cmd_fem_ktat_u extra_ktat; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (!ht_item || !kt_item || !at_item) + return -EINVAL; + + memset(&ktat, 0, sizeof(ktat)); + memset(&at1, 0, sizeof(at1)); + memset(&at2, 0, sizeof(at2)); + memset(&cmd_addition, 0, sizeof(cmd_addition)); + memset(&extra_ktat, 0, sizeof(extra_ktat)); + + /* the first command, it should send KT, and possible the first AT */ + ktat.info.kt_valid = 1; + ktat.info.kt_index = ht_item->tbl_id; + ktat.info.kt_size = (kt_item->key_type == NBL_KEY_TYPE_160) ? 
0 : 1; + memcpy(&ktat.info.kt_data, &kt_item->kt_data.data, sizeof(kt_item->kt_data.data)); + + if (at_item->act1_num) { + at1.info.at1 = at_item->act1_buf[0]; + at1.info.at2 = at_item->act1_buf[1]; + at1.info.at3 = at_item->act1_buf[2]; + at1.info.at4 = at_item->act1_buf[3]; + at1.info.at5 = at_item->act1_buf[4]; + at1.info.at6 = at_item->act1_buf[5]; + at1.info.at7 = at_item->act1_buf[6]; + at1.info.at8 = at_item->act1_buf[7]; + + ktat.info.at_valid = 1; + ktat.info.at_index = at_item->act_collect.act_hw_index; + /* all AT entries use the full width */ + ktat.info.at_size = 1; + memcpy(&ktat.info.at_data, &at1.info, sizeof(at1)); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow kt index=0x%x,at_hw_index=0x%x," + "at data:0x%x-%x-%x-%x-%x-%x-%x-%x.", + ktat.info.kt_index, at_item->act_collect.act_hw_index, + at1.info.at1, at1.info.at2, at1.info.at3, at1.info.at4, + at1.info.at5, at1.info.at6, at1.info.at7, at1.info.at8); + } + + /* fill in the command flags, block, module, table, etc */ + cmd.in_va = &ktat; + cmd.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd); + + /* if AT2 is used, another command is also needed */ + if (at_item->act2_num) { + at2.info.at1 = at_item->act2_buf[0]; + at2.info.at2 = at_item->act2_buf[1]; + at2.info.at3 = at_item->act2_buf[2]; + at2.info.at4 = at_item->act2_buf[3]; + at2.info.at5 = at_item->act2_buf[4]; + at2.info.at6 = at_item->act2_buf[5]; + at2.info.at7 = at_item->act2_buf[6]; + at2.info.at8 = at_item->act2_buf[7]; + + extra_ktat.info.at_valid = 1; + extra_ktat.info.at_index = at_item->act_collect.act2_hw_index; + /* all AT entries use the full width */ + extra_ktat.info.at_size = 1; + memcpy(&extra_ktat.info.at_data, &at2.info, sizeof(at2)); + + cmd_addition.in_va = &extra_ktat; + cmd_addition.in_length = NBL_CMDQ_FEM_W_REQ_LEN; + + nbl_tc_call_inst_cmdq(common->tc_inst_id, (void *)&hdr, (void *)&cmd_addition); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow kt index=0x%x, at2_hw_index=0x%x, at2 data:0x%x-%x-%x-%x-%x-%x-%x-%x.", + ktat.info.kt_index, at_item->act_collect.act2_hw_index, + at2.info.at1, at2.info.at2, at2.info.at3, at2.info.at4, + at2.info.at5, at2.info.at6, at2.info.at7, at2.info.at8); + } + + /* write HT table using CMDQ */ + nbl_cmdq_send_flow_ht(ht_item, kt_item->pp_type, res_mgt); + + return 0; +} + +static int nbl_flow_del_tcam_2hw(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item) +{ + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(tc_flow_mgt->res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u8 mode; + + if (!tcam_pp_key_mng || !tcam_pp_ad_mng || !tcam_item) + return -EINVAL; + + ret = nbl_delete_tcam_key_ad(common, tcam_pp_key_mng, tcam_pp_ad_mng, + tcam_item->tcam_index, tcam_item->key_mode, + tcam_item->pp_type); + if (ret == 0 && tcam_pp_key_mng[tcam_item->tcam_index].ref_cnt == 0) { + if (tcam_item->key_mode == NBL_TC_KT_HALF_MODE) { + mode = NBL_KT_HALF_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count - 1; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count-1 pp%d count=%d\n", + tcam_item->pp_type, *tcam_item->pp_tcam_count); + } else { + mode = NBL_KT_FULL_MODE; + *tcam_item->pp_tcam_count = + *tcam_item->pp_tcam_count - 2; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow tcam:count-2 pp%d count=%d\n", + tcam_item->pp_type, 
*tcam_item->pp_tcam_count); + } + + phy_ops->del_tcam(NBL_RES_MGT_TO_PHY_PRIV(tc_flow_mgt->res_mgt), + tcam_item->tcam_index, mode, tcam_item->pp_type); + } + + return ret; +} + +static int nbl_tc_set_pp_related_value(struct nbl_select_input *select_input, + struct nbl_mt_input *mt_input, + struct nbl_tc_flow_mgt *tc_flow_mgt, + u8 profile_id) +{ + select_input->pp_type = profile_id / NBL_PP_PROFILE_NUM; + + switch (select_input->pp_type) { + case NBL_PP_TYPE_1: + select_input->pp_tcam_count = &tc_flow_mgt->count_mng.pp1_tcam_count; + select_input->pp_ht0_mng = &tc_flow_mgt->pp1_ht0_mng; + select_input->pp_ht1_mng = &tc_flow_mgt->pp1_ht1_mng; + select_input->act_offset = NBL_PP1_AT_OFFSET; + select_input->act2_offset = NBL_PP1_AT2_OFFSET; + + select_input->tcam_pp_key_mng = tc_flow_mgt->tcam_pp1_key_mng; + select_input->tcam_pp_ad_mng = tc_flow_mgt->tcam_pp1_ad_mng; + select_input->pp_kt_bmp = tc_flow_mgt->pp1_kt_bmp; + select_input->pp_kt_num = NBL_PP1_KT_NUM; + + mt_input->depth = NBL_FEM_HT_PP1_DEPTH; + select_input->kt_idx_offset = NBL_PP1_KT_OFFSET; + mt_input->power = NBL_PP1_POWER; + + mt_input->pp_type = NBL_PP_TYPE_1; + break; + case NBL_PP_TYPE_2: + select_input->pp_tcam_count = &tc_flow_mgt->count_mng.pp2_tcam_count; + select_input->pp_ht0_mng = &tc_flow_mgt->pp2_ht0_mng; + select_input->pp_ht1_mng = &tc_flow_mgt->pp2_ht1_mng; + select_input->tcam_pp_key_mng = tc_flow_mgt->tcam_pp2_key_mng; + select_input->tcam_pp_ad_mng = tc_flow_mgt->tcam_pp2_ad_mng; + select_input->pp_kt_bmp = tc_flow_mgt->pp2_kt_bmp; + select_input->pp_kt_num = NBL_PP2_KT_NUM; + + mt_input->depth = NBL_FEM_HT_PP2_DEPTH; + select_input->act2_offset = NBL_PP2_AT2_OFFSET; + mt_input->power = NBL_PP2_POWER; + + mt_input->pp_type = NBL_PP_TYPE_2; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void nbl_tc_assign_action_data(u32 *key, u32 offset, + u32 value) +{ + u32 index = offset / NBL_BITS_IN_U32; + u32 remain = offset % NBL_BITS_IN_U32; + u32 shifted = 0; + + if (NBL_BITS_IN_U32 - remain < NBL_AT_WIDTH) { + /* if the value span across u32 boundary */ + shifted = NBL_BITS_IN_U32 - remain; + key[index] += (value << remain); + key[index + 1] += (value >> shifted); + } else { + key[index] += (value << remain); + } +} + +static void nbl_tc_assign_acts_for_kt(struct nbl_common_info *common, + struct nbl_tc_at_item *at_item, u32 *key, + struct nbl_mt_input *input) +{ + u8 i = 0; + u32 offset = 0; + + if (input->kt_left_num > (NBL_FEM_KT_HALF_LEN / NBL_AT_WIDTH)) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow too many actions to insert for KT data\n"); + return; + } + + for (i = 0; i < input->kt_left_num; i++) { + nbl_tc_assign_action_data(key, offset, at_item->act_buf[i]); + offset += NBL_AT_WIDTH; + } +} + +static inline void nbl_tc_assign_idx_act_for_kt(struct nbl_tc_kt_item *kt_item, + struct nbl_flow_tab_filter *node) +{ + u32 act_value = node->assoc_tbl_id + (NBL_ACT_SET_TAB_INDEX << 16); + + nbl_tc_assign_action_data(kt_item->kt_data.data, 0, act_value); +} + +static inline void +nbl_tc_assign_idx_act_for_tcam(struct nbl_tcam_item *tcam_item, + struct nbl_flow_tab_filter *node) +{ + u32 idx = 0; + + tcam_item->tcam_action[idx++] = NBL_GET_ACT_INFO(node->tbl_id, + NBL_ACT_SET_TAB_INDEX); +} + +static inline void nbl_tc_assign_key_for_hash(struct nbl_mt_input *mt_input, + struct nbl_flow_tab_filter *node) +{ + u8 idx; + u8 *ptr = (u8 *)node->key.key_value; + + for (idx = 0; idx < NBL_KT_BYTE_HALF_LEN; idx++) { + mt_input->key[idx] = ptr[NBL_KT_BYTE_LEN - idx - 1]; + 
mt_input->key[NBL_KT_BYTE_LEN - idx - 1] = ptr[idx]; + } +} + +static inline void nbl_tc_assign_kt_item(struct nbl_tc_kt_item *kt_item, + struct nbl_select_input *select_input, + struct nbl_flow_tab_filter *node, + bool full) +{ + u32 *ptr = node->key.key_value; + u16 size = full ? NBL_FEM_KT_LEN : NBL_FEM_KT_HALF_LEN; + u16 offset = full ? 0 : (NBL_FEM_KT_HALF_LEN / NBL_BITS_IN_U32); + + kt_item->key_type = full ? NBL_KEY_TYPE_320 : NBL_KEY_TYPE_160; + kt_item->pp_type = select_input->pp_type; + memcpy(kt_item->kt_data.data, ptr + offset, size / NBL_BITS_IN_U8); +} + +static void +nbl_tc_kt_mt_set_value(struct nbl_tc_at_item *at_item, + struct nbl_mt_input *mt_input, + struct nbl_select_input *select_input, + struct nbl_rule_action *action, + struct nbl_profile_msg *profile_msg, + const struct nbl_flow_idx_info *idx_info) +{ + at_item->act_collect.act_offset = select_input->act_offset; + at_item->act_collect.act2_offset = select_input->act2_offset; + mt_input->kt_left_num = profile_msg->act_count; + if (idx_info->key_flag & NBL_FLOW_KEY_DIPV4_FLAG) { + action->flag |= NBL_FLOW_ACTION_IPV4; + } else if (idx_info->key_flag & + NBL_FLOW_KEY_DIPV6_FLAG) { + action->flag |= NBL_FLOW_ACTION_IPV6; + } + + if (idx_info->key_flag & NBL_FLOW_KEY_T_VNI_FLAG) + action->flag |= NBL_FLOW_ACTION_TUNNEL_DECAP; +} + +static void +nbl_tc_node_at_set_value(struct nbl_tc_at_item *at_item, + struct nbl_flow_tab_filter *node, + struct nbl_edit_item *edit_item, + struct nbl_rule_action *action) +{ + memcpy(&node->act_collect, &at_item->act_collect, + sizeof(at_item->act_collect)); + memcpy(&node->edit_item, edit_item, sizeof(struct nbl_edit_item)); + if (node->edit_item.is_mir) + list_replace_init(&edit_item->tc_mcc_list, &node->edit_item.tc_mcc_list); + if (action->flag & NBL_FLOW_ACTION_INGRESS) + node->edit_item.direct = NBL_ACT_INGRESS; +} + +static int nbl_flow_tab_add(struct nbl_flow_tab_filter *node, + struct nbl_rule_action *action, + struct nbl_resource_mgt *res_mgt, + const struct nbl_flow_idx_info *idx_info, + struct nbl_mt_input *mt_input, + struct nbl_select_input *select_input) +{ + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_profile_msg *profile_msg = + &tc_flow_mgt->profile_msg[idx_info->profile_id]; + struct nbl_tc_ht_item ht_item; + struct nbl_tc_kt_item kt_item; + struct nbl_tc_at_item *at_item = NULL; + struct nbl_edit_item edit_item; + struct nbl_tcam_item tcam_item; + + memset(&ht_item, 0, sizeof(ht_item)); + memset(&kt_item, 0, sizeof(kt_item)); + memset(&edit_item, 0, sizeof(edit_item)); + memset(&tcam_item, 0, sizeof(tcam_item)); + + nbl_tc_assign_key_for_hash(mt_input, node); + + spin_lock(&tc_flow_mgt->flow_lock); + if (mt_input->key_full) { + tcam_item.key_mode = NBL_TC_KT_FULL_MODE; + ret = nbl_tc_flow_alloc_bmp_id(select_input->pp_kt_bmp, + select_input->pp_kt_num, + tcam_item.key_mode, &node->tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow failed to alloc id for full table.\n"); + return -ENOSPC; + } + } else { + tcam_item.key_mode = NBL_TC_KT_HALF_MODE; + ret = nbl_tc_flow_alloc_bmp_id(select_input->pp_kt_bmp, + select_input->pp_kt_num, + tcam_item.key_mode, &node->tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow failed to alloc id for half table.\n"); + return -ENOSPC; + } + } + + mt_input->tbl_id = node->tbl_id + 
select_input->kt_idx_offset; + tcam_item.pp_tcam_count = select_input->pp_tcam_count; + ret = nbl_flow_ht_assign_proc(res_mgt, mt_input, + select_input->pp_ht0_mng, + select_input->pp_ht1_mng, &ht_item, + &tcam_item); + spin_unlock(&tc_flow_mgt->flow_lock); + + if (ret) + goto ret_bitmap_fail; + + if (tcam_item.tcam_flag) { + node->tcam_flag = tcam_item.tcam_flag; + if (mt_input->key_full) + memcpy(tcam_item.kt_data.data, node->key.key_value, + sizeof(node->key.key_value)); + else + memcpy(tcam_item.kt_data.data, + &node->key.key_value[NBL_TABLE_KEY_DATA_LEN / 2], + sizeof(node->key.key_value) / 2); + } + memcpy(&node->ht_item, &ht_item, sizeof(ht_item)); + node->pp_type = select_input->pp_type; + + /* copy pure key from node to kt */ + nbl_tc_assign_kt_item(&kt_item, select_input, node, (bool)mt_input->key_full); + + at_item = kzalloc(sizeof(*at_item), GFP_KERNEL); + if (!at_item) { + ret = -ENOMEM; + goto ret_bitmap_fail; + } + + if (idx_info->last_stage) { + nbl_tc_kt_mt_set_value(at_item, mt_input, select_input, + action, profile_msg, idx_info); + + ret = nbl_flow_insert_at(res_mgt, mt_input, action, + at_item, &edit_item, &tcam_item); + if (ret) + goto ret_fail; + nbl_tc_node_at_set_value(at_item, node, &edit_item, action); + nbl_tc_assign_acts_for_kt(common, at_item, kt_item.kt_data.data, + mt_input); + } else { + if (!tcam_item.tcam_flag) + nbl_tc_assign_idx_act_for_kt(&kt_item, node); + else + nbl_tc_assign_idx_act_for_tcam(&tcam_item, node); + } + + if (tcam_item.tcam_flag) { + spin_lock(&tc_flow_mgt->flow_lock); + tcam_item.pp_type = select_input->pp_type; + tcam_item.sw_hash_id = node->sw_hash_id; + tcam_item.profile_id = idx_info->profile_id; + ret = nbl_tc_flow_send_tcam_2hw(res_mgt, select_input->tcam_pp_key_mng, + select_input->tcam_pp_ad_mng, &tcam_item); + node->tcam_index = tcam_item.tcam_index; + spin_unlock(&tc_flow_mgt->flow_lock); + goto ret_fail; + } + + /* write flow KT AT using CMDQ */ + ret = nbl_cmdq_send_flow_ktat(&ht_item, &kt_item, at_item, res_mgt); + +ret_fail: + kfree(at_item); +ret_bitmap_fail: + if (ret) { + spin_lock(&tc_flow_mgt->flow_lock); + if (mt_input->key_full) + nbl_tc_flow_free_bmp_id(select_input->pp_kt_bmp, + node->tbl_id, NBL_TC_KT_FULL_MODE); + else + nbl_tc_flow_free_bmp_id(select_input->pp_kt_bmp, + node->tbl_id, NBL_TC_KT_HALF_MODE); + spin_unlock(&tc_flow_mgt->flow_lock); + } + + return ret; +} + +static int nbl_flow_tab_del(struct nbl_flow_tab_filter *node, struct nbl_resource_mgt *res_mgt, + struct nbl_mt_input *mt_input, struct nbl_select_input *select_input) +{ + int ret = 0; + struct nbl_tcam_item tcam_item; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + memset(&tcam_item, 0, sizeof(tcam_item)); + + spin_lock(&tc_flow_mgt->flow_lock); + if (node->tcam_flag) { + if (mt_input->key_full) + tcam_item.key_mode = NBL_TC_KT_FULL_MODE; + else + tcam_item.key_mode = NBL_TC_KT_HALF_MODE; + tcam_item.pp_type = select_input->pp_type; + tcam_item.tcam_index = node->tcam_index; + tcam_item.pp_tcam_count = select_input->pp_tcam_count; + ret = nbl_flow_del_tcam_2hw(res_mgt, select_input->tcam_pp_key_mng, + select_input->tcam_pp_ad_mng, &tcam_item); + if (!ret) + goto ret_tcam_success; + else + goto ret_fail; + } + + if (mt_input->key_full) { + nbl_tc_flow_free_bmp_id(select_input->pp_kt_bmp, + node->tbl_id, NBL_TC_KT_FULL_MODE); + } else { + nbl_tc_flow_free_bmp_id(select_input->pp_kt_bmp, + node->tbl_id, NBL_TC_KT_HALF_MODE); + } + + ret = 
nbl_flow_del_ht_2hw(&node->ht_item, node->pp_type, + select_input->pp_ht0_mng, + select_input->pp_ht1_mng, + res_mgt); + if (ret) + goto ret_fail; + + ret = nbl_flow_del_at_2hw(res_mgt, &node->act_collect, select_input->pp_type); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow failed to del at 2hw\n"); + goto ret_fail; + } + +ret_tcam_success: + if (node->edit_item.is_mir) + nbl_tc_mcc_free_hw_tbl(tc_flow_mgt->res_mgt, &tc_flow_mgt->tc_mcc_mgt, + &node->edit_item.tc_mcc_list); + +ret_fail: + spin_unlock(&tc_flow_mgt->flow_lock); + return ret; +} + +/* note that the key in node should not be modified */ +static int nbl_flow_tab_ht_at(struct nbl_flow_tab_filter *node, + struct nbl_rule_action *action, u8 opcode, + struct nbl_resource_mgt *res_mgt, + const struct nbl_flow_idx_info *idx_info) +{ + int ret = 0; + struct nbl_mt_input mt_input = { 0 }; + struct nbl_select_input select_input = { 0 }; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_profile_msg *profile_msg = + &tc_flow_mgt->profile_msg[idx_info->profile_id]; + + if (!node || !idx_info) + return -EINVAL; + + mt_input.key_full = profile_msg->key_full; + ret = nbl_tc_set_pp_related_value(&select_input, &mt_input, tc_flow_mgt, + idx_info->profile_id); + if (ret) + return ret; + + if (opcode == NBL_OPCODE_ADD) + ret = nbl_flow_tab_add(node, action, res_mgt, idx_info, &mt_input, &select_input); + else if (opcode == NBL_OPCODE_DELETE) + ret = nbl_flow_tab_del(node, res_mgt, &mt_input, &select_input); + + return ret; +} + +static int nbl_flow_tbl_op(void *ptr, struct nbl_rule_action *action, + struct nbl_resource_mgt *res_mgt, + const struct nbl_flow_idx_info *idx_info, + __maybe_unused void *query_rslt, u8 opcode) +{ + struct nbl_flow_tab_filter *flow_tab_node = NULL; + int ret = 0; + + if (opcode == NBL_OPCODE_ADD && !action) + return -EINVAL; + + flow_tab_node = (struct nbl_flow_tab_filter *)ptr; + ret = nbl_flow_tab_ht_at(flow_tab_node, action, opcode, res_mgt, idx_info); + + return ret; +} + +static int nbl_off_flow_op(void *ptr, struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info, u8 opcode, + void *query_rslt) +{ + int ret = 0; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (!ptr) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow offload op failed. 
op:%u\n", opcode); + return -EINVAL; + } + + ret = nbl_flow_tbl_op(ptr, act, res_mgt, idx_info, query_rslt, opcode); + + return ret; +} + +/** + * @brief: offload flow add + * + * @param[in] ptr: flow tab node info + * @param[in] act: act to add + * @param[in] idx_info: some indx info + * @return int : 0-success other-fail + */ +static int nbl_off_flow_add(void *ptr, struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info) +{ + return nbl_off_flow_op(ptr, act, res_mgt, idx_info, NBL_OPCODE_ADD, NULL); +} + +/** + * @brief: offload flow del + * + * @param[in] ptr: flow tab node info + * @param[in] idx_info: some indx info + * @return int : 0-success other-fail + */ +static int nbl_off_flow_del(void *ptr, struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info) +{ + return nbl_off_flow_op(ptr, NULL, res_mgt, idx_info, NBL_OPCODE_DELETE, NULL); +} + +/** + * @brief: offload flow query + * + * @param[in] ptr: flow tab node info + * @param[in] idx: flow-id + * @param[in] type: distinguish which key template to query + * @param[out] query_rslt: when query use this param + * @brief: offload flow + * @return int : 0-success other-fail + */ +static int nbl_off_flow_query(void *ptr, u32 idx, void *query_rslt) +{ + struct nbl_flow_idx_info idx_inf = { 0 }; + + idx_inf.flow_idx = idx; + return nbl_off_flow_op(ptr, NULL, NULL, &idx_inf, NBL_OPCODE_QUERY, + query_rslt); +} + +const struct nbl_flow_offload_ops nbl_flow_offload_ops = { + .add = nbl_off_flow_add, + .del = nbl_off_flow_del, + .query = nbl_off_flow_query, +}; diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..f0d2f7edfd188feea420a1648d52bf8ec68397a7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_filter_leonis.h @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ +#ifndef _NBL_TC_FLOW_FILTER_LEONIS_H_ +#define _NBL_TC_FLOW_FILTER_LEONIS_H_ + +#include "nbl_tc_flow_leonis.h" + +#define NBL_ACC_HT0 (0) +#define NBL_ACC_HT1 (1) + +struct nbl_flow_offload_ops { + int (*add) + (void *ptr, + struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info); + + int (*del) + (void *pt, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info); + + int (*query) + (void *ptr, + u32 idx, + void *query_rslt); +}; + +extern const struct nbl_flow_offload_ops nbl_flow_offload_ops; + +struct nbl_flow_action_2hw { + u64 action_type; + int (*act_2hw)(struct nbl_rule_action *action, u32 *buf, u16 *item, + struct nbl_edit_item *edit_item, struct nbl_resource_mgt *res_mgt); +}; + +struct nbl_del_action_2hw { + u64 action_type; + int (*del_act_2hw)(struct nbl_tc_flow_mgt *tc_flow_mgt, + struct nbl_edit_item *edit_item); +}; + +union nbl_ipv4_tnl_data_u { + struct nbl_ipv4_tnl_data { + u32 act0:22; + u32 act1:22; + u32 rsv1:16; + u32 dst_port:16; + u32 option_class:16; + u32 option_data:32; + u32 dst_ip:32; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_IPV4_TNL_DATA_TAB_WIDTH (sizeof(struct nbl_ipv4_tnl_data) \ + / sizeof(u32)) + u32 data[NBL_IPV4_TNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_ipv4_tnl_data)]; +} __packed; + +union nbl_ipv6_tnl_data_u { + struct nbl_ipv6_tnl_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 rsv:14; + u32 dst_port:16; + u32 option_class:16; + u32 option_data:32; + u64 dst_ipv6_2:64; + u64 dst_ipv6_1:64; + u32 template:4; + } __packed info; +#define NBL_IPV6_TNL_DATA_TAB_WIDTH (sizeof(struct nbl_ipv6_tnl_data) \ + / sizeof(u32)) + u32 data[NBL_IPV6_TNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_ipv6_tnl_data)]; +} __packed; + +union nbl_l2_tnl_data_u { + struct nbl_l2_tnl_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 act5:22; + u32 act6:22; + u32 rsv2:6; + u32 inport:12; + u32 metadata:16; + u32 svlan_id:12; + u32 rsv1:4; + u32 cvlan_id:12; + u32 rsv:4; + u32 ether_type:16; + u64 dst_mac:48; + u32 vni:32; + u32 template:4; + } __packed info; +#define NBL_L2_TNL_DATA_TAB_WIDTH (sizeof(struct nbl_l2_tnl_data) \ + / sizeof(u32)) + u32 data[NBL_L2_TNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_tnl_data)]; +} __packed; + +union nbl_l2_notnl_data_u { + struct nbl_l2_notnl_data { + u32 act0:22; + u32 act1:22; + u32 rsv3:4; + u32 inport:12; + u32 svlan_id:12; + u32 rsv2:4; + u32 cvlan_id:12; + u32 rsv1:4; + u32 ether_type:16; + u64 dst_mac:48; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L2_NOTNL_DATA_TAB_WIDTH (sizeof(struct nbl_l2_notnl_data) \ + / sizeof(u32)) + u32 data[NBL_L2_NOTNL_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l2_notnl_data)]; +} __packed; + +union nbl_l3_ipv4_data_u { + struct nbl_l3_ipv4_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 rsv1:4; + u32 metadata:16; + u32 dscp:8; + u32 ttl:8; + u32 dst_ip:32; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_L3_IPV4_DATA_TAB_WIDTH (sizeof(struct nbl_l3_ipv4_data) \ + / sizeof(u32)) + u32 data[NBL_L3_IPV4_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_ipv4_data)]; +} __packed; + +union nbl_l3_ipv6_data_u { + struct nbl_l3_ipv6_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 act5:22; + u32 act6:22; + u32 rsv:2; + u32 metadata:16; + u32 dscp:8; + u32 hoplimit:8; + u64 
dst_ipv6_2:64; + u64 dst_ipv6_1:64; + u32 template:4; + } __packed info; +#define NBL_L3_IPV6_DATA_TAB_WIDTH (sizeof(struct nbl_l3_ipv6_data) \ + / sizeof(u32)) + u32 data[NBL_L3_IPV6_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_l3_ipv6_data)]; +} __packed; + +union nbl_t5_ipv4_data_u { + struct nbl_t5_ipv4_data { + u32 act0:22; + u32 act1:22; + u32 rsv1:16; + u32 metadata:16; + u32 pad:8; + u32 proto:8; + u32 dst_port:16; + u32 src_port:16; + u32 src_ip:32; + u32 template:4; + u32 rsv[5]; + } __packed info; +#define NBL_T5_IPV4_DATA_TAB_WIDTH (sizeof(struct nbl_t5_ipv4_data) \ + / sizeof(u32)) + u32 data[NBL_T5_IPV4_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_t5_ipv4_data)]; +} __packed; + +union nbl_t5_ipv6_data_u { + struct nbl_t5_ipv6_data { + u32 act0:22; + u32 act1:22; + u32 act2:22; + u32 act3:22; + u32 act4:22; + u32 rsv:14; + u32 metadata:16; + u32 pad:8; + u32 proto:8; + u32 dst_port:16; + u32 src_port:16; + u64 src_ipv6_2:64; + u64 src_ipv6_1:64; + u32 template:4; + } __packed info; +#define NBL_T5_IPV6_DATA_TAB_WIDTH (sizeof(struct nbl_t5_ipv6_data) \ + / sizeof(u32)) + u32 data[NBL_T5_IPV6_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_t5_ipv6_data)]; +} __packed; + +#define NBL_FEM_KT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000348) + +#define NBL_FEM_EM0_TCAM_TABLE_ADDR (0xa0b000) +#define NBL_FEM_EM_TCAM_TABLE_DEPTH (64) +#define NBL_FEM_EM_TCAM_TABLE_WIDTH (256) +union fem_em_tcam_table_u { + struct fem_em_tcam_table { + u32 key[5]; /* [159:0] Default:0x0 RW */ + u32 key_vld:1; /* [160] Default:0x0 RW */ + u32 key_size:1; /* [161] Default:0x0 RW */ + u32 rsv:30; /* [191:162] Default:0x0 RO */ + u32 rsv1[2]; /* [255:192] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM_TCAM_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_tcam_table)]; +} __packed; + +#define NBL_FEM_EM_TCAM_TABLE_REG(r, t) (NBL_FEM_EM0_TCAM_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_TCAM_TABLE_WIDTH / 8) * (t)) + +#define NBL_FEM_EM0_AD_TABLE_ADDR (0xa08000) +#define NBL_FEM_EM_AD_TABLE_DEPTH (64) +#define NBL_FEM_EM_AD_TABLE_WIDTH (512) +union fem_em_ad_table_u { + struct fem_em_ad_table { + u32 action0:22; /* [21:0] Default:0x0 RW */ + u32 action1:22; /* [43:22] Default:0x0 RW */ + u32 action2:22; /* [65:44] Default:0x0 RW */ + u32 action3:22; /* [87:66] Default:0x0 RW */ + u32 action4:22; /* [109:88] Default:0x0 RW */ + u32 action5:22; /* [131:110] Default:0x0 RW */ + u32 action6:22; /* [153:132] Default:0x0 RW */ + u32 action7:22; /* [175:154] Default:0x0 RW */ + u32 action8:22; /* [197:176] Default:0x0 RW */ + u32 action9:22; /* [219:198] Default:0x0 RW */ + u32 action10:22; /* [241:220] Default:0x0 RW */ + u32 action11:22; /* [263:242] Default:0x0 RW */ + u32 action12:22; /* [285:264] Default:0x0 RW */ + u32 action13:22; /* [307:286] Default:0x0 RW */ + u32 action14:22; /* [329:308] Default:0x0 RW */ + u32 action15:22; /* [351:330] Default:0x0 RW */ + u32 rsv[5]; /* [511:352] Default:0x0 RO */ + } __packed info; + u32 data[NBL_FEM_EM_AD_TABLE_WIDTH / 32]; + u8 hash_key[sizeof(struct fem_em_ad_table)]; +} __packed; + +#define NBL_FEM_EM_AD_TABLE_REG(r, t) (NBL_FEM_EM0_AD_TABLE_ADDR + 0x1000 * (r) + \ + (NBL_FEM_EM_AD_TABLE_WIDTH / 8) * (t)) + +union nbl_fem_at_acc_data_u { + struct nbl_fem_at_acc_data { + u32 at1:22; + u32 at2:22; + u32 at3:22; + u32 at4:22; + u32 at5:22; + u32 at6:22; + u32 at7:22; + u32 at8:22; + u32 rsv:16; + } __packed info; +#define NBL_FEM_AT_ACC_DATA_TBL_WIDTH (sizeof(struct nbl_fem_at_acc_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_AT_ACC_DATA_TBL_WIDTH]; +} 
__packed; + +#define NBL_FEM_AT_ACC_DATA (NBL_PPE_FEM_BASE + 0x00000398) + +union nbl_fem_all_at_data_u { + struct nbl_fem_all_at_data { + u32 at1:22; + u32 at2:22; + u32 at3:22; + u32 at4:22; + u32 at5:22; + u32 at6:22; + u32 at7:22; + u32 at8:22; + u32 at9:22; + u32 at10:22; + u32 at11:22; + u32 at12:22; + u32 at13:22; + u32 at14:22; + u32 at15:22; + u32 at16:22; + } __packed info; +#define NBL_FEM_ALL_AT_DATA_TBL_WIDTH (sizeof(struct nbl_fem_all_at_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_ALL_AT_DATA_TBL_WIDTH]; +} __packed; + +union nbl_fem_four_at_data_u { + struct nbl_fem_four_at_data { + u32 at1:22; + u32 at2:22; + u32 at3:22; + u32 at4:22; + } __packed info; +#define NBL_FEM_FOUR_AT_DATA_TBL_WIDTH (sizeof(struct nbl_fem_four_at_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_FOUR_AT_DATA_TBL_WIDTH]; +} __packed; + +/* COMMON CRC16 Calc */ +u16 nbl_calc_crc16(const u8 *data, u32 size, u16 crc_poly, + u16 init_value, u8 ref_flag, u16 xorout); +#define NBL_CRC16_CCITT(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 1, 0x0000) +#define NBL_CRC16_CCITT_FALSE(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0xFFFF, 0, 0x0000) +#define NBL_CRC16_XMODEM(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 0, 0x0000) +#define NBL_CRC16_IBM(data, size) \ + nbl_calc_crc16(data, size, 0x8005, 0x0000, 1, 0x0000) + +/* CMDQ data content for FEM-KT AT */ +union nbl_cmd_fem_ktat_u { + struct nbl_cmd_fem_ktat { + u32 at_index; + u8 at_valid:1; + u8 rsv0:7; + u8 at_size:8; + u16 rsv1:16; + u32 kt_index; + u8 kt_valid:1; + u8 rsv2:7; + u8 kt_size:8; + u16 rsv3:16; + u32 at_data[8]; + u32 kt_data[10]; + u32 kt_em:2; + u32 rsv4:30; + u32 rsv5[5]; + } __packed info; +#define NBL_CMD_FEM_KTAT_TAB_WIDTH (sizeof(struct nbl_cmd_fem_ktat) \ + / sizeof(u32)) + u32 data[NBL_CMD_FEM_KTAT_TAB_WIDTH]; +} __packed; + +#define NBL_CMD_FEM_KT_SIZE (16 + 32) +#define HALF_CMD_DESC_LENGTH 16 + +union nbl_fem_ht_acc_data_u { + struct nbl_fem_ht_acc_data { + u32 kt_index:17; + u32 hash:14; + u32 vld:1; + } __packed info; +#define NBL_FEM_HT_ACC_DATA_TBL_WIDTH (sizeof(struct nbl_fem_ht_acc_data) \ + / sizeof(u32)) + u32 data[NBL_FEM_HT_ACC_DATA_TBL_WIDTH]; +} __packed; + +/* CMDQ data content for FEM-HT */ +union nbl_cmd_fem_ht_u { + struct nbl_cmd_fem_ht { + u32 bucket_id:2; /* four buckets in the hash entry */ + u32 entry_id:14; /* hash table entry id */ + u32 ht_id:1; /* 0:HT0, 1:HT1 */ + u32 em_id:2; /* 0:pp0 1:pp1 2 or 3:pp2 */ + u32 rsv:13; + u8 ht_valid:1; + u8 rsv0:7; + u8 rsv1:8; + u16 rsv2:16; + u32 kt_index; + u8 kt_valid:1; + u8 rsv3:7; + u8 kt_size:8; + u16 rsv4:16; + union nbl_fem_ht_acc_data_u ht_data[4]; + u32 rsv5[4]; + u32 kt_data[10]; + u32 kt_em:2; + u32 rsv6:30; + u32 rsv7[5]; + } __packed info; +#define NBL_CMD_FEM_HT_TAB_WIDTH (sizeof(struct nbl_cmd_fem_ht) \ + / sizeof(u32)) + u32 data[NBL_CMD_FEM_HT_TAB_WIDTH]; +} __packed; + +/* size macros, all in unit of bytes */ +#define NBL_CMDQ_FEM_R_REQ_LEN 16 +#define NBL_CMDQ_FEM_W_REQ_LEN 112 +#define NBL_CMDQ_FEM_S_REQ_LEN 112 +#define NBL_CMDQ_ACL_TCAM_R_REQ_LEN 4 +#define NBL_CMDQ_ACL_TCAM_W_REQ_LEN 168 +#define NBL_CMDQ_ACL_TCAM_S_REQ_LEN 84 +#define NBL_CMDQ_ACL_STAT_BASE_LEN 32 +#define NBL_CMDQ_ACL_STAT_ITEM_LEN 12 + +#define NBL_PPE_KT_FULL_SIZE 40 +#define NBL_PPE_KT_HALF_SIZE 20 + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c new file mode 100644 index 
0000000000000000000000000000000000000000..8d9a2715aa5df843cae9ec0ee132783512c41619 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.c @@ -0,0 +1,4532 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ +#include "nbl_tc_flow_leonis.h" +#include "nbl_tc_flow_filter_leonis.h" +#include "nbl_p4_actions.h" +#include "nbl_fc_leonis.h" +#include "nbl_tc_tun_leonis.h" +#include "nbl_resource_leonis.h" + +static struct nbl_profile_msg g_prf_msg[NBL_ALL_PROFILE_NUM] = { + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + 
.valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + 
.key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 1, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 0, + .g_profile_id = 16, + .key_count = 7, + .key_len = 100, + .key_flag = 20500, + .act_count = 2, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 2, + .name = "t_dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 32, + .key_id = 4, + .name = "t_ovnData", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 16, + .key_id = 14, + .name = "t_ovnClass", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 12, + .name = "t_dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 1, + .pt_cmd = 0, + .from_start = 1, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 1, + .g_profile_id = 17, + .key_count = 10, + .key_len = 196, + .key_flag = 20504, + .act_count = 5, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 3, + .name = "t_dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 32, + .key_id = 4, + .name = "t_ovnData", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 164, + .length = 16, + .key_id = 14, + .name = "t_ovnClass", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 180, + .length = 16, + .key_id = 12, + .name = "t_dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 2, + .g_profile_id = 18, + 
.key_count = 18, + .key_len = 160, + .key_flag = 549999083555, + .act_count = 7, + .pre_assoc_profile_id = {16, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {32, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 5, + .name = "t_vni", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 48, + .key_id = 23, + .name = "dstMAC", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 27, + .name = "etherType", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 100, + .length = 16, + .key_id = 26, + .name = "vlan2_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 116, + .length = 16, + .key_id = 25, + .name = "vlan1_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 8, + .key_id = 1, + .name = "sport_b8", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 156, + .length = 4, + .key_id = 39, + .name = "sport_b4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 100, + .length = 4, + .key_id = 0, + .name = "vlan2_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 116, + .length = 4, + .key_id = 0, + .name = "vlan1_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 0, + .pt_cmd = 0, + .from_start = 1, + .to_end = 0, + .need_upcall = 0, + .pp_id = 1, + .profile_id = 3, + .g_profile_id = 19, + .key_count = 11, + .key_len = 112, + .key_flag = 549999083522, + .act_count = 2, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {32, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 48, + .key_id = 23, + .name = "dstMAC", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 52, + .length = 16, + .key_id = 27, + .name = "etherType", + }, + { + .valid = 1, + .key_type = (enum 
nbl_flow_key_type)2, + .offset = 68, + .length = 16, + .key_id = 26, + .name = "vlan2_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 25, + .name = "vlan1_pcv", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 100, + .length = 8, + .key_id = 1, + .name = "sport_b8", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 108, + .length = 4, + .key_id = 39, + .name = "sport_b4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 68, + .length = 4, + .key_id = 0, + .name = "vlan2_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)3, + .offset = 84, + .length = 4, + .key_id = 0, + .name = "vlan1_pcv_mask", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 1, + .pp_id = 2, + .profile_id = 0, + .g_profile_id = 32, + .key_count = 9, + .key_len = 68, + .key_flag = 51541704705, + .act_count = 4, + .pre_assoc_profile_id = {18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {34, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 21, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 8, + .key_id = 35, + .name = "ttl", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 44, + .length = 8, + .key_id = 34, + .name = "tos", + }, + { + .valid = 1, + .key_type = (enum 
nbl_flow_key_type)2, + .offset = 52, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 94, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 72, + .length = 22, + .key_id = 0, + .name = "action3", + }, + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 1, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 1, + .pp_id = 2, + .profile_id = 1, + .g_profile_id = 33, + .key_count = 12, + .key_len = 164, + .key_flag = 51543801857, + .act_count = 7, + .pre_assoc_profile_id = {18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {35, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 22, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 8, + .key_id = 35, + .name = "ttl", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 140, + .length = 8, + .key_id = 34, + .name = "tos", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + .pt_cmd = 1, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 2, + .profile_id = 2, + .g_profile_id = 34, + .key_count = 13, + .key_len = 164, + .key_flag = 8801195917312, + .act_count = 7, + .pre_assoc_profile_id = {32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 32, + .key_id = 19, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 32, + .key_id = 21, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 
124, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + .pt_cmd = 1, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 2, + .profile_id = 3, + .g_profile_id = 35, + .key_count = 13, + .key_len = 164, + .key_flag = 8801198538752, + .act_count = 7, + .pre_assoc_profile_id = {33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 32, + .key_id = 20, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 32, + .key_id = 22, + .name = "dip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 124, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 188, + .length = 22, + .key_id = 0, + .name = "action5", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 166, + .length = 22, + .key_id = 0, + .name = "action6", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 
1, + .pp_id = 2, + .profile_id = 4, + .g_profile_id = 36, + .key_count = 8, + .key_len = 100, + .key_flag = 5100797953, + .act_count = 2, + .pre_assoc_profile_id = {32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 19, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 52, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 138, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 116, + .length = 22, + .key_id = 0, + .name = "action1", + }, + }, + }, + { + .valid = 1, + .pp_mode = 1, + .key_full = 1, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 1, + .pp_id = 2, + .profile_id = 5, + .g_profile_id = 37, + .key_count = 11, + .key_len = 196, + .key_flag = 5101322241, + .act_count = 5, + .pre_assoc_profile_id = {33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 20, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 164, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 180, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 298, + .length = 22, + .key_id = 0, + .name = "action0", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 276, + .length = 22, + .key_id = 0, + .name = "action1", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 254, + .length = 22, + .key_id = 0, + .name = "action2", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 232, + .length = 22, + .key_id = 0, + .name = "action3", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)1, + .offset = 210, + .length = 22, + .key_id = 0, + .name = "action4", + }, + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + 
.pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 3, + .profile_id = 0, + .g_profile_id = 48, + .key_count = 7, + .key_len = 116, + .key_flag = 17597286842369, + .act_count = 0, + .pre_assoc_profile_id = {34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 32, + .key_id = 19, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 36, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 52, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 68, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 84, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 100, + .length = 16, + .key_id = 44, + .name = "dp_hash0", + }, + }, + }, + { + .valid = 1, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 3, + .profile_id = 1, + .g_profile_id = 49, + .key_count = 7, + .key_len = 212, + .key_flag = 17597287366657, + .act_count = 0, + .pre_assoc_profile_id = {35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)0, + .offset = 0, + .length = 4, + .key_id = 0, + .name = "profileID", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 4, + .length = 128, + .key_id = 20, + .name = "sip", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 132, + .length = 16, + .key_id = 28, + .name = "srcPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 148, + .length = 16, + .key_id = 29, + .name = "dstPort", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 164, + .length = 8, + .key_id = 32, + .name = "protocol", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 180, + .length = 16, + .key_id = 0, + .name = "tab_index", + }, + { + .valid = 1, + .key_type = (enum nbl_flow_key_type)2, + .offset = 196, + .length = 16, + .key_id = 44, + .name = "dp_hash0", + }, + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = 
{ + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + 
.g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, + { + .valid = 0, + .pp_mode = 0, + .key_full = 0, + .pt_cmd = 0, + .from_start = 0, + .to_end = 0, + .need_upcall = 0, + .pp_id = 0, + .profile_id = 0, + .g_profile_id = 0, + .key_count = 0, + .key_len = 0, + .key_flag = 0, + .act_count = 0, + .pre_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .next_assoc_profile_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .flow_keys = { + }, + }, +}; + +static struct nbl_profile_assoc_graph g_prf_graph[NBL_ASSOC_PROFILE_GRAPH_NUM] = { + { + .key_flag = 26994920673335, + .profile_count = 5, + .profile_id = {16, 18, 32, 34, 48}, + }, + { + .key_flag = 606641606711, + .profile_count = 4, + .profile_id = {16, 18, 32, 36}, + }, + { + .key_flag = 601540808759, + .profile_count = 3, + .profile_id = {16, 18, 32}, + }, + { + .key_flag = 26994923294775, + .profile_count = 5, + .profile_id = {16, 18, 33, 35, 49}, + }, + { + .key_flag = 606644228151, + .profile_count = 4, + .profile_id = {16, 18, 33, 37}, + }, + { + .key_flag = 601542905911, + .profile_count = 3, + .profile_id = {16, 18, 33}, + }, + { + .key_flag = 549999104055, + .profile_count = 2, + .profile_id = {16, 18}, + }, + { + .key_flag = 20500, + .profile_count = 1, + .profile_id = {16}, + }, + { + .key_flag = 26994920673339, + .profile_count = 5, + .profile_id = {17, 18, 32, 34, 48}, + }, + { + .key_flag = 606641606715, + .profile_count = 4, + .profile_id = {17, 18, 32, 36}, + }, + { + .key_flag = 601540808763, + .profile_count = 3, + .profile_id = {17, 18, 32}, + }, + { + .key_flag = 26994923294779, + .profile_count = 5, + .profile_id = {17, 18, 33, 35, 49}, + }, + { + .key_flag = 606644228155, + .profile_count = 4, + .profile_id = {17, 18, 33, 37}, + }, + { + .key_flag = 601542905915, + .profile_count = 3, + .profile_id = {17, 18, 33}, + }, + { + .key_flag = 549999104059, + .profile_count = 2, + .profile_id = {17, 18}, + }, + { + .key_flag = 20504, + .profile_count = 1, + .profile_id = {17}, + }, + { + .key_flag = 26994920652803, + .profile_count = 4, + 
.profile_id = {19, 32, 34, 48}, + }, + { + .key_flag = 606641586179, + .profile_count = 3, + .profile_id = {19, 32, 36}, + }, + { + .key_flag = 601540788227, + .profile_count = 2, + .profile_id = {19, 32}, + }, + { + .key_flag = 26994923274243, + .profile_count = 4, + .profile_id = {19, 33, 35, 49}, + }, + { + .key_flag = 606644207619, + .profile_count = 3, + .profile_id = {19, 33, 37}, + }, + { + .key_flag = 601542885379, + .profile_count = 2, + .profile_id = {19, 33}, + }, + { + .key_flag = 549999083522, + .profile_count = 1, + .profile_id = {19}, + }, +}; + +static u8 g_profile_graph_count = 23; + +static void nbl_assign_key(u32 *kt_data, bool full, + u32 offset, u16 length, u32 value) +{ + u32 full_offset = NBL_FEM_KT_LEN - offset - length; + u32 index = full_offset / NBL_BITS_IN_U32; + u32 remain = full_offset % NBL_BITS_IN_U32; + u32 shifted = 0; + + if (NBL_BITS_IN_U32 - remain < length) { + /* if the value span across u32 boundary */ + shifted = NBL_BITS_IN_U32 - remain; + kt_data[index] += (value << remain); + kt_data[index + 1] += (value >> shifted); + } else { + kt_data[index] += (value << remain); + } +} + +static void nbl_assign_flow_key_input(u32 *kt_data, bool full, + const struct nbl_flow_key_info *key, + struct nbl_fdir_fltr *input, + u16 tab_index) +{ + const u32 *data = NULL; + const u32 *mask = NULL; + u16 temp_etype = 0; + u16 length = (u16)(key->length / NBL_BITS_IN_U32); + int i = 0; + + switch (1ULL << key->key_id) { + case NBL_FLOW_KEY_TABLE_IDX_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + tab_index); + break; + case NBL_FLOW_KEY_INPORT8_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->port & 0xFF); + break; + case NBL_FLOW_KEY_INPORT4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + (input->port >> NBL_BITS_IN_U8) & 0xF); + break; + case NBL_FLOW_KEY_T_DIPV4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.dst_ip.addr); + break; + case NBL_FLOW_KEY_T_DIPV6_FLAG: + data = (u32 *)(&input->ip_outer.dst_ip.v6_addr); + for (i = length - 1; i >= 0; i--, data++) + nbl_assign_key(kt_data, full, + key->offset + NBL_BITS_IN_U32 * i, + NBL_BITS_IN_U32, (*data)); + break; + case NBL_FLOW_KEY_T_SRCPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4_outer.src_port); + break; + case NBL_FLOW_KEY_T_DSTPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4_outer.dst_port); + break; + case NBL_FLOW_KEY_T_PROTOCOL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.proto); + break; + case NBL_FLOW_KEY_T_TOS_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.tos); + break; + case NBL_FLOW_KEY_T_TTL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip_outer.ttl); + break; + case NBL_FLOW_KEY_T_VNI_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->tnl.vni); + break; + case NBL_FLOW_KEY_SIPV4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.src_ip.addr & input->ip_mask.src_ip.addr); + break; + case NBL_FLOW_KEY_SIPV6_FLAG: + data = (u32 *)(&input->ip.src_ip.v6_addr); + mask = (u32 *)(&input->ip_mask.src_ip.v6_addr); + for (i = length - 1; i >= 0; i--, data++, mask++) + nbl_assign_key(kt_data, full, + key->offset + NBL_BITS_IN_U32 * i, + NBL_BITS_IN_U32, (*data) & (*mask)); + break; + case NBL_FLOW_KEY_DIPV4_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.dst_ip.addr); + break; + 
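/* A worked sketch of the nbl_assign_key() bit packing used by these + * cases (an illustrative note, assuming NBL_FEM_KT_LEN == 320, i.e. a + * 320-bit key-table entry): for a "vlan1_pcv" key at offset 116 with + * length 16, full_offset = 320 - 116 - 16 = 188, so index = 188 / 32 = 5 + * and remain = 188 % 32 = 28. Since 32 - 28 = 4 < 16 the value straddles + * a u32 boundary and is split as kt_data[5] += value << 28 and + * kt_data[6] += value >> 4. + */ +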
case NBL_FLOW_KEY_DIPV6_FLAG: + data = (u32 *)(&input->ip.dst_ip.v6_addr); + /* unlike the SIPV6 case above, the dst IPv6 key takes no mask */ + for (i = length - 1; i >= 0; i--, data++) + nbl_assign_key(kt_data, full, + key->offset + NBL_BITS_IN_U32 * i, + NBL_BITS_IN_U32, (*data)); + break; + case NBL_FLOW_KEY_DSTMAC_FLAG: + data = (u32 *)input->l2_data.dst_mac; + nbl_assign_key(kt_data, full, key->offset + NBL_BITS_IN_U16, + NBL_BITS_IN_U32, *data); + nbl_assign_key(kt_data, full, key->offset, NBL_BITS_IN_U16, + (*(data + 1)) & 0x0000FFFF); + break; + case NBL_FLOW_KEY_SRCMAC_FLAG: + data = (u32 *)input->l2_data.src_mac; + nbl_assign_key(kt_data, full, key->offset + NBL_BITS_IN_U16, + NBL_BITS_IN_U32, *data); + nbl_assign_key(kt_data, full, key->offset, NBL_BITS_IN_U16, + (*(data + 1)) & 0x0000FFFF); + break; + case NBL_FLOW_KEY_SVLAN_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->svlan_tag); + break; + case NBL_FLOW_KEY_CVLAN_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->cvlan_tag); + break; + case NBL_FLOW_KEY_ETHERTYPE_FLAG: + if (input->cvlan_type) + temp_etype = input->cvlan_type; + else if (input->svlan_type) + temp_etype = input->svlan_type; + else + temp_etype = input->l2_data.ether_type; + nbl_assign_key(kt_data, full, key->offset, key->length, + temp_etype); + break; + case NBL_FLOW_KEY_SRCPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4.src_port & input->l4_mask.src_port); + break; + case NBL_FLOW_KEY_DSTPORT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4.dst_port & input->l4_mask.dst_port); + break; + case NBL_FLOW_KEY_PROTOCOL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.proto & input->ip_mask.proto); + break; + case NBL_FLOW_KEY_TCPSTAT_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->l4.tcp_flag); + break; + case NBL_FLOW_KEY_TOS_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.tos); + break; + case NBL_FLOW_KEY_TTL_FLAG: + nbl_assign_key(kt_data, full, key->offset, key->length, + input->ip.ttl); + break; + case NBL_FLOW_KEY_T_DSTMAC_FLAG: + case NBL_FLOW_KEY_T_SRCMAC_FLAG: + case NBL_FLOW_KEY_T_SVLAN_FLAG: + case NBL_FLOW_KEY_T_CVLAN_FLAG: + case NBL_FLOW_KEY_T_ETHERTYPE_FLAG: + case NBL_FLOW_KEY_T_NPROTO_FLAG: + case NBL_FLOW_KEY_T_TCPSTAT_FLAG: + case NBL_FLOW_KEY_ARP_OP_FLAG: + case NBL_FLOW_KEY_ICMPV6_TYPE_FLAG: + case NBL_FLOW_KEY_RDMA_ACK_SEQ_FLAG: + case NBL_FLOW_KEY_RDMA_QPN_FLAG: + case NBL_FLOW_KEY_RDMA_OP_FLAG: + case NBL_FLOW_KEY_INPORT2_FLAG: + case NBL_FLOW_KEY_INPORT2L_FLAG: + default: + break; + } +} + +/* kt_data: one key-table entry, five u64 (ten u32) words of key data */ +static void nbl_assign_hash_key_key(u32 *kt_data, + struct nbl_flow_key_info *key, + struct nbl_profile_msg *prf_msg, + struct nbl_fdir_fltr *input, + u16 tab_index) +{ + /* assign profile id, key PHVs (key and action data) */ + /* ignore bit setter and masks, actions */ + switch (key->key_type) { + case NBL_FLOW_KEY_TYPE_PID: + nbl_assign_key(kt_data, prf_msg->key_full, key->offset, + key->length, prf_msg->profile_id); + break; + case NBL_FLOW_KEY_TYPE_PHV: + nbl_assign_flow_key_input(kt_data, prf_msg->key_full, key, + input, tab_index); + break; + case NBL_FLOW_KEY_TYPE_ACTION: + break; + case NBL_FLOW_KEY_TYPE_BTS: + break; + case NBL_FLOW_KEY_TYPE_MASK: + break; + default: + break; + } +} + +static void nbl_debug_print_hash_key(struct nbl_common_info *common, + struct nbl_flow_tab_conf *hash_key, + struct nbl_profile_msg *prf_msg, + struct nbl_fdir_fltr *input) +{ + size_t index = 0; + u32
*ptr = (u32 *)(&hash_key->key_value); + /* debug example: tnl v4/v6/l2 */ + const union nbl_ipv4_tnl_data_u *p0 = (union nbl_ipv4_tnl_data_u *)(ptr); + const union nbl_ipv6_tnl_data_u *p1 = (union nbl_ipv6_tnl_data_u *)(ptr); + const union nbl_l2_tnl_data_u *p2 = (union nbl_l2_tnl_data_u *)(ptr); + + /* debug example: nontnl l2 */ + const union nbl_l2_notnl_data_u *p3 = (union nbl_l2_notnl_data_u *)(ptr); + + /* debug example: l3 */ + const union nbl_l3_ipv4_data_u *p4 = (union nbl_l3_ipv4_data_u *)(ptr); + const union nbl_l3_ipv6_data_u *p5 = (union nbl_l3_ipv6_data_u *)(ptr); + + /* debug example: t5 ipv4 (160 bits) and t5 ipv6 (320 bits) */ + const union nbl_t5_ipv4_data_u *p8 = (union nbl_t5_ipv4_data_u *)(ptr); + const union nbl_t5_ipv6_data_u *p9 = (union nbl_t5_ipv6_data_u *)(ptr); + + unsigned long long test_l2_notnl = + NBL_FLOW_KEY_DSTMAC_FLAG | NBL_FLOW_KEY_ETHERTYPE_FLAG | + NBL_FLOW_KEY_SVLAN_FLAG | NBL_FLOW_KEY_CVLAN_FLAG; + + unsigned long long test_tnl_v4 = + NBL_FLOW_KEY_T_DIPV4_FLAG | NBL_FLOW_KEY_T_OPT_DATA_FLAG | + NBL_FLOW_KEY_T_OPT_CLASS_FLAG | NBL_FLOW_KEY_T_DSTPORT_FLAG; + + unsigned long long test_tnl_v6 = + NBL_FLOW_KEY_T_DIPV6_FLAG | NBL_FLOW_KEY_T_OPT_DATA_FLAG | + NBL_FLOW_KEY_T_OPT_CLASS_FLAG | NBL_FLOW_KEY_T_DSTPORT_FLAG; + + unsigned long long test_tnl_l2 = + NBL_FLOW_KEY_T_VNI_FLAG | NBL_FLOW_KEY_DSTMAC_FLAG | + NBL_FLOW_KEY_ETHERTYPE_FLAG | NBL_FLOW_KEY_CVLAN_FLAG | + NBL_FLOW_KEY_SVLAN_FLAG; + + unsigned long long test_l3_v4 = NBL_FLOW_KEY_DIPV4_FLAG | + NBL_FLOW_KEY_TTL_FLAG | + NBL_FLOW_KEY_DSCP_FLAG; + + unsigned long long test_l3_v6 = NBL_FLOW_KEY_DIPV6_FLAG | + NBL_FLOW_KEY_TTL_FLAG | + NBL_FLOW_KEY_DSCP_FLAG; + + unsigned long long test_t5_ipv6 = + NBL_FLOW_KEY_DSTPORT_FLAG | NBL_FLOW_KEY_SRCPORT_FLAG | + NBL_FLOW_KEY_PROTOCOL_FLAG | NBL_FLOW_KEY_SIPV6_FLAG; + + unsigned long long test_t5_ipv4 = + NBL_FLOW_KEY_DSTPORT_FLAG | NBL_FLOW_KEY_SRCPORT_FLAG | + NBL_FLOW_KEY_PROTOCOL_FLAG | NBL_FLOW_KEY_SIPV4_FLAG; + + u8 offset = + prf_msg->key_full ? 
0 : (NBL_FEM_KT_HALF_LEN / NBL_BITS_IN_U32); + ptr += offset; + + /* print out all the fields */ + for (index = 0; index < 10; index++) + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw kt data[%zu]: %x\n", index, + hash_key->key_value[index]); + + if ((prf_msg->key_flag & test_tnl_v4) == test_tnl_v4) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated tv4 profile: id %d, " + "dipv4 0x%x, optdata 0x%x, optclass 0x%x, dport 0x%x\n", + p0->info.template, p0->info.dst_ip, + p0->info.option_data, p0->info.option_class, + p0->info.dst_port); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original tv4 profile: id %d, " + "dipv4 0x%x, dport 0x%x\n", + prf_msg->profile_id, input->ip_outer.dst_ip.addr, + input->l4_outer.dst_port); + } else if ((prf_msg->key_flag & test_tnl_v6) == test_tnl_v6) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated tv6 profile: id %d, " + "dipv6 0x%lx 0x%lx, optdata 0x%x, optclass 0x%x, dport 0x%x\n", + p1->info.template, (unsigned long)p1->info.dst_ipv6_1, + (unsigned long)p1->info.dst_ipv6_2, + p1->info.option_data, p1->info.option_class, + p1->info.dst_port); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original tv6 profile: id %d, " + "dipv6 0x%x, dport 0x%x\n", + prf_msg->profile_id, input->ip_outer.dst_ip.addr, + input->l4_outer.dst_port); + } else if ((prf_msg->key_flag & test_tnl_l2) == test_tnl_l2) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated tnl l2 profile: id %d, " + "vni %d, dstmac 0x%lx, etype 0x%04x, cvlan %d, svlan %d\n", + p2->info.template, p2->info.vni, + (unsigned long)p2->info.dst_mac, p2->info.ether_type, + p2->info.cvlan_id, p2->info.svlan_id); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original tnl l2 profile: id %d, " + "dstmac 0x%llx, etype 0x%04x, cvlan %d, svlan %d\n", + prf_msg->profile_id, + *(u64 *)input->l2_data.dst_mac, + input->l2_data.ether_type, input->cvlan_tag, + input->svlan_tag); + } else if ((prf_msg->key_flag & test_l2_notnl) == test_l2_notnl) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated l2 profile: id %d, dstmac 0x%lx, " + "etype 0x%04x, svlan %d, cvlan %d\n", + p3->info.template, (unsigned long)p3->info.dst_mac, + p3->info.ether_type, p3->info.svlan_id, + p3->info.cvlan_id); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original l2 profile: id %d, dstmac 0x%llx, " + "etype 0x%04x, svlan %d, cvlan %d\n", + prf_msg->profile_id, + *(u64 *)input->l2_data.dst_mac, + input->l2_data.ether_type, input->svlan_tag, + input->cvlan_tag); + } else if ((prf_msg->key_flag & test_l3_v4) == test_l3_v4) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated l3 v4: id %d, dip 0x%x, " + "ttl %d, dscp %d\n", + p4->info.template, p4->info.dst_ip, p4->info.ttl, + p4->info.dscp); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original l3 v4: id %d, dip 0x%x, " + "ttl %d, dscp %d\n", + prf_msg->profile_id, input->ip.dst_ip.addr, + input->ip.ttl, input->ip.tos); + } else if ((prf_msg->key_flag & test_l3_v6) == test_l3_v6) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated l3 v6: id %d, dip 0x%llx-%llx, " + "ttl %d, dscp %d\n", + p5->info.template, p5->info.dst_ipv6_1, + p5->info.dst_ipv6_2, p5->info.hoplimit, p5->info.dscp); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original l3 v6: id %d, dip 0x%llx-%llx, " + "ttl %d, dscp %d\n", + prf_msg->profile_id, + *(u64 *)input->ip.dst_ip.v6_addr, + *((u64 *)input->ip.dst_ip.v6_addr + 1), + input->ip.ttl, input->ip.tos); + } else if ((prf_msg->key_flag & test_t5_ipv4) == test_t5_ipv4) { + nbl_debug(common,
NBL_DEBUG_FLOW, + "tc flow hw calculated t5 ipv4 profile: id %d, sip 0x%x, " + "srcport %d, dstport %d, protocol %d\n", + p8->info.template, p8->info.src_ip, p8->info.src_port, + p8->info.dst_port, p8->info.proto); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original data: sip: 0x%x, srcport %d, " + " dstport %d, protocol %d\n", + input->ip.src_ip.addr, input->l4.src_port, + input->l4.dst_port, input->ip.proto); + } else if ((prf_msg->key_flag & test_t5_ipv6) == test_t5_ipv6) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw calculated t5 ipv6 profile: sip 0x%llx-%llx, " + "srcport %d, dstport %d, protocol %d\n", + p9->info.src_ipv6_1, p9->info.src_ipv6_2, + p9->info.src_port, p9->info.dst_port, p9->info.proto); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw original data: sip: 0x%llx-%llx, srcport %d, " + " dstport %d, protocol %d\n", + *(u64 *)input->ip.src_ip.v6_addr, + *((u64 *)input->ip.src_ip.v6_addr + 1), + input->l4.src_port, input->l4.dst_port, + input->ip.proto); + } +} + +static void nbl_assign_hash_key(struct nbl_flow_tab_conf *hash_key, + struct nbl_flow_pattern_conf *filter, + struct nbl_resource_mgt *res_mgt, + struct nbl_profile_offload_msg *off_msg) +{ + /* 320 bit key data, namely 5 * 64 bits */ + u32 *kt_data = hash_key->key_value; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_profile_msg *prf_msg = + &tc_flow_mgt->profile_msg[off_msg->profile_id]; + struct nbl_flow_key_info *key_info = prf_msg->flow_keys; + u8 i = 0; + + /* loop through all keys of this profile */ + for (i = 0; i < prf_msg->key_count; i++, key_info++) { + if (!key_info->valid) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow hw key %s invalid, something went wrong\n", + key_info->name); + + return; + } + + nbl_assign_hash_key_key(kt_data, key_info, prf_msg, + &filter->input, off_msg->assoc_tbl_id); + } + + /* print out the 320 bit key */ + nbl_debug_print_hash_key(common, hash_key, prf_msg, &filter->input); +} + +static inline void nbl_flow_set_bits(u8 *p, u8 mask) +{ + *p |= mask; +} + +static inline void nbl_flow_clr_bits(u8 *p, u8 mask) +{ + *p &= ~mask; +} + +static void nbl_flow_resource_available(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + nbl_flow_set_bits(&tc_flow_mgt->init_status, NBL_FLOW_AVAILABLE_BIT); +} + +void nbl_flow_resource_unavailable(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + nbl_flow_clr_bits(&tc_flow_mgt->init_status, NBL_FLOW_AVAILABLE_BIT); +} + +bool nbl_flow_is_available(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + u8 ret = tc_flow_mgt->init_status & NBL_FLOW_AVAILABLE_BIT; + + return ret != 0; +} + +static bool nbl_flow_is_resource_ready(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + u8 ret = tc_flow_mgt->init_status & NBL_FLOW_INIT_BIT; + + return ret != 0; +} + +static void nbl_flow_set_resource_init_status(struct nbl_tc_flow_mgt *tc_flow_mgt, + bool status) +{ + if (status) + nbl_flow_set_bits(&tc_flow_mgt->init_status, + NBL_FLOW_INIT_BIT); + else + nbl_flow_clr_bits(&tc_flow_mgt->init_status, + NBL_FLOW_INIT_BIT); +} + +/** + * @brief: offload sw-tab to hw + */ +static int nbl_add_nic_hw_flow_tab(void *node, struct nbl_rule_action *act, + struct nbl_resource_mgt *res_mgt, + struct nbl_flow_idx_info *idx_info) +{ + int rc = 0; + + WARN_ON(!node); + rc = nbl_flow_offload_ops.add(node, act, res_mgt, idx_info); + return rc; +} + +/** + * @brief: hw flow tab destroy + */ +static int nbl_del_nic_hw_flow_tab(void *node, struct nbl_resource_mgt *res_mgt, + struct 
nbl_flow_idx_info *idx_info) +{ + int rc = 0; + + WARN_ON(!node); + rc = nbl_flow_offload_ops.del(node, res_mgt, idx_info); + return rc; +} + +/** + * @brief: hw flow tab query + */ +__maybe_unused static int nbl_query_nic_hw_flow_tab(void *node, u32 idx, + void *query_rslt) +{ + int rc = 0; + + WARN_ON(!node); + rc = nbl_flow_offload_ops.query(node, idx, query_rslt); + return rc; +} + +int nbl_tc_flow_alloc_bmp_id(unsigned long *bitmap_mng, u32 size, + u8 type, u32 *bitmap_id) +{ + u32 id; + + if (type == NBL_TC_KT_HALF_MODE) { + id = find_first_zero_bit(bitmap_mng, size); + if (id == size) + return -ENOSPC; + set_bit(id, bitmap_mng); + } else { + id = nbl_common_find_available_idx(bitmap_mng, size, 2, 2); + if (id == size) + return -ENOSPC; + set_bit(id, bitmap_mng); + set_bit(id + 1, bitmap_mng); + } + + *bitmap_id = id; + return 0; +} + +void nbl_tc_flow_free_bmp_id(unsigned long *bitmap_mng, u32 id, u8 type) +{ + if (type == NBL_TC_KT_HALF_MODE) { + clear_bit(id, bitmap_mng); + } else { + clear_bit(id, bitmap_mng); + clear_bit(id + 1, bitmap_mng); + } +} + +/** + * @brief: tnl: ipv4 tnl filter hash tab search func + * + * @param[in] tc_flow_mgt: tc flow hw mgt + * @param[in] key: node key info + * @return nbl_flow_tab_filter *: return node ptr + */ +static struct nbl_flow_tab_filter * +nbl_flow_tab_filter_lookup(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tab_conf *key, u8 profile_id) +{ + struct nbl_flow_tab_filter *tab_filter = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return NULL; + + tab_filter = nbl_common_get_hash_node(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, + key); + return tab_filter; +} + +/** + * @brief: flow_tab.insert hash tab node func + * + * @param[in] tc_flow_mgt: tc flow hw mgt + * @param[in] node: node key info + * @return int: 0-success other-fail + */ +static int nbl_insert_flow_tab_filter(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_tab_conf *key, + struct nbl_flow_tab_filter *node, + struct nbl_flow_tab_filter **new_node, + u8 profile_id) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret; + + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return -EINVAL; + + ret = nbl_common_alloc_hash_node(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, key, + node, (void **)new_node); + if (ret) + return ret; + + tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt++; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw insert pid=%d tab_cnt++ =%d\n", + profile_id, tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt); + + return 0; +} + +/** + * @brief:delete ipv4-tnl-hash-list + * @param[in] tc_flow_mgt: tc flow hw mgt + * @return int: 0-success other-fail + * + */ +static int +nbl_flow_flush_flow_tab_hash_list(struct nbl_resource_mgt *res_mgt, + u8 profile_id) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return 0; + + nbl_common_remove_hash_table(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, NULL); + tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash = NULL; + + return 0; +} + +static int nbl_flow_flush_hash_list(struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + u8 i = 0; + + for (i = 0; i < NBL_ALL_PROFILE_NUM; i++) + ret |= nbl_flow_flush_flow_tab_hash_list(res_mgt, i); + + return ret; +} + +/** + * @brief: tnl.remove 
hash tab node func + * + * @param[in] tc_flow_mgt: tc_flow_mgt + * @param[in] key: node key info + * @param[in] off: is need to offload to hw + * @return int: 0-success other-fail + */ +static int nbl_rmv_flow_tab_filter(struct nbl_resource_mgt *res_mgt, + void *key, bool off, bool last_stage, + u8 profile_id) +{ + struct nbl_flow_idx_info idx_info = { 0 }; + struct nbl_flow_tab_filter *node = NULL; + struct nbl_flow_tab_filter tmp_node; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + + spin_lock(&tc_flow_mgt->flow_lock); + node = nbl_flow_tab_filter_lookup(res_mgt, key, profile_id); + if (!node) { + spin_unlock(&tc_flow_mgt->flow_lock); + } else if (node && node->ref_cnt > NBL_FLOW_TAB_ONE_TIME) { + node->ref_cnt--; + spin_unlock(&tc_flow_mgt->flow_lock); + } else { + memcpy(&tmp_node, node, sizeof(*node)); + if (node->edit_item.is_mir) + list_replace_init(&node->edit_item.tc_mcc_list, + &tmp_node.edit_item.tc_mcc_list); + + if (node->assoc_tbl_id >= NBL_FLOW_TABLE_NUM) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw assoc_tbl_id invalid %u.\n", + node->assoc_tbl_id); + return -EINVAL; + } + + if (node->assoc_tbl_id != 0) + nbl_tc_flow_free_bmp_id(tc_flow_mgt->assoc_table_bmp, + node->assoc_tbl_id, 0); + + nbl_common_free_hash_node(tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash, + key); + node = NULL; + + if (tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt > 0) { + tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt--; + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw rmv pid=%d tab_cnt--=%d\n", + profile_id, tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt); + } else { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw rmv pid=%d tab_cnt=%d, do not reduce\n", + profile_id, tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt); + spin_unlock(&tc_flow_mgt->flow_lock); + return -EINVAL; + } + + spin_unlock(&tc_flow_mgt->flow_lock); + + /* del hw */ + ret = 0; + if (off) { + idx_info.last_stage = last_stage; + idx_info.profile_id = profile_id; + ret = nbl_del_nic_hw_flow_tab(&tmp_node, res_mgt, &idx_info); + } + } + return ret; +} + +/** + * @brief: flow_tab.add hash node, and transfer the key value + * + * @param[in] key: node key info + * @param[out] ptr: hash node + * @return int: 0-success other-fail + */ +static int nbl_flow_tab_hash_add(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow *tc_flow_ptr, void **ptr, + struct nbl_profile_offload_msg *prof_off_msg) +{ + struct nbl_flow_tab_filter *node = NULL; + const struct nbl_flow_tab_filter *pre_node = NULL; + struct nbl_flow_tab_conf hash_key; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + u8 profile_id = prof_off_msg->profile_id; + u8 profile_stage = prof_off_msg->profile_stage; + u32 entries = 0; + struct nbl_flow_tab_filter filter_data; + + memset(&hash_key, 0, sizeof(hash_key)); + + if (profile_stage != 0) { + pre_node = tc_flow_ptr->profile_rule[profile_stage - 1]; + if (!pre_node) + return -EINVAL; + prof_off_msg->assoc_tbl_id = (u16)pre_node->assoc_tbl_id; + } + nbl_assign_hash_key(&hash_key, filter, res_mgt, prof_off_msg); + + spin_lock(&tc_flow_mgt->flow_lock); + node = nbl_flow_tab_filter_lookup(res_mgt, &hash_key, profile_id); + if (node) { + if (prof_off_msg->last_stage) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow offload already, 
drop this one"); + spin_unlock(&tc_flow_mgt->flow_lock); + return -EEXIST; + } + + node->ref_cnt++; + *ptr = node; + tc_flow_ptr->profile_id[profile_stage] = profile_id; + tc_flow_ptr->profile_rule[profile_stage] = node; + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw flow_tab refcnt ++.\n"); + return 0; + } + + if (profile_id <= NBL_PP1_PROFILE_ID_MAX && profile_id > NBL_PP0_PROFILE_ID_MAX) { + entries = NBL_FLOW_TABLE_LEN; + } else if (profile_id <= NBL_PP2_PROFILE_ID_MAX && profile_id > NBL_PP1_PROFILE_ID_MAX) { + entries = NBL_FLOW_TABLE_LEN * 8; + } else { + spin_unlock(&tc_flow_mgt->flow_lock); + return 0; + } + + if (tc_flow_mgt->flow_tab_hash[profile_id].tab_cnt >= entries) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw pid=%d flow_tab num is greater than %d.", + profile_id, entries); + return -EINVAL; + } + + memset(&filter_data, 0, sizeof(filter_data)); + filter_data.ref_cnt = 1; + memcpy(&filter_data.key, &hash_key, sizeof(hash_key)); + + if (prof_off_msg->last_stage) + goto insert_filter; + + /* alloc bmp */ + ret = nbl_tc_flow_alloc_bmp_id(tc_flow_mgt->assoc_table_bmp, + NBL_FLOW_TABLE_NUM, 0, &filter_data.assoc_tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw failed to alloc id for flow tab.\n"); + return -ENOSPC; + } + + if (!filter_data.assoc_tbl_id) { + ret = nbl_tc_flow_alloc_bmp_id(tc_flow_mgt->assoc_table_bmp, + NBL_FLOW_TABLE_NUM, 0, &filter_data.assoc_tbl_id); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw failed to alloc id for flow tab.\n"); + return -ENOSPC; + } + } + +insert_filter: + ret = nbl_insert_flow_tab_filter(res_mgt, &hash_key, &filter_data, &node, profile_id); + if (ret) { + if (!prof_off_msg->last_stage) + nbl_tc_flow_free_bmp_id(tc_flow_mgt->assoc_table_bmp, + filter_data.assoc_tbl_id, 0); + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_info(common, NBL_DEBUG_FLOW, + "tc flow hw failed to insert flow tab filter " + "to hash table %d.\n", ret); + return ret; + } + + *ptr = node; + tc_flow_ptr->profile_id[profile_stage] = profile_id; + tc_flow_ptr->profile_rule[profile_stage] = node; + spin_unlock(&tc_flow_mgt->flow_lock); + return ret; +} + +/** + * @brief: outer tnl flow tab resource storage and offload to hw + * + * @param[in] tc_flow_mgt: tc flow hw info + * @param[in] act: nbl_rule_action info + * @param[in] filter: nbl_flow_pattern_conf info + * @param[out] tc_flow_ptr: tc-flow pointer + * @return int: zero init success, other init failed + */ +static int nbl_flow_tab_storage(struct nbl_resource_mgt *res_mgt, + __maybe_unused struct nbl_rule_action *act, + struct nbl_flow_pattern_conf *filter, + struct nbl_tc_flow *tc_flow_ptr, + struct nbl_profile_offload_msg *prof_off_msg) +{ + int ret = 0; + struct nbl_flow_tab_filter *flow_tab_node = NULL; + struct nbl_flow_idx_info idx_info = { 0 }; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + ret = nbl_flow_tab_hash_add(res_mgt, filter, tc_flow_ptr, + (void **)&flow_tab_node, prof_off_msg); + if (ret || !flow_tab_node) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw flow_tab hash-list storage fail.\n"); + return ret; + } + if (flow_tab_node->ref_cnt > 1) + return 0; + + flow_tab_node->act_flags = act->flag; + idx_info.profile_id = prof_off_msg->profile_id; + idx_info.last_stage = prof_off_msg->last_stage; + idx_info.key_flag = filter->key_flag; + idx_info.pt_cmd = prof_off_msg->pt_cmd; + 
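/* idx_info carries everything the offload layer needs to program this + * stage: the owning profile, whether this is the last stage of the + * association graph, the key flags that selected the template, and the + * pt_cmd taken from the profile message. The ref_cnt check above + * ensures hardware is programmed only for the first reference to a + * given key. + */ +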
ret = nbl_add_nic_hw_flow_tab(flow_tab_node, act, res_mgt, &idx_info); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw nbl_add_nic_hw_flow_tab failed.\n"); + return ret; + } + return ret; +} + +/** + * @brief: store the flow tab: + * 1. configure which key template we need to use + * 2. store key info + * 3. store action info + * 4. offload to hw + * 5. if a tunnel outer flow tab exists, store the tunnel outer flow tab + * + * @param[in] res_mgt: resource management info + * @param[in] tc_flow_ptr: nbl_tc_flow pointer which + * points to the key template + * @param[in] filter: key info + * @param[in] act: actions info + * @return int: 0-success other-fail. + */ +static int nbl_flow_tab_storage_entr(struct nbl_resource_mgt *res_mgt, + struct nbl_tc_flow *tc_flow_ptr, + struct nbl_flow_pattern_conf *filter, + struct nbl_rule_action *act) +{ + int ret = 0; + int ret_2 = 0; + int i = 0; + struct nbl_profile_assoc_graph *asso_graph = NULL; + struct nbl_profile_offload_msg prof_off_msg = { 0 }; + u8 cur_stage = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + act->next_stg_sel = NEXT_STG_SEL_EPRO; + asso_graph = &tc_flow_mgt->profile_graph[filter->graph_idx]; + + for (i = 0; i < NBL_ASSOC_PROFILE_STAGE_NUM; i++) { + /* a pp stage used only to calculate the ecmp dp-hash needs no flow offload */ + if (i && asso_graph->profile_id[i] == 0) + break; + + prof_off_msg.profile_id = asso_graph->profile_id[i]; + prof_off_msg.profile_stage = (u8)i; + prof_off_msg.pt_cmd = + tc_flow_mgt->profile_msg[asso_graph->profile_id[i + 1]].pt_cmd; + cur_stage = tc_flow_mgt->profile_msg[prof_off_msg.profile_id].pp_id; + if ((i == NBL_ASSOC_PROFILE_STAGE_NUM - 1) || asso_graph->profile_id[i + 1] == 0) + prof_off_msg.last_stage = true; + + ret = nbl_flow_tab_storage(res_mgt, act, filter, + tc_flow_ptr, &prof_off_msg); + + if (ret) { + nbl_info(common, NBL_DEBUG_FLOW, + "tc flow hw tab storage failed, ret %d.\n", ret); + goto fail_flow_tab; + } + } + + return ret; + +fail_flow_tab: + for (i = prof_off_msg.profile_stage; i >= 0; i--) { + struct nbl_flow_tab_filter *flow_tab_node = + tc_flow_ptr->profile_rule[i]; + if (!flow_tab_node) + continue; + + tc_flow_ptr->profile_rule[i] = NULL; + ret_2 |= nbl_rmv_flow_tab_filter(res_mgt, + &flow_tab_node->key, true, + false, + asso_graph->profile_id[i]); + if (ret_2 != 0 && ret_2 != -ENONET) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow hw del failed " + "when flow table storage failed. " + "tnl_flag %d, ret_2 %d.\n", + filter->input.tnl_flag, ret_2); + return ret_2; + } + } + return ret; +} + +struct nbl_tc_flow * +nbl_tc_flow_index_lookup(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key) +{ + struct nbl_tc_flow *tc_flow_node = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_index_key_extra extra_key; + + spin_lock(&tc_flow_mgt->flow_lock); + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + nbl_common_get_index_with_data(tc_flow_mgt->flow_idx_tbl, key, &extra_key, NULL, + 0, (void **)&tc_flow_node); + spin_unlock(&tc_flow_mgt->flow_lock); + + return tc_flow_node; +} + +struct nbl_tc_flow * +nbl_tc_flow_insert_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key) +{ + int idx; + struct nbl_tc_flow *tc_flow_node = NULL; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_tc_flow tc_node_tmp; + + spin_lock(&tc_flow_mgt->flow_lock); + memset(&tc_node_tmp, 0, sizeof(struct nbl_tc_flow)); + idx = nbl_common_alloc_index(tc_flow_mgt->flow_idx_tbl, key, NULL, &tc_node_tmp, + sizeof(tc_node_tmp), (void **)&tc_flow_node); + if (idx == U32_MAX) + goto out; + + nbl_debug(common, NBL_DEBUG_FLOW, "tc flow hw cookie=0x%llx add success!\n", key->cookie); +out: + spin_unlock(&tc_flow_mgt->flow_lock); + return tc_flow_node; +} + +int nbl_tc_flow_delete_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + spin_lock(&tc_flow_mgt->flow_lock); + nbl_common_free_index(tc_flow_mgt->flow_idx_tbl, key); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw delete flow cookie=0x%llx success.\n", key->cookie); + spin_unlock(&tc_flow_mgt->flow_lock); + + return 0; +} + +/** + * @brief: look up the profile association graph that matches the filter's key flags + * @return: + * true: found + * false: not found + */ +static bool nbl_flow_assoc_graph_lookup(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pattern_conf *filter) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + u8 i = 0; + + if (filter->key_flag == 0) + return false; + + for (i = 0; i < NBL_ASSOC_PROFILE_GRAPH_NUM; i++) { + if (tc_flow_mgt->profile_graph[i].key_flag == 0) + continue; + + if ((tc_flow_mgt->profile_graph[i].key_flag & ~NBL_FLOW_KEY_TABLE_IDX_FLAG) == + (tc_flow_mgt->profile_graph[i].key_flag & filter->key_flag)) { + filter->graph_idx = i; + return true; + } + } + + return false; +} + +static int nbl_flow_tc_encap_tbl_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + nbl_common_remove_hash_table(tc_flow_mgt->encap_tbl.flow_tab_hash, NULL); + tc_flow_mgt->encap_tbl.flow_tab_hash = NULL; + + return 0; +} + +/** + * @brief: destroy all nbl_tc_flow entries and the action hash-lists + * + * @param[in] res_mgt: resource management info + * @return int: 0-success other-fail + */ +int nbl_flow_flush(struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (!nbl_flow_is_available(tc_flow_mgt)) + return -EINVAL; + + spin_lock(&tc_flow_mgt->flow_lock); + + ret = nbl_flow_flush_hash_list(res_mgt); + if (ret) { + spin_unlock(&tc_flow_mgt->flow_lock); + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw 
flush_hash_list failed %d.\n", ret); + return -EINVAL; + } + + spin_unlock(&tc_flow_mgt->flow_lock); + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + nbl_flow_tc_encap_tbl_uninit(res_mgt); + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + + return ret; +} + +static void nbl_flow_clean_create_destroy_cnt(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ + atomic64_set(&tc_flow_mgt->destroy_num, 0); + atomic64_set(&tc_flow_mgt->create_num, 0); +} + +/** + * @brief: flow_tab_filter hash-list init: + * + * @return int: 0-success other-fail. + */ +static int nbl_flow_tab_filter_init(struct nbl_resource_mgt *res_mgt, + u8 profile_id) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + u32 entries = 0; + struct nbl_hash_tbl_key tbl_key = {0}; + + if (profile_id > NBL_PP2_PROFILE_ID_MAX && profile_id < NBL_ALL_PROFILE_NUM) + return 0; + + if (profile_id <= NBL_PP0_PROFILE_ID_MAX) + entries = 0; + else if (profile_id <= NBL_PP1_PROFILE_ID_MAX) + entries = NBL_FLOW_TABLE_LEN; + else if (profile_id <= NBL_PP2_PROFILE_ID_MAX) + entries = NBL_FLOW_TABLE_LEN * 8; + else + entries = 0; + + if (!entries) + return -EINVAL; + + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(struct nbl_flow_tab_conf), + sizeof(struct nbl_flow_tab_filter), entries, false); + tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash = + nbl_common_init_hash_table(&tbl_key); + if (!tc_flow_mgt->flow_tab_hash[profile_id].flow_tab_hash) + return -EINVAL; + + return 0; +} + +static int nbl_flow_tc_encap_tbl_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + struct nbl_hash_tbl_key tbl_key = {0}; + + NBL_HASH_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), sizeof(struct nbl_encap_key), + sizeof(struct nbl_encap_entry), NBL_TC_ENCAP_TBL_DEPTH, false); + tc_flow_mgt->encap_tbl.flow_tab_hash = nbl_common_init_hash_table(&tbl_key); + if (!tc_flow_mgt->encap_tbl.flow_tab_hash) + return -EINVAL; + + mutex_init(&tc_flow_mgt->encap_tbl_lock); + + return 0; +} + +static int nbl_flow_pp1_ht0_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp1_ht0_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP1_LEN, GFP_KERNEL); + if (!tc_flow_mgt->pp1_ht0_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp1_ht0_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp1_ht0_mng.hash_map); + tc_flow_mgt->pp1_ht0_mng.hash_map = NULL; +} + +static int nbl_flow_pp1_ht1_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp1_ht1_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP1_LEN, GFP_KERNEL); + if (!tc_flow_mgt->pp1_ht1_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp1_ht1_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = 
NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp1_ht1_mng.hash_map); + tc_flow_mgt->pp1_ht1_mng.hash_map = NULL; +} + +static int nbl_flow_pp2_ht0_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp2_ht0_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP2_LEN, GFP_KERNEL); + if (!tc_flow_mgt->pp2_ht0_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp2_ht0_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp2_ht0_mng.hash_map); + tc_flow_mgt->pp2_ht0_mng.hash_map = NULL; +} + +static int nbl_flow_pp2_ht1_tbl_hash_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + tc_flow_mgt->pp2_ht1_mng.hash_map = + devm_kzalloc(common->dev, + sizeof(struct nbl_flow_pp_ht_tbl *) * NBL_FEM_HT_PP2_LEN, GFP_KERNEL); + if (!tc_flow_mgt->pp2_ht1_mng.hash_map) + return -ENOMEM; + + return 0; +} + +static void +nbl_flow_pp2_ht1_tbl_hash_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + devm_kfree(common->dev, tc_flow_mgt->pp2_ht1_mng.hash_map); + tc_flow_mgt->pp2_ht1_mng.hash_map = NULL; +} + +struct nbl_flow_pp_ht_tbl * +nbl_pp_ht_lookup(struct nbl_flow_pp_ht_mng *pp_ht_mng, u16 hash_value, + struct nbl_flow_pp_ht_key *pp_ht_key) +{ + struct nbl_flow_pp_ht_tbl *node = NULL; + u16 i; + bool is_find = false; + + if (!pp_ht_mng || !pp_ht_key) + return NULL; + + node = pp_ht_mng->hash_map[hash_value]; + + if (node) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (!memcmp(pp_ht_key, &node->key[i], sizeof(node->key[i]))) { + is_find = true; + break; + } + } + } + + if (is_find) + return node; + + return NULL; +} + +int nbl_insert_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, u16 hash_value0, + u16 hash_value1, u32 key_index) +{ + struct nbl_flow_pp_ht_tbl *node; + + if (!pp_ht_mng) + return -EINVAL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->key[0].vid = 1; + node->key[0].ht_other_index = hash_value1; + node->key[0].kt_index = key_index; + node->ref_cnt = 1; + + pp_ht_mng->hash_map[hash_value0] = node; + + return 0; +} + +int nbl_delete_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, + struct nbl_flow_pp_ht_tbl *node, u16 hash_value0, + u16 hash_value1, u32 key_index) +{ + u16 i; + int ret = 0; + bool is_delete = false; + + if (!pp_ht_mng || !node) + return -EINVAL; + + if (node->ref_cnt > NBL_FLOW_TAB_ONE_TIME) { + for (i = 0; i < NBL_HASH_CFT_MAX; i++) { + if (node->key[i].ht_other_index == hash_value1 && + node->key[i].kt_index == key_index) { + node->key[i].vid = 0; + node->key[i].ht_other_index = 0; + node->key[i].kt_index = 0; + node->ref_cnt = node->ref_cnt - 1; + + is_delete = true; + break; + } + } + } else { + pp_ht_mng->hash_map[hash_value0] = NULL; + kfree(node); + node = NULL; + + is_delete = true; + } + + 
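/* A pp hash-table node holds up to NBL_HASH_CFT_MAX conflicting keys. + * While more than one reference remains, only the matching key slot is + * cleared and ref_cnt is decremented; the node itself is freed on its + * last reference. Falling through without a deletion means the key was + * not found, reported as -ENODEV below. + */ +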
if (is_delete) + return ret; + + return -ENODEV; +} + +bool nbl_pp_ht0_ht1_search(struct nbl_flow_pp_ht_mng *pp_ht0_mng, u16 ht0_hash, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, u16 ht1_hash) +{ + struct nbl_flow_pp_ht_tbl *node0 = NULL; + struct nbl_flow_pp_ht_tbl *node1 = NULL; + u16 i = 0; + bool is_find = false; + + if (!pp_ht0_mng || !pp_ht1_mng) + return is_find; + + node0 = pp_ht0_mng->hash_map[ht0_hash]; + + if (node0) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node0->key[i].vid && + node0->key[i].ht_other_index == ht1_hash) { + is_find = true; + return is_find; + } + + node1 = pp_ht1_mng->hash_map[ht1_hash]; + + if (node1) + for (i = 0; i < NBL_HASH_CFT_MAX; i++) + if (node1->key[i].vid && + node1->key[i].ht_other_index == ht0_hash) { + is_find = true; + return is_find; + } + + return is_find; +} + +static int nbl_flow_pp_at_tbl_init(struct nbl_resource_mgt *res_mgt) +{ + u32 i; + u32 j; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_index_tbl_key tbl_key; + u32 at_idx_num[NBL_PP_TYPE_MAX][NBL_AT_TYPE_MAX] = { + {0, 0, 0}, + {0, NBL_FEM_AT_PP1_LEN, NBL_FEM_AT2_PP1_LEN}, + {0, NBL_FEM_AT_PP2_LEN, NBL_FEM_AT2_PP2_LEN }, + }; + + for (i = 0; i < NBL_PP_TYPE_MAX; i++) { + for (j = 0; j < NBL_AT_TYPE_MAX; j++) { + if (!at_idx_num[i][j]) + continue; + + NBL_INDEX_TBL_KEY_INIT(&tbl_key, NBL_COMMON_TO_DEV(common), 0, + at_idx_num[i][j], + sizeof(struct nbl_flow_pp_at_key)); + tc_flow_mgt->at_mng.at_tbl[i][j] = nbl_common_init_index_table(&tbl_key); + if (!tc_flow_mgt->at_mng.at_tbl[i][j]) + return -ENOMEM; + } + } + + return 0; +} + +static void nbl_flow_pp_at_tbl_uninit(struct nbl_resource_mgt *res_mgt) +{ + u32 i; + u32 j; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + for (i = 0; i < NBL_PP_TYPE_MAX; i++) { + for (j = 0; j < NBL_AT_TYPE_MAX; j++) { + nbl_common_remove_index_table(tc_flow_mgt->at_mng.at_tbl[i][j], NULL); + tc_flow_mgt->at_mng.at_tbl[i][j] = NULL; + } + } +} + +int nbl_pp_at_lookup(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node) +{ + int idx; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + void *at_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][at_type]; + struct nbl_index_key_extra extra_key; + + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, 0, 0, true); + idx = nbl_common_get_index_with_data(at_tbl, act_key->act, &extra_key, NULL, 0, + (void **)&act_node); + return idx; +} + +int nbl_insert_pp_at(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node) +{ + int idx; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + void *at_tbl = tc_flow_mgt->at_mng.at_tbl[pp_type][at_type]; + struct nbl_index_key_extra extra_key; + struct nbl_flow_at_tbl at_node_tmp; + + NBL_INDEX_EXTRA_KEY_INIT(&extra_key, NBL_FLOW_AT_IDX_NUM, NBL_FLOW_AT_IDX_MULTIPLE, false); + at_node_tmp.ref_cnt = 1; + idx = nbl_common_alloc_index(at_tbl, act_key->act, &extra_key, &at_node_tmp, + sizeof(struct nbl_flow_at_tbl), (void **)act_node); + return idx; +} + +static int nbl_flow_tcam_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + memset(tc_flow_mgt->tcam_pp0_key_mng, 0, + sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp1_key_mng, 0, + 
sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp2_key_mng, 0, + sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp0_ad_mng, 0, + sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp1_ad_mng, 0, + sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp2_ad_mng, 0, + sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM); + + return 0; +} + +static void nbl_flow_tcam_uninit(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + memset(tc_flow_mgt->tcam_pp0_key_mng, 0, + sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp1_key_mng, 0, + sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp2_key_mng, 0, + sizeof(struct nbl_flow_tcam_key_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp0_ad_mng, 0, + sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp1_ad_mng, 0, + sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM); + memset(tc_flow_mgt->tcam_pp2_ad_mng, 0, + sizeof(struct nbl_flow_tcam_ad_mng) * NBL_FEM_TCAM_MAX_NUM); +} + +int nbl_tcam_key_lookup(struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_tcam_item *tcam_item, u16 *index) +{ + int ret = 0; + u16 i; + bool is_find = false; + + if (!tcam_pp_key_mng || !tcam_item || !index) + return -EINVAL; + + if (tcam_item->key_mode == NBL_TC_KT_FULL_MODE) { + for (i = 0; i < NBL_FEM_TCAM_MAX_NUM - 1; i += 2) { + if (tcam_pp_key_mng[i].item.key_mode != NBL_TC_KT_FULL_MODE) + continue; + if (!(memcmp(tcam_pp_key_mng[i].item.key, + tcam_item->kt_data.hash_key, + sizeof(tcam_item->kt_data.hash_key) / 2) && + memcmp(tcam_pp_key_mng[i + 1].item.key, + &tcam_item->kt_data.hash_key[20], + sizeof(tcam_item->kt_data.hash_key) / 2))) { + *index = i; + is_find = true; + break; + } + } + } else { + for (i = 0; i < NBL_FEM_TCAM_MAX_NUM; i++) { + if (tcam_pp_key_mng[i].item.key_mode != NBL_TC_KT_HALF_MODE) + continue; + if (!(memcmp(tcam_pp_key_mng[i].item.key, tcam_item->kt_data.hash_key, + sizeof(tcam_item->kt_data.hash_key) / 2))) { + *index = i; + is_find = true; + break; + } + } + } + + if (is_find) + return ret; + + return -ENODEV; +} + +int nbl_insert_tcam_key_ad(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item, + struct nbl_flow_tcam_ad_item *ad_item, + u16 *index) +{ + int ret = 0; + u16 i = 0; + + bool is_insert = false; + + if (!tcam_pp_key_mng || !tcam_pp_ad_mng || !tcam_item || !ad_item || !index) + return -EINVAL; + + if (tcam_item->key_mode == NBL_TC_KT_FULL_MODE) { + for (; i < NBL_FEM_TCAM_MAX_NUM - 1; i += 2) { + if (!(tcam_pp_key_mng[i].item.key_mode && + tcam_pp_key_mng[i + 1].item.key_mode)) { + memcpy(tcam_pp_key_mng[i].item.key, + tcam_item->kt_data.hash_key, + sizeof(tcam_item->kt_data.hash_key) / 2); + memcpy(tcam_pp_key_mng[i + 1].item.key, + &tcam_item->kt_data.hash_key[20], + sizeof(tcam_item->kt_data.hash_key) / 2); + tcam_pp_key_mng[i].item.key_mode = NBL_TC_KT_FULL_MODE; + tcam_pp_key_mng[i + 1].item.key_mode = NBL_TC_KT_FULL_MODE; + tcam_pp_key_mng[i].ref_cnt = 1; + tcam_pp_key_mng[i + 1].ref_cnt = 1; + tcam_pp_key_mng[i].item.sw_hash_id = tcam_item->sw_hash_id; + tcam_pp_key_mng[i].item.profile_id = tcam_item->profile_id; + + 
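/* A full-mode (320-bit) key occupies two consecutive TCAM entries: the + * first 160 bits go to the even index i and the remaining 160 bits to + * i + 1, which is why the scan above steps by two. The action data is + * written only once, at the even index. + */ +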
memcpy(tcam_pp_ad_mng[i].item.action, ad_item->action, + sizeof(ad_item->action)); + + *index = i; + is_insert = true; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw tcam: insert pp%d index=%d,%d\n", + tcam_item->pp_type, *index, *index + 1); + break; + } + } + } else { + for (; i < NBL_FEM_TCAM_MAX_NUM; i++) { + if (!tcam_pp_key_mng[i].item.key_mode) { + memcpy(tcam_pp_key_mng[i].item.key, tcam_item->kt_data.hash_key, + sizeof(tcam_item->kt_data.hash_key) / 2); + tcam_pp_key_mng[i].item.key_mode = NBL_TC_KT_HALF_MODE; + tcam_pp_key_mng[i].ref_cnt = 1; + tcam_pp_key_mng[i].item.sw_hash_id = + tcam_item->sw_hash_id; + tcam_pp_key_mng[i].item.profile_id = + tcam_item->profile_id; + + memcpy(tcam_pp_ad_mng[i].item.action, ad_item->action, + sizeof(ad_item->action)); + + *index = i; + is_insert = true; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw tcam: insert pp%d index=%d\n", + tcam_item->pp_type, *index); + break; + } + } + } + + if (is_insert) + return ret; + + return -ENODEV; +} + +int nbl_delete_tcam_key_ad(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + u16 index, u8 key_mode, u8 pp_type) +{ + int ret = 0; + + if (!tcam_pp_key_mng || !tcam_pp_ad_mng) + return -EINVAL; + + if (key_mode == NBL_TC_KT_FULL_MODE) { + if (tcam_pp_key_mng[index].ref_cnt > 1) { + tcam_pp_key_mng[index].ref_cnt--; + tcam_pp_key_mng[index + 1].ref_cnt--; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw tcam: ref_cnt-- pp%d index=%d, ref_cnt=%d\n", + pp_type, index, tcam_pp_key_mng[index].ref_cnt); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw tcam: ref_cnt-- pp%d index=%d, ref_cnt=%d\n", + pp_type, index + 1, + tcam_pp_key_mng[index + 1].ref_cnt); + } else { + memset(&tcam_pp_key_mng[index], 0, + sizeof(tcam_pp_key_mng[index])); + memset(&tcam_pp_key_mng[index + 1], 0, + sizeof(tcam_pp_key_mng[index + 1])); + memset(&tcam_pp_ad_mng[index], 0, + sizeof(tcam_pp_ad_mng[index])); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw tcam: delete pp%d index=%d,%d\n", + pp_type, index, index + 1); + } + } else { + if (tcam_pp_key_mng[index].ref_cnt > 1) { + tcam_pp_key_mng[index].ref_cnt--; + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw tcam:ref_cnt-- pp%d index=%d, ref_cnt=%d\n", + pp_type, index, tcam_pp_key_mng[index].ref_cnt); + } else { + memset(&tcam_pp_key_mng[index], 0, + sizeof(tcam_pp_key_mng[index])); + memset(&tcam_pp_ad_mng[index], 0, + sizeof(tcam_pp_ad_mng[index])); + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw tcam: delete pp%d index=%d\n", pp_type, index); + } + } + + return ret; +} + +static int nbl_flow_mcc_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + nbl_tc_mcc_init(&tc_flow_mgt->tc_mcc_mgt, common); + + return 0; +} + +static struct nbl_flow_info_init flow_info_init_list[] = { + { nbl_flow_pp1_ht0_tbl_hash_init }, + { nbl_flow_pp1_ht1_tbl_hash_init }, + { nbl_flow_pp2_ht0_tbl_hash_init }, + { nbl_flow_pp2_ht1_tbl_hash_init }, + + { nbl_flow_tcam_init }, + { nbl_flow_mcc_init }, +}; + +static struct nbl_flow_info_uninit flow_info_uninit_list[] = { + { nbl_flow_pp1_ht0_tbl_hash_uninit }, + { nbl_flow_pp1_ht1_tbl_hash_uninit }, + { nbl_flow_pp2_ht0_tbl_hash_uninit }, + { nbl_flow_pp2_ht1_tbl_hash_uninit }, + + { nbl_flow_tcam_uninit }, +}; + +static int nbl_flow_info_init_list(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = 
NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + u32 idx = 0; + u8 profile_id = 0; + struct nbl_profile_msg *profile_msg = NULL; + struct nbl_flow_prf_data *prf_info = NULL; + u32 item_cnt = 0; + int ret = 0; + u8 p_id = NBL_FLOW_PROFILE_START; + + for (profile_id = p_id; profile_id < NBL_ALL_PROFILE_NUM; profile_id++) { + profile_msg = &tc_flow_mgt->profile_msg[profile_id]; + if (profile_msg->key_len != 0) { + ret = nbl_flow_tab_filter_init(res_mgt, profile_id); + if (ret) + return ret; + } + + if (profile_msg->need_upcall && !profile_msg->pt_cmd && + profile_id < NBL_PP_STAGE_PROFILE_NUM) { + prf_info = &tc_flow_mgt->prf_info.prf_data[item_cnt]; + prf_info->pp_id = profile_msg->pp_id; + prf_info->prf_id = profile_msg->profile_id; + ++item_cnt; + } + } + tc_flow_mgt->prf_info.item_cnt = item_cnt; + + for (; idx < ARRAY_SIZE(flow_info_init_list); idx++) { + ret = flow_info_init_list[idx].init_func(res_mgt); + if (ret) + return ret; + } + + ret = nbl_flow_pp_at_tbl_init(res_mgt); + if (ret) + return ret; + + ret = nbl_flow_tc_encap_tbl_init(res_mgt); + + return ret; +} + +void nbl_flow_info_uninit_list(struct nbl_resource_mgt *res_mgt) +{ + u32 idx; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + spin_lock(&tc_flow_mgt->flow_lock); + for (idx = 0; idx < ARRAY_SIZE(flow_info_uninit_list); idx++) + flow_info_uninit_list[idx].uninit_func(res_mgt); + + nbl_flow_pp_at_tbl_uninit(res_mgt); + spin_unlock(&tc_flow_mgt->flow_lock); +} + +static int nbl_tc_flow_resource_init(struct nbl_resource_mgt *res_mgt) +{ + int ret = 0; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + + spin_lock_init(&tc_flow_mgt->flow_lock); + + ret = nbl_flow_info_init_list(res_mgt); + if (ret) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw info init failed.\n"); + goto flow_info_init_failed; + } + + nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw resource init success\n"); + + return ret; + +flow_info_init_failed: + nbl_flow_info_uninit_list(res_mgt); + return ret; +} + +static int nbl_flow_resource_free(struct nbl_resource_mgt *res_mgt) +{ + nbl_flow_flush(res_mgt); + + nbl_flow_info_uninit_list(res_mgt); + + return 0; +} + +/** + * @brief: init flow tab all resource + * + * @param[in] dev: the dev resource + * @return void + * + * the list of function is as follows: + * 1. init nbl_tc_flow list resource + * 2. init all kinds of key template resource + * 3. init action resource + * 4. 
init counter resource + */ +static int nbl_tc_flow_init(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + int ret = 0; + + tc_flow_mgt->res_mgt = res_mgt; + nbl_flow_clean_create_destroy_cnt(tc_flow_mgt); + + if (nbl_flow_is_resource_ready(tc_flow_mgt)) + return ret; + + tc_flow_mgt->profile_graph_count = g_profile_graph_count; + memcpy(tc_flow_mgt->profile_msg, g_prf_msg, + sizeof(struct nbl_profile_msg) * NBL_ALL_PROFILE_NUM); + memcpy(tc_flow_mgt->profile_graph, g_prf_graph, + sizeof(struct nbl_profile_assoc_graph) * NBL_ASSOC_PROFILE_GRAPH_NUM); + + ret = nbl_tc_flow_resource_init(res_mgt); + if (ret == 0) { + nbl_flow_set_resource_init_status(tc_flow_mgt, true); + nbl_flow_resource_available(tc_flow_mgt); + } else { + return ret; + } + + /* not available still now, depends on mbx */ + return ret; +} + +/** + * @brief: uninit flow tab all resource + * + * @param[in] dev: the dev resource + * @return void + */ +static void nbl_flow_fini(struct nbl_resource_mgt *res_mgt, bool available) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + if (!nbl_flow_is_resource_ready(tc_flow_mgt)) + return; + + if (!available) + return; + + nbl_flow_resource_free(res_mgt); + nbl_flow_set_resource_init_status(tc_flow_mgt, false); +} + +static void +nbl_flow_wait_flows_free_done(struct nbl_tc_flow_mgt *tc_flow_mgt) +{ +#define WAIT_CNT 100 +#define WAIT_TIME 10 /* ms */ + u32 cnt = 0; + + while (1) { + if (cnt > WAIT_CNT) + break; + cnt++; + + if (!atomic64_read(&tc_flow_mgt->ref_cnt)) + break; + mdelay(WAIT_TIME); + } +} + +static int nbl_tc_flow_add_tc_flow(void *priv, struct nbl_tc_flow_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + struct nbl_tc_flow *tc_flow_ptr = NULL; + + if (!tc_flow_mgt) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw add tc_flow_mgt is null.\n"); + return -EINVAL; + } + + if (!nbl_flow_is_available(tc_flow_mgt)) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw resource unavailable.\n"); + return -EINVAL; + } + + if (param->in.type == NBL_TC_PORT_TYPE_VSI) + param->act.flag |= NBL_FLOW_ACTION_EGRESS; + else + param->act.flag |= NBL_FLOW_ACTION_INGRESS; + + param->filter.input.dir = (param->act.flag & NBL_FLOW_ACTION_EGRESS); + + tc_flow_ptr = nbl_tc_flow_insert_index(res_mgt, ¶m->key); + if (!tc_flow_ptr) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow hw index=%llx add failed!\n", param->key.cookie); + ret = -EINVAL; + goto flow_idx_err; + } + + tc_flow_ptr->flow_stat_id = nbl_fc_add_stats_leonis(priv, NBL_FC_COMMON_TYPE, + param->key.cookie); + if (tc_flow_ptr->flow_stat_id < 0) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw failed to add a counter.\n"); + ret = -EINVAL; + goto stats_out; + } else { + param->act.counter_id = tc_flow_ptr->flow_stat_id; + param->act.flag |= NBL_FLOW_ACTION_COUNTER; + } + + if (!nbl_flow_assoc_graph_lookup(res_mgt, ¶m->filter)) { + nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw can not find graph, key_flag:0x%llx.\n", + param->filter.key_flag); + ret = -EINVAL; + goto out; + } + + ret = nbl_flow_tab_storage_entr(res_mgt, tc_flow_ptr, + ¶m->filter, ¶m->act); + + if (ret) + goto out; + atomic64_inc(&tc_flow_mgt->create_num); + tc_flow_ptr->act_flags = param->act.flag; + if (param->act.flag & NBL_FLOW_ACTION_TUNNEL_ENCAP) { + tc_flow_ptr->encap_key = 
kzalloc(sizeof(*tc_flow_ptr->encap_key), GFP_KERNEL); + if (!tc_flow_ptr->encap_key) { + ret = -ENOMEM; + goto out; + } + + memcpy(tc_flow_ptr->encap_key, ¶m->act.encap_key, sizeof(param->act.encap_key)); + } + + return ret; + +out: + nbl_fc_del_stats_leonis(priv, param->key.cookie); +stats_out: + nbl_tc_flow_delete_index(res_mgt, ¶m->key); +flow_idx_err: + return ret; +} + +static int nbl_tc_flow_del_edit_act(struct nbl_resource_mgt *res_mgt, + struct nbl_tc_flow *tc_flow_node) +{ + int ret = 0; + + if (tc_flow_node->act_flags & NBL_FLOW_ACTION_TUNNEL_ENCAP) { + ret = nbl_tc_tun_encap_del(res_mgt, tc_flow_node->encap_key); + kfree(tc_flow_node->encap_key); + } + + return ret; +} + +static void nbl_tc_flow_del_filter_tbl(struct nbl_resource_mgt *res_mgt, + struct nbl_tc_flow *tc_flow_node) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + u8 i = 0; + bool last_stage = false; + struct nbl_flow_tab_filter *flow_tab_node; + + for (i = 0; i < NBL_PP_PROFILE_STAGE_NUM; i++) { + flow_tab_node = tc_flow_node->profile_rule[i]; + if (!flow_tab_node) + continue; + + if (i && tc_flow_node->profile_id[i] == 0) + break; + + if (tc_flow_mgt->profile_msg[tc_flow_node->profile_id[i]].key_flag == 0) + break; + + if (i == (NBL_ASSOC_PROFILE_STAGE_NUM - 1) || + tc_flow_node->profile_id[i + 1] == 0) + last_stage = true; + + if (tc_flow_mgt->profile_msg[tc_flow_node->profile_id[i]].g_profile_id < + NBL_PP_STAGE_PROFILE_NUM) { + ret |= nbl_rmv_flow_tab_filter(res_mgt, &flow_tab_node->key, + true, last_stage, tc_flow_node->profile_id[i]); + } + + if (ret != 0 && ret != -ENONET) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow hw del failed ret %d.\n", ret); + return; + } + } + + /* del actions */ + ret = nbl_tc_flow_del_edit_act(res_mgt, tc_flow_node); + if (ret) + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow del edit action failed ret %d.\n", ret); +} + +static int nbl_tc_flow_del_tc_flow(void *priv, struct nbl_tc_flow_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + int ret = 0; + struct nbl_tc_flow *tc_flow_node = NULL; + + if (!tc_flow_mgt) { + nbl_err(common, NBL_DEBUG_FLOW, "tc flow hw del tc_flow_mgt is null.\n"); + return -EINVAL; + } + + if (!nbl_flow_is_available(tc_flow_mgt)) { + nbl_err(common, NBL_DEBUG_FLOW, + "tc flow hw resource unavailable.\n"); + return -EINVAL; + } + + nbl_fc_del_stats_leonis(priv, param->key.cookie); + tc_flow_node = nbl_tc_flow_index_lookup(res_mgt, ¶m->key); + if (!tc_flow_node) { + nbl_debug(common, NBL_DEBUG_FLOW, + "tc flow hw cookie=%llx not exist to del tc flow!\n", + param->key.cookie); + return -ENOENT; + } + + nbl_tc_flow_del_filter_tbl(res_mgt, tc_flow_node); + ret = nbl_tc_flow_delete_index(res_mgt, ¶m->key); + if (ret) + nbl_info(common, NBL_DEBUG_FLOW, "tc flow hw del tc-flow-list failed.\n"); + else + atomic64_inc(&tc_flow_mgt->destroy_num); + + return ret; +} + +static int nbl_tc_flow_idx_lookup(void *priv, struct nbl_flow_index_key key) +{ + int ret = -ENOKEY; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow *tc_flow_ptr = NULL; + + tc_flow_ptr = nbl_tc_flow_index_lookup(res_mgt, &key); + if (tc_flow_ptr) + ret = 0; + + return ret; +} + +static void nbl_tc_flow_node_del_action_func(void *priv, int index, void *data) +{ + struct 
nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_tc_flow *flow_node; + struct nbl_flow_index_key *flow_key = (struct nbl_flow_index_key *)data; + + flow_node = (struct nbl_tc_flow *)((u8 *)flow_key + sizeof(struct nbl_flow_index_key)); + nbl_fc_del_stats_leonis(priv, flow_key->cookie); + nbl_tc_flow_del_filter_tbl(res_mgt, flow_node); + atomic64_inc(&tc_flow_mgt->destroy_num); +} + +int nbl_tc_flow_flush_flow(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_index_tbl_scan_key scan_key; + + NBL_INDEX_TBL_SCAN_KEY_INIT(&scan_key, true, res_mgt, &nbl_tc_flow_node_del_action_func); + nbl_common_scan_index_table(tc_flow_mgt->flow_idx_tbl, &scan_key); + + return 0; +} + +/* NBL_FLOW_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_TC_FLOW_OPS_TBL \ +do { \ + NBL_TC_FLOW_SET_OPS(add_tc_flow, nbl_tc_flow_add_tc_flow); \ + NBL_TC_FLOW_SET_OPS(del_tc_flow, nbl_tc_flow_del_tc_flow); \ + NBL_TC_FLOW_SET_OPS(flow_index_lookup, nbl_tc_flow_idx_lookup); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_tc_flow_setup_mgt(struct device *dev, struct nbl_tc_flow_mgt **tc_flow_mgt) +{ + struct nbl_index_tbl_key flow_idx_tbl_key; + + *tc_flow_mgt = devm_kzalloc(dev, sizeof(struct nbl_tc_flow_mgt), GFP_KERNEL); + if (!*tc_flow_mgt) + return -ENOMEM; + + NBL_INDEX_TBL_KEY_INIT(&flow_idx_tbl_key, dev, 0, NBL_FLOW_INDEX_LEN, + sizeof(struct nbl_flow_index_key)); + (*tc_flow_mgt)->flow_idx_tbl = nbl_common_init_index_table(&flow_idx_tbl_key); + if (!(*tc_flow_mgt)->flow_idx_tbl) + return -ENOMEM; + + return 0; +} + +static void nbl_tc_flow_remove_mgt(struct device *dev, struct nbl_tc_flow_mgt **tc_flow_mgt) +{ + nbl_common_remove_index_table((*tc_flow_mgt)->flow_idx_tbl, NULL); + devm_kfree(dev, *tc_flow_mgt); + *tc_flow_mgt = NULL; +} + +int nbl_tc_flow_mgt_start_leonis(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_tc_flow_mgt **tc_flow_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + int ret = 0; + + tc_flow_mgt = &NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + ret = nbl_tc_flow_setup_mgt(dev, tc_flow_mgt); + if (ret) + return ret; + ret = nbl_tc_flow_init(res_mgt); + + /* init sub-module hw-flow-stats */ + if (!ret) + return nbl_fc_mgt_start_leonis(res_mgt); + return ret; +} + +void nbl_tc_flow_mgt_stop_leonis(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_tc_flow_mgt **tc_flow_mgt; + bool available; + + tc_flow_mgt = &NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + + if (!(*tc_flow_mgt)) + return; + + mdelay(NBL_SAFE_THREADS_WAIT_TIME); + nbl_flow_wait_flows_free_done(*tc_flow_mgt); + + available = nbl_flow_is_available(*tc_flow_mgt); + nbl_flow_fini(res_mgt, available); + nbl_flow_resource_unavailable(*tc_flow_mgt); + nbl_fc_mgt_stop_leonis(res_mgt); + nbl_tc_flow_remove_mgt(dev, tc_flow_mgt); +} + +int nbl_tc_flow_setup_ops_leonis(struct nbl_resource_ops *res_ops) +{ + int ret = 0; +#define NBL_TC_FLOW_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_TC_FLOW_OPS_TBL; +#undef NBL_TC_FLOW_SET_OPS + + ret = nbl_fc_setup_ops_leonis(res_ops); + if (ret) + return ret; + ret = nbl_tc_tun_setup_ops(res_ops); + return ret; +} + +void nbl_tc_flow_remove_ops_leonis(struct nbl_resource_ops *res_ops) +{ +#define NBL_TC_FLOW_SET_OPS(name, func) do 
{res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_TC_FLOW_OPS_TBL; +#undef NBL_TC_FLOW_SET_OPS + + nbl_fc_remove_ops_leonis(res_ops); + nbl_tc_tun_remove_ops(res_ops); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..ddbba520572f2a4d24059d12774317ef5f49fdfb --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_flow_leonis.h @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ +#ifndef _NBL_TC_FLOW_LEONIS_H_ +#define _NBL_TC_FLOW_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_resource.h" +#include "nbl_tc_mcc_leonis.h" + +#define NBL_FLOW_INGRESS 0 +#define NBL_FLOW_EGRESS 1 + +#define NBL_FLOW_INNER_PATTERN 0 +#define NBL_FLOW_OUTER_PATTERN 1 + +#define NBL_MAX_ETHPORTS 516 +#define NBL_FLOW_ETH_REP_0 2048 +#define NBL_FLOW_ETH_REP_1 2049 +#define NBL_FLOW_ETH_REP_2 2050 +#define NBL_FLOW_ETH_REP_3 2051 +#define NBL_FLOW_BOND_REP_PORT_ID 2052 +#define NBL_ETHER_TYPE_IPV4 4 +#define NBL_ETHER_TYPE_IPV6 6 +#define NBL_FLOW_MAX_REP_ID 0xFFFF + +#define NBL_FLOW_ICMP_REQ_TYPE 8 +#define NBL_FLOW_ICMP_REQ_CODE 0 +#define NBL_FLOW_ICMP_REP_TYPE 0 +#define NBL_FLOW_ICMP_REP_CODE 0 + +#define NBL_FLOW_ICMP6_REQ_TYPE 128 +#define NBL_FLOW_ICMP6_REQ_CODE 0 +#define NBL_FLOW_ICMP6_REP_TYPE 129 +#define NBL_FLOW_ICMP6_REP_CODE 0 + +#define NBL_HASH_CFT_MAX 4 +#define NBL_HASH_CFT_AVL 2 +#define NBL_HASH0 1 +#define NBL_HASH1 2 + +#define NBL_KEY_TYPE_160 0 +#define NBL_KEY_TYPE_320 1 + +#define NBL_FEM_KT_LEN 320 +#define NBL_FEM_KT_HALF_LEN 160 +#define NBL_FEM_AT_LEN 32 +#define NBL_FEM_AT_HALF_LEN 16 +#define NBL_AT_WIDTH 22 + +#define NBL_PP1_AT2_OFFSET (94 * 1024) +#define NBL_PP1_AT_OFFSET (88 * 1024) +#define NBL_PP2_AT2_OFFSET (72 * 1024) + +#define NBL_PP0_POWER 0 +#define NBL_PP1_POWER 12 +#define NBL_PP2_POWER 14 + +#define NBL_FEM_AT_NO_ENTRY (0) +#define NBL_FEM_AT_ONE_ENTRY (1) +#define NBL_FEM_AT_TWO_ENTRY (2) + +#define NBL_HT0_HASH 1 +#define NBL_HT1_HASH 2 + +#define NBL_SAFE_THREADS_WAIT_TIME (200) + +#define NBL_MASK_16 0xffff + +#define NBL_PP_STAGE_PROFILE_NUM (48) +#define NBL_PP_PROFILE_STAGE_NUM (8) + +#define NBL_FLOW_TABLE_LEN (8 * 1024) +#define NBL_TABLE_KEY_VALUE_LEN (40) +#define NBL_TABLE_KEY_DATA_LEN (10) + +#define NBL_BITS_IN_NIBBLE (4) +#define NBL_BITS_IN_U8 (8) +#define NBL_BITS_IN_U16 (16) +#define NBL_BITS_IN_U32 (32) +#define NBL_BITS_IN_U64 (64) + +#define NBL_FLOW_PROFILE_START 16 +#define NBL_FLOW_LEN_INVALID (0xffffffff) + +#define NBL_FLOW_TAB_ONE_TIME 1 +#define NBL_FLOW_TAB_TWO_TIME 2 +#define NBL_FLOW_TABLE_IPV4_DEFAULT_MASK 0xFFFFFFFF +#define NBL_INVALID_U32 0xFFFFFFFF +#define NBL_FLOW_TABLE_L4_PORT_DEFAULT_MASK 0xFFFF +#define NBL_FLOW_TABLE_FULL_MASK_AS_U32 0xFFFFFFFF +#define NBL_FLOW_TABLE_FULL_MASK_AS_U16 0xFFFF +#define NBL_FLOW_TABLE_FULL_MASK_AS_U8 0xFF + +#define NBL_GET_ARG_LEN(sz) ((sz) / sizeof(u32)) +#define NBL_GET_ARG_COPY_LEN(sz) ((sz) * sizeof(u32)) + +/* at node's idx has two continuous idx, and the begin idx need to be even number */ +#define NBL_FLOW_AT_IDX_NUM 2 +#define NBL_FLOW_AT_IDX_MULTIPLE 2 + +struct nbl_tc_flow { + u8 acl_flag:1; + int flow_stat_id; + u64 act_flags; + u8 profile_id[NBL_ASSOC_PROFILE_STAGE_NUM]; + + struct { + void *profile_rule[NBL_ASSOC_PROFILE_STAGE_NUM]; + }; + struct 
nbl_encap_key *encap_key; +}; + +struct nbl_tcam_item { + union nbl_tc_common_data_u kt_data; + u32 tcam_action[NBL_MAX_ACTION_NUM]; + bool tcam_flag; + u8 key_mode; + u8 pp_type; + u32 *pp_tcam_count; + u16 tcam_index; + u32 sw_hash_id; + u8 profile_id; +}; + +#define NBL_ACT_INGRESS 1 +#define NBL_ACT_ENGRESS 0 + +#define NBL_TC_KT_HALF_MODE 1 +#define NBL_TC_KT_FULL_MODE 2 + +struct nbl_edit_item { + struct list_head tc_mcc_list; + u32 encap_idx; + u16 smac_idx; + u16 dmac_idx; + u16 sip_idx; + u16 dip_idx; + u16 mcc_idx; + bool is_mir; + u8 direct; +}; + +struct nbl_select_input { + struct nbl_flow_pp_ht_mng *pp_ht0_mng; + struct nbl_flow_pp_ht_mng *pp_ht1_mng; + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng; + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng; + unsigned long *pp_kt_bmp; + u32 kt_idx_offset; + u32 *pp_tcam_count; + u32 act_offset; + u32 act2_offset; + u32 pp_kt_num; + u8 pp_type; +}; + +/* flow tab hash-list struct */ +struct nbl_flow_tab_conf { + union { + u32 key_value[NBL_TABLE_KEY_DATA_LEN]; + u8 key_data[NBL_TABLE_KEY_VALUE_LEN]; + }; +}; + +struct nbl_flow_tab_filter { + struct nbl_flow_tab_conf key; + struct nbl_tc_ht_item ht_item; + struct nbl_edit_item edit_item; + struct nbl_act_collect act_collect; + u64 act_flags; + u32 assoc_tbl_id; + u32 tbl_id; + u32 sw_hash_id; + u32 ref_cnt; + u16 tcam_index; + u8 pp_type; + bool tcam_flag; +}; + +struct nbl_flow_idx_info { + u64 key_flag; + u32 flow_idx; + u16 tnl_mac_idx; + u16 pp_flag; + u8 outer_pattern_flag; + u8 profile_id; + bool last_stage; + bool pt_cmd; +}; + +struct nbl_profile_offload_msg { + u16 assoc_tbl_id; + u8 profile_id; + u8 profile_stage; + bool pt_cmd; + bool last_stage; +}; + +struct nbl_mt_input { + u32 tbl_id; + u16 depth; + u16 power; + u8 key[NBL_KT_BYTE_LEN]; + u8 key_full; + u8 at_num; + u8 kt_left_num; + u8 pp_type; +}; + +struct nbl_flow_info_init { + int (*init_func)(struct nbl_resource_mgt *res_mgt); +}; + +struct nbl_flow_info_uninit { + void (*uninit_func)(struct nbl_resource_mgt *res_mgt); +}; + +int nbl_tc_flow_alloc_bmp_id(unsigned long *bitmap_mng, u32 size, + u8 type, u32 *bitmap_id); +void nbl_tc_flow_free_bmp_id(unsigned long *bitmap_mng, u32 id, u8 type); +int nbl_flow_flush(struct nbl_resource_mgt *res_mgt); +void nbl_flow_info_uninit_list(struct nbl_resource_mgt *res_mgt); +void nbl_flow_resource_unavailable(struct nbl_tc_flow_mgt *tc_flow_mgt); +bool nbl_flow_is_available(struct nbl_tc_flow_mgt *tc_flow_mgt); +void nbl_flow_ref_inc(void); +void nbl_flow_ref_dec(void); + +struct nbl_flow_pp_ht_tbl * +nbl_pp_ht_lookup(struct nbl_flow_pp_ht_mng *pp_ht_mng, u16 hash_value, + struct nbl_flow_pp_ht_key *pp_ht_key); +int nbl_insert_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, + u16 hash_value0, u16 hash_value1, u32 key_index); +int nbl_delete_pp_ht(struct nbl_resource_mgt *res_mgt, + struct nbl_flow_pp_ht_mng *pp_ht_mng, + struct nbl_flow_pp_ht_tbl *node, u16 hash_value0, + u16 hash_value1, u32 key_index); + +bool nbl_pp_ht0_ht1_search(struct nbl_flow_pp_ht_mng *pp_ht0_mng, u16 ht0_hash, + struct nbl_flow_pp_ht_mng *pp_ht1_mng, u16 ht1_hash); +int nbl_pp_at_lookup(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node); + +int nbl_insert_pp_at(struct nbl_resource_mgt *res_mgt, u8 pp_type, u8 at_type, + struct nbl_flow_pp_at_key *act_key, struct nbl_flow_at_tbl **act_node); + +struct nbl_tc_flow * +nbl_tc_flow_index_lookup(struct nbl_resource_mgt *res_mgt, struct 
nbl_flow_index_key *key); +struct nbl_tc_flow * +nbl_tc_flow_insert_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key); +int nbl_tc_flow_delete_index(struct nbl_resource_mgt *res_mgt, struct nbl_flow_index_key *key); + +int nbl_tcam_key_lookup(struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_tcam_item *tcam_item, u16 *index); +int nbl_insert_tcam_key_ad(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + struct nbl_tcam_item *tcam_item, + struct nbl_flow_tcam_ad_item *ad_item, + u16 *index); +int nbl_delete_tcam_key_ad(struct nbl_common_info *common, + struct nbl_flow_tcam_key_mng *tcam_pp_key_mng, + struct nbl_flow_tcam_ad_mng *tcam_pp_ad_mng, + u16 index, u8 key_mode, u8 pp_type); + +int nbl_cmdq_flow_ht_clear_2hw(struct nbl_tc_ht_item *ht_item, + u8 pp_type, struct nbl_resource_mgt *res_mgt); + +void nbl_flow_remove_ops(struct nbl_resource_ops *res_ops); +int nbl_flow_setup_ops(struct nbl_resource_ops *res_ops); +void nbl_flow_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_flow_mgt_start(struct nbl_resource_mgt *res_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..9d44165482126eb746f49e2d14fc8c278a042d79 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.c @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2030 nbl, Inc. + */ +#include "nbl_tc_mcc_leonis.h" + +static u16 nbl_tc_cfg_action_set_dport_mcc_eth(u8 eth, u8 port_type) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.down.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.down.port_type = SET_DPORT_TYPE_ETH_LAG; + set_dport.dport.down.next_stg_sel = NEXT_STG_SEL_EPRO; + if (port_type == NBL_TC_PORT_TYPE_ETH) { + set_dport.dport.down.eth_vld = 1; + set_dport.dport.down.eth_id = eth; + } else { + set_dport.dport.down.lag_vld = 1; + set_dport.dport.down.lag_id = eth; + } + + return set_dport.data; +} + +static u16 nbl_tc_cfg_action_set_dport_mcc_vsi(u16 vsi) +{ + union nbl_action_data set_dport = {.data = 0}; + + set_dport.dport.up.upcall_flag = AUX_FWD_TYPE_NML_FWD; + set_dport.dport.up.port_type = SET_DPORT_TYPE_VSI_HOST; + set_dport.dport.up.port_id = vsi; + set_dport.dport.up.next_stg_sel = NEXT_STG_SEL_EPRO; + + return set_dport.data; +} + +void nbl_tc_mcc_init(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct nbl_common_info *common) +{ + tc_mcc_mgt->common = common; + INIT_LIST_HEAD(&tc_mcc_mgt->mcc_list); +} + +int nbl_tc_mcc_add_leaf_node(struct nbl_tc_mcc_mgt *tc_mcc_mgt, u16 dport_id, u8 port_type) +{ + struct nbl_tc_mcc_info *mcc_node; + long idx; + + if (tc_mcc_mgt->mcc_offload_cnt >= NBL_TC_MCC_MAX_OFFLOAD_CNT) { + nbl_err(tc_mcc_mgt->common, NBL_DEBUG_FLOW, "tc mcc groups exceed max num\n"); + return -ENOBUFS; + } + + idx = find_first_zero_bit(tc_mcc_mgt->mcc_pool, NBL_TC_MCC_TBL_DEPTH); + /* idx won't exceed NBL_TC_MCC_TBL_DEPTH unless flow call error */ + if (idx >= NBL_TC_MCC_TBL_DEPTH) { + nbl_err(tc_mcc_mgt->common, NBL_DEBUG_FLOW, "tc mcc no available idx\n"); + return -ENOBUFS; + } + mcc_node = kzalloc(sizeof(*mcc_node), GFP_KERNEL); + if (!mcc_node) + return -ENOMEM; + + mcc_node->port_type = port_type; + mcc_node->dport_id = dport_id; + mcc_node->mcc_id = (u16)idx; + + set_bit(idx, 
tc_mcc_mgt->mcc_pool); + list_add(&mcc_node->node, &tc_mcc_mgt->mcc_list); + nbl_debug(tc_mcc_mgt->common, NBL_DEBUG_FLOW, "tc mcc group %d add member port type %d id %d\n", + (int)idx, port_type, dport_id); + + return idx; +} + +void nbl_tc_mcc_get_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct list_head *tc_mcc_list) +{ + list_replace_init(&tc_mcc_mgt->mcc_list, tc_mcc_list); +} + +void nbl_tc_mcc_free_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt) +{ + struct nbl_tc_mcc_info *mcc_node = NULL; + struct nbl_tc_mcc_info *safe_node = NULL; + + list_for_each_entry_safe(mcc_node, safe_node, &tc_mcc_mgt->mcc_list, node) { + list_del(&mcc_node->node); + clear_bit(mcc_node->mcc_id, tc_mcc_mgt->mcc_pool); + nbl_debug(tc_mcc_mgt->common, NBL_DEBUG_FLOW, + "tc mcc group %d free member port type %d id %d\n", + mcc_node->mcc_id, mcc_node->port_type, mcc_node->dport_id); + kfree(mcc_node); + } +} + +void nbl_tc_mcc_add_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt) +{ + struct nbl_tc_mcc_info *mcc_node = NULL; + struct nbl_phy_ops *phy_ops; + u16 prev_mcc_id, mcc_action; + bool mcc_add_succ = false; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + list_for_each_entry(mcc_node, &tc_mcc_mgt->mcc_list, node) { + if (mcc_node->port_type == NBL_TC_PORT_TYPE_VSI) + mcc_action = nbl_tc_cfg_action_set_dport_mcc_vsi(mcc_node->dport_id); + else + mcc_action = nbl_tc_cfg_action_set_dport_mcc_eth((u8)mcc_node->dport_id, + mcc_node->port_type); + + if (nbl_list_is_first(&mcc_node->node, &tc_mcc_mgt->mcc_list)) + prev_mcc_id = NBL_MCC_ID_INVALID; + else + prev_mcc_id = list_prev_entry(mcc_node, node)->mcc_id; + phy_ops->add_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, + prev_mcc_id, mcc_action); + mcc_add_succ = true; + } + if (mcc_add_succ) + ++tc_mcc_mgt->mcc_offload_cnt; +} + +void nbl_tc_mcc_free_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt, + struct list_head *tc_mcc_list) +{ + struct nbl_tc_mcc_info *mcc_node = NULL; + struct nbl_tc_mcc_info *safe_node = NULL; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + list_for_each_entry_safe(mcc_node, safe_node, tc_mcc_list, node) { + phy_ops->del_mcc(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), mcc_node->mcc_id, + NBL_MCC_ID_INVALID, NBL_MCC_ID_INVALID); + list_del(&mcc_node->node); + clear_bit(mcc_node->mcc_id, tc_mcc_mgt->mcc_pool); + nbl_debug(tc_mcc_mgt->common, NBL_DEBUG_FLOW, + "tc mcc group %d free member port type %d id %d\n", + mcc_node->mcc_id, mcc_node->port_type, mcc_node->dport_id); + kfree(mcc_node); + } + --tc_mcc_mgt->mcc_offload_cnt; +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h new file mode 100644 index 0000000000000000000000000000000000000000..eaf67e5f453fa267f09d8323794032946ceec111 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_mcc_leonis.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2030 nbl, Inc. 
+ */ +#ifndef _NBL_TC_MCC_LEONIS_H_ +#define _NBL_TC_MCC_LEONIS_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" +#include "nbl_resource.h" + +#define NBL_TC_MCC_MAX_OFFLOAD_CNT (8) + +struct nbl_tc_mcc_info { + struct list_head node; + u16 dport_id; + u16 mcc_id; + u8 port_type; +}; + +void nbl_tc_mcc_init(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct nbl_common_info *common); +int nbl_tc_mcc_add_leaf_node(struct nbl_tc_mcc_mgt *tc_mcc_mgt, u16 dport_id, u8 port_type); +void nbl_tc_mcc_get_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt, struct list_head *tc_mcc_list); +void nbl_tc_mcc_add_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt); +void nbl_tc_mcc_free_hw_tbl(struct nbl_resource_mgt *res_mgt, struct nbl_tc_mcc_mgt *tc_mcc_mgt, + struct list_head *tc_mcc_list); +void nbl_tc_mcc_free_list(struct nbl_tc_mcc_mgt *tc_mcc_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c new file mode 100644 index 0000000000000000000000000000000000000000..149152b98d8c8cbf095a145c177c5c8ec2e05f53 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.c @@ -0,0 +1,169 @@ +#include "nbl_resource.h" +#include "nbl_tc_tun_leonis.h" + +static bool nbl_tc_tun_encap_lookup(void *priv, + struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param) +{ + bool encap_find = false; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_encap_entry *encap_node = NULL; + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + if (!tc_flow_mgt->encap_tbl.flow_tab_hash) { + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + nbl_err(common, NBL_DEBUG_FLOW, "encap hash tbl is null.\n"); + encap_find = false; + goto end; + } + + encap_node = nbl_common_get_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, + &rule_act->encap_key); + if (encap_node) { + encap_node->ref_cnt++; + rule_act->encap_idx = encap_node->encap_idx; + rule_act->vni = encap_node->vni; + rule_act->tc_tun_encap_out_dev = encap_node->out_dev; + nbl_debug(common, NBL_DEBUG_FLOW, "encap is exist, vni %d, encap_idx %d", + rule_act->vni, rule_act->encap_idx); + encap_find = true; + } + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); +end: + return encap_find; +} + +int nbl_tc_tun_encap_del(void *priv, struct nbl_encap_key *key) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(tc_flow_mgt->res_mgt); + struct nbl_encap_entry *e = NULL; + const struct nbl_phy_ops *phy_ops; + bool del_hw_encap_tbl = false; + u16 encap_idx = 0; + + res_mgt = tc_flow_mgt->res_mgt; + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + if (!key) { + nbl_err(common, NBL_DEBUG_FLOW, "encap_key is null"); + return -EINVAL; + } + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + + e = nbl_common_get_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, key); + if (e) { + if (e->ref_cnt > 1) { + e->ref_cnt--; + } else { + /* remove encap from hw */ + del_hw_encap_tbl = true; + encap_idx = e->encap_idx; + /* free soft encap hash node */ + clear_bit(e->encap_idx, tc_flow_mgt->encap_tbl_bmp); + nbl_common_free_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, key); + tc_flow_mgt->encap_tbl.tab_cnt--; + } 
+ } + + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + + if (del_hw_encap_tbl) + phy_ops->del_tnl_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), encap_idx); + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl tc del encap_idx: %u, encap_node:%p, " + "del_hw:%d", encap_idx, e, del_hw_encap_tbl); + + return 0; +} + +static int nbl_tc_tun_encap_add(void *priv, struct nbl_rule_action *action) +{ + u16 encap_idx; + int ret = 0; + struct nbl_encap_entry e; + struct nbl_encap_entry *encap_node; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_tc_flow_mgt *tc_flow_mgt = NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt); + const struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(tc_flow_mgt->res_mgt); + const struct nbl_phy_ops *phy_ops; + + res_mgt = tc_flow_mgt->res_mgt; + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + mutex_lock(&tc_flow_mgt->encap_tbl_lock); + + encap_idx = (u16)find_first_zero_bit(tc_flow_mgt->encap_tbl_bmp, + NBL_TC_ENCAP_TBL_DEPTH); + if (encap_idx == NBL_TC_ENCAP_TBL_DEPTH) { + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + ret = -ENOSPC; + nbl_info(common, NBL_DEBUG_FLOW, "encap tbl is full, cnt:%u", encap_idx); + goto err; + } + + set_bit(encap_idx, tc_flow_mgt->encap_tbl_bmp); + action->encap_idx = encap_idx; + memset(&e, 0, sizeof(e)); + e.ref_cnt = 1; + e.out_dev = action->tc_tun_encap_out_dev; + memcpy(e.encap_buf, action->encap_buf, NBL_FLOW_ACTION_ENCAP_TOTAL_LEN); + e.encap_size = action->encap_size; + e.encap_idx = action->encap_idx; + e.vni = action->vni; + memcpy(&e.key, &action->encap_key, sizeof(action->encap_key)); + + /* insert encap_node */ + ret = nbl_common_alloc_hash_node(tc_flow_mgt->encap_tbl.flow_tab_hash, + &action->encap_key, &e, (void **)&encap_node); + if (ret) { + clear_bit(encap_idx, tc_flow_mgt->encap_tbl_bmp); + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + nbl_info(common, NBL_DEBUG_FLOW, "alloc encap node failed, ret %d!", ret); + goto err; + } + + tc_flow_mgt->encap_tbl.tab_cnt++; + + mutex_unlock(&tc_flow_mgt->encap_tbl_lock); + + /* fill act_buf and send to hw */ + phy_ops->add_tnl_encap(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), action->encap_buf, + action->encap_idx, action->encap_idx_info); + + nbl_debug(common, NBL_DEBUG_FLOW, "nbl tc new encap_idx: %u.", encap_idx); + +err: + return ret; +} + +/* NBL_TC_TUN_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. 
+ */
+#define NBL_TC_TUN_OPS_TBL						\
+do {									\
+	NBL_TC_TUN_SET_OPS(tc_tun_encap_lookup, nbl_tc_tun_encap_lookup);	\
+	NBL_TC_TUN_SET_OPS(tc_tun_encap_del, nbl_tc_tun_encap_del);		\
+	NBL_TC_TUN_SET_OPS(tc_tun_encap_add, nbl_tc_tun_encap_add);		\
+} while (0)
+
+int nbl_tc_tun_setup_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_TC_TUN_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0)
+	NBL_TC_TUN_OPS_TBL;
+#undef NBL_TC_TUN_SET_OPS
+
+	return 0;
+}
+
+void nbl_tc_tun_remove_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_TC_TUN_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0)
+	NBL_TC_TUN_OPS_TBL;
+#undef NBL_TC_TUN_SET_OPS
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h
new file mode 100644
index 0000000000000000000000000000000000000000..424ed8781820d039e0bff4043876dc53e788ed08
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_hw_leonis/nbl_tc_tun_leonis.h
@@ -0,0 +1,14 @@
+#ifndef __NBL_TC_TUN_LEONIS_H__
+#define __NBL_TC_TUN_LEONIS_H__
+
+#include
+#include "nbl_include.h"
+#include "nbl_core.h"
+#include "nbl_resource.h"
+
+int nbl_tc_tun_setup_ops(struct nbl_resource_ops *res_ops);
+void nbl_tc_tun_remove_ops(struct nbl_resource_ops *res_ops);
+
+int nbl_tc_tun_encap_del(void *priv, struct nbl_encap_key *key);
+
+#endif /* end of __NBL_TC_TUN_LEONIS_H__ */
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c
new file mode 100644
index 0000000000000000000000000000000000000000..52782beef501b5915ddccc377c0a98d2a35b192e
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ + +#include "nbl_interrupt.h" + +static int nbl_res_intr_destroy_msix_map(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev, *dma_dev; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + struct nbl_msix_map_table *msix_map_table; + u16 *interrupts; + u16 intr_num; + u16 i; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + dev = NBL_RES_MGT_TO_DEV(res_mgt); + dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + + /* use ctrl dev bdf */ + phy_ops->configure_msix_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, false, + 0, 0, 0, 0); + + intr_num = intr_mgt->func_intr_res[func_id].num_interrupts; + interrupts = intr_mgt->func_intr_res[func_id].interrupts; + + WARN_ON(!interrupts); + for (i = 0; i < intr_num; i++) { + if (interrupts[i] >= NBL_MAX_OTHER_INTERRUPT) + clear_bit(interrupts[i] - NBL_MAX_OTHER_INTERRUPT, + intr_mgt->interrupt_net_bitmap); + else + clear_bit(interrupts[i], intr_mgt->interrupt_others_bitmap); + + phy_ops->configure_msix_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, false, + interrupts[i], 0, 0, 0, false); + } + + kfree(interrupts); + intr_mgt->func_intr_res[func_id].interrupts = NULL; + intr_mgt->func_intr_res[func_id].num_interrupts = 0; + + msix_map_table = &intr_mgt->func_intr_res[func_id].msix_map_table; + dma_free_coherent(dma_dev, msix_map_table->size, msix_map_table->base_addr, + msix_map_table->dma); + msix_map_table->size = 0; + msix_map_table->base_addr = NULL; + msix_map_table->dma = 0; + + return ret; +} + +static int nbl_res_intr_configure_msix_map(void *priv, u16 func_id, u16 num_net_msix, + u16 num_others_msix, bool net_msix_mask_en) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev, *dma_dev; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + struct nbl_common_info *common; + struct nbl_msix_map_table *msix_map_table; + struct nbl_msix_map *msix_map_entries; + u16 *interrupts; + u16 requested; + u16 intr_index; + u16 i; + u8 bus, devid, function; + bool msix_mask_en; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + dev = NBL_RES_MGT_TO_DEV(res_mgt); + dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + common = NBL_RES_MGT_TO_COMMON(res_mgt); + + if (intr_mgt->func_intr_res[func_id].interrupts) + nbl_res_intr_destroy_msix_map(priv, func_id); + + nbl_res_func_id_to_bdf(res_mgt, func_id, &bus, &devid, &function); + + msix_map_table = &intr_mgt->func_intr_res[func_id].msix_map_table; + WARN_ON(msix_map_table->base_addr); + msix_map_table->size = sizeof(struct nbl_msix_map) * NBL_MSIX_MAP_TABLE_MAX_ENTRIES; + msix_map_table->base_addr = dma_alloc_coherent(dma_dev, msix_map_table->size, + &msix_map_table->dma, + GFP_ATOMIC | __GFP_ZERO); + if (!msix_map_table->base_addr) { + pr_err("Allocate DMA memory for function msix map table failed\n"); + msix_map_table->size = 0; + return -ENOMEM; + } + + requested = num_net_msix + num_others_msix; + interrupts = kcalloc(requested, sizeof(interrupts[0]), GFP_ATOMIC); + if (!interrupts) { + pr_err("Allocate function interrupts array failed\n"); + ret = -ENOMEM; + goto alloc_interrupts_err; + } + + intr_mgt->func_intr_res[func_id].interrupts = interrupts; + intr_mgt->func_intr_res[func_id].num_interrupts = requested; + + for (i = 0; i < num_net_msix; i++) { + 
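+		/* Net vectors are drawn from the dedicated net bitmap; the
+		 * stored global msix id is offset by NBL_MAX_OTHER_INTERRUPT
+		 * so the teardown paths can tell the two pools apart.
+		 */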
intr_index = find_first_zero_bit(intr_mgt->interrupt_net_bitmap, + NBL_MAX_NET_INTERRUPT); + if (intr_index == NBL_MAX_NET_INTERRUPT) { + pr_err("There is no available interrupt left\n"); + ret = -EAGAIN; + goto get_interrupt_err; + } + interrupts[i] = intr_index + NBL_MAX_OTHER_INTERRUPT; + set_bit(intr_index, intr_mgt->interrupt_net_bitmap); + } + + for (i = num_net_msix; i < requested; i++) { + intr_index = find_first_zero_bit(intr_mgt->interrupt_others_bitmap, + NBL_MAX_OTHER_INTERRUPT); + if (intr_index == NBL_MAX_OTHER_INTERRUPT) { + pr_err("There is no available interrupt left\n"); + ret = -EAGAIN; + goto get_interrupt_err; + } + interrupts[i] = intr_index; + set_bit(intr_index, intr_mgt->interrupt_others_bitmap); + } + + msix_map_entries = msix_map_table->base_addr; + for (i = 0; i < requested; i++) { + msix_map_entries[i].global_msix_index = interrupts[i]; + msix_map_entries[i].valid = 1; + + if (i < num_net_msix && net_msix_mask_en) + msix_mask_en = 1; + else + msix_mask_en = 0; + phy_ops->configure_msix_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, true, + interrupts[i], bus, devid, function, msix_mask_en); + if (i < num_net_msix) + phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + interrupts[i], 0, 0); + } + + /* use ctrl dev bdf */ + phy_ops->configure_msix_map(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, true, + msix_map_table->dma, common->bus, common->devid, + NBL_COMMON_TO_PCI_FUNC_ID(common)); + + return 0; + +get_interrupt_err: + while (i--) { + intr_index = interrupts[i]; + if (intr_index >= NBL_MAX_OTHER_INTERRUPT) + clear_bit(intr_index - NBL_MAX_OTHER_INTERRUPT, + intr_mgt->interrupt_net_bitmap); + else + clear_bit(intr_index, intr_mgt->interrupt_others_bitmap); + } + kfree(interrupts); + intr_mgt->func_intr_res[func_id].num_interrupts = 0; + intr_mgt->func_intr_res[func_id].interrupts = NULL; + +alloc_interrupts_err: + dma_free_coherent(dma_dev, msix_map_table->size, msix_map_table->base_addr, + msix_map_table->dma); + msix_map_table->size = 0; + msix_map_table->base_addr = NULL; + msix_map_table->dma = 0; + + return ret; +} + +static int nbl_res_intr_enable_mailbox_irq(void *priv, u16 func_id, u16 vector_id, bool enable_msix) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + u16 global_vector_id; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id]; + phy_ops->enable_mailbox_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), func_id, enable_msix, + global_vector_id); + + return 0; +} + +static int nbl_res_intr_enable_abnormal_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + u16 global_vector_id; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + global_vector_id = intr_mgt->func_intr_res[0].interrupts[vector_id]; + phy_ops->enable_abnormal_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), enable_msix, + global_vector_id); + return 0; +} + +static u8 *nbl_res_get_msix_irq_enable_info(void *priv, u16 global_vector_id, u32 *irq_data) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + return phy_ops->get_msix_irq_enable_info(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_vector_id, + 
irq_data); +} + +static u16 nbl_res_intr_get_global_vector(void *priv, u16 vsi_id, u16 local_vector_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + return intr_mgt->func_intr_res[func_id].interrupts[local_vector_id]; +} + +static u16 nbl_res_intr_get_msix_entry_id(void *priv, u16 vsi_id, u16 local_vector_id) +{ + return local_vector_id; +} + +static void nbl_res_intr_get_coalesce(void *priv, u16 func_id, u16 vector_id, + struct nbl_chan_param_get_coalesce *ec) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 global_vector_id; + u16 pnum = 0; + u16 rate = 0; + + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id]; + phy_ops->get_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), global_vector_id, &pnum, &rate); + /* tx and rx using the same interrupt */ + NBL_SET_INTR_COALESCE(ec, rate, pnum, rate, pnum); +} + +static void nbl_res_intr_set_coalesce(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 pnum, u16 rate) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 global_vector_id; + int i; + + for (i = 0; i < num_net_msix; i++) { + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id + i]; + phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_vector_id, pnum, rate); + } +} + +static int nbl_res_intr_enable_adminq_irq(void *priv, u16 vector_id, bool enable_msix) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops; + struct nbl_interrupt_mgt *intr_mgt; + u16 global_vector_id; + + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + global_vector_id = intr_mgt->func_intr_res[0].interrupts[vector_id]; + phy_ops->enable_adminq_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), enable_msix, + global_vector_id); + return 0; +} + +static int nbl_res_intr_get_mbx_irq_num(void *priv) +{ + return 1; +} + +static int nbl_res_intr_get_adminq_irq_num(void *priv) +{ + return 1; +} + +static int nbl_res_intr_get_abnormal_irq_num(void *priv) +{ + return 1; +} + +static u16 nbl_res_intr_get_suppress_level(void *priv, u64 rates, u16 last_level) +{ + switch (last_level) { + case NBL_INTR_SUPPRESS_LEVEL0: + if (rates > NBL_INTR_SUPPRESS_LEVEL1_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL1; + else + return NBL_INTR_SUPPRESS_LEVEL0; + case NBL_INTR_SUPPRESS_LEVEL1: + if (rates > NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD) + return NBL_INTR_SUPPRESS_LEVEL1; + else + return NBL_INTR_SUPPRESS_LEVEL0; + default: + return NBL_INTR_SUPPRESS_LEVEL0; + } +} + +static void nbl_res_intr_set_intr_suppress_level(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + u16 global_vector_id; + u16 pnum, rate; + int i; + + switch (level) { + case NBL_INTR_SUPPRESS_LEVEL1: + if (res_mgt->resource_info->board_info.eth_speed == NBL_FW_PORT_SPEED_100G) { + pnum = 
NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL1_100G_RATE; + } else { + pnum = NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL1_25G_RATE; + } + break; + default: + pnum = NBL_INTR_SUPPRESS_LEVEL0_PNUM; + rate = NBL_INTR_SUPPRESS_LEVEL0_RATE; + break; + } + for (i = 0; i < num_net_msix; i++) { + global_vector_id = intr_mgt->func_intr_res[func_id].interrupts[vector_id + i]; + phy_ops->set_coalesce(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + global_vector_id, pnum, rate); + } +} + +static void nbl_res_flr_clear_interrupt(void *priv, u16 vf_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 func_id = vf_id + NBL_MAX_PF; + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + if (intr_mgt->func_intr_res[func_id].interrupts) + nbl_res_intr_destroy_msix_map(priv, func_id); +} + +static void nbl_res_intr_unmask(struct nbl_resource_mgt *res_mgt, u16 interrupts_id) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->enable_msix_irq(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), interrupts_id); +} + +static void nbl_res_unmask_all_interrupts(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_interrupt_mgt *intr_mgt = NBL_RES_MGT_TO_INTR_MGT(res_mgt); + int i, j; + + for (i = 0; i < NBL_MAX_PF; i++) { + if (intr_mgt->func_intr_res[i].interrupts) { + for (j = 0; j < intr_mgt->func_intr_res[i].num_interrupts; j++) + nbl_res_intr_unmask(res_mgt, + intr_mgt->func_intr_res[i].interrupts[j]); + } + } +} + +/* NBL_INTR_SET_OPS(ops_name, func) + * + * Use X Macros to reduce setup and remove codes. + */ +#define NBL_INTR_OPS_TBL \ +do { \ + NBL_INTR_SET_OPS(configure_msix_map, nbl_res_intr_configure_msix_map); \ + NBL_INTR_SET_OPS(destroy_msix_map, nbl_res_intr_destroy_msix_map); \ + NBL_INTR_SET_OPS(enable_mailbox_irq, nbl_res_intr_enable_mailbox_irq); \ + NBL_INTR_SET_OPS(enable_abnormal_irq, nbl_res_intr_enable_abnormal_irq); \ + NBL_INTR_SET_OPS(enable_adminq_irq, nbl_res_intr_enable_adminq_irq); \ + NBL_INTR_SET_OPS(get_msix_irq_enable_info, nbl_res_get_msix_irq_enable_info); \ + NBL_INTR_SET_OPS(get_global_vector, nbl_res_intr_get_global_vector); \ + NBL_INTR_SET_OPS(get_msix_entry_id, nbl_res_intr_get_msix_entry_id); \ + NBL_INTR_SET_OPS(get_coalesce, nbl_res_intr_get_coalesce); \ + NBL_INTR_SET_OPS(set_coalesce, nbl_res_intr_set_coalesce); \ + NBL_INTR_SET_OPS(get_mbx_irq_num, nbl_res_intr_get_mbx_irq_num); \ + NBL_INTR_SET_OPS(get_adminq_irq_num, nbl_res_intr_get_adminq_irq_num); \ + NBL_INTR_SET_OPS(get_abnormal_irq_num, nbl_res_intr_get_abnormal_irq_num); \ + NBL_INTR_SET_OPS(get_intr_suppress_level, nbl_res_intr_get_suppress_level); \ + NBL_INTR_SET_OPS(set_intr_suppress_level, nbl_res_intr_set_intr_suppress_level);\ + NBL_INTR_SET_OPS(flr_clear_interrupt, nbl_res_flr_clear_interrupt); \ + NBL_INTR_SET_OPS(unmask_all_interrupts, nbl_res_unmask_all_interrupts); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_intr_setup_mgt(struct device *dev, struct nbl_interrupt_mgt **intr_mgt) +{ + *intr_mgt = devm_kzalloc(dev, sizeof(struct nbl_interrupt_mgt), GFP_KERNEL); + if (!*intr_mgt) + return -ENOMEM; + + return 0; +} + +static void nbl_intr_remove_mgt(struct device *dev, struct nbl_interrupt_mgt **intr_mgt) +{ + devm_kfree(dev, *intr_mgt); + *intr_mgt = NULL; +} + +int nbl_intr_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_interrupt_mgt **intr_mgt; + + dev = 
NBL_RES_MGT_TO_DEV(res_mgt); + intr_mgt = &NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + return nbl_intr_setup_mgt(dev, intr_mgt); +} + +void nbl_intr_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_interrupt_mgt **intr_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + intr_mgt = &NBL_RES_MGT_TO_INTR_MGT(res_mgt); + + if (!(*intr_mgt)) + return; + + nbl_intr_remove_mgt(dev, intr_mgt); +} + +int nbl_intr_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_INTR_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_INTR_OPS_TBL; +#undef NBL_INTR_SET_OPS + + return 0; +} + +void nbl_intr_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_INTR_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_INTR_OPS_TBL; +#undef NBL_INTR_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h new file mode 100644 index 0000000000000000000000000000000000000000..daaef6f86ac99b20150f7f909122933bcd2095c7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_interrupt.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_INTERRUPT_H_ +#define _NBL_INTERRUPT_H_ + +#include "nbl_resource.h" + +#define NBL_MSIX_MAP_TABLE_MAX_ENTRIES (1024) + +#define NBL_INTR_SUPPRESS_LEVEL1_THRESHOLD (100000) /* 100k pps */ +#define NBL_INTR_SUPPRESS_LEVEL1_DOWNGRADE_THRESHOLD (60000) /* 60kpps */ +#define NBL_INTR_SUPPRESS_LEVEL0 (0) +#define NBL_INTR_SUPPRESS_LEVEL1 (1) + +#define NBL_INTR_SUPPRESS_LEVEL0_PNUM (0) +#define NBL_INTR_SUPPRESS_LEVEL1_25G_PNUM (8) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_PNUM (16) +#define NBL_INTR_SUPPRESS_LEVEL0_RATE (0) +#define NBL_INTR_SUPPRESS_LEVEL1_25G_RATE (1) +#define NBL_INTR_SUPPRESS_LEVEL1_100G_RATE (2) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h new file mode 100644 index 0000000000000000000000000000000000000000..ccbc5cf9f1b5898aa5fb81f4ca4a822674c722ec --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_p4_actions.h @@ -0,0 +1,53 @@ +#ifndef _NBL_P4_ACTION_H +#define _NBL_P4_ACTION_H + +// Code generated by P4 compiler. DO NOT EDIT. 
+#define NBL_ACT_SET_FLAGS 1 +#define NBL_ACT_CLEAR_FLAGS 1 +#define NBL_ACT_SET_AUX_FIELD 1 +#define NBL_ACT_SET_FLOW_STAT0 2 +#define NBL_ACT_SET_FLOW_STAT1 3 +#define NBL_ACT_SET_RSS 4 +#define NBL_ACT_SET_CAR 5 +#define NBL_ACT_SET_FLOW_CAR 6 +#define NBL_ACT_SET_TAB_INDEX 7 +#define NBL_ACT_SET_MIRROR 8 +#define NBL_ACT_SET_DPORT 9 +#define NBL_ACT_SET_QUE_IDX 10 +#define NBL_ACT_SET_MCC 13 +#define NBL_ACT_SET_VNI0 14 +#define NBL_ACT_SET_VNI1 15 +#define NBL_ACT_SET_SPECIAL_FLOW_STAT 16 +#define NBL_ACT_SET_PRBAC 17 +#define NBL_ACT_SET_DP_HASH0 19 +#define NBL_ACT_SET_DP_HASH1 20 +#define NBL_ACT_SET_PRI_MDF0 21 +#define NBL_ACT_SET_PRI_MDF1 21 +#define NBL_ACT_NEXT_AT_HALF0 60 +#define NBL_ACT_NEXT_AT_HALF1 61 +#define NBL_ACT_NEXT_AT_FULL0 62 +#define NBL_ACT_NEXT_AT_FULL1 63 +#define NBL_ACT_REP_IPV4_SIP 32 +#define NBL_ACT_REP_IPV4_DIP 33 +#define NBL_ACT_REP_IPV6_SIP 34 +#define NBL_ACT_REP_IPV6_DIP 35 +#define NBL_ACT_REP_DPORT 36 +#define NBL_ACT_REP_SPORT 37 +#define NBL_ACT_REP_DMAC 38 +#define NBL_ACT_REP_SMAC 39 +#define NBL_ACT_REP_IPV4_DSCP 40 +#define NBL_ACT_REP_IPV6_DSCP 41 +#define NBL_ACT_REP_IPV4_TTL 42 +#define NBL_ACT_REP_IPV6_TTL 43 +#define NBL_ACT_DEL_CVLAN 44 +#define NBL_ACT_DEL_SVLAN 45 +#define NBL_ACT_REP_SVLAN 46 +#define NBL_ACT_REP_CVLAN 47 +#define NBL_ACT_REP_SINGLE_CVLAN 48 +#define NBL_ACT_ADD_SVLAN 49 +#define NBL_ACT_ADD_CVLAN 50 +#define NBL_ACT_TNL_ENCAP 51 +#define NBL_ACT_TNL_DECAP 52 +#define NBL_ACT_REP_OUTER_SPORT 53 + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..898454b8bba92417978c68bac77a46e56c796f5c --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_phy.h @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_PHY_H_
+#define _NBL_PHY_H_
+
+#include "nbl_core.h"
+
+#define NBL_PHY_MGT_TO_COMMON(phy_mgt) ((phy_mgt)->common)
+#define NBL_PHY_MGT_TO_DEV(phy_mgt) NBL_COMMON_TO_DEV(NBL_PHY_MGT_TO_COMMON(phy_mgt))
+#define NBL_MEMORY_BAR (0)
+#define NBL_MAILBOX_BAR (2)
+#define NBL_RDMA_NOTIFY_OFF (8192)
+
+struct nbl_phy_mgt {
+	struct nbl_common_info *common;
+	u8 __iomem *hw_addr;
+	u8 __iomem *mailbox_bar_hw_addr;
+	u64 notify_offset;
+	u32 version;
+	u32 hw_size;
+	spinlock_t reg_lock; /* Protect reg access */
+	bool should_lock;
+	u8 resv[3];
+	enum nbl_hw_status hw_status;
+};
+
+#define NBL_DELAY_MIN_TIME_FOR_REGS 400 /* 200us for palladium, 3us for s2c */
+#define NBL_DELAY_MAX_TIME_FOR_REGS 500 /* 300us for palladium, 5us for s2c */
+
+static inline __maybe_unused u32 rd32(u8 __iomem *addr, u64 reg)
+{
+	return readl(addr + (reg));
+}
+
+static inline __maybe_unused void wr32_barrier(u8 __iomem *addr, u64 reg, u32 value)
+{
+	writel((value), (addr + (reg)));
+}
+
+static inline __maybe_unused void nbl_hw_read_regs(struct nbl_phy_mgt *phy_mgt, u64 reg,
+						   u8 *data, u32 len)
+{
+	u32 size = len / 4;
+	u32 i = 0;
+
+	if (len % 4)
+		return;
+
+	if (phy_mgt->hw_status) {
+		/* Device is unreachable: report all-ones and skip the MMIO access */
+		for (i = 0; i < size; i++)
+			*(u32 *)(data + i * sizeof(u32)) = U32_MAX;
+		return;
+	}
+
+	if (size > 1 && phy_mgt->should_lock)
+		spin_lock(&phy_mgt->reg_lock);
+
+	for (i = 0; i < size; i++)
+		*(u32 *)(data + i * sizeof(u32)) = rd32(phy_mgt->hw_addr, reg + i * sizeof(u32));
+
+	if (size > 1 && phy_mgt->should_lock)
+		spin_unlock(&phy_mgt->reg_lock);
+}
+
+static inline __maybe_unused void nbl_hw_write_regs(struct nbl_phy_mgt *phy_mgt,
+						    u64 reg, const u8 *data, u32 len)
+{
+	u32 size = len / 4;
+	u32 i = 0;
+
+	if (len % 4)
+		return;
+
+	if (phy_mgt->hw_status)
+		return;
+
+	if (size > 1 && phy_mgt->should_lock)
+		spin_lock(&phy_mgt->reg_lock);
+
+	for (i = 0; i < size; i++)
+		/* Used for emu, make sure that we won't write too frequently */
+		wr32_barrier(phy_mgt->hw_addr, reg + i * sizeof(u32),
+			     *(u32 *)(data + i * sizeof(u32)));
+
+	if (size > 1 && phy_mgt->should_lock)
+		spin_unlock(&phy_mgt->reg_lock);
+}
+
+static inline __maybe_unused void nbl_hw_write_be_regs(struct nbl_phy_mgt *phy_mgt,
+							u64 reg, const u8 *data, u32 len)
+{
+	u32 size = len / 4;
+	u32 i = 0;
+	u32 data_le;
+
+	if (len % 4)
+		return;
+
+	if (size > 1 && phy_mgt->should_lock)
+		spin_lock(&phy_mgt->reg_lock);
+
+	for (i = 0; i < size; i++) {
+		data_le = swab32(*(u32 *)(data + i * sizeof(u32)));
+		/* Used for emu, make sure that we won't write too frequently */
+		wr32_barrier(phy_mgt->hw_addr, reg + i * sizeof(u32), data_le);
+	}
+
+	/* Read back register 0x0 so the posted writes are flushed before unlock */
+	data_le = rd32(phy_mgt->hw_addr, 0x0);
+	if (size > 1 && phy_mgt->should_lock)
+		spin_unlock(&phy_mgt->reg_lock);
+}
+
+static __maybe_unused void nbl_hw_wr32(struct nbl_phy_mgt *phy_mgt, u64 reg, u32 value)
+{
+	if (phy_mgt->hw_status)
+		return;
+
+	/* Used for emu, make sure that we won't write too frequently */
+	wr32_barrier(phy_mgt->hw_addr, reg, value);
+}
+
+static __maybe_unused u32 nbl_hw_rd32(struct nbl_phy_mgt *phy_mgt, u64 reg)
+{
+	if (phy_mgt->hw_status)
+		return U32_MAX;
+
+	return rd32(phy_mgt->hw_addr, reg);
+}
+
+static __maybe_unused void nbl_mbx_wr32(void *priv, u64 reg, u32 value)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
+	if (phy_mgt->hw_status)
+		return;
+
+	writel((value), ((phy_mgt)->mailbox_bar_hw_addr + (reg)));
+}
+
+static __maybe_unused u32 nbl_mbx_rd32(void *priv, u64 reg)
+{
+	struct nbl_phy_mgt *phy_mgt = (struct nbl_phy_mgt *)priv;
+
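+	/* Mailbox reads fail closed to all-ones once hw_status marks the
+	 * device unreachable, mirroring nbl_hw_rd32().
+	 */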
+	if (phy_mgt->hw_status)
+		return U32_MAX;
+
+	return readl((phy_mgt)->mailbox_bar_hw_addr + (reg));
+}
+
+static __maybe_unused void nbl_hw_read_mbx_regs(struct nbl_phy_mgt *phy_mgt,
+						u64 reg, u8 *data, u32 len)
+{
+	u32 i = 0;
+
+	if (len % 4)
+		return;
+
+	for (i = 0; i < len / 4; i++)
+		*(u32 *)(data + i * sizeof(u32)) = nbl_mbx_rd32(phy_mgt, reg + i * sizeof(u32));
+}
+
+static __maybe_unused void nbl_hw_write_mbx_regs(struct nbl_phy_mgt *phy_mgt,
+						 u64 reg, const u8 *data, u32 len)
+{
+	u32 i = 0;
+
+	if (len % 4)
+		return;
+
+	for (i = 0; i < len / 4; i++)
+		/* Used for emu, make sure that we won't write too frequently */
+		nbl_mbx_wr32(phy_mgt, reg + i * sizeof(u32),
+			     *(u32 *)(data + i * sizeof(u32)));
+}
+
+/* Mgt structure for each product.
+ * Every individual mgt must have the common mgt as its first member, and contain its
+ * unique data structure in the rest of it.
+ */
+struct nbl_phy_mgt_leonis {
+	struct nbl_phy_mgt phy_mgt;
+	bool ro_enable;
+};
+
+struct nbl_phy_mgt_bootis {
+	struct nbl_phy_mgt phy_mgt;
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.c
new file mode 100644
index 0000000000000000000000000000000000000000..42fb5956717e879b9a03d11bd39d25de969bb62d
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#include "nbl_queue.h"
+
+/* Structure starts here, adding an op should not modify anything below */
+static int nbl_queue_setup_mgt(struct device *dev, struct nbl_queue_mgt **queue_mgt)
+{
+	*queue_mgt = devm_kzalloc(dev, sizeof(struct nbl_queue_mgt), GFP_KERNEL);
+	if (!*queue_mgt)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void nbl_queue_remove_mgt(struct device *dev, struct nbl_queue_mgt **queue_mgt)
+{
+	devm_kfree(dev, *queue_mgt);
+	*queue_mgt = NULL;
+}
+
+int nbl_queue_mgt_start(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_queue_mgt **queue_mgt;
+	struct nbl_res_product_ops *product_ops = NBL_RES_MGT_TO_PROD_OPS(res_mgt);
+	int ret = 0;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	queue_mgt = &NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+
+	ret = nbl_queue_setup_mgt(dev, queue_mgt);
+	if (ret)
+		return ret;
+
+	NBL_OPS_CALL(product_ops->queue_mgt_init, (*queue_mgt));
+
+	return 0;
+}
+
+void nbl_queue_mgt_stop(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_queue_mgt **queue_mgt;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	queue_mgt = &NBL_RES_MGT_TO_QUEUE_MGT(res_mgt);
+
+	if (!(*queue_mgt))
+		return;
+
+	nbl_queue_remove_mgt(dev, queue_mgt);
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h
new file mode 100644
index 0000000000000000000000000000000000000000..88e4fff0392666849b08b280e853a519de3d5f8c
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_queue.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ + +#ifndef _NBL_QUEUE_H_ +#define _NBL_QUEUE_H_ + +#include "nbl_resource.h" + +int nbl_queue_setup_ops_leonis(struct nbl_resource_ops *res_ops); +void nbl_queue_mgt_init_leonis(struct nbl_queue_mgt *queue_mgt); +void nbl_queue_mgt_init_bootis(struct nbl_queue_mgt *queue_mgt); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c new file mode 100644 index 0000000000000000000000000000000000000000..ddce1eac4acb5f85494294b56b8e8ab63e85bb97 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.c @@ -0,0 +1,443 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_resource.h" + +static u16 pfvfid_to_vsi_id(void *p, int pfid, int vfid, u16 type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + enum nbl_vsi_serv_type dst_type = NBL_VSI_SERV_PF_DATA_TYPE; + u16 vsi_id; + int diff; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(NBL_RES_MGT_TO_COMMON(res_mgt), pfid); + if (vfid == U32_MAX) { + if (diff < vsi_info->num) { + nbl_res_pf_dev_vsi_type_to_hw_vsi_type(type, &dst_type); + vsi_id = vsi_info->serv_info[diff][dst_type].base_id; + } else { + vsi_id = vsi_info->serv_info[0][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id + + (diff - vsi_info->num); + } + } else { + vsi_id = vsi_info->serv_info[diff][NBL_VSI_SERV_VF_DATA_TYPE].base_id + vfid; + } + + return vsi_id; +} + +static u16 func_id_to_vsi_id(void *p, u16 func_id, u16 type) +{ + int pfid = U32_MAX; + int vfid = U32_MAX; + + nbl_res_func_id_to_pfvfid(p, func_id, &pfid, &vfid); + return nbl_res_pfvfid_to_vsi_id(p, pfid, vfid, type); +} + +static u16 vsi_id_to_func_id(void *p, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int i, j; + u16 func_id = U16_MAX; + bool vsi_find = false; + + for (i = 0; i < vsi_info->num; i++) { + for (j = 0; j < NBL_VSI_SERV_MAX_TYPE; j++) { + if (vsi_id >= vsi_info->serv_info[i][j].base_id && + (vsi_id < vsi_info->serv_info[i][j].base_id + + vsi_info->serv_info[i][j].num)) { + vsi_find = true; + break; + } + } + + if (vsi_find) + break; + } + + if (vsi_find) { + /* if pf_id < eth_num */ + if (j >= NBL_VSI_SERV_PF_DATA_TYPE && j <= NBL_VSI_SERV_PF_XDP_TYPE) + func_id = i + NBL_COMMON_TO_MGT_PF(common); + /* if vf */ + else if (j == NBL_VSI_SERV_VF_DATA_TYPE) { + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + i; + func_id = sriov_info->start_vf_func_id + + (vsi_id - vsi_info->serv_info[i][NBL_VSI_SERV_VF_DATA_TYPE].base_id); + /* if extra pf */ + } else { + func_id = vsi_info->num + + (vsi_id - vsi_info->serv_info[i][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id); + } + } + + if (func_id == U16_MAX) + pr_err("convert vsi_id %d to func_id failed!\n", vsi_id); + + return func_id; +} + +static int vsi_id_to_pf_id(void *p, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + int i, j; + u32 pf_id = U32_MAX; + bool vsi_find = false; + + for (i = 0; i < vsi_info->num; i++) { + for (j = 0; j < NBL_VSI_SERV_MAX_TYPE; j++) + if (vsi_id >= 
vsi_info->serv_info[i][j].base_id && + (vsi_id < vsi_info->serv_info[i][j].base_id + + vsi_info->serv_info[i][j].num)){ + vsi_find = true; + break; + } + + if (vsi_find) + break; + } + + if (vsi_find) { + /* if pf_id < eth_num */ + if (j >= NBL_VSI_SERV_PF_DATA_TYPE && j <= NBL_VSI_SERV_VF_DATA_TYPE) + pf_id = i + NBL_COMMON_TO_MGT_PF(common); + /* if extra pf */ + else if (j == NBL_VSI_SERV_PF_EXTRA_TYPE) + pf_id = vsi_info->num + + (vsi_id - vsi_info->serv_info[i][NBL_VSI_SERV_PF_EXTRA_TYPE].base_id); + } + + return pf_id; +} + +static int func_id_to_pfvfid(void *p, u16 func_id, int *pfid, int *vfid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int diff; + int pf_id_tmp; + + if (func_id < NBL_RES_MGT_TO_PF_NUM(res_mgt)) { + *pfid = func_id; + *vfid = U32_MAX; + return 0; + } + + for (pf_id_tmp = 0; pf_id_tmp < NBL_RES_MGT_TO_PF_NUM(res_mgt); pf_id_tmp++) { + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pf_id_tmp); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + if (func_id >= sriov_info->start_vf_func_id && + func_id < sriov_info->start_vf_func_id + sriov_info->num_vfs) { + *pfid = pf_id_tmp; + *vfid = func_id - sriov_info->start_vf_func_id; + return 0; + } + } + + return U32_MAX; +} + +static int func_id_to_bdf(void *p, u16 func_id, u8 *bus, u8 *dev, u8 *function) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int pfid = U32_MAX; + int vfid = U32_MAX; + int diff; + u8 pf_bus, pf_devfn, devfn; + + if (nbl_res_func_id_to_pfvfid(p, func_id, &pfid, &vfid)) + return U32_MAX; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pfid); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + pf_bus = PCI_BUS_NUM(sriov_info->bdf); + pf_devfn = sriov_info->bdf & 0xff; + + if (vfid != U32_MAX) { + *bus = pf_bus + ((pf_devfn + sriov_info->offset + sriov_info->stride * vfid) >> 8); + devfn = (pf_devfn + sriov_info->offset + sriov_info->stride * vfid) & 0xff; + } else { + *bus = pf_bus; + devfn = pf_devfn; + } + + *dev = PCI_SLOT(devfn); + *function = PCI_FUNC(devfn); + return 0; +} + +static u16 pfvfid_to_func_id(void *p, int pfid, int vfid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + int diff; + + if (vfid == U32_MAX) + return pfid; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pfid); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + + return sriov_info->start_vf_func_id + vfid; +} + +static u64 get_func_bar_base_addr(void *p, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_sriov_info *sriov_info; + u64 base_addr = 0; + int pfid = U32_MAX; + int vfid = U32_MAX; + int diff; + + if (nbl_res_func_id_to_pfvfid(p, func_id, &pfid, &vfid)) + return 0; + + diff = nbl_common_pf_id_subtraction_mgtpf_id(common, pfid); + sriov_info = NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) + diff; + if (!sriov_info->pf_bar_start) { + nbl_err(common, NBL_DEBUG_QUEUE, + "Try to get bar addr for func %d, but PF_%d sriov not init", + func_id, pfid); + return 0; + } + + if (vfid == U32_MAX) + base_addr = sriov_info->pf_bar_start; + else + base_addr = sriov_info->vf_bar_start + 
sriov_info->vf_bar_len * vfid; + + nbl_info(common, NBL_DEBUG_QUEUE, "pfid %d vfid %d base_addr %llx\n", + pfid, vfid, base_addr); + return base_addr; +} + +static u8 vsi_id_to_eth_id(void *p, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + + if (eth_info) + return eth_info->eth_id[nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id)]; + else + return 0; +} + +static u8 eth_id_to_pf_id(void *p, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_eth_info *eth_info = NBL_RES_MGT_TO_ETH_INFO(res_mgt); + int i; + u8 pf_id_offset = 0; + + for_each_set_bit(i, eth_info->eth_bitmap, NBL_MAX_ETHERNET) { + if (i == eth_id) + break; + pf_id_offset++; + } + + return pf_id_offset + NBL_COMMON_TO_MGT_PF(common); +} + +static u8 eth_id_to_lag_id(void *p, u8 eth_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_eth_bond_info *eth_bond_info = NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt); + int i, j; + + for (i = 0; i < NBL_LAG_MAX_NUM; i++) + for (j = 0; j < eth_bond_info->entry[i].lag_num && + NBL_ETH_BOND_VALID_PORT(j); j++) + if (eth_bond_info->entry[i].eth_id[j] == eth_id) + return i; + + return -1; +} + +static bool check_func_active_by_queue(void *p, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)p; + struct nbl_queue_mgt *queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + + return queue_mgt->queue_info[func_id].txrx_queues ? true : false; +} + +int nbl_res_func_id_to_pfvfid(struct nbl_resource_mgt *res_mgt, u16 func_id, int *pfid, int *vfid) +{ + if (!res_mgt->common_ops.func_id_to_pfvfid) + return func_id_to_pfvfid(res_mgt, func_id, pfid, vfid); + + return res_mgt->common_ops.func_id_to_pfvfid(res_mgt, func_id, pfid, vfid); +} + +u16 nbl_res_pfvfid_to_func_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid) +{ + if (!res_mgt->common_ops.pfvfid_to_func_id) + return pfvfid_to_func_id(res_mgt, pfid, vfid); + + return res_mgt->common_ops.pfvfid_to_func_id(res_mgt, pfid, vfid); +} + +u16 nbl_res_pfvfid_to_vsi_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid, u16 type) +{ + if (!res_mgt->common_ops.pfvfid_to_vsi_id) + return pfvfid_to_vsi_id(res_mgt, pfid, vfid, type); + + return res_mgt->common_ops.pfvfid_to_vsi_id(res_mgt, pfid, vfid, type); +} + +int nbl_res_func_id_to_bdf(struct nbl_resource_mgt *res_mgt, u16 func_id, u8 *bus, + u8 *dev, u8 *function) +{ + if (!res_mgt->common_ops.func_id_to_bdf) + return func_id_to_bdf(res_mgt, func_id, bus, dev, function); + + return res_mgt->common_ops.func_id_to_bdf(res_mgt, func_id, bus, dev, function); +} + +u16 nbl_res_vsi_id_to_func_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + if (!res_mgt->common_ops.vsi_id_to_func_id) + return vsi_id_to_func_id(res_mgt, vsi_id); + + return res_mgt->common_ops.vsi_id_to_func_id(res_mgt, vsi_id); +} + +int nbl_res_vsi_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + if (!res_mgt->common_ops.vsi_id_to_pf_id) + return vsi_id_to_pf_id(res_mgt, vsi_id); + + return res_mgt->common_ops.vsi_id_to_pf_id(res_mgt, vsi_id); +} + +u16 nbl_res_func_id_to_vsi_id(struct nbl_resource_mgt *res_mgt, u16 func_id, u16 type) +{ + if (!res_mgt->common_ops.func_id_to_vsi_id) + return func_id_to_vsi_id(res_mgt, func_id, type); + + return res_mgt->common_ops.func_id_to_vsi_id(res_mgt, func_id, type); +} + +u64 nbl_res_get_func_bar_base_addr(struct 
nbl_resource_mgt *res_mgt, u16 func_id) +{ + if (!res_mgt->common_ops.get_func_bar_base_addr) + return get_func_bar_base_addr(res_mgt, func_id); + + return res_mgt->common_ops.get_func_bar_base_addr(res_mgt, func_id); +} + +u16 nbl_res_get_particular_queue_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + return res_mgt->common_ops.get_particular_queue_id(res_mgt, vsi_id); +} + +u8 nbl_res_vsi_id_to_eth_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id) +{ + if (!res_mgt->common_ops.vsi_id_to_eth_id) + return vsi_id_to_eth_id(res_mgt, vsi_id); + + return res_mgt->common_ops.vsi_id_to_eth_id(res_mgt, vsi_id); +} + +u8 nbl_res_eth_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + if (!res_mgt->common_ops.eth_id_to_pf_id) + return eth_id_to_pf_id(res_mgt, eth_id); + + return res_mgt->common_ops.eth_id_to_pf_id(res_mgt, eth_id); +} + +u8 nbl_res_eth_id_to_lag_id(struct nbl_resource_mgt *res_mgt, u8 eth_id) +{ + if (!res_mgt->common_ops.eth_id_to_lag_id) + return eth_id_to_lag_id(res_mgt, eth_id); + + return res_mgt->common_ops.eth_id_to_lag_id(res_mgt, eth_id); +} + +bool nbl_res_check_func_active_by_queue(struct nbl_resource_mgt *res_mgt, u16 func_id) +{ + if (!res_mgt->common_ops.check_func_active_by_queue) + return check_func_active_by_queue(res_mgt, func_id); + + return res_mgt->common_ops.check_func_active_by_queue(res_mgt, func_id); +} + +bool nbl_res_get_flex_capability(void *priv, enum nbl_flex_cap_type cap_type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return test_bit(cap_type, res_mgt->flex_capability); +} + +bool nbl_res_get_fix_capability(void *priv, enum nbl_fix_cap_type cap_type) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + return test_bit(cap_type, res_mgt->fix_capability); +} + +void nbl_res_set_flex_capability(struct nbl_resource_mgt *res_mgt, enum nbl_flex_cap_type cap_type) +{ + set_bit(cap_type, res_mgt->flex_capability); +} + +void nbl_res_set_fix_capability(struct nbl_resource_mgt *res_mgt, enum nbl_fix_cap_type cap_type) +{ + set_bit(cap_type, res_mgt->fix_capability); +} + +void nbl_res_pf_dev_vsi_type_to_hw_vsi_type(u16 src_type, enum nbl_vsi_serv_type *dst_type) +{ + if (src_type == NBL_VSI_DATA) + *dst_type = NBL_VSI_SERV_PF_DATA_TYPE; + else if (src_type == NBL_VSI_USER) + *dst_type = NBL_VSI_SERV_PF_USER_TYPE; + else if (src_type == NBL_VSI_CTRL) + *dst_type = NBL_VSI_SERV_PF_CTLR_TYPE; + else if (src_type == NBL_VSI_XDP) + *dst_type = NBL_VSI_SERV_PF_XDP_TYPE; +} + +int nbl_res_get_rep_idx(struct nbl_eswitch_info *eswitch_info, u16 rep_vsi_id) +{ + u32 rep_idx = U32_MAX; + + if (rep_vsi_id >= eswitch_info->vf_base_vsi_id) + rep_idx = rep_vsi_id - eswitch_info->vf_base_vsi_id; + + return rep_idx; +} + +bool nbl_res_vf_is_active(void *priv, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = res_mgt->resource_info; + + return test_bit(func_id, resource_info->func_bitmap); +} + +void nbl_res_set_hw_status(void *priv, enum nbl_hw_status hw_status) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + phy_ops->set_hw_status(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), hw_status); +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h new file mode 100644 index 0000000000000000000000000000000000000000..12e567c0b03196a36d18a284226117979668be06 --- 
/dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_resource.h @@ -0,0 +1,1165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_RESOURCE_H_ +#define _NBL_RESOURCE_H_ + +#include "nbl_core.h" +#include "nbl_hw.h" + +#define NBL_RES_MGT_TO_COMMON(res_mgt) ((res_mgt)->common) +#define NBL_RES_MGT_TO_COMMON_OPS(res_mgt) (&((res_mgt)->common_ops)) +#define NBL_RES_MGT_TO_DEV(res_mgt) NBL_COMMON_TO_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)) +#define NBL_RES_MGT_TO_DMA_DEV(res_mgt) \ + NBL_COMMON_TO_DMA_DEV(NBL_RES_MGT_TO_COMMON(res_mgt)) +#define NBL_RES_MGT_TO_INTR_MGT(res_mgt) ((res_mgt)->intr_mgt) +#define NBL_RES_MGT_TO_ACCEL_MGT(res_mgt) ((res_mgt)->accel_mgt) +#define NBL_RES_MGT_TO_QUEUE_MGT(res_mgt) ((res_mgt)->queue_mgt) +#define NBL_RES_MGT_TO_TXRX_MGT(res_mgt) ((res_mgt)->txrx_mgt) +#define NBL_RES_MGT_TO_FLOW_MGT(res_mgt) ((res_mgt)->flow_mgt) +#define NBL_RES_MGT_TO_TC_FLOW_MGT(res_mgt) ((res_mgt)->tc_flow_mgt) +#define NBL_RES_MGT_TO_COUNTER_MGT(res_mgt) (((res_mgt)->tc_flow_mgt)->fc_mgt) +#define NBL_RES_MGT_TO_VSI_MGT(res_mgt) ((res_mgt)->vsi_mgt) +#define NBL_RES_MGT_TO_PORT_MGT(res_mgt) ((res_mgt)->port_mgt) +#define NBL_RES_MGT_TO_ADMINQ_MGT(res_mgt) ((res_mgt)->adminq_mgt) +#define NBL_RES_MGT_TO_INTR_MGT(res_mgt) ((res_mgt)->intr_mgt) +#define NBL_RES_MGT_TO_FD_MGT(res_mgt) ((res_mgt)->fd_mgt) +#define NBL_RES_MGT_TO_PROD_OPS(res_mgt) ((res_mgt)->product_ops) +#define NBL_RES_MGT_TO_RES_INFO(res_mgt) ((res_mgt)->resource_info) +#define NBL_RES_MGT_TO_SRIOV_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->sriov_info) +#define NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eswitch_info) +#define NBL_RES_MGT_TO_ETH_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eth_info) +#define NBL_RES_MGT_TO_VSI_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->vsi_info) +#define NBL_RES_MGT_TO_ETH_BOND_INFO(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->eth_bond_info) +#define NBL_RES_MGT_TO_PF_NUM(res_mgt) (NBL_RES_MGT_TO_RES_INFO(res_mgt)->max_pf) + +#define NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt) ((res_mgt)->phy_ops_tbl) +#define NBL_RES_MGT_TO_PHY_OPS(res_mgt) (NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt)->ops) +#define NBL_RES_MGT_TO_PHY_PRIV(res_mgt) (NBL_RES_MGT_TO_PHY_OPS_TBL(res_mgt)->priv) +#define NBL_RES_MGT_TO_CHAN_OPS_TBL(res_mgt) ((res_mgt)->chan_ops_tbl) +#define NBL_RES_MGT_TO_CHAN_OPS(res_mgt) (NBL_RES_MGT_TO_CHAN_OPS_TBL(res_mgt)->ops) +#define NBL_RES_MGT_TO_CHAN_PRIV(res_mgt) (NBL_RES_MGT_TO_CHAN_OPS_TBL(res_mgt)->priv) +#define NBL_RES_MGT_TO_TX_RING(res_mgt, index) \ + (NBL_RES_MGT_TO_TXRX_MGT(res_mgt)->tx_rings[(index)]) +#define NBL_RES_MGT_TO_RX_RING(res_mgt, index) \ + (NBL_RES_MGT_TO_TXRX_MGT(res_mgt)->rx_rings[(index)]) +#define NBL_RES_MGT_TO_VECTOR(res_mgt, index) \ + (NBL_RES_MGT_TO_TXRX_MGT(res_mgt)->vectors[(index)]) + +#define NBL_RES_BASE_QID(res_mgt) NBL_RES_MGT_TO_RES_INFO(res_mgt)->base_qid +#define NBL_RES_NOFITY_QID(res_mgt, local_qid) (NBL_RES_BASE_QID(res_mgt) * 2 + (local_qid)) + +#define NBL_MAX_NET_ID NBL_MAX_FUNC +#define NBL_MAX_JUMBO_FRAME_SIZE (9600) +#define NBL_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +#define NBL_TPID_PORT_NUM (1031) +#define NBL_VLAN_TPYE (0) +#define NBL_QINQ_TPYE (1) + +/* --------- QUEUE ---------- */ +#define NBL_MAX_TXRX_QUEUE (2048) +#define NBL_DEFAULT_DESC_NUM (1024) +#define NBL_MAX_TXRX_QUEUE_PER_FUNC (256) + +#define NBL_DEFAULT_REP_HW_QUEUE_NUM (16) +#define NBL_DEFAULT_PF_HW_QUEUE_NUM (16) 
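+/*
+ * Illustration of the notify (doorbell) queue id layout encoded by
+ * NBL_RES_NOFITY_QID() above: each function owns a contiguous region of the
+ * global notify space starting at base_qid * 2, and within a ring pair the
+ * RX doorbell takes the even local slot and the TX doorbell the odd one
+ * (see how the ring setup code in nbl_txrx.c later in this patch computes
+ * ring->notify_qid). A minimal sketch with made-up values, not part of the
+ * driver:
+ *
+ *	base_qid = 8, ring_index = 3
+ *	rx notify qid = 8 * 2 + (3 * 2)     = 22	(local_qid 6, even)
+ *	tx notify qid = 8 * 2 + (3 * 2 + 1) = 23	(local_qid 7, odd)
+ */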
+#define NBL_DEFAULT_USER_HW_QUEUE_NUM (16) +#define NBL_DEFAULT_VF_HW_QUEUE_NUM (2) + +#define NBL_SPECIFIC_VSI_NET_ID_OFFSET (4) +#define NBL_MAX_CACHE_SIZE (256) +#define NBL_MAX_BATCH_DESC (64) + +enum nbl_qid_map_table_type { + NBL_MASTER_QID_MAP_TABLE, + NBL_SLAVE_QID_MAP_TABLE, + NBL_QID_MAP_TABLE_MAX +}; + +struct nbl_queue_vsi_info { + u32 curr_qps; + u16 curr_qps_static; /* This will not be reset when netdev down */ + u16 vsi_index; + u16 vsi_id; + u16 rss_ret_base; + u16 rss_entry_size; + u16 net_id; + u16 queue_offset; + u16 queue_num; + bool rss_vld; + bool vld; +}; + +struct nbl_queue_info { + struct nbl_queue_vsi_info vsi_info[NBL_VSI_MAX]; + u64 notify_addr; + u32 qid_map_index; + u16 num_txrx_queues; + u16 rss_ret_base; + u16 *txrx_queues; + u16 *queues_context; + u16 rss_entry_size; + u16 split; + u32 curr_qps; + u16 queue_size; +}; + +struct nbl_adapt_desc_gother { + u16 level; + u32 uvn_desc_rd_entry; + u64 get_desc_stats_jiffies; +}; + +struct nbl_queue_mgt { + DECLARE_BITMAP(txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); + DECLARE_BITMAP(rss_ret_bitmap, NBL_EPRO_RSS_RET_TBL_DEPTH); + struct nbl_qid_map_table qid_map_table[NBL_QID_MAP_TABLE_ENTRIES]; + struct nbl_queue_info queue_info[NBL_MAX_FUNC]; + u16 net_id_ref_vsinum[NBL_MAX_NET_ID]; + u32 total_qid_map_entries; + int qid_map_select; + bool qid_map_ready; + u32 qid_map_tail[NBL_QID_MAP_TABLE_MAX]; + struct nbl_adapt_desc_gother adapt_desc_gother; +}; + +/* --------- INTERRUPT ---------- */ +#define NBL_MAX_OTHER_INTERRUPT 1024 +#define NBL_MAX_NET_INTERRUPT 4096 + +struct nbl_msix_map { + u16 valid:1; + u16 global_msix_index:13; + u16 rsv:2; +}; + +struct nbl_msix_map_table { + struct nbl_msix_map *base_addr; + dma_addr_t dma; + size_t size; +}; + +struct nbl_func_interrupt_resource_mng { + u16 num_interrupts; + u16 msix_base; + u16 msix_max; + u16 *interrupts; + struct nbl_msix_map_table msix_map_table; +}; + +struct nbl_interrupt_mgt { + DECLARE_BITMAP(interrupt_net_bitmap, NBL_MAX_NET_INTERRUPT); + DECLARE_BITMAP(interrupt_others_bitmap, NBL_MAX_OTHER_INTERRUPT); + struct nbl_func_interrupt_resource_mng func_intr_res[NBL_MAX_FUNC]; +}; + +struct nbl_port_mgt { +}; + +/* --------- TXRX ---------- */ +struct nbl_txrx_vsi_info { + u16 ring_offset; + u16 ring_num; +}; + +struct nbl_ring_desc { + /* buffer address */ + __le64 addr; + /* buffer length */ + __le32 len; + /* buffer ID */ + __le16 id; + /* the flags depending on descriptor type */ + __le16 flags; +}; + +struct nbl_tx_buffer { + struct nbl_ring_desc *next_to_watch; + union nbl_tx_extend_head *tls_pkthdr; + union { + struct sk_buff *skb; + void *raw_buff; /* for xdp */ + }; + dma_addr_t dma; + u32 len; + + unsigned int bytecount; + unsigned short gso_segs; + bool page; + u32 tx_flags; +}; + +struct nbl_dma_info { + dma_addr_t addr; + struct page *page; +}; + +struct nbl_page_cache { + u32 head; + u32 tail; + struct nbl_dma_info page_cache[NBL_MAX_CACHE_SIZE]; +}; + +struct nbl_rx_buffer { + struct nbl_dma_info *di; + u32 offset; + bool last_in_page; +}; + +struct nbl_res_vector { + struct napi_struct napi; + struct nbl_res_tx_ring *tx_ring; + struct nbl_res_rx_ring *rx_ring; + struct nbl_res_tx_ring *xdp_ring; + u8 *irq_enable_base; + u32 irq_data; + bool started; + bool net_msix_mask_en; +}; + +struct nbl_res_tx_ring { + /*data path*/ + struct nbl_ring_desc *desc; + struct nbl_tx_buffer *tx_bufs; + struct device *dma_dev; + struct net_device *netdev; + u8 __iomem *notify_addr; + struct nbl_queue_stats stats; + struct u64_stats_sync syncp; + struct 
nbl_tx_queue_stats tx_stats; + + enum nbl_product_type product_type; + u16 queue_index; + u16 desc_num; + u16 notify_qid; + u16 avail_used_flags; + /* device ring wrap counter */ + bool used_wrap_counter; + u16 next_to_use; + u16 next_to_clean; + u16 tail_ptr; + u16 mode; + u16 vlan_tci; + u16 vlan_proto; + u8 eth_id; + u8 extheader_tx_len; + + /* control path */ + // dma for desc[] + dma_addr_t dma; + // size for desc[] + unsigned int size; + bool valid; + + struct nbl_txrx_vsi_info *vsi_info; + void *xdp_prog; + spinlock_t xmit_lock; /* used for XDP tx_act, because the number of XDP queues may be less than the number of cores */ +} ____cacheline_internodealigned_in_smp; + +struct nbl_res_rx_ring { + /* data path */ + struct nbl_ring_desc *desc; + struct nbl_rx_buffer *rx_bufs; + struct nbl_dma_info *di; + struct device *dma_dev; + struct net_device *netdev; + struct page_pool *page_pool; + struct nbl_queue_stats stats; + struct nbl_rx_queue_stats rx_stats; + struct u64_stats_sync syncp; + struct nbl_page_cache page_cache; + + enum nbl_product_type product_type; + u32 buf_len; + u16 avail_used_flags; + bool used_wrap_counter; + u16 next_to_use; + u16 next_to_clean; + u16 tail_ptr; + u16 mode; + u16 desc_num; + u16 queue_index; + u16 vlan_tci; + u16 vlan_proto; + + /* control path */ + struct nbl_common_info *common; + void *txrx_mgt; + void *xdp_prog; + // dma for desc[] + dma_addr_t dma; + // size for desc[] + unsigned int size; + bool valid; + u16 notify_qid; +} ____cacheline_internodealigned_in_smp; + +struct nbl_txrx_bond_info { + u16 eth_id[NBL_LAG_MAX_PORTS]; + u16 lag_id; + bool bond_enable; +}; + +struct nbl_txrx_mgt { + struct nbl_res_vector **vectors; + struct nbl_res_tx_ring **tx_rings; + struct nbl_res_rx_ring **rx_rings; + struct nbl_txrx_bond_info bond_info; + struct nbl_txrx_vsi_info vsi_info[NBL_VSI_MAX]; + u16 tx_ring_num; + u16 rx_ring_num; + u16 xdp_ring_offset; + u16 xdp_ring_num; +}; + +struct nbl_vsi_mgt { +}; + +struct nbl_emp_version { + char app_version[16]; + char kernel_version[16]; + char build_version[16]; +}; + +struct nbl_adminq_mgt { + struct nbl_emp_version emp_verion; + u32 fw_last_hb_seq; + unsigned long fw_last_hb_time; + + struct work_struct eth_task; + struct nbl_resource_mgt *res_mgt; + u8 module_inplace_changed[NBL_MAX_ETHERNET]; + u8 link_state_changed[NBL_MAX_ETHERNET]; + + bool fw_resetting; + struct wait_queue_head wait_queue; + + struct mutex eth_lock; /* To prevent link_state_changed from being modified concurrently. 
*/ + void *cmd_filter; +}; + +/* --------- FLOW ---------- */ +#define NBL_FEM_HT_PP0_LEN (1 * 1024) + +#define NBL_MACVLAN_TABLE_LEN (4096) + +enum nbl_next_stg_id_e { + NBL_NEXT_STG_PA = 1, + NBL_NEXT_STG_IPRO = 2, + NBL_NEXT_STG_PP0_S0 = 3, + NBL_NEXT_STG_PP0_S1 = 4, + NBL_NEXT_STG_PP1_S0 = 5, + NBL_NEXT_STG_PP1_S1 = 6, + NBL_NEXT_STG_PP2_S0 = 7, + NBL_NEXT_STG_PP2_S1 = 8, + NBL_NEXT_STG_MCC = 9, + NBL_NEXT_STG_ACL_S0 = 10, + NBL_NEXT_STG_ACL_S1 = 11, + NBL_NEXT_STG_EPRO = 12, + NBL_NEXT_STG_BYPASS = 0xf, +}; + +enum { + NBL_FLOW_UP_TNL, + NBL_FLOW_UP, + NBL_FLOW_DOWN, + NBL_FLOW_MACVLAN_MAX, + NBL_FLOW_L2_UP = NBL_FLOW_MACVLAN_MAX, + NBL_FLOW_L2_DOWN, + NBL_FLOW_L3_UP, + NBL_FLOW_L3_DOWN, + NBL_FLOW_TYPE_MAX, + NBL_FLOW_TLS_UP = NBL_FLOW_TYPE_MAX, + NBL_FLOW_IPSEC_DOWN, + NBL_FLOW_ACCEL_MAX, + NBL_FLOW_LLDP_LACP_UP, + NBL_FLOW_PMD_ND_UPCALL, +}; + +struct nbl_flow_ht_key { + u16 vid; + u16 ht_other_index; + u32 kt_index; +}; + +struct nbl_flow_ht_tbl { + struct nbl_flow_ht_key key[4]; + u32 ref_cnt; +}; + +struct nbl_flow_ht_mng { + struct nbl_flow_ht_tbl *hash_map[NBL_FEM_HT_PP0_LEN]; +}; + +struct nbl_flow_fem_entry { + s32 type; + u16 flow_id; + u16 ht0_hash; + u16 ht1_hash; + u16 hash_table; + u16 hash_bucket; + u16 tcam_index; + u8 tcam_flag; + u8 flow_type; +}; + +struct nbl_flow_mcc_node { + struct list_head node; + u16 mcc_id; + u16 mcc_head; +}; + +struct nbl_flow_multi_group { + struct list_head mcc_list; + struct list_head mcc_head; + struct nbl_flow_fem_entry entry[NBL_FLOW_TYPE_MAX - NBL_FLOW_MACVLAN_MAX]; + u8 ether_id; + u16 mcc_id; + u16 network_status; + u16 pfc_mode; + u16 bp_mode; +}; + +struct nbl_flow_lacp_rule { + struct nbl_flow_fem_entry entry; + struct list_head node; + u16 vsi; +}; + +struct nbl_flow_lldp_rule { + struct nbl_flow_fem_entry entry; + struct list_head node; + u16 vsi; +}; + +struct nbl_flow_ul4s_rule { + struct nbl_flow_fem_entry ul4s_entry; + struct list_head node; + u16 vsi; + u32 index; +}; + +struct nbl_flow_dipsec_rule { + struct nbl_flow_fem_entry dipsec_entry; + struct list_head node; + u16 vsi; + u32 index; +}; + +#define NBL_FLOW_PMD_ND_UPCALL_NA (0) +#define NBL_FLOW_PMD_ND_UPCALL_NS (1) +#define NBL_FLOW_PMD_ND_UPCALL_FLOW_NUM (2) + +struct nbl_flow_nd_upcall_rule { + struct nbl_flow_fem_entry entry[NBL_FLOW_PMD_ND_UPCALL_FLOW_NUM]; + struct list_head node; +}; + +struct nbl_flow_mgt { + unsigned long *flow_id_bitmap; + DECLARE_BITMAP(tcam_id, NBL_TCAM_TABLE_LEN); + u32 pp_tcam_count; + u32 unicast_mac_threshold; + u32 accel_flow_count; + struct nbl_flow_ht_mng pp0_ht0_mng; + struct nbl_flow_ht_mng pp0_ht1_mng; + struct nbl_flow_multi_group multi_flow[NBL_MAX_ETHERNET]; + void *mac_hash_tbl[NBL_MAX_ETHERNET]; + struct list_head lldp_list; + struct list_head lacp_list; + struct list_head ul4s_head; + struct list_head dprbac_head; + void *mcc_tbl_priv; + struct list_head nd_upcall_list; // note: works only for offload network + // not the physical network +}; + +#define NBL_FLOW_INIT_BIT BIT(1) +#define NBL_FLOW_AVAILABLE_BIT BIT(2) +#define NBL_ALL_PROFILE_NUM (64) +#define NBL_ASSOC_PROFILE_GRAPH_NUM (32) +#define NBL_ASSOC_PROFILE_NUM (16) +#define NBL_ASSOC_PROFILE_STAGE_NUM (8) +#define NBL_PROFILE_KEY_MAX_NUM (32) +#define NBL_FLOW_KEY_NAME_SIZE (32) +#define NBL_FLOW_INDEX_LEN 131072 +#define NBL_FLOW_TABLE_NUM (64 * 1024) +#define NBL_FEM_TCAM_MAX_NUM (64) +#define NBL_AT_MAX_NUM 8 +#define NBL_MAX_ACTION_NUM 16 +#define NBL_ACT_BYTE_LEN 32 + +enum nbl_flow_key_type { + NBL_FLOW_KEY_TYPE_PID, // profile id + 
NBL_FLOW_KEY_TYPE_ACTION, // AT action data, in 22 bits + NBL_FLOW_KEY_TYPE_PHV, // keys: PHV fields, inport, tab_index + // and other extracted 16 bits actions + NBL_FLOW_KEY_TYPE_MASK, // mask 4 bits + NBL_FLOW_KEY_TYPE_BTS // bit setter +}; + +#define NBL_PP0_KT_NUM (0) +#define NBL_PP1_KT_NUM (12 * 1024) +#define NBL_PP2_KT_NUM (112 * 1024) +#define NBL_PP0_KT_OFFSET (124 * 1024) +#define NBL_PP1_KT_OFFSET (112 * 1024) +#define NBL_FEM_HT_PP0_LEN (1 * 1024) +#define NBL_FEM_HT_PP1_LEN (3 * 1024) +#define NBL_FEM_HT_PP2_LEN (16 * 1024) +#define NBL_FEM_HT_PP0_DEPTH (1 * 1024) +#define NBL_FEM_HT_PP1_DEPTH (3 * 1024) +#define NBL_FEM_HT_PP2_DEPTH (0) +#define NBL_FEM_AT_PP1_LEN (6 * 1024) +#define NBL_FEM_AT2_PP1_LEN (2 * 1024) +#define NBL_FEM_AT_PP2_LEN (72 * 1024) +#define NBL_FEM_AT2_PP2_LEN (16 * 1024) +#define NBL_TC_MCC_TBL_DEPTH (7168) +#define NBL_TC_ENCAP_TBL_DEPTH (4 * 1024) + +struct nbl_flow_key_info { + bool valid; + enum nbl_flow_key_type key_type; + u16 offset; + u16 length; + u8 key_id; + char name[NBL_FLOW_KEY_NAME_SIZE]; +}; + +struct nbl_profile_msg { + bool valid; + // pp loopback or not + bool pp_mode; + bool key_full; + bool pt_cmd; + bool from_start; + bool to_end; + bool need_upcall; + + // id in range of 0 to 2 + u8 pp_id; + + // id in range of 0 to 15 + u8 profile_id; + + // id in range of 0 to 47 + u8 g_profile_id; + + // count of valid profile keys in the flow_keys list + u8 key_count; + u16 key_len; + u64 key_flag; + u8 act_count; + u8 pre_assoc_profile_id[NBL_ASSOC_PROFILE_NUM]; + u8 next_assoc_profile_id[NBL_ASSOC_PROFILE_NUM]; + // store all profile key info + struct nbl_flow_key_info flow_keys[NBL_PROFILE_KEY_MAX_NUM]; +}; + +struct nbl_flow_tab_hash_info { + void *flow_tab_hash; + s32 tab_cnt; +}; + +struct nbl_profile_assoc_graph { + u64 key_flag; + u8 profile_count; + u8 profile_id[NBL_ASSOC_PROFILE_STAGE_NUM]; +}; + +/* pp ht hash-list struct */ +struct nbl_flow_pp_ht_key { + u16 vid; + u16 ht_other_index; + u32 kt_index; +}; + +struct nbl_flow_pp_ht_tbl { + struct nbl_flow_pp_ht_key key[4]; + u32 ref_cnt; +}; + +struct nbl_flow_pp_ht_mng { + struct nbl_flow_pp_ht_tbl **hash_map; +}; + +/* at hash-list struct */ +struct nbl_flow_pp_at_key { + union { + u32 act[NBL_AT_MAX_NUM]; + u8 act_data[NBL_ACT_BYTE_LEN]; + }; +}; + +struct nbl_flow_at_tbl { + u32 ref_cnt; +}; + +struct nbl_flow_at_mng { + void *at_tbl[NBL_PP_TYPE_MAX][NBL_AT_TYPE_MAX]; +}; + +struct nbl_tc_ht_item { + u16 ht_entry; + u16 ht0_hash; + u16 ht1_hash; + u16 hash_bucket; + u32 tbl_id; +}; + +union nbl_tc_common_data_u { + struct nbl_tc_common_data { + u32 rsv[10]; + } __packed info; +#define NBL_TC_COMMON_DATA_TAB_WIDTH (sizeof(struct nbl_tc_common_data) \ + / sizeof(u32)) + u32 data[NBL_TC_COMMON_DATA_TAB_WIDTH]; + u8 hash_key[sizeof(struct nbl_tc_common_data)]; +}; + +struct nbl_tc_kt_item { + union nbl_tc_common_data_u kt_data; + u8 pp_type; + u8 key_type; +}; + +struct nbl_act_collect { + u32 act_vld; + u32 act2_vld; + u32 act_offset; + u32 act2_offset; + u32 act_hw_index; + u32 act2_hw_index; + struct nbl_flow_pp_at_key act_key[2]; +}; + +struct nbl_tc_at_item { + u32 act_buf[NBL_AT_MAX_NUM]; + u32 act_num; + u32 act1_buf[NBL_AT_MAX_NUM]; + u32 act1_num; + u32 act2_buf[NBL_AT_MAX_NUM]; + u32 act2_num; + struct nbl_act_collect act_collect; +}; + +struct nbl_flow_tcam_key_item { + u8 key[NBL_KT_BYTE_HALF_LEN]; + u8 key_mode; + struct nbl_tc_ht_item ht_item; + struct nbl_tc_kt_item kt_item; + struct nbl_tc_at_item at_item; + u32 sw_hash_id; + u8 profile_id; +}; + +struct 
nbl_flow_tcam_key_mng { + struct nbl_flow_tcam_key_item item; + u32 ref_cnt; +}; + +struct nbl_flow_tcam_ad_item { + u32 action[NBL_MAX_ACTION_NUM]; +}; + +struct nbl_flow_tcam_ad_mng { + struct nbl_flow_tcam_ad_item item; +}; + +struct nbl_count_mng { + u32 pp1_tcam_count; + u32 pp2_tcam_count; +}; + +/* --------- tc flow stats ---------- */ +struct nbl_flow_counter_cache { + u64 packets; + u64 bytes; +}; + +struct nbl_flow_counter { + struct list_head entries; + u64 lastpackets; + u64 lastbytes; + u64 lastuse; + struct nbl_flow_counter_cache cache; + unsigned long cookie; + u32 counter_id; +}; + +struct nbl_flow_update_counter { + u32 counter_id; + unsigned long cookie; +}; + +struct nbl_flow_query_counter { + u32 counter_id[NBL_FLOW_COUNT_NUM]; + unsigned long cookie[NBL_FLOW_COUNT_NUM]; +}; + +struct nbl_fc_mgt; +struct nbl_fc_product_ops { + void (*get_spec_stat_sz)(u16 *hit_sz, u16 *bytes_sz); + void (*get_flow_stat_sz)(u16 *hit_sz, u16 *bytes_sz); + void (*get_spec_stats)(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes); + void (*get_flow_stats)(struct nbl_flow_counter *counter, u64 *pkts, u64 *bytes); + int (*update_stats)(struct nbl_fc_mgt *mgt, struct nbl_flow_query_counter *counter_array, + u32 flow_num, u32 clear, enum nbl_pp_fc_type fc_type); +}; + +struct nbl_fc_mgt { + spinlock_t counter_lock; /* protect the counter */ + void *cls_cookie_tbl[NBL_FC_TYPE_MAX]; + struct workqueue_struct *counter_wq; + struct nbl_common_info *common; + struct delayed_work counter_work; + struct list_head counter_hash_list; + struct list_head counter_stat_hash_list; + struct nbl_flow_update_counter *counter_update_list; + struct nbl_cmd_hdr cmd_hdr[NBL_CMDQ_MAX_OP_CODE]; + unsigned long query_interval; + unsigned long next_query; + struct nbl_fc_product_ops fc_ops; + enum nbl_product_type type; +}; + +struct nbl_tc_mcc_mgt { + DECLARE_BITMAP(mcc_pool, NBL_TC_MCC_TBL_DEPTH); + struct nbl_common_info *common; + struct list_head mcc_list; + u16 mcc_offload_cnt; +}; + +struct nbl_tc_flow_mgt { + spinlock_t flow_lock; /* used to lock flow resource */ + struct nbl_flow_prf_upcall_info prf_info; + u8 profile_graph_count; + struct nbl_profile_msg profile_msg[NBL_ALL_PROFILE_NUM]; + struct nbl_profile_assoc_graph profile_graph[NBL_ASSOC_PROFILE_GRAPH_NUM]; + void *flow_idx_tbl; + + struct nbl_flow_tab_hash_info flow_tab_hash[NBL_ALL_PROFILE_NUM]; + + DECLARE_BITMAP(assoc_table_bmp, NBL_FLOW_TABLE_NUM); + DECLARE_BITMAP(pp1_kt_bmp, NBL_PP1_KT_NUM); + DECLARE_BITMAP(pp2_kt_bmp, NBL_PP2_KT_NUM); + + u8 init_status; + atomic64_t destroy_num; + atomic64_t create_num; + atomic64_t ref_cnt; + + struct nbl_flow_pp_ht_mng pp0_ht0_mng; + struct nbl_flow_pp_ht_mng pp0_ht1_mng; + struct nbl_flow_pp_ht_mng pp1_ht0_mng; + struct nbl_flow_pp_ht_mng pp1_ht1_mng; + struct nbl_flow_pp_ht_mng pp2_ht0_mng; + struct nbl_flow_pp_ht_mng pp2_ht1_mng; + struct nbl_flow_at_mng at_mng; + + struct nbl_flow_tcam_key_mng tcam_pp0_key_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_ad_mng tcam_pp0_ad_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_key_mng tcam_pp1_key_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_ad_mng tcam_pp1_ad_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_key_mng tcam_pp2_key_mng[NBL_FEM_TCAM_MAX_NUM]; + struct nbl_flow_tcam_ad_mng tcam_pp2_ad_mng[NBL_FEM_TCAM_MAX_NUM]; + + struct nbl_count_mng count_mng; + struct nbl_fc_mgt *fc_mgt; + struct nbl_resource_mgt *res_mgt; + u8 pf_set_tc_count; + struct nbl_tc_mcc_mgt tc_mcc_mgt; + u16 port_tpid_type[NBL_TPID_PORT_NUM]; + + /* encap and decap info 
*/ + struct mutex encap_tbl_lock; /* used to lock encap resource */ + struct nbl_flow_tab_hash_info encap_tbl; + DECLARE_BITMAP(encap_tbl_bmp, NBL_TC_ENCAP_TBL_DEPTH); +}; + +/* --------- ACCEL ---------- */ +#define NBL_MAX_KTLS_SESSION (1024) +#define NBL_MAX_IPSEC_SESSION (2048) +#define NBL_MAX_IPSEC_TCAM (32) +#define NBL_IPSEC_HT_LEN (1 * 512) + +struct nbl_ipsec_ht_mng { + struct nbl_flow_ht_tbl *hash_map[NBL_IPSEC_HT_LEN]; +}; + +struct nbl_accel_uipsec_rule { + struct nbl_flow_fem_entry uipsec_entry; + struct list_head node; + u16 vsi; + u32 index; +}; + +struct nbl_tls_cfg_info { + u16 vld; + u16 vsi; +}; + +struct nbl_accel_mgt { + DECLARE_BITMAP(tx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + DECLARE_BITMAP(rx_ktls_bitmap, NBL_MAX_KTLS_SESSION); + struct nbl_tls_cfg_info dtls_cfg_info[NBL_MAX_KTLS_SESSION]; + struct nbl_tls_cfg_info utls_cfg_info[NBL_MAX_KTLS_SESSION]; + + DECLARE_BITMAP(tx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + DECLARE_BITMAP(rx_ipsec_bitmap, NBL_MAX_IPSEC_SESSION); + struct nbl_ipsec_cfg_info tx_cfg_info[NBL_MAX_IPSEC_SESSION]; + struct nbl_ipsec_cfg_info rx_cfg_info[NBL_MAX_IPSEC_SESSION]; + + DECLARE_BITMAP(ipsec_tcam_id, NBL_MAX_IPSEC_TCAM); + struct nbl_ipsec_ht_mng ipsec_ht0_mng; + struct nbl_ipsec_ht_mng ipsec_ht1_mng; + struct list_head uprbac_head; +}; + +/* --------- INFO ---------- */ +#define NBL_RES_RDMA_MAX (63) +#define NBL_RES_RDMA_INTR_NUM (3) +#define NBL_MAX_VF (NBL_MAX_FUNC - NBL_MAX_PF) + +struct nbl_sriov_info { + unsigned int bdf; + unsigned int num_vfs; + unsigned int start_vf_func_id; + unsigned short offset; + unsigned short stride; + unsigned short active_vf_num; + u64 vf_bar_start; + u64 vf_bar_len; + u64 pf_bar_start; +}; + +struct nbl_eswitch_info { + struct nbl_rep_data *rep_data; + int num_vfs; + u16 mode; + u16 vf_base_vsi_id; +}; + +struct nbl_eth_info { + DECLARE_BITMAP(eth_bitmap, NBL_MAX_ETHERNET); + u64 port_caps[NBL_MAX_ETHERNET]; + u64 port_advertising[NBL_MAX_ETHERNET]; + u64 port_lp_advertising[NBL_MAX_ETHERNET]; + u32 link_speed[NBL_MAX_ETHERNET]; /* in Mbps units */ + u8 active_fc[NBL_MAX_ETHERNET]; + u8 active_fec[NBL_MAX_ETHERNET]; + u8 link_state[NBL_MAX_ETHERNET]; + u8 module_inplace[NBL_MAX_ETHERNET]; + u8 port_type[NBL_MAX_ETHERNET]; /* enum nbl_port_type */ + u8 port_max_rate[NBL_MAX_ETHERNET]; /* enum nbl_port_max_rate */ + u8 module_repluged[NBL_MAX_ETHERNET]; + + u8 pf_bitmap[NBL_MAX_ETHERNET]; + u8 eth_num; + u8 resv[3]; + u8 eth_id[NBL_MAX_PF]; + u8 logic_eth_id[NBL_MAX_PF]; +}; + +enum nbl_vsi_serv_type { + NBL_VSI_SERV_PF_DATA_TYPE, + NBL_VSI_SERV_PF_CTLR_TYPE, + NBL_VSI_SERV_PF_USER_TYPE, + NBL_VSI_SERV_PF_XDP_TYPE, + NBL_VSI_SERV_VF_DATA_TYPE, + /* use for pf_num > eth_num, the extra pf belong pf0's switch */ + NBL_VSI_SERV_PF_EXTRA_TYPE, + NBL_VSI_SERV_MAX_TYPE, +}; + +struct nbl_vsi_serv_info { + u16 base_id; + u16 num; +}; + +struct nbl_vsi_mac_info { + u16 vlan_proto; + u16 vlan_tci; + int rate; + u8 mac[ETH_ALEN]; +}; + +struct nbl_vsi_info { + u16 num; + struct nbl_vsi_serv_info serv_info[NBL_MAX_ETHERNET][NBL_VSI_SERV_MAX_TYPE]; + struct nbl_vsi_mac_info mac_info[NBL_MAX_FUNC]; +}; + +#define NBL_RDMA_BOND_KEY_MAGIC 0x1000 +struct nbl_rdma_info { + DECLARE_BITMAP(func_cap, NBL_MAX_FUNC); + u16 rdma_id[NBL_MAX_FUNC]; + u32 mem_type; + /* TODO: merge draco code, and delete this */ + u16 rdma_vacant; +}; + +#define NBL_ETH_BOND_VALID_PORT(x) ((x) < NBL_LAG_MAX_PORTS) +struct nbl_eth_bond_entry { + u8 eth_id[NBL_LAG_MAX_PORTS]; + u16 lag_id; + u16 lag_num; +}; + +struct nbl_eth_bond_info { + struct 
nbl_eth_bond_entry entry[NBL_LAG_MAX_NUM]; +}; + +struct nbl_net_ring_num_info { + u16 pf_def_max_net_qp_num; + u16 vf_def_max_net_qp_num; + u16 net_max_qp_num[NBL_MAX_FUNC]; +}; + +struct nbl_rdma_cap_info { + u32 valid; + u8 rdma_func_bitmaps[65]; + u8 rsv[7]; +}; + +struct nbl_rdma_mem_type_info { + u32 mem_type; +}; + +struct nbl_resource_info { + /* ctrl-dev owned pfs */ + DECLARE_BITMAP(func_bitmap, NBL_MAX_FUNC); + struct nbl_sriov_info *sriov_info; + struct nbl_eswitch_info *eswitch_info; + struct nbl_eth_info *eth_info; + struct nbl_vsi_info *vsi_info; + struct nbl_eth_bond_info *eth_bond_info; + u32 base_qid; + u32 max_vf_num; + + struct nbl_rdma_info rdma_info; + struct nbl_net_ring_num_info net_ring_num_info; + + /* for af use */ + int p4_used; + u16 eth_mode; + u16 init_acl_refcnt; + u8 max_pf; + u16 nd_upcall_refnt; + struct nbl_board_port_info board_info; + + u8 link_forced_info[NBL_MAX_FUNC]; +}; + +enum { + NBL_FD_MODE_DEFAULT = 0,/* Support src_mac & dst_mac, ipv4 + other in total 512 rules */ + NBL_FD_MODE_FULL, /* Unsupport src_mac & dst_mac, ipv4 + other each 512 rules */ + NBL_FD_MODE_LITE, /* Only support ipv4, 1536 rules */ + NBL_FD_MODE_MAX, +}; + +union nbl_fd_compo_info { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + u16 ethertype; + u32 src_ipv4; + u32 dst_ipv4; + u32 src_ipv6[NBL_IPV6_U32LEN]; + u32 dst_ipv6[NBL_IPV6_U32LEN]; + u8 ipproto; + u16 l4_sport; + u16 l4_dport; + struct nbl_fd_compo_udf { + u32 offset; + u32 data; + } udf; +}; + +struct nbl_flow_direct_entry { + struct list_head node; + u8 pid; + bool udf; + u16 action_index; + u16 depth_index; + struct nbl_chan_param_fdir_replace param; +}; + +struct nbl_flow_direct_info { + struct list_head list[NBL_CHAN_FDIR_RULE_MAX]; + u16 state[NBL_CHAN_FDIR_RULE_MAX]; + u16 cnt[NBL_CHAN_FDIR_RULE_MAX]; +}; + +struct nbl_fd_component_ops { + int (*validate)(struct ethtool_rx_flow_spec *fs); + int (*form)(struct nbl_flow_direct_entry *entry, struct ethtool_rx_flow_spec *fs); + u16 layer; +}; + +struct nbl_flow_direct_mgt { + struct nbl_flow_direct_info info[NBL_MAX_PF]; + u32 max_spec; + u32 udf_offset; + u32 udf_cnt; + u16 udf_layer; + u16 cnt[NBL_FD_PROFILE_MAX]; + u8 mode; + u8 state; +}; + +/* --------- PMD status ---------- */ +struct nbl_upcall_port_info { + bool upcall_port_active; + u16 func_id; +}; + +struct nbl_rep_offload_status { +#define NBL_OFFLOAD_STATUS_MAX_VSI (1024) +#define NBL_OFFLOAD_STATUS_MAX_ETH (4) + DECLARE_BITMAP(rep_vsi_bitmap, NBL_OFFLOAD_STATUS_MAX_VSI); + DECLARE_BITMAP(rep_eth_bitmap, NBL_OFFLOAD_STATUS_MAX_ETH); + bool status[NBL_MAX_ETHERNET]; + bool pmd_debug; + unsigned long timestamp; +}; + +struct nbl_pmd_status { + struct nbl_upcall_port_info upcall_port_info; + struct nbl_rep_offload_status rep_status; +}; + +struct nbl_resource_common_ops { + u16 (*vsi_id_to_func_id)(void *res_mgt, u16 vsi_id); + int (*vsi_id_to_pf_id)(void *res_mgt, u16 vsi_id); + u16 (*pfvfid_to_func_id)(void *res_mgt, int pfid, int vfid); + u16 (*pfvfid_to_vsi_id)(void *res_mgt, int pfid, int vfid, u16 type); + u16 (*func_id_to_vsi_id)(void *res_mgt, u16 func_id, u16 type); + int (*func_id_to_pfvfid)(void *res_mgt, u16 func_id, int *pfid, int *vfid); + int (*func_id_to_bdf)(void *res_mgt, u16 func_id, u8 *bus, u8 *dev, u8 *function); + u64 (*get_func_bar_base_addr)(void *res_mgt, u16 func_id); + u16 (*get_particular_queue_id)(void *res_mgt, u16 vsi_id); + u8 (*vsi_id_to_eth_id)(void *res_mgt, u16 vsi_id); + u8 (*eth_id_to_pf_id)(void *res_mgt, u8 eth_id); + u8 (*eth_id_to_lag_id)(void *res_mgt, u8 
eth_id); + bool (*check_func_active_by_queue)(void *res_mgt, u16 func_id); +}; + +struct nbl_res_product_ops { + /* for queue */ + void (*queue_mgt_init)(struct nbl_queue_mgt *queue_mgt); + int (*setup_qid_map_table)(struct nbl_resource_mgt *res_mgt, u16 func_id, u64 notify_addr); + void (*remove_qid_map_table)(struct nbl_resource_mgt *res_mgt, u16 func_id); + int (*init_qid_map_table)(struct nbl_resource_mgt *res_mgt, + struct nbl_queue_mgt *queue_mgt, struct nbl_phy_ops *phy_ops); + + /* for intr */ + void (*nbl_intr_mgt_init)(struct nbl_resource_mgt *res_mgt); +}; + +struct nbl_resource_mgt { + struct nbl_resource_common_ops common_ops; + struct nbl_common_info *common; + struct nbl_resource_info *resource_info; + struct nbl_channel_ops_tbl *chan_ops_tbl; + struct nbl_phy_ops_tbl *phy_ops_tbl; + struct nbl_queue_mgt *queue_mgt; + struct nbl_interrupt_mgt *intr_mgt; + struct nbl_txrx_mgt *txrx_mgt; + struct nbl_flow_mgt *flow_mgt; + struct nbl_tc_flow_mgt *tc_flow_mgt; + struct nbl_vsi_mgt *vsi_mgt; + struct nbl_adminq_mgt *adminq_mgt; + struct nbl_accel_mgt *accel_mgt; + struct nbl_port_mgt *port_mgt; + struct nbl_flow_direct_mgt *fd_mgt; + struct nbl_res_product_ops *product_ops; + DECLARE_BITMAP(flex_capability, NBL_FLEX_CAP_NBITS); + DECLARE_BITMAP(fix_capability, NBL_FIX_CAP_NBITS); +}; + +/* Mgt structure for each product. + * Every individual mgt must have the common mgt as its first member, and contain its unique + * data structure in the rest of it. + */ +struct nbl_resource_mgt_leonis { + struct nbl_resource_mgt res_mgt; + struct nbl_pmd_status pmd_status; +}; + +struct nbl_resource_mgt_bootis { + struct nbl_resource_mgt res_mgt; +}; + +struct nbl_resource_mgt_virtio { + struct nbl_resource_mgt res_mgt; +}; + +#define NBL_RES_FW_CMD_FILTER_MAX 8 +struct nbl_res_fw_cmd_filter { + int (*in)(struct nbl_resource_mgt *res_mgt, void *in, u16 in_len); + int (*out)(struct nbl_resource_mgt *res_mgt, void *in, u16 in_len, void *out, u16 out_len); +}; + +u16 nbl_res_vsi_id_to_func_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +int nbl_res_vsi_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +u16 nbl_res_pfvfid_to_func_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid); +u16 nbl_res_pfvfid_to_vsi_id(struct nbl_resource_mgt *res_mgt, int pfid, int vfid, u16 type); +u16 nbl_res_func_id_to_vsi_id(struct nbl_resource_mgt *res_mgt, u16 func_id, u16 type); +int nbl_res_func_id_to_pfvfid(struct nbl_resource_mgt *res_mgt, u16 func_id, int *pfid, int *vfid); +u8 nbl_res_eth_id_to_pf_id(struct nbl_resource_mgt *res_mgt, u8 eth_id); +u8 nbl_res_eth_id_to_lag_id(struct nbl_resource_mgt *res_mgt, u8 eth_id); +int nbl_res_func_id_to_bdf(struct nbl_resource_mgt *res_mgt, u16 func_id, u8 *bus, + u8 *dev, u8 *function); +u64 nbl_res_get_func_bar_base_addr(struct nbl_resource_mgt *res_mgt, u16 func_id); +u16 nbl_res_get_particular_queue_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +u8 nbl_res_vsi_id_to_eth_id(struct nbl_resource_mgt *res_mgt, u16 vsi_id); +bool nbl_res_check_func_active_by_queue(struct nbl_resource_mgt *res_mgt, u16 func_id); + +int nbl_adminq_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_adminq_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_adminq_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_adminq_remove_ops(struct nbl_resource_ops *resource_ops); + +int nbl_intr_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_intr_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_intr_setup_ops(struct nbl_resource_ops *resource_ops); 
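+/*
+ * The nbl_res_* wrappers declared above are implemented (earlier in this
+ * patch) with a single override-or-default pattern: call the
+ * product-specific hook from common_ops when one is registered, otherwise
+ * fall back to the built-in static helper. A minimal sketch of that
+ * pattern, using a hypothetical op "foo" (not a real member) purely for
+ * illustration:
+ *
+ *	u16 nbl_res_foo(struct nbl_resource_mgt *res_mgt, u16 arg)
+ *	{
+ *		if (!res_mgt->common_ops.foo)
+ *			return foo(res_mgt, arg);	/* generic default */
+ *
+ *		return res_mgt->common_ops.foo(res_mgt, arg);
+ *	}
+ */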
+void nbl_intr_remove_ops(struct nbl_resource_ops *resource_ops); + +int nbl_queue_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_queue_mgt_stop(struct nbl_resource_mgt *res_mgt); + +int nbl_txrx_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_txrx_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_txrx_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_txrx_remove_ops(struct nbl_resource_ops *resource_ops); + +int nbl_vsi_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_vsi_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_vsi_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_vsi_remove_ops(struct nbl_resource_ops *resource_ops); + +int nbl_accel_setup_ops(struct nbl_resource_ops *res_ops); +int nbl_accel_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_accel_mgt_stop(struct nbl_resource_mgt *res_mgt); + +int nbl_fd_mgt_start(struct nbl_resource_mgt *res_mgt); +void nbl_fd_mgt_stop(struct nbl_resource_mgt *res_mgt); +int nbl_fd_setup_ops(struct nbl_resource_ops *resource_ops); +void nbl_fd_remove_ops(struct nbl_resource_ops *resource_ops); + +bool nbl_res_get_flex_capability(void *priv, enum nbl_flex_cap_type cap_type); +bool nbl_res_get_fix_capability(void *priv, enum nbl_fix_cap_type cap_type); +void nbl_res_set_flex_capability(struct nbl_resource_mgt *res_mgt, enum nbl_flex_cap_type cap_type); +void nbl_res_set_fix_capability(struct nbl_resource_mgt *res_mgt, enum nbl_fix_cap_type cap_type); + +int nbl_res_open_sfp(struct nbl_resource_mgt *res_mgt, u8 eth_id); +int nbl_res_get_eth_mac(struct nbl_resource_mgt *res_mgt, u8 *mac, u8 eth_id); +void nbl_res_pf_dev_vsi_type_to_hw_vsi_type(u16 src_type, enum nbl_vsi_serv_type *dst_type); +int nbl_res_get_rep_idx(struct nbl_eswitch_info *eswitch_info, u16 rep_vsi_id); +bool nbl_res_vf_is_active(void *priv, u16 func_id); +void nbl_res_set_hw_status(void *priv, enum nbl_hw_status hw_status); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..515ceb631b103d8d5c2b5e836d286db0d4f2419b --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.c @@ -0,0 +1,3330 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#include "nbl_txrx.h" +#include +#include +#include +#include +#include +#include + +DEFINE_STATIC_KEY_FALSE(nbl_xdp_locking_key); + +static bool nbl_txrx_within_vsi(struct nbl_txrx_vsi_info *vsi_info, u16 ring_index) +{ + return ring_index >= vsi_info->ring_offset && + ring_index < vsi_info->ring_offset + vsi_info->ring_num; +} + +static struct nbl_res_tx_ring * +nbl_alloc_tx_ring(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, u16 ring_index, + u16 desc_num) +{ + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_tx_ring *ring; + + ring = devm_kzalloc(dev, sizeof(struct nbl_res_tx_ring), GFP_KERNEL); + if (!ring) + return NULL; + + ring->vsi_info = txrx_mgt->vsi_info; + ring->dma_dev = common->dma_dev; + ring->product_type = common->product_type; + ring->eth_id = common->eth_id; + ring->queue_index = ring_index; + ring->notify_addr = phy_ops->get_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt)); + ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2 + 1); + ring->netdev = netdev; + ring->desc_num = desc_num; + ring->used_wrap_counter = 1; + ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); + + if (res_mgt->resource_info->eswitch_info) + ring->mode = res_mgt->resource_info->eswitch_info->mode; + + return ring; +} + +static int nbl_alloc_tx_rings(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, + u16 tx_num, u16 desc_num) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_tx_ring *ring; + u32 ring_index; + + if (txrx_mgt->tx_rings) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to allocate tx_rings which already exists\n"); + return -EINVAL; + } + + txrx_mgt->tx_ring_num = tx_num; + + txrx_mgt->tx_rings = devm_kcalloc(dev, tx_num, + sizeof(struct nbl_res_tx_ring *), GFP_KERNEL); + if (!txrx_mgt->tx_rings) + return -ENOMEM; + + for (ring_index = 0; ring_index < tx_num; ring_index++) { + ring = txrx_mgt->tx_rings[ring_index]; + WARN_ON(ring); + ring = nbl_alloc_tx_ring(res_mgt, netdev, ring_index, desc_num); + if (!ring) + goto alloc_tx_ring_failed; + + WRITE_ONCE(txrx_mgt->tx_rings[ring_index], ring); + } + + return 0; + +alloc_tx_ring_failed: + while (ring_index--) + devm_kfree(dev, txrx_mgt->tx_rings[ring_index]); + devm_kfree(dev, txrx_mgt->tx_rings); + txrx_mgt->tx_rings = NULL; + return -ENOMEM; +} + +static void nbl_free_tx_rings(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_res_tx_ring *ring; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + u16 ring_count; + u16 ring_index; + + ring_count = txrx_mgt->tx_ring_num; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = txrx_mgt->tx_rings[ring_index]; + devm_kfree(dev, ring); + } + devm_kfree(dev, txrx_mgt->tx_rings); + txrx_mgt->tx_rings = NULL; +} + +static int nbl_alloc_rx_rings(struct nbl_resource_mgt *res_mgt, struct net_device *netdev, + u16 rx_num, u16 desc_num) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_rx_ring *ring; + u32 ring_index; + + if (txrx_mgt->rx_rings) { + nbl_err(common, NBL_DEBUG_RESOURCE, + 
"Try to allocate rx_rings which already exists\n"); + return -EINVAL; + } + + txrx_mgt->rx_ring_num = rx_num; + + txrx_mgt->rx_rings = devm_kcalloc(dev, rx_num, + sizeof(struct nbl_res_rx_ring *), GFP_KERNEL); + if (!txrx_mgt->rx_rings) + return -ENOMEM; + + for (ring_index = 0; ring_index < rx_num; ring_index++) { + ring = txrx_mgt->rx_rings[ring_index]; + WARN_ON(ring); + ring = devm_kzalloc(dev, sizeof(struct nbl_res_rx_ring), GFP_KERNEL); + if (!ring) + goto alloc_rx_ring_failed; + + ring->common = common; + ring->txrx_mgt = txrx_mgt; + ring->dma_dev = common->dma_dev; + ring->queue_index = ring_index; + ring->notify_qid = NBL_RES_NOFITY_QID(res_mgt, ring_index * 2); + ring->netdev = netdev; + ring->desc_num = desc_num; + /* TODO: maybe TX buffer length should be determined by other factors */ + ring->buf_len = NBL_RX_BUFSZ - NBL_RX_PAD; + + ring->used_wrap_counter = 1; + ring->avail_used_flags |= BIT(NBL_PACKED_DESC_F_AVAIL); + WRITE_ONCE(txrx_mgt->rx_rings[ring_index], ring); + } + + return 0; + +alloc_rx_ring_failed: + while (ring_index--) + devm_kfree(dev, txrx_mgt->rx_rings[ring_index]); + devm_kfree(dev, txrx_mgt->rx_rings); + txrx_mgt->rx_rings = NULL; + return -ENOMEM; +} + +static void nbl_free_rx_rings(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_res_rx_ring *ring; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + u16 ring_count; + u16 ring_index; + + ring_count = txrx_mgt->rx_ring_num; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = txrx_mgt->rx_rings[ring_index]; + devm_kfree(dev, ring); + } + devm_kfree(dev, txrx_mgt->rx_rings); + txrx_mgt->rx_rings = NULL; +} + +static int nbl_alloc_vectors(struct nbl_resource_mgt *res_mgt, u16 total_num, u16 xdp_ring_offset) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct nbl_res_vector *vector; + u32 index; + u16 xdp_ring_num; + + if (txrx_mgt->vectors) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to allocate vectors which already exists\n"); + return -EINVAL; + } + + txrx_mgt->vectors = devm_kcalloc(dev, xdp_ring_offset, sizeof(struct nbl_res_vector *), + GFP_KERNEL); + if (!txrx_mgt->vectors) + return -ENOMEM; + + for (index = 0; index < xdp_ring_offset; index++) { + vector = txrx_mgt->vectors[index]; + WARN_ON(vector); + vector = devm_kzalloc(dev, sizeof(struct nbl_res_vector), GFP_KERNEL); + if (!vector) + goto alloc_vector_failed; + + vector->rx_ring = txrx_mgt->rx_rings[index]; + vector->tx_ring = txrx_mgt->tx_rings[index]; + WRITE_ONCE(txrx_mgt->vectors[index], vector); + } + + xdp_ring_num = total_num - xdp_ring_offset; + for (index = 0; index < xdp_ring_num; index++) { + vector = txrx_mgt->vectors[index]; + vector->xdp_ring = txrx_mgt->tx_rings[index + xdp_ring_offset]; + } + + return 0; + +alloc_vector_failed: + while (index--) + devm_kfree(dev, txrx_mgt->vectors[index]); + devm_kfree(dev, txrx_mgt->vectors); + txrx_mgt->vectors = NULL; + return -ENOMEM; +} + +static void nbl_free_vectors(struct nbl_resource_mgt *res_mgt) +{ + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + struct nbl_res_vector *vector; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + u16 count, index; + + count = txrx_mgt->xdp_ring_offset; + for (index = 0; index < count; index++) { + vector = txrx_mgt->vectors[index]; + devm_kfree(dev, vector); + } + devm_kfree(dev, txrx_mgt->vectors); + txrx_mgt->vectors = NULL; +} + +static 
int nbl_res_txrx_alloc_rings(void *priv, struct net_device *netdev, + struct nbl_ring_param *param) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + int err = 0; + + err = nbl_alloc_tx_rings(res_mgt, netdev, param->tx_ring_num, param->queue_size); + if (err) + return err; + + err = nbl_alloc_rx_rings(res_mgt, netdev, param->rx_ring_num, param->queue_size); + if (err) + goto alloc_rx_rings_err; + + err = nbl_alloc_vectors(res_mgt, param->tx_ring_num, param->xdp_ring_offset); + if (err) + goto alloc_vectors_err; + + txrx_mgt->xdp_ring_offset = param->xdp_ring_offset; + txrx_mgt->xdp_ring_num = param->tx_ring_num - param->xdp_ring_offset; + + if (txrx_mgt->xdp_ring_num && num_online_cpus() > txrx_mgt->xdp_ring_num) + static_branch_inc(&nbl_xdp_locking_key); + + nbl_info(res_mgt->common, NBL_DEBUG_RESOURCE, + "Alloc rings for %d tx, %d rx, %d xdp_offset, %d desc\n", + param->tx_ring_num, param->rx_ring_num, param->xdp_ring_offset, param->queue_size); + return 0; + +alloc_vectors_err: + nbl_free_rx_rings(res_mgt); +alloc_rx_rings_err: + nbl_free_tx_rings(res_mgt); + return err; +} + +static void nbl_res_txrx_remove_rings(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + + if (txrx_mgt->xdp_ring_num && num_online_cpus() > txrx_mgt->xdp_ring_num && + static_key_enabled(&nbl_xdp_locking_key)) + static_branch_dec(&nbl_xdp_locking_key); + + nbl_free_vectors(res_mgt); + nbl_free_tx_rings(res_mgt); + nbl_free_rx_rings(res_mgt); + nbl_info(res_mgt->common, NBL_DEBUG_RESOURCE, "Remove rings"); +} + +static dma_addr_t nbl_res_txrx_start_tx_ring(void *priv, u8 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index); + + if (tx_ring->tx_bufs) { + nbl_err(res_mgt->common, NBL_DEBUG_RESOURCE, + "Try to setup a TX ring with buffer management array already allocated\n"); + return (dma_addr_t)NULL; + } + + tx_ring->tx_bufs = devm_kcalloc(dev, tx_ring->desc_num, sizeof(*tx_ring->tx_bufs), + GFP_KERNEL); + if (!tx_ring->tx_bufs) + return (dma_addr_t)NULL; + + /* Alloc twice memory, and second half is used to back up the desc for desc checking */ + tx_ring->size = ALIGN(tx_ring->desc_num * sizeof(struct nbl_ring_desc), PAGE_SIZE); + tx_ring->desc = dmam_alloc_coherent(dma_dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL | __GFP_ZERO); + if (!tx_ring->desc) { + nbl_err(res_mgt->common, NBL_DEBUG_RESOURCE, + "Allocate %u bytes descriptor DMA memory for TX queue %u failed\n", + tx_ring->size, tx_ring->queue_index); + goto alloc_dma_err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->tail_ptr = 0; + + tx_ring->valid = true; + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Start tx ring %d", ring_index); + return tx_ring->dma; + +alloc_dma_err: + devm_kfree(dev, tx_ring->tx_bufs); + tx_ring->tx_bufs = NULL; + tx_ring->size = 0; + return (dma_addr_t)NULL; +} + +static inline bool nbl_rx_cache_get(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) +{ + struct nbl_page_cache *cache = &rx_ring->page_cache; + struct nbl_rx_queue_stats *stats = &rx_ring->rx_stats; + + if (unlikely(cache->head == cache->tail)) { + stats->rx_cache_empty++; + return false; + } + + if 
(page_ref_count(cache->page_cache[cache->head].page) != 1) { + stats->rx_cache_busy++; + return false; + } + + *dma_info = cache->page_cache[cache->head]; + cache->head = (cache->head + 1) & (NBL_MAX_CACHE_SIZE - 1); + stats->rx_cache_reuse++; + + dma_sync_single_for_device(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, DMA_FROM_DEVICE); + return true; +} + +static inline int nbl_page_alloc_pool(struct nbl_res_rx_ring *rx_ring, + struct nbl_dma_info *dma_info) +{ + if (nbl_rx_cache_get(rx_ring, dma_info)) + return 0; + + dma_info->page = page_pool_dev_alloc_pages(rx_ring->page_pool); + if (unlikely(!dma_info->page)) + return -ENOMEM; + + dma_info->addr = dma_map_page_attrs(rx_ring->dma_dev, dma_info->page, 0, PAGE_SIZE, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); + + if (unlikely(dma_mapping_error(rx_ring->dma_dev, dma_info->addr))) { + page_pool_recycle_direct(rx_ring->page_pool, dma_info->page); + dma_info->page = NULL; + return -ENOMEM; + } + + return 0; +} + +static inline int nbl_get_rx_frag(struct nbl_res_rx_ring *rx_ring, struct nbl_rx_buffer *buffer) +{ + int err = 0; + + /* first buffer alloc page */ + if (buffer->offset == NBL_RX_PAD) + err = nbl_page_alloc_pool(rx_ring, buffer->di); + + return err; +} + +static inline int nbl_alloc_rx_bufs(struct nbl_res_rx_ring *rx_ring, u16 count) +{ + u32 buf_len; + u16 next_to_use, head; + __le16 head_flags = 0; + struct nbl_ring_desc *rx_desc, *head_desc; + struct nbl_rx_buffer *rx_buf; + int i; + + if (unlikely(!rx_ring)) + return -EINVAL; + + if (unlikely(!count)) { + nbl_warn(NBL_RING_TO_COMMON(rx_ring), NBL_DEBUG_RESOURCE, + "invalid input parameters, rx_ring is %p, count is %d.\n", rx_ring, count); + return -EINVAL; + } + + buf_len = rx_ring->buf_len; + next_to_use = rx_ring->next_to_use; + + head = next_to_use; + head_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_buf = NBL_RX_BUF(rx_ring, next_to_use); + + if (unlikely(!rx_desc || !rx_buf)) { + nbl_warn(NBL_RING_TO_COMMON(rx_ring), NBL_DEBUG_RESOURCE, + "invalid input parameters, next_to_use:%d, rx_desc is %p, rx_buf is %p.\n", + next_to_use, rx_desc, rx_buf); + return -EINVAL; + } + + do { + if (nbl_get_rx_frag(rx_ring, rx_buf)) + break; + + for (i = 0; i < NBL_RX_PAGE_PER_FRAGS; i++, rx_desc++, rx_buf++) { + rx_desc->addr = cpu_to_le64(rx_buf->di->addr + rx_buf->offset); + rx_desc->len = cpu_to_le32(buf_len); + rx_desc->id = cpu_to_le16(next_to_use); + + if (likely(head != next_to_use || i)) + rx_desc->flags = cpu_to_le16(rx_ring->avail_used_flags | + NBL_PACKED_DESC_F_WRITE); + else + head_flags = cpu_to_le16(rx_ring->avail_used_flags | + NBL_PACKED_DESC_F_WRITE); + } + + next_to_use += NBL_RX_PAGE_PER_FRAGS; + rx_ring->tail_ptr += NBL_RX_PAGE_PER_FRAGS; + count -= NBL_RX_PAGE_PER_FRAGS; + if (next_to_use == rx_ring->desc_num) { + next_to_use = 0; + rx_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_buf = NBL_RX_BUF(rx_ring, next_to_use); + rx_ring->avail_used_flags ^= + BIT(NBL_PACKED_DESC_F_AVAIL) | + BIT(NBL_PACKED_DESC_F_USED); + } + } while (count); + + if (next_to_use != head) { + /* wmb */ + wmb(); + head_desc->flags = head_flags; + rx_ring->next_to_use = next_to_use; + } + + return !!count; +} + +static void nbl_unmap_and_free_tx_resource(struct nbl_res_tx_ring *ring, + struct nbl_tx_buffer *tx_buffer, + bool free, bool in_napi) +{ + struct device *dma_dev = NBL_RING_TO_DMA_DEV(ring); + + if (tx_buffer->skb) { + if (likely(!nbl_res_txrx_is_xdp_ring(ring))) { + if (likely(free)) { + if (in_napi) + napi_consume_skb(tx_buffer->skb, NBL_TX_POLL_WEIGHT); + else + 
dev_kfree_skb_any(tx_buffer->skb); + } + } else { + if (likely(free)) + page_frag_free(tx_buffer->raw_buff); + } + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(dma_dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (tx_buffer->page && dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(dma_dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_single(dma_dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + kfree(tx_buffer->tls_pkthdr); + tx_buffer->tls_pkthdr = NULL; + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + tx_buffer->page = 0; + dma_unmap_len_set(tx_buffer, len, 0); +} + +static void nbl_free_tx_ring_bufs(struct nbl_res_tx_ring *tx_ring) +{ + struct nbl_tx_buffer *tx_buffer; + u16 i; + + i = tx_ring->next_to_clean; + tx_buffer = NBL_TX_BUF(tx_ring, i); + while (i != tx_ring->next_to_use) { + nbl_unmap_and_free_tx_resource(tx_ring, tx_buffer, true, false); + i++; + tx_buffer++; + if (i == tx_ring->desc_num) { + i = 0; + tx_buffer = NBL_TX_BUF(tx_ring, i); + } + } + + tx_ring->next_to_clean = 0; + tx_ring->next_to_use = 0; + tx_ring->tail_ptr = 0; + + tx_ring->used_wrap_counter = 1; + tx_ring->avail_used_flags = BIT(NBL_PACKED_DESC_F_AVAIL); + memset(tx_ring->desc, 0, tx_ring->size); +} + +static void nbl_res_txrx_stop_tx_ring(void *priv, u8 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index); + struct nbl_res_vector *vector = NULL; + + if (!nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], ring_index)) + vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + + if (vector) { + vector->started = false; + /* Flush the napi task to ensure any scheduled napi has finished, so napi will not + * access the ring memory (a dangling pointer) after vector->started is set to false. 
+ */ + napi_synchronize(&vector->napi); + } + + tx_ring->valid = false; + + nbl_free_tx_ring_bufs(tx_ring); + WRITE_ONCE(NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index), tx_ring); + + devm_kfree(dev, tx_ring->tx_bufs); + tx_ring->tx_bufs = NULL; + + dmam_free_coherent(dma_dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + tx_ring->dma = (dma_addr_t)NULL; + tx_ring->size = 0; + + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Stop tx ring %d", ring_index); +} + +static inline bool nbl_rx_cache_put(struct nbl_res_rx_ring *rx_ring, struct nbl_dma_info *dma_info) +{ + struct nbl_page_cache *cache = &rx_ring->page_cache; + u32 tail_next = (cache->tail + 1) & (NBL_MAX_CACHE_SIZE - 1); + struct nbl_rx_queue_stats *stats = &rx_ring->rx_stats; + + if (tail_next == cache->head) { + stats->rx_cache_full++; + return false; + } + + if (!dev_page_is_reusable(dma_info->page)) { + stats->rx_cache_waive++; + return false; + } + + cache->page_cache[cache->tail] = *dma_info; + cache->tail = tail_next; + + return true; +} + +static inline void nbl_page_release_dynamic(struct nbl_res_rx_ring *rx_ring, + struct nbl_dma_info *dma_info, bool recycle) +{ + if (likely(recycle)) { + if (nbl_rx_cache_put(rx_ring, dma_info)) + return; + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); + page_pool_recycle_direct(rx_ring->page_pool, dma_info->page); + } else { + dma_unmap_page_attrs(rx_ring->dma_dev, dma_info->addr, PAGE_SIZE, + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); + page_pool_put_page(rx_ring->page_pool, dma_info->page, PAGE_SIZE, true); + } +} + +static inline void nbl_put_rx_frag(struct nbl_res_rx_ring *rx_ring, + struct nbl_rx_buffer *buffer, bool recycle) +{ + if (buffer->last_in_page) + nbl_page_release_dynamic(rx_ring, buffer->di, recycle); +} + +static void nbl_free_rx_ring_bufs(struct nbl_res_rx_ring *rx_ring) +{ + struct nbl_rx_buffer *rx_buf; + u16 i; + + i = rx_ring->next_to_clean; + rx_buf = NBL_RX_BUF(rx_ring, i); + while (i != rx_ring->next_to_use) { + nbl_put_rx_frag(rx_ring, rx_buf, false); + i++; + rx_buf++; + if (i == rx_ring->desc_num) { + i = 0; + rx_buf = NBL_RX_BUF(rx_ring, i); + } + } + + for (i = rx_ring->page_cache.head; i != rx_ring->page_cache.tail; + i = (i + 1) & (NBL_MAX_CACHE_SIZE - 1)) { + struct nbl_dma_info *dma_info = &rx_ring->page_cache.page_cache[i]; + + nbl_page_release_dynamic(rx_ring, dma_info, false); + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->tail_ptr = 0; + rx_ring->page_cache.head = 0; + rx_ring->page_cache.tail = 0; + + rx_ring->used_wrap_counter = 1; + rx_ring->avail_used_flags = BIT(NBL_PACKED_DESC_F_AVAIL); + memset(rx_ring->desc, 0, rx_ring->size); +} + +static dma_addr_t nbl_res_txrx_start_rx_ring(void *priv, u8 ring_index, bool use_napi) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + struct nbl_res_vector *vector = NULL; + struct page_pool_params pp_params = {0}; + int i, j; + + if (rx_ring->rx_bufs) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Try to setup a RX ring with buffer management array already allocated\n"); + return (dma_addr_t)NULL; + } + + if (!nbl_txrx_within_vsi(&txrx_mgt->vsi_info[NBL_VSI_XDP], 
ring_index)) + vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index); + + pp_params.order = 0; + pp_params.flags = 0; + pp_params.pool_size = rx_ring->desc_num; + pp_params.nid = dev_to_node(dev); + pp_params.dev = dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + + rx_ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_ring->page_pool)) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Allocate page_pool for RX queue %u failed\n", + rx_ring->queue_index); + return (dma_addr_t)NULL; + } + + rx_ring->di = kvzalloc_node(array_size(rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS, + sizeof(struct nbl_dma_info)), + GFP_KERNEL, dev_to_node(dev)); + if (!rx_ring->di) { + nbl_err(common, NBL_DEBUG_RESOURCE, "Allocate dma info for RX queue %u failed\n", + rx_ring->queue_index); + goto alloc_di_err; + } + + rx_ring->rx_bufs = devm_kcalloc(dev, rx_ring->desc_num, sizeof(*rx_ring->rx_bufs), + GFP_KERNEL); + if (!rx_ring->rx_bufs) + goto alloc_buffers_err; + + /* Alloc twice memory, and second half is used to back up the desc for desc checking */ + rx_ring->size = ALIGN(rx_ring->desc_num * sizeof(struct nbl_ring_desc), PAGE_SIZE); + rx_ring->desc = dmam_alloc_coherent(dma_dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL | __GFP_ZERO); + if (!rx_ring->desc) { + nbl_err(common, NBL_DEBUG_RESOURCE, + "Allocate %u bytes descriptor DMA memory for RX queue %u failed\n", + rx_ring->size, rx_ring->queue_index); + goto alloc_dma_err; + } + + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + rx_ring->tail_ptr = 0; + + j = 0; + for (i = 0; i < rx_ring->desc_num / NBL_RX_PAGE_PER_FRAGS; i++) { + struct nbl_dma_info *di = &rx_ring->di[i]; + struct nbl_rx_buffer *buffer; + int f; + + for (f = 0; f < NBL_RX_PAGE_PER_FRAGS; f++, j++) { + buffer = &rx_ring->rx_bufs[j]; + buffer->di = di; + buffer->offset = NBL_RX_PAD + f * NBL_RX_BUFSZ; + buffer->last_in_page = false; + } + + buffer->last_in_page = true; + } + + if (nbl_alloc_rx_bufs(rx_ring, rx_ring->desc_num - NBL_MAX_BATCH_DESC)) + goto alloc_rx_bufs_err; + + rx_ring->valid = true; + if (use_napi && vector) + vector->started = true; + + nbl_debug(common, NBL_DEBUG_RESOURCE, "Start rx ring %d", ring_index); + return rx_ring->dma; + +alloc_rx_bufs_err: + nbl_free_rx_ring_bufs(rx_ring); + dmam_free_coherent(dma_dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + rx_ring->dma = (dma_addr_t)NULL; +alloc_dma_err: + devm_kfree(dev, rx_ring->rx_bufs); + rx_ring->rx_bufs = NULL; +alloc_buffers_err: + kvfree(rx_ring->di); +alloc_di_err: + page_pool_destroy(rx_ring->page_pool); + rx_ring->size = 0; + return (dma_addr_t)NULL; +} + +static void nbl_res_txrx_stop_rx_ring(void *priv, u8 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct device *dev = NBL_RES_MGT_TO_DEV(res_mgt); + struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt); + struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index); + + rx_ring->valid = false; + + nbl_free_rx_ring_bufs(rx_ring); + WRITE_ONCE(NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index), rx_ring); + + devm_kfree(dev, rx_ring->rx_bufs); + kvfree(rx_ring->di); + rx_ring->rx_bufs = NULL; + + dmam_free_coherent(dma_dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + rx_ring->dma = (dma_addr_t)NULL; + rx_ring->size = 0; + + page_pool_destroy(rx_ring->page_pool); + + nbl_debug(res_mgt->common, NBL_DEBUG_RESOURCE, "Stop rx ring %d", ring_index); +} + +static inline bool nbl_ring_desc_used(struct nbl_ring_desc *ring_desc, bool used_wrap_counter) +{ + bool avail; + 
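+	/*
+	 * Completion test for the packed-ring layout (the same idea as a
+	 * virtio 1.1 packed ring): a descriptor counts as "used" once its
+	 * AVAIL and USED flag bits agree with each other and with the ring's
+	 * used_wrap_counter. Worked example, assuming used_wrap_counter == 1:
+	 *
+	 *	driver posts:     AVAIL=1 USED=0 -> avail != used      -> not used
+	 *	device completes: AVAIL=1 USED=1 -> avail == used == 1 -> used
+	 *
+	 * After the ring wraps, used_wrap_counter flips to 0 and the two bit
+	 * patterns swap roles.
+	 */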
bool used; + u16 flags; + + flags = le16_to_cpu(ring_desc->flags); + avail = !!(flags & BIT(NBL_PACKED_DESC_F_AVAIL)); + used = !!(flags & BIT(NBL_PACKED_DESC_F_USED)); + + return avail == used && used == used_wrap_counter; +} + +static inline void nbl_rep_update_tx_stats(struct net_device *netdev, struct nbl_tx_buffer *buffer) +{ + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + u16 rep_data_index = 0, rep_vsi_id; + + if (!eswitch_info || eswitch_info->mode != NBL_ESWITCH_OFFLOADS) + return; + + if (!buffer->skb) + return; + + rep_vsi_id = *(u16 *)&buffer->skb->cb[NBL_SKB_FILL_VSI_ID_OFF]; + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + + if (eswitch_info->rep_data[rep_data_index].rep_vsi_id == rep_vsi_id) { + u64_stats_update_begin(&eswitch_info->rep_data[rep_data_index].rep_syncp); + eswitch_info->rep_data[rep_data_index].tx_packets += buffer->gso_segs; + eswitch_info->rep_data[rep_data_index].tx_bytes += buffer->bytecount; + u64_stats_update_end(&eswitch_info->rep_data[rep_data_index].rep_syncp); + } +} + +static struct net_device *nbl_get_rep_netdev(struct nbl_resource_mgt *res_mgt, u16 rep_vsi_id) +{ + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + u16 rep_data_index = 0; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, rep_vsi_id); + if (rep_data_index >= eswitch_info->num_vfs) + return NULL; + if (eswitch_info->rep_data[rep_data_index].rep_vsi_id == rep_vsi_id) + return eswitch_info->rep_data[rep_data_index].netdev; + nbl_info(common, NBL_DEBUG_RESOURCE, "failed to get rep netdev, rep_vsi_id:%d\n", rep_vsi_id); + return NULL; +} + +static inline void nbl_rep_update_rx_stats(struct net_device *netdev, + struct sk_buff *skb, u16 sport_id) +{ + struct nbl_resource_mgt *res_mgt = + NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev)); + struct nbl_eswitch_info *eswitch_info = NBL_RES_MGT_TO_ESWITCH_INFO(res_mgt); + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct net_device *rep_netdev = NULL; + u16 rep_data_index = 0; + + if (!eswitch_info || eswitch_info->mode != NBL_ESWITCH_OFFLOADS) + return; + + rep_data_index = nbl_res_get_rep_idx(eswitch_info, sport_id); + if (rep_data_index >= eswitch_info->num_vfs) + return; + + rep_netdev = nbl_get_rep_netdev(res_mgt, sport_id); + if (!rep_netdev) { + /* this is a common case while switchdev mode is being enabled */ + nbl_info(common, NBL_DEBUG_RESOURCE, + "rep update netdev failed, 
sport_id:%d\n", sport_id); + return; + } + skb->dev = rep_netdev; + + if (eswitch_info->rep_data[rep_data_index].rep_vsi_id == sport_id) { + u64_stats_update_begin(&eswitch_info->rep_data[rep_data_index].rep_syncp); + eswitch_info->rep_data[rep_data_index].rx_packets += 1; + eswitch_info->rep_data[rep_data_index].rx_bytes += skb->len; + u64_stats_update_end(&eswitch_info->rep_data[rep_data_index].rep_syncp); + } +} + +static int nbl_res_txrx_clean_tx_irq(struct nbl_res_tx_ring *tx_ring) +{ + struct nbl_tx_buffer *tx_buffer; + struct nbl_ring_desc *tx_desc; + unsigned int i = tx_ring->next_to_clean; + unsigned int total_tx_pkts = 0; + unsigned int total_tx_bytes = 0; + unsigned int total_tx_descs = 0; + int count = 64; + + tx_buffer = NBL_TX_BUF(tx_ring, i); + tx_desc = NBL_TX_DESC(tx_ring, i); + i -= tx_ring->desc_num; + + do { + struct nbl_ring_desc *end_desc = tx_buffer->next_to_watch; + + if (!end_desc) + break; + + /* smp_rmb */ + smp_rmb(); + + if (!nbl_ring_desc_used(tx_desc, tx_ring->used_wrap_counter)) + break; + + total_tx_pkts += tx_buffer->gso_segs; + total_tx_bytes += tx_buffer->bytecount; + + if (nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_CTRL], tx_ring->queue_index)) + nbl_rep_update_tx_stats(tx_ring->netdev, tx_buffer); + + while (true) { + total_tx_descs++; + nbl_unmap_and_free_tx_resource(tx_ring, tx_buffer, true, true); + if (tx_desc == end_desc) + break; + i++; + tx_buffer++; + tx_desc++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buffer = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->used_wrap_counter ^= 1; + } + } + + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buffer = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->used_wrap_counter ^= 1; + } + + prefetch(tx_desc); + + } while (--count); + + i += tx_ring->desc_num; + + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_tx_bytes; + tx_ring->stats.packets += total_tx_pkts; + tx_ring->stats.descs += total_tx_descs; + u64_stats_update_end(&tx_ring->syncp); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_tx_pkts && netif_carrier_ok(tx_ring->netdev) && + nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_DATA], tx_ring->queue_index) && + (nbl_unused_tx_desc_count(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "wake queue %u\n", tx_ring->queue_index); + } + } + + return count; +} + +static void nbl_rx_csum(struct nbl_res_rx_ring *rx_ring, struct sk_buff *skb, + struct nbl_rx_extend_head *hdr) +{ + skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + + /* if user disable RX Checksum Offload, then stack verify the rx checksum */ + if (!(rx_ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (!hdr->checksum_status) + return; + + if (hdr->error_code) { + rx_ring->rx_stats.rx_csum_errors++; + return; + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + rx_ring->rx_stats.rx_csum_packets++; +} + +static inline void nbl_add_rx_frag(struct nbl_rx_buffer *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ + page_ref_inc(rx_buffer->di->page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->di->page, + rx_buffer->offset, size, NBL_RX_BUFSZ); +} + +#ifdef CONFIG_TLS_DEVICE +static void nbl_resync_update_sn(struct net_device *netdev, struct sk_buff *skb, u16 offset) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + struct net *net = dev_net(netdev); + struct sock *sk; + struct tls_context *tls_ctx; + struct nbl_ktls_offload_context_rx **ctx; + struct nbl_ktls_offload_context_rx *priv; + struct iphdr *iph; + struct tcphdr *th; + int depth = 0; + __be32 seq; + + skb->mac_len = ETH_HLEN; + (void)__vlan_get_protocol(skb, eth->h_proto, &depth); + iph = (struct iphdr *)(skb->data + depth); + + if (iph->version == 4) { + depth += iph->ihl * 4; + th = (void *)iph + iph->ihl * 4; + + sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, netdev->ifindex); + } else { + struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; + + depth += sizeof(struct ipv6hdr); + th = (void *)ipv6h + sizeof(struct ipv6hdr); + + sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, + &ipv6h->saddr, th->source, + &ipv6h->daddr, ntohs(th->dest), + netdev->ifindex, 0); + } + + depth += th->doff * 4; + if (unlikely(!sk)) + return; + + if (unlikely(sk->sk_state == TCP_TIME_WAIT)) + goto unref; + seq = th->seq; + seq = htonl(ntohl(seq) + offset - depth - 1); + tls_offload_rx_resync_request(sk, seq); + tls_ctx = tls_get_ctx(sk); + ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + priv = *ctx; + priv->tcp_seq = ntohl(th->seq); + +unref: + sock_gen_put(sk); +} + +static int nbl_ktls_rx_handle_skb(struct nbl_res_rx_ring *rx_ring, struct sk_buff *skb, + struct nbl_rx_extend_head *hdr) +{ + if (!hdr->l4s_hdl_ind) + return 0; + + if (hdr->l4s_dec_ind) { + skb->decrypted = 1; + rx_ring->rx_stats.tls_decrypted_packets++; + } else if (hdr->l4s_resync_ind) { + rx_ring->rx_stats.tls_resync_req_num++; + nbl_resync_update_sn(rx_ring->netdev, skb, hdr->l4s_tcp_offset); + dev_dbg(NBL_RING_TO_DEV(rx_ring), "ingress ktls %u resync sn\n", hdr->l4s_sid); + } else if (!hdr->l4s_check_ind) { + dev_dbg(NBL_RING_TO_DEV(rx_ring), "ingress ktls %u auth fail\n", hdr->l4s_sid); + } else { + dev_err(NBL_RING_TO_DEV(rx_ring), "ingress ktls %u unknown error\n", hdr->l4s_sid); + } + + return 0; +} + +#else +static int nbl_ktls_rx_handle_skb(struct nbl_res_rx_ring *rx_ring, struct sk_buff *skb, + struct nbl_rx_extend_head *hdr) +{ + return 0; +} +#endif + +static inline int nbl_rx_vlan_pop(struct nbl_res_rx_ring *rx_ring, struct sk_buff *skb) +{ + struct vlan_ethhdr 
*veth = (struct vlan_ethhdr *)skb->data;
+
+	if (!rx_ring->vlan_proto)
+		return 0;
+
+	if (rx_ring->vlan_proto != ntohs(veth->h_vlan_proto) ||
+	    (rx_ring->vlan_tci & VLAN_VID_MASK) != (ntohs(veth->h_vlan_TCI) & VLAN_VID_MASK))
+		return 1;
+
+	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+	__skb_pull(skb, VLAN_HLEN);
+
+	return 0;
+}
+
+static void nbl_txrx_register_vsi_ring(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+
+	txrx_mgt->vsi_info[vsi_index].ring_offset = ring_offset;
+	txrx_mgt->vsi_info[vsi_index].ring_num = ring_num;
+}
+
+static void nbl_res_txrx_cfg_txrx_vlan(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_txrx_vsi_info *vsi_info = &txrx_mgt->vsi_info[vsi_index];
+	struct nbl_res_tx_ring *tx_ring;
+	struct nbl_res_rx_ring *rx_ring;
+	u16 i;
+
+	if (!txrx_mgt->tx_rings || !txrx_mgt->rx_rings)
+		return;
+
+	for (i = vsi_info->ring_offset; i < vsi_info->ring_offset + vsi_info->ring_num; i++) {
+		tx_ring = txrx_mgt->tx_rings[i];
+		rx_ring = txrx_mgt->rx_rings[i];
+
+		if (tx_ring) {
+			tx_ring->vlan_tci = vlan_tci;
+			tx_ring->vlan_proto = vlan_proto;
+		}
+
+		if (rx_ring) {
+			rx_ring->vlan_tci = vlan_tci;
+			rx_ring->vlan_proto = vlan_proto;
+		}
+	}
+}
+
+/* The current version supports merging multiple descriptors for one packet. */
+static struct sk_buff *nbl_construct_skb(struct nbl_res_rx_ring *rx_ring, struct napi_struct *napi,
+					 struct nbl_rx_buffer *rx_buf, unsigned int size)
+{
+	struct sk_buff *skb;
+	char *p, *buf;
+	int tailroom, shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	unsigned int truesize = NBL_RX_BUFSZ;
+	unsigned int headlen;
+
+	/* p points to the DMA buffer start, buf points to the whole buffer start */
+	p = page_address(rx_buf->di->page) + rx_buf->offset;
+	buf = p - NBL_RX_PAD;
+
+	/* p points to the packet start */
+	p += NBL_BUFFER_HDR_LEN;
+	tailroom = truesize - size - NBL_RX_PAD;
+	size -= NBL_BUFFER_HDR_LEN;
+
+	if (size > NBL_RX_HDR_SIZE && tailroom >= shinfo_size) {
+		skb = build_skb(buf, truesize);
+		if (unlikely(!skb))
+			return NULL;
+
+		page_ref_inc(rx_buf->di->page);
+		skb_reserve(skb, p - buf);
+		skb_put(skb, size);
+		goto ok;
+	}
+
+	skb = napi_alloc_skb(napi, NBL_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	headlen = size;
+	if (headlen > NBL_RX_HDR_SIZE)
+		headlen = eth_get_headlen(skb->dev, p, NBL_RX_HDR_SIZE);
+	memcpy(__skb_put(skb, headlen), p, ALIGN(headlen, sizeof(long)));
+	size -= headlen;
+	if (size) {
+		page_ref_inc(rx_buf->di->page);
+		skb_add_rx_frag(skb, 0, rx_buf->di->page,
+				rx_buf->offset + NBL_BUFFER_HDR_LEN + headlen,
+				size, truesize);
+	}
+ok:
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	return skb;
+}
+
+static inline struct nbl_rx_buffer *nbl_get_rx_buf(struct nbl_res_rx_ring *rx_ring)
+{
+	struct nbl_rx_buffer *rx_buf;
+
+	rx_buf = NBL_RX_BUF(rx_ring, rx_ring->next_to_clean);
+	prefetchw(rx_buf->di->page);
+
+	dma_sync_single_range_for_cpu(rx_ring->dma_dev, rx_buf->di->addr, rx_buf->offset,
+				      rx_ring->buf_len, DMA_FROM_DEVICE);
+
+	return rx_buf;
+}
+
+static inline void nbl_put_rx_buf(struct nbl_res_rx_ring *rx_ring, struct nbl_rx_buffer *rx_buf)
+{
+	u16 ntc = rx_ring->next_to_clean + 1;
+
+	/* if at the end of the ring, reset ntc and flip used wrap bit */
+	if (unlikely(ntc >=
rx_ring->desc_num)) { + ntc = 0; + rx_ring->used_wrap_counter ^= 1; + } + + rx_ring->next_to_clean = ntc; + prefetch(NBL_RX_DESC(rx_ring, ntc)); + + nbl_put_rx_frag(rx_ring, rx_buf, true); +} + +static inline int nbl_maybe_stop_tx(struct nbl_res_tx_ring *tx_ring, unsigned int size) +{ + if (likely(nbl_unused_tx_desc_count(tx_ring) >= size)) + return 0; + + if (!nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_DATA], tx_ring->queue_index)) + return -EBUSY; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, stop queue %u\n", + nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* smp_mb */ + smp_mb(); + + if (likely(nbl_unused_tx_desc_count(tx_ring) < size)) + return -EBUSY; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "unused_desc_count:%u, size:%u, start queue %u\n", + nbl_unused_tx_desc_count(tx_ring), size, tx_ring->queue_index); + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static int +nbl_res_txrx_run_xdp(struct nbl_res_rx_ring *rx_ring, struct nbl_ring_desc *rx_desc, + struct nbl_rx_buffer *rx_buf, struct nbl_xdp_output *xdp_output) +{ + return NBL_XDP_PASS; +} + +static int nbl_res_txrx_register_xdp_rxq(void *priv, u8 ring_index) +{ + return 0; +} + +static void nbl_res_txrx_unregister_xdp_rxq(void *priv, u8 ring_index) +{ + /* nothing need to do */ +} + +static int nbl_res_txrx_clean_rx_irq(struct nbl_res_rx_ring *rx_ring, + struct napi_struct *napi, + int budget) +{ + struct nbl_xdp_output xdp_output; + struct nbl_ring_desc *rx_desc; + struct nbl_rx_buffer *rx_buf; + struct nbl_rx_extend_head *hdr; + struct sk_buff *skb = NULL; + unsigned int total_rx_pkts = 0; + unsigned int total_rx_bytes = 0; + unsigned int xdp_tx_pkts = 0; + unsigned int xdp_redirect_pkts = 0; + unsigned int xdp_oversize = 0; + unsigned int size; + int nbl_act; + u32 rx_multicast_packets = 0; + u32 rx_unicast_packets = 0; + int xdp_act_final = 0; + u16 desc_count = 0; + u16 num_buffers = 0; + u16 cleaned_count = nbl_unused_rx_desc_count(rx_ring); + u16 sport_id, sport_type; + bool failure = 0; + bool drop = 0; + + while (likely(total_rx_pkts < budget)) { + rx_desc = NBL_RX_DESC(rx_ring, rx_ring->next_to_clean); + if (!nbl_ring_desc_used(rx_desc, rx_ring->used_wrap_counter)) + break; + + // nbl_trace(clean_rx_irq, rx_ring, rx_desc); + + dma_rmb(); + size = le32_to_cpu(rx_desc->len); + rx_buf = nbl_get_rx_buf(rx_ring); + + if (READ_ONCE(rx_ring->xdp_prog)) { + memset(&xdp_output, 0, sizeof(xdp_output)); + nbl_act = nbl_res_txrx_run_xdp(rx_ring, rx_desc, rx_buf, &xdp_output); + if (nbl_act) { + cleaned_count += xdp_output.desc_done_num; + if (unlikely(xdp_output.multicast)) + rx_multicast_packets++; + else + rx_unicast_packets++; + + if (xdp_output.xdp_tx_act) { + xdp_tx_pkts++; + xdp_act_final |= NBL_XDP_TX; + } else if (xdp_output.xdp_redirect_act) { + xdp_redirect_pkts++; + xdp_act_final |= NBL_XDP_REDIRECT; + } + + if (xdp_output.xdp_oversize) + xdp_oversize++; + + total_rx_pkts++; + total_rx_bytes += xdp_output.bytes; + + continue; + } + } + + desc_count++; + + if (skb) { + nbl_add_rx_frag(rx_buf, skb, size); + } else { + hdr = page_address(rx_buf->di->page) + rx_buf->offset; + net_prefetch(hdr); + skb = nbl_construct_skb(rx_ring, napi, rx_buf, size); + if (unlikely(!skb)) { + rx_ring->rx_stats.rx_alloc_buf_err_cnt++; + break; + } + + num_buffers = le16_to_cpu(hdr->num_buffers); + sport_id = hdr->sport_id; + sport_type = hdr->sport; + nbl_rx_csum(rx_ring, skb, hdr); + 
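+			/* The extension header on a packet's first descriptor
+			 * carries num_buffers, the number of descriptors this
+			 * packet spans; the remaining buffers are appended as
+			 * page frags until desc_count catches up, and only then
+			 * is the completed skb handed to the stack.
+			 */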
nbl_ktls_rx_handle_skb(rx_ring, skb, hdr);
+				drop = nbl_rx_vlan_pop(rx_ring, skb);
+			}
+
+			cleaned_count++;
+			nbl_put_rx_buf(rx_ring, rx_buf);
+			if (desc_count < num_buffers)
+				continue;
+			desc_count = 0;
+
+			if (unlikely(eth_skb_pad(skb))) {
+				skb = NULL;
+				drop = 0;
+				continue;
+			}
+
+			if (unlikely(drop)) {
+				dev_kfree_skb_any(skb);
+				skb = NULL;
+				drop = 0;
+				continue;
+			}
+
+			skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+			if (unlikely(skb->pkt_type == PACKET_BROADCAST ||
+				     skb->pkt_type == PACKET_MULTICAST))
+				rx_multicast_packets++;
+			else
+				rx_unicast_packets++;
+
+			total_rx_bytes += skb->len;
+			if (sport_type)
+				nbl_rep_update_rx_stats(rx_ring->netdev, skb, sport_id);
+
+			// nbl_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
+			napi_gro_receive(napi, skb);
+			skb = NULL;
+			drop = 0;
+			total_rx_pkts++;
+	}
+
+	if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1)))
+		failure = nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1)));
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_pkts;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.rx_multicast_packets += rx_multicast_packets;
+	rx_ring->rx_stats.rx_unicast_packets += rx_unicast_packets;
+	u64_stats_update_end(&rx_ring->syncp);
+
+	return failure ? budget : total_rx_pkts;
+}
+
+static int nbl_res_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct nbl_res_vector *vector = container_of(napi, struct nbl_res_vector, napi);
+	struct nbl_res_tx_ring *tx_ring;
+	struct nbl_res_tx_ring *xdp_ring;
+	struct nbl_res_rx_ring *rx_ring;
+	int complete = 1, cleaned = 0, tx_done = 1, xdp_done = 1;
+
+	tx_ring = vector->tx_ring;
+	rx_ring = vector->rx_ring;
+	xdp_ring = vector->xdp_ring;
+
+	if (vector->started) {
+		tx_done = nbl_res_txrx_clean_tx_irq(tx_ring);
+		if (xdp_ring && xdp_ring->valid)
+			xdp_done = nbl_res_txrx_clean_tx_irq(xdp_ring);
+
+		cleaned = nbl_res_txrx_clean_rx_irq(rx_ring, napi, budget);
+	}
+
+	if (!tx_done || !xdp_done)
+		complete = 0;
+
+	if (cleaned >= budget)
+		complete = 0;
+
+	if (!complete)
+		return budget;
+
+	if (!napi_complete_done(napi, cleaned))
+		return min_t(int, cleaned, budget - 1);
+
+	/* unmask the irq by writing the register directly (passthrough) for performance */
+	if (vector->net_msix_mask_en)
+		writel(vector->irq_data, vector->irq_enable_base);
+
+	return min_t(int, cleaned, budget - 1);
+}
+
+static inline unsigned int nbl_txd_use_count(unsigned int size)
+{
+	/* TODO: how to compute tx desc needed more efficiently */
+	return DIV_ROUND_UP(size, NBL_TXD_DATALEN_MAX);
+}
+
+static unsigned int nbl_xmit_desc_count(struct sk_buff *skb)
+{
+	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int size;
+	unsigned int count;
+
+	/* We need: 1 descriptor per page * PAGE_SIZE/NBL_MAX_DATA_PER_TX_DESC,
+	 * + 1 desc for skb_headlen/NBL_MAX_DATA_PER_TX_DESC,
+	 * + 2 desc gap to keep tail from touching head,
+	 * otherwise try next time.
+ */ + size = skb_headlen(skb); + count = 2; + for (;;) { + count += nbl_txd_use_count(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +/* set up TSO(TCP Segmentation Offload) */ +static int nbl_tx_tso(struct nbl_tx_buffer *first, struct nbl_tx_hdr_param *hdr_param) +{ + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u8 l4_start; + u32 payload_len; + u8 header_len = 0; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 1; + + if (!skb_is_gso(skb)) + return 1; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize IP header fields*/ + if (ip.v4->version == IP_VERSION_V4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + + /* length of (MAC + IP) header */ + l4_start = (u8)(l4.hdr - skb->data); + + /* l4 packet length */ + payload_len = skb->len - l4_start; + + /* remove l4 packet length from L4 pseudo-header checksum */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(payload_len)); + /* compute length of UDP segmentation header */ + header_len = (u8)sizeof(l4.udp) + l4_start; + } else { + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(payload_len)); + /* compute length of TCP segmentation header */ + header_len = (u8)(l4.tcp->doff * 4 + l4_start); + } + + hdr_param->tso = 1; + hdr_param->mss = skb_shinfo(skb)->gso_size; + hdr_param->total_hlen = header_len; + + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * header_len; + first->tx_flags = NBL_TX_FLAGS_TSO; + + return first->gso_segs; +} + +/* set up Tx checksum offload */ +static int nbl_tx_csum(struct nbl_tx_buffer *first, struct nbl_tx_hdr_param *hdr_param) +{ + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + __be16 frag_off, protocol; + u8 inner_ip_type = 0, l4_type = 0, l4_csum = 0, l4_proto = 0; + u32 l2_len = 0, l3_len = 0, l4_len = 0; + unsigned char *exthdr; + int ret; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + l2_len = ip.hdr - skb->data; + + protocol = vlan_get_protocol(skb); + + if (protocol == htons(ETH_P_IP)) { + inner_ip_type = NBL_TX_IIPT_IPV4; + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + inner_ip_type = NBL_TX_IIPT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + + if (l4.hdr != exthdr) { + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); + if (ret < 0) + return -1; + } + } else { + return -1; + } + + l3_len = l4.hdr - ip.hdr; + + switch (l4_proto) { + case IPPROTO_TCP: + l4_type = NBL_TX_L4T_TCP; + l4_len = l4.tcp->doff; + l4_csum = 1; + break; + case IPPROTO_UDP: + l4_type = NBL_TX_L4T_UDP; + l4_len = (sizeof(struct udphdr) >> 2); + l4_csum = 1; + break; + case IPPROTO_SCTP: + if (first->tx_flags & NBL_TX_FLAGS_TSO) + return -1; + l4_type = NBL_TX_L4T_RSV; + l4_len = (sizeof(struct sctphdr) >> 2); + l4_csum = 1; + break; + default: + if (first->tx_flags & NBL_TX_FLAGS_TSO) + return -2; + + /* 
unsupported L4 protocol, device cannot offload L4 checksum,
+		 * so software computes the L4 checksum
+		 */
+		skb_checksum_help(skb);
+		return 0;
+	}
+
+	hdr_param->mac_len = l2_len >> 1;
+	hdr_param->ip_len = l3_len >> 2;
+	hdr_param->l4_len = l4_len;
+	hdr_param->l4_type = l4_type;
+	hdr_param->inner_ip_type = inner_ip_type;
+	hdr_param->l3_csum_en = 0;
+	hdr_param->l4_csum_en = l4_csum;
+
+	return 1;
+}
+
+static int nbl_map_skb(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb,
+		       u16 first, u16 *desc_index)
+{
+	u16 index = *desc_index;
+	const skb_frag_t *frag;
+	unsigned int frag_num = skb_shinfo(skb)->nr_frags;
+	struct device *dma_dev = NBL_RING_TO_DMA_DEV(tx_ring);
+	struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(tx_ring, index);
+	struct nbl_ring_desc *tx_desc = NBL_TX_DESC(tx_ring, index);
+	unsigned int i;
+	unsigned int size;
+	dma_addr_t dma;
+
+	size = skb_headlen(skb);
+	dma = dma_map_single(dma_dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dma_dev, dma))
+		return -1;
+
+	tx_buffer->dma = dma;
+	tx_buffer->len = size;
+
+	tx_desc->addr = cpu_to_le64(dma);
+	tx_desc->len = size;
+	if (!first)
+		tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT);
+
+	index++;
+	tx_desc++;
+	tx_buffer++;
+	if (index == tx_ring->desc_num) {
+		index = 0;
+		tx_ring->avail_used_flags ^=
+				1 << NBL_PACKED_DESC_F_AVAIL |
+				1 << NBL_PACKED_DESC_F_USED;
+		tx_desc = NBL_TX_DESC(tx_ring, 0);
+		tx_buffer = NBL_TX_BUF(tx_ring, 0);
+	}
+
+	if (!frag_num) {
+		*desc_index = index;
+		return 0;
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+	for (i = 0; i < frag_num; i++) {
+		size = skb_frag_size(frag);
+		dma = skb_frag_dma_map(dma_dev, frag, 0, size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dma_dev, dma)) {
+			*desc_index = index;
+			return -1;
+		}
+
+		tx_buffer->dma = dma;
+		tx_buffer->len = size;
+		tx_buffer->page = 1;
+
+		tx_desc->addr = cpu_to_le64(dma);
+		tx_desc->len = size;
+		tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags | NBL_PACKED_DESC_F_NEXT);
+		index++;
+		tx_desc++;
+		tx_buffer++;
+		if (index == tx_ring->desc_num) {
+			index = 0;
+			tx_ring->avail_used_flags ^=
+					1 << NBL_PACKED_DESC_F_AVAIL |
+					1 << NBL_PACKED_DESC_F_USED;
+			tx_desc = NBL_TX_DESC(tx_ring, 0);
+			tx_buffer = NBL_TX_BUF(tx_ring, 0);
+		}
+		frag++;
+	}
+
+	*desc_index = index;
+	return 0;
+}
+
+static inline void nbl_tx_fill_tx_extend_header_bootis(union nbl_tx_extend_head *pkthdr,
+						       struct nbl_tx_hdr_param *param)
+{
+	pkthdr->bootis.tso = param->tso;
+	pkthdr->bootis.mss = param->mss;
+	pkthdr->bootis.dport_info = 0;
+	pkthdr->bootis.dport_id = param->dport_id;
+	pkthdr->bootis.dport = NBL_TX_DPORT_ETH;
+	/* 0x0: drop, 0x1: normal fwd, 0x2: rsv, 0x3: cpu set dport */
+	pkthdr->bootis.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED;
+	pkthdr->bootis.rss_lag_en = param->rss_lag_en;
+
+	pkthdr->bootis.mac_len = param->mac_len;
+	pkthdr->bootis.ip_len = param->ip_len;
+	pkthdr->bootis.inner_ip_type = param->inner_ip_type;
+	pkthdr->bootis.l3_csum_en = param->l3_csum_en;
+
+	pkthdr->bootis.l4_len = param->l4_len;
+	pkthdr->bootis.l4_type = param->l4_type;
+	pkthdr->bootis.l4_csum_en = param->l4_csum_en;
+}
+
+static inline void nbl_tx_fill_tx_extend_header_leonis(union nbl_tx_extend_head *pkthdr,
+						       struct nbl_tx_hdr_param *param)
+{
+	pkthdr->mac_len = param->mac_len;
+	pkthdr->ip_len = param->ip_len;
+	pkthdr->l4_len = param->l4_len;
+	pkthdr->l4_type = param->l4_type;
+	pkthdr->inner_ip_type = param->inner_ip_type;
+
+	pkthdr->l4s_sid = param->l4s_sid;
+	pkthdr->l4s_sync_ind = param->l4s_sync_ind;
+	pkthdr->l4s_hdl_ind = param->l4s_hdl_ind;
+	pkthdr->l4s_pbrac_mode = param->l4s_pbrac_mode;
+
+	pkthdr->mss = param->mss;
+	pkthdr->tso = param->tso;
+
+	pkthdr->fwd = param->fwd;
+	pkthdr->rss_lag_en = param->rss_lag_en;
+	pkthdr->dport = param->dport;
+	pkthdr->dport_id = param->dport_id;
+
+	pkthdr->l3_csum_en = param->l3_csum_en;
+	pkthdr->l4_csum_en = param->l4_csum_en;
+}
+
+static inline void nbl_tx_fill_tx_extend_header_virtio(union nbl_tx_extend_head *pkthdr,
+						       struct nbl_tx_hdr_param *param)
+{
+	pkthdr->bootis.tso = 0;
+	pkthdr->bootis.dport_info = 0;
+	pkthdr->bootis.dport_id = 0;
+	pkthdr->bootis.dport = 0;
+	/* 0x0: drop, 0x1: normal fwd, 0x2: rsv, 0x3: cpu set dport */
+	pkthdr->bootis.fwd = NBL_TX_FWD_TYPE_NORMAL;
+}
+
+#ifdef CONFIG_TLS_DEVICE
+static bool nbl_ktls_send_init_packet(struct nbl_resource_mgt *res_mgt,
+				      struct nbl_res_tx_ring *tx_ring,
+				      struct nbl_ktls_offload_context_tx *priv_tx)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt);
+	struct nbl_tx_buffer *first;
+	struct nbl_ring_desc *first_desc;
+	struct nbl_ktls_init_packet *init_packet;
+	struct nbl_notify_param notify_param = {0};
+	dma_addr_t hdrdma;
+	u16 avail_used_flags = tx_ring->avail_used_flags;
+	u16 head = tx_ring->next_to_use;
+	u16 i = head;
+
+	first = NBL_TX_BUF(tx_ring, head);
+	first_desc = NBL_TX_DESC(tx_ring, head);
+
+	init_packet = kzalloc(sizeof(*init_packet), GFP_KERNEL);
+	if (!init_packet)
+		return false;
+
+	init_packet->pkthdr.l4s_sid = priv_tx->index;
+	init_packet->pkthdr.l4s_sync_ind = 1;
+	init_packet->pkthdr.l4s_hdl_ind = 1;
+	init_packet->init_payload.initial = 1;
+	init_packet->init_payload.sync = 0;
+	init_packet->init_payload.sid = priv_tx->index;
+	memcpy(init_packet->init_payload.iv, priv_tx->iv, NBL_KTLS_IV_LEN);
+	memcpy(init_packet->init_payload.rec_num, priv_tx->rec_num, NBL_KTLS_REC_LEN);
+	/* The hardware logic adds 1 to iv and rec_seq before using them, so to
+	 * keep software and hardware consistent, deliver them decremented by 1.
+	 */
+	nbl_ktls_bigint_decrement(init_packet->init_payload.iv, NBL_KTLS_IV_LEN);
+	nbl_ktls_bigint_decrement(init_packet->init_payload.rec_num, NBL_KTLS_REC_LEN);
+
+	hdrdma = dma_map_single(dma_dev, init_packet, sizeof(*init_packet), DMA_TO_DEVICE);
+	if (dma_mapping_error(dma_dev, hdrdma)) {
+		kfree(init_packet);
+		return false;
+	}
+
+	first_desc->len = cpu_to_le32(sizeof(*init_packet));
+	first_desc->addr = cpu_to_le64(hdrdma);
+	first_desc->id = cpu_to_le16(head);
+	first_desc->flags = cpu_to_le16(avail_used_flags);
+
+	first->dma = hdrdma;
+	first->len = sizeof(*init_packet);
+	first->tls_pkthdr = &init_packet->pkthdr;
+	i++;
+	if (i == tx_ring->desc_num) {
+		i = 0;
+		tx_ring->avail_used_flags ^= 1 << NBL_PACKED_DESC_F_AVAIL |
+					     1 << NBL_PACKED_DESC_F_USED;
+	}
+
+	first->next_to_watch = first_desc;
+	tx_ring->next_to_use = i;
+
+	notify_param.notify_qid = tx_ring->notify_qid;
+	notify_param.tail_ptr = i;
+	phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &notify_param);
+
+	return true;
+}
+
+static enum nbl_ktls_sync_retval
+nbl_tls_resync_info_get(struct nbl_ktls_offload_context_tx *priv_tx, u32 seq,
+			int datalen, struct nbl_tx_resync_info *info)
+{
+	int remaining = 0;
+	int i = 0;
+	enum nbl_ktls_sync_retval ret = NBL_KTLS_SYNC_DONE;
+	struct tls_record_info *record;
+	struct tls_offload_context_tx *tx_ctx;
+	unsigned long flags;
+	bool ends_before;
+
+	tx_ctx = priv_tx->tx_ctx;
+
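+	/* Under tx_ctx->lock, locate the TLS record containing seq and copy
+	 * the frags covering the bytes from the record start up to seq, so
+	 * the device can replay them and rebuild the record state; a negative
+	 * 'remaining' below means the last frag overshot and gets trimmed.
+	 */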
+	spin_lock_irqsave(&tx_ctx->lock, flags);
+	record = tls_get_record(tx_ctx, seq, &info->rec_num);
+	if (!record) {
+		ret = NBL_KTLS_SYNC_FAIL;
+		goto out;
+	}
+
+	ends_before = before(seq + datalen - 1, tls_record_start_seq(record));
+
+	if (unlikely(tls_record_is_start_marker(record))) {
+		ret = ends_before ? NBL_KTLS_SYNC_SKIP_NO_DATA : NBL_KTLS_SYNC_FAIL;
+		goto out;
+	} else if (ends_before) {
+		ret = NBL_KTLS_SYNC_FAIL;
+		goto out;
+	}
+
+	info->resync_len = seq - tls_record_start_seq(record);
+	remaining = info->resync_len;
+
+	while (remaining > 0) {
+		skb_frag_t *frag = &record->frags[i];
+
+		remaining -= skb_frag_size(frag);
+		info->frags[i++] = *frag;
+	}
+
+	if (remaining < 0)
+		skb_frag_size_add(&info->frags[i - 1], remaining);
+
+	info->nr_frags = i;
+out:
+	spin_unlock_irqrestore(&tx_ctx->lock, flags);
+	return ret;
+}
+
+static bool nbl_ktls_send_resync_one(struct nbl_resource_mgt *res_mgt,
+				     struct nbl_res_tx_ring *tx_ring,
+				     struct nbl_ktls_sync_packet *sync_packet,
+				     struct nbl_tx_resync_info *info)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt);
+	struct nbl_tx_buffer *tx_buffer;
+	struct nbl_ring_desc *tx_desc;
+	struct nbl_notify_param notify_param = {0};
+	dma_addr_t hdrdma;
+	u16 head = tx_ring->next_to_use;
+	u32 red_off = 0;
+	int len, k;
+
+	tx_buffer = NBL_TX_BUF(tx_ring, head);
+	tx_desc = NBL_TX_DESC(tx_ring, head);
+
+	dev_dbg(NBL_RING_TO_DEV(tx_ring), "send one resync packet.\n");
+	for (k = 0; k < info->nr_frags; k++) {
+		skb_frag_t *f = &info->frags[k];
+		u8 *vaddr = kmap_local_page(skb_frag_page(f));
+		u32 f_off = skb_frag_off(f);
+		u32 fsz = skb_frag_size(f);
+
+		memcpy(sync_packet->sync_payload.redata + red_off, vaddr + f_off, fsz);
+		kunmap_local(vaddr);
+		red_off += fsz;
+	}
+
+	len = info->resync_len + NBL_KTLS_SYNC_PKT_LEN;
+
+	hdrdma = dma_map_single(dma_dev, sync_packet, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dma_dev, hdrdma)) {
+		kfree(sync_packet);
+		return false;
+	}
+
+	tx_desc->addr = cpu_to_le64(hdrdma);
+	tx_desc->len = cpu_to_le32(len);
+	tx_desc->id = cpu_to_le16(head);
+	tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags);
+
+	tx_buffer->dma = hdrdma;
+	tx_buffer->len = len;
+	tx_buffer->next_to_watch = tx_desc;
+	tx_buffer->tls_pkthdr = &sync_packet->pkthdr;
+
+	if (head + 1 == tx_ring->desc_num) {
+		tx_ring->next_to_use = 0;
+		tx_ring->avail_used_flags ^= 1 << NBL_PACKED_DESC_F_AVAIL |
+					     1 << NBL_PACKED_DESC_F_USED;
+	} else {
+		tx_ring->next_to_use = head + 1;
+	}
+
+	notify_param.notify_qid = tx_ring->notify_qid;
+	notify_param.tail_ptr = tx_ring->next_to_use;
+	phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &notify_param);
+
+	return true;
+}
+
+static bool nbl_ktls_send_resync_mul(struct nbl_resource_mgt *res_mgt,
+				     struct nbl_res_tx_ring *tx_ring,
+				     struct nbl_ktls_sync_packet *sync_packet,
+				     struct nbl_tx_resync_info *info)
+{
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct device *dma_dev = NBL_RES_MGT_TO_DMA_DEV(res_mgt);
+	union nbl_tx_extend_head *pkthdr;
+	struct nbl_tx_buffer *head_buffer;
+	struct nbl_ring_desc *head_desc;
+	struct nbl_tx_buffer *tx_buffer = NBL_TX_BUF(tx_ring, tx_ring->next_to_use);
+	struct nbl_ring_desc *tx_desc;
+	struct nbl_notify_param notify_param = {0};
+	dma_addr_t hdrdma;
+	dma_addr_t bufdma;
+	dma_addr_t firstdma = 0;
+	skb_frag_t *frag;
+	u16 avail_used_flags = tx_ring->avail_used_flags;
+	u16 head = tx_ring->next_to_use;
+	u16 index = head;
+	u32 total_len = 0;
+	u32
remain_len; + int last_len; + int len, k; + unsigned int fsz; + + last_len = info->resync_len % NBL_KTLS_PER_CELL_LEN + NBL_KTLS_PER_CELL_LEN; + if (last_len > NBL_KTLS_MAX_CELL_LEN) + last_len -= NBL_KTLS_PER_CELL_LEN; + + dev_dbg(NBL_RING_TO_DEV(tx_ring), "send mul resync packet.\n"); + /* Each packet in the middle is 512 bytes */ + remain_len = info->resync_len - last_len; + + head_buffer = NBL_TX_BUF(tx_ring, head); + head_desc = NBL_TX_DESC(tx_ring, head); + + hdrdma = dma_map_single(dma_dev, sync_packet, NBL_KTLS_SYNC_PKT_LEN, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + kfree(sync_packet); + goto dma_map_error; + } + + head_desc->addr = cpu_to_le64(hdrdma); + head_desc->len = cpu_to_le32(NBL_KTLS_SYNC_PKT_LEN); + head_desc->id = cpu_to_le16(head); + + head_buffer->dma = hdrdma; + head_buffer->len = NBL_KTLS_SYNC_PKT_LEN; + head_buffer->tls_pkthdr = &sync_packet->pkthdr; + + for (k = 0; k < info->nr_frags; k++) { + frag = &info->frags[k]; + fsz = skb_frag_size(frag); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "send frag %d len %u.\n", k, fsz); + bufdma = skb_frag_dma_map(dma_dev, frag, 0, fsz, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, bufdma)) { + index++; + goto dma_map_error; + } + firstdma = bufdma; + total_len = fsz; + while (fsz) { + index++; + if (index == tx_ring->desc_num) { + index = 0; + tx_buffer = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } else { + tx_buffer = NBL_TX_BUF(tx_ring, index); + tx_desc = NBL_TX_DESC(tx_ring, index); + } + + len = remain_len % NBL_KTLS_PER_CELL_LEN; + len = (len) ? (len) : min_t(unsigned int, fsz, NBL_KTLS_PER_CELL_LEN); + if (fsz < len || remain_len == 0) + len = fsz; + + tx_desc->addr = cpu_to_le64(bufdma); + tx_desc->len = cpu_to_le32(len); + tx_desc->id = cpu_to_le16(head); + tx_desc->flags = cpu_to_le16(tx_ring->avail_used_flags); + dev_dbg(NBL_RING_TO_DEV(tx_ring), + "send %u packet len %d remain_len %u.\n", head, len, remain_len); + + head_buffer->next_to_watch = tx_desc; + + bufdma += len; + fsz -= len; + if (remain_len == 0) { + last_len -= len; + if (last_len > 0) + tx_desc->flags = cpu_to_le16(le16_to_cpu(tx_desc->flags) | + NBL_PACKED_DESC_F_NEXT); + continue; + } + + remain_len -= len; + if (remain_len % NBL_KTLS_PER_CELL_LEN) { + tx_desc->flags = cpu_to_le16(le16_to_cpu(tx_desc->flags) | + NBL_PACKED_DESC_F_NEXT); + continue; + } + + head_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT); + + index++; + if (index == tx_ring->desc_num) { + index = 0; + head = 0; + head_buffer = NBL_TX_BUF(tx_ring, 0); + head_desc = NBL_TX_DESC(tx_ring, 0); + tx_ring->avail_used_flags ^= + 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } else { + head = index; + head_buffer = NBL_TX_BUF(tx_ring, head); + head_desc = NBL_TX_DESC(tx_ring, head); + } + avail_used_flags = tx_ring->avail_used_flags; + + pkthdr = kzalloc(sizeof(*pkthdr), GFP_KERNEL); + if (!pkthdr) + goto dma_map_error; + + pkthdr->l4s_sid = sync_packet->pkthdr.l4s_sid; + pkthdr->l4s_redun_ind = 1; + pkthdr->l4s_hdl_ind = 1; + hdrdma = dma_map_single(dma_dev, pkthdr, + sizeof(union nbl_tx_extend_head), DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + kfree(pkthdr); + goto dma_map_error; + } + + head_desc->addr = cpu_to_le64(hdrdma); + head_desc->len = cpu_to_le32(sizeof(union nbl_tx_extend_head)); + head_desc->id = cpu_to_le16(head); + + head_buffer->dma = hdrdma; + head_buffer->len = sizeof(union 
nbl_tx_extend_head);
+			head_buffer->tls_pkthdr = pkthdr;
+		}
+		tx_buffer->dma = firstdma;
+		tx_buffer->len = total_len;
+	}
+
+	/* wmb: publish the data descriptors before exposing the head desc */
+	wmb();
+
+	head_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT);
+
+	if (index + 1 == tx_ring->desc_num) {
+		tx_ring->next_to_use = 0;
+		tx_ring->avail_used_flags ^= 1 << NBL_PACKED_DESC_F_AVAIL |
+					     1 << NBL_PACKED_DESC_F_USED;
+	} else {
+		tx_ring->next_to_use = index + 1;
+	}
+
+	notify_param.notify_qid = tx_ring->notify_qid;
+	notify_param.tail_ptr = tx_ring->next_to_use;
+	phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &notify_param);
+
+	return true;
+
+dma_map_error:
+	while (index != tx_ring->next_to_use) {
+		if (unlikely(!index))
+			index = tx_ring->desc_num;
+		index--;
+		nbl_unmap_and_free_tx_resource(tx_ring, NBL_TX_BUF(tx_ring, index), false, false);
+	}
+	tx_ring->avail_used_flags = avail_used_flags;
+
+	return false;
+}
+
+/* Set the maximum packet length to 768 bytes; it occurs only in the first or
+ * last packets. The middle packets are all 512 bytes long because the hardware
+ * cell is 512 bytes. If pkt_len > 768, pkt_len -= 512, to make sure the packet
+ * stays greater than 256 bytes.
+ */
+static bool nbl_ktls_send_resync_packet(struct nbl_resource_mgt *res_mgt,
+					struct nbl_res_tx_ring *tx_ring,
+					struct nbl_ktls_offload_context_tx *priv_tx,
+					struct nbl_tx_resync_info *info)
+{
+	struct nbl_ktls_sync_packet *sync_packet;
+	__be64 rec_num;
+
+	sync_packet = kzalloc(sizeof(*sync_packet), GFP_KERNEL);
+	if (!sync_packet)
+		return false;
+
+	sync_packet->pkthdr.l4s_sid = priv_tx->index;
+	sync_packet->pkthdr.l4s_redun_ind = 1;
+	sync_packet->pkthdr.l4s_redun_head_ind = 1;
+	sync_packet->pkthdr.l4s_hdl_ind = 1;
+	sync_packet->sync_payload.sync = 1;
+	sync_packet->sync_payload.sid = priv_tx->index;
+
+	if (info->resync_len == 0)
+		info->rec_num = info->rec_num - 1;
+	rec_num = cpu_to_be64(info->rec_num);
+	memcpy(sync_packet->sync_payload.rec_num, &rec_num, NBL_KTLS_REC_LEN);
+
+	if (info->resync_len <= NBL_KTLS_MAX_CELL_LEN) {
+		sync_packet->sync_payload.redlen = htons(info->resync_len);
+		return nbl_ktls_send_resync_one(res_mgt, tx_ring, sync_packet, info);
+	}
+
+	sync_packet->sync_payload.redlen = htons(NBL_KTLS_PER_CELL_LEN);
+	return nbl_ktls_send_resync_mul(res_mgt, tx_ring, sync_packet, info);
+}
+
+/* Handle out-of-order packets */
+static enum nbl_ktls_sync_retval
+nbl_ktls_tx_handle_ooo(struct nbl_resource_mgt *res_mgt, struct nbl_res_tx_ring *tx_ring,
+		       struct nbl_ktls_offload_context_tx *priv_tx,
+		       u32 tcp_seq, int datalen)
+{
+	enum nbl_ktls_sync_retval ret;
+	struct nbl_tx_resync_info resync_info = {0};
+
+	ret = nbl_tls_resync_info_get(priv_tx, tcp_seq, datalen, &resync_info);
+	if (unlikely(ret != NBL_KTLS_SYNC_DONE))
+		return ret;
+
+	dev_dbg(NBL_RING_TO_DEV(tx_ring), "rec_num %llu, resync_len %u, nr_frags %u.\n",
+		resync_info.rec_num, resync_info.resync_len, resync_info.nr_frags);
+	if (unlikely(!nbl_ktls_send_resync_packet(res_mgt, tx_ring, priv_tx, &resync_info)))
+		return NBL_KTLS_SYNC_FAIL;
+
+	return NBL_KTLS_SYNC_DONE;
+}
+
+static bool nbl_ktls_tx_offload_handle(struct nbl_resource_mgt *res_mgt,
+				       struct nbl_res_tx_ring *tx_ring,
+				       struct sk_buff *skb,
+				       struct nbl_tx_hdr_param *accel_state)
+{
+	struct net_device *netdev = tx_ring->netdev;
+	struct tls_context *tls_ctx;
+	struct nbl_ktls_offload_context_tx **ctx;
+	struct nbl_ktls_offload_context_tx *priv_tx;
+	enum nbl_ktls_sync_retval ret;
+	u32 tcp_seq = 0;
+	int datalen = 0;
+
+	datalen = skb->len -
(skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (!datalen) + return 0; + + tls_ctx = tls_get_ctx(skb->sk); + if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) + goto err_out; + + ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + priv_tx = *ctx; + /* config data to hardware */ + if (priv_tx->ctx_post_pending) { + priv_tx->ctx_post_pending = false; + if (!nbl_ktls_send_init_packet(res_mgt, tx_ring, priv_tx)) + goto err_out; + } + + tcp_seq = ntohl(tcp_hdr(skb)->seq); + dev_dbg(NBL_RING_TO_DEV(tx_ring), "ktls tx tcp_seq %u.\n", tcp_seq); + if (unlikely(priv_tx->expected_tcp != tcp_seq)) { + dev_dbg(NBL_RING_TO_DEV(tx_ring), "ktls tx tcp_seq %u, but expected_tcp %u.\n", + tcp_seq, priv_tx->expected_tcp); + ret = nbl_ktls_tx_handle_ooo(res_mgt, tx_ring, priv_tx, tcp_seq, datalen); + tx_ring->tx_stats.tls_ooo_packets++; + switch (ret) { + case NBL_KTLS_SYNC_DONE: + break; + case NBL_KTLS_SYNC_SKIP_NO_DATA: + if (likely(!skb->decrypted)) + goto out; + WARN_ON_ONCE(1); + goto err_out; + case NBL_KTLS_SYNC_FAIL: + goto err_out; + } + } + + priv_tx->expected_tcp = tcp_seq + datalen; + accel_state->l4s_sid = priv_tx->index; + accel_state->l4s_pbrac_mode = 0; + accel_state->l4s_hdl_ind = 1; + + tx_ring->tx_stats.tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; + tx_ring->tx_stats.tls_encrypted_bytes += datalen; + +out: + return 0; + +err_out: + dev_kfree_skb_any(skb); + return 1; +} +#else +static bool nbl_ktls_tx_offload_handle(struct nbl_resource_mgt *res_mgt, + struct nbl_res_tx_ring *tx_ring, + struct sk_buff *skb, + struct nbl_tx_hdr_param *accel_state) +{ + return true; +} +#endif + +static bool nbl_tx_map_need_broadcast_check(struct sk_buff *skb) +{ + __be16 protocol; + + protocol = vlan_get_protocol(skb); + + if (protocol == htons(ETH_P_ARP)) { + return true; + } else if (protocol == htons(ETH_P_IPV6)) { + if (pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)) && + ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1); + + if (m->icmph.icmp6_code == 0 && (m->icmph.icmp6_type == + NDISC_NEIGHBOUR_SOLICITATION || + m->icmph.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) { + return true; + } + } + } + return false; +} + +static bool nbl_skb_is_lacp_or_lldp(struct sk_buff *skb) +{ + __be16 protocol; + + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_SLOW) || protocol == htons(ETH_P_LLDP)) + return true; + + return false; +} + +static int nbl_tx_map(struct nbl_res_tx_ring *tx_ring, struct sk_buff *skb, + struct nbl_tx_hdr_param *hdr_param) +{ + struct device *dma_dev = NBL_RING_TO_DMA_DEV(tx_ring); + struct nbl_tx_buffer *first; + struct nbl_ring_desc *first_desc; + struct nbl_ring_desc *tx_desc; + union nbl_tx_extend_head *pkthdr; + dma_addr_t hdrdma; + int tso, csum; + u16 desc_index = tx_ring->next_to_use; + u16 head = desc_index; + u16 avail_used_flags = tx_ring->avail_used_flags; + u32 pkthdr_len; + bool can_push; + + first_desc = NBL_TX_DESC(tx_ring, desc_index); + first = NBL_TX_BUF(tx_ring, desc_index); + first->gso_segs = 1; + first->bytecount = skb->len; + first->tx_flags = 0; + first->skb = skb; + skb_tx_timestamp(skb); + + can_push = !skb_header_cloned(skb) && skb_headroom(skb) >= sizeof(*pkthdr); + + if (can_push) + pkthdr = (union nbl_tx_extend_head *)(skb->data - sizeof(*pkthdr)); + else + pkthdr = (union nbl_tx_extend_head *)(skb->cb); + + tso = nbl_tx_tso(first, hdr_param); + if (tso < 0) { + netdev_err(tx_ring->netdev, "tso ret:%d\n", tso); + goto out_drop; + } + + 
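+	/* nbl_tx_tso() returns the number of GSO segments (1 for non-TSO
+	 * frames) and nbl_tx_csum() returns 1 when the device is asked to
+	 * insert the L4 checksum; both fill hdr_param, which the
+	 * product-specific extend-header fillers below encode for hardware.
+	 */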
csum = nbl_tx_csum(first, hdr_param); + if (csum < 0) { + netdev_err(tx_ring->netdev, "csum ret:%d\n", csum); + goto out_drop; + } + + memset(pkthdr, 0, sizeof(*pkthdr)); + switch (tx_ring->product_type) { + case NBL_LEONIS_TYPE: + nbl_tx_fill_tx_extend_header_leonis(pkthdr, hdr_param); + break; + case NBL_BOOTIS_TYPE: + nbl_tx_fill_tx_extend_header_bootis(pkthdr, hdr_param); + break; + case NBL_VIRTIO_TYPE: + nbl_tx_fill_tx_extend_header_virtio(pkthdr, hdr_param); + break; + default: + netdev_err(tx_ring->netdev, "fill tx extend header failed, product type: %d, eth: %u.\n", + tx_ring->product_type, hdr_param->dport_id); + goto out_drop; + } + + pkthdr_len = sizeof(union nbl_tx_extend_head); + + if (can_push) { + __skb_push(skb, pkthdr_len); + if (nbl_map_skb(tx_ring, skb, 1, &desc_index)) + goto dma_map_error; + __skb_pull(skb, pkthdr_len); + } else { + hdrdma = dma_map_single(dma_dev, pkthdr, pkthdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, hdrdma)) { + tx_ring->tx_stats.tx_dma_busy++; + return NETDEV_TX_BUSY; + } + + first_desc->addr = cpu_to_le64(hdrdma); + first_desc->len = pkthdr_len; + + first->dma = hdrdma; + first->len = pkthdr_len; + + desc_index++; + if (desc_index == tx_ring->desc_num) { + desc_index = 0; + tx_ring->avail_used_flags ^= 1 << NBL_PACKED_DESC_F_AVAIL | + 1 << NBL_PACKED_DESC_F_USED; + } + if (nbl_map_skb(tx_ring, skb, 0, &desc_index)) + goto dma_map_error; + } + + /* stats */ + if (is_multicast_ether_addr(skb->data)) + tx_ring->tx_stats.tx_multicast_packets += tso; + else + tx_ring->tx_stats.tx_unicast_packets += tso; + + if (tso > 1) { + tx_ring->tx_stats.tso_packets++; + tx_ring->tx_stats.tso_bytes += skb->len; + } + tx_ring->tx_stats.tx_csum_packets += csum; + + tx_desc = NBL_TX_DESC(tx_ring, (desc_index == 0 ? 
tx_ring->desc_num : desc_index) - 1);
+	tx_desc->flags &= cpu_to_le16(~NBL_PACKED_DESC_F_NEXT);
+	first->next_to_watch = tx_desc;
+	first_desc->len += (hdr_param->total_hlen << NBL_TX_TOTAL_HEADERLEN_SHIFT);
+	first_desc->id = cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+	/* wmb: make the descriptor writes visible before setting the first desc flags */
+	wmb();
+
+	/* set the first desc flags last */
+	if (first_desc == tx_desc)
+		first_desc->flags = cpu_to_le16(avail_used_flags);
+	else
+		first_desc->flags = cpu_to_le16(avail_used_flags | NBL_PACKED_DESC_F_NEXT);
+
+	tx_ring->next_to_use = desc_index;
+
+	nbl_maybe_stop_tx(tx_ring, DESC_NEEDED);
+	/* kick the doorbell directly (passthrough) for performance */
+	writel(tx_ring->notify_qid, tx_ring->notify_addr);
+
+	// nbl_trace(tx_map_ok, tx_ring, skb, head, first_desc, pkthdr);
+
+	return NETDEV_TX_OK;
+
+dma_map_error:
+	while (desc_index != head) {
+		if (unlikely(!desc_index))
+			desc_index = tx_ring->desc_num;
+		desc_index--;
+		nbl_unmap_and_free_tx_resource(tx_ring, NBL_TX_BUF(tx_ring, desc_index),
+					       false, false);
+	}
+
+	tx_ring->avail_used_flags = avail_used_flags;
+	tx_ring->tx_stats.tx_dma_busy++;
+	return NETDEV_TX_BUSY;
+
+out_drop:
+	netdev_err(tx_ring->netdev, "tx_map, free_skb\n");
+	tx_ring->tx_stats.tx_skb_free++;
+	// nbl_trace(tx_map_drop, tx_ring, skb);
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static netdev_tx_t nbl_res_txrx_rep_xmit(struct sk_buff *skb,
+					 struct net_device *netdev)
+{
+	struct nbl_resource_mgt *res_mgt =
+		NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev));
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_res_tx_ring *tx_ring = txrx_mgt->tx_rings[skb_get_queue_mapping(skb)];
+	struct nbl_tx_hdr_param hdr_param = {
+		.mac_len = 14 >> 1,
+		.ip_len = 20 >> 2,
+		.l4_len = 20 >> 2,
+		.mss = 256,
+	};
+	unsigned int count;
+	int ret = 0;
+
+	count = nbl_xmit_desc_count(skb);
+	/* TODO: we cannot transmit a packet with more than 32 descriptors */
+	WARN_ON(count > MAX_DESC_NUM_PER_PKT);
+	if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) {
+		if (net_ratelimit())
+			dev_dbg(NBL_RING_TO_DEV(tx_ring), "Not enough descriptors to transmit packet in queue %u\n",
+				tx_ring->queue_index);
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	eth_skb_pad(skb);
+
+	hdr_param.dport_id = *(u16 *)(&skb->cb[NBL_SKB_FILL_VSI_ID_OFF]);
+	hdr_param.dport = NBL_TX_DPORT_HOST;
+	hdr_param.rss_lag_en = 1;
+	hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED;
+
+	ret = nbl_tx_map(tx_ring, skb, &hdr_param);
+
+	return ret;
+}
+
+static netdev_tx_t nbl_res_txrx_self_test_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nbl_resource_mgt *res_mgt =
+		NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev));
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_res_tx_ring *tx_ring = txrx_mgt->tx_rings[skb_get_queue_mapping(skb)];
+	struct nbl_tx_hdr_param hdr_param = {
+		.mac_len = 14 >> 1,
+		.ip_len = 20 >> 2,
+		.l4_len = 20 >> 2,
+		.mss = 256,
+	};
+	unsigned int count;
+
+	count = nbl_xmit_desc_count(skb);
+	/* TODO: we cannot transmit a packet with more than 32 descriptors */
+	WARN_ON(count > MAX_DESC_NUM_PER_PKT);
+	if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) {
+		if (net_ratelimit())
+			dev_dbg(NBL_RING_TO_DEV(tx_ring), "Not enough descriptors to transmit packet in queue %u\n",
+				tx_ring->queue_index);
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	/* for dstore and eth, min packet len is 60 */
+	eth_skb_pad(skb);
+
+	hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED;
+	hdr_param.dport = NBL_TX_DPORT_ETH;
+	hdr_param.dport_id = tx_ring->eth_id;
+	hdr_param.rss_lag_en = 0;
+
+	return nbl_tx_map(tx_ring, skb, &hdr_param);
+}
+
+static netdev_tx_t nbl_res_txrx_start_xmit(struct sk_buff *skb,
+					   struct net_device *netdev)
+{
+	struct nbl_resource_mgt *res_mgt =
+		NBL_ADAPTER_TO_RES_MGT(NBL_NETDEV_TO_ADAPTER(netdev));
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_res_tx_ring *tx_ring = txrx_mgt->tx_rings[skb_get_queue_mapping(skb)];
+	struct nbl_tx_hdr_param hdr_param = {
+		.mac_len = 14 >> 1,
+		.ip_len = 20 >> 2,
+		.l4_len = 20 >> 2,
+		.mss = 256,
+	};
+	struct sk_buff *skb2 = NULL;
+	unsigned int count;
+	int ret = 0;
+
+	// nbl_trace(xmit_frame_ring, tx_ring, skb);
+
+	count = nbl_xmit_desc_count(skb);
+	/* TODO: we cannot transmit a packet with more than 32 descriptors */
+	WARN_ON(count > MAX_DESC_NUM_PER_PKT);
+	if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) {
+		if (net_ratelimit())
+			dev_dbg(NBL_RING_TO_DEV(tx_ring), "Not enough descriptors to transmit packet in queue %u\n",
+				tx_ring->queue_index);
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	if (tx_ring->vlan_proto) {
+		skb = vlan_insert_tag_set_proto(skb, htons(tx_ring->vlan_proto),
+						tx_ring->vlan_tci);
+		if (!skb)
+			return NETDEV_TX_OK;
+	}
+
+	if (nbl_ktls_device_offload(skb))
+		if (nbl_ktls_tx_offload_handle(res_mgt, tx_ring, skb, &hdr_param))
+			return NETDEV_TX_OK;
+
+	/* for dstore and eth, min packet len is 60 */
+	eth_skb_pad(skb);
+
+	hdr_param.dport_id = tx_ring->eth_id;
+	hdr_param.fwd = 1;
+	hdr_param.rss_lag_en = 0;
+
+	/* ipro fwd to eth port */
+	if (tx_ring->mode == NBL_ESWITCH_OFFLOADS) {
+		hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED;
+		hdr_param.dport = NBL_TX_DPORT_ETH;
+		if (txrx_mgt->bond_info.bond_enable && !nbl_skb_is_lacp_or_lldp(skb)) {
+			hdr_param.dport_id = txrx_mgt->bond_info.lag_id <<
+					     NBL_TX_DPORT_ID_LAG_OFFSET;
+			hdr_param.rss_lag_en = 1;
+		}
+	}
+
+	if (nbl_skb_is_lacp_or_lldp(skb)) {
+		hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED;
+		hdr_param.dport = NBL_TX_DPORT_ETH;
+	}
+
+	/* under bonding, broadcast-type packets (ARP/ND) are duplicated to both eth ports */
+	if (txrx_mgt->bond_info.bond_enable && nbl_tx_map_need_broadcast_check(skb)) {
+		int ret2;
+
+		hdr_param.fwd = NBL_TX_FWD_TYPE_CPU_ASSIGNED;
+		hdr_param.dport = NBL_TX_DPORT_ETH;
+		hdr_param.dport_id = txrx_mgt->bond_info.eth_id[0];
+		hdr_param.rss_lag_en = 0;
+
+		skb2 = skb_copy(skb, GFP_ATOMIC);
+		ret |= nbl_tx_map(tx_ring, skb, &hdr_param);
+		if (likely(skb2)) {
+			hdr_param.dport_id = txrx_mgt->bond_info.eth_id[1];
+			ret2 = nbl_tx_map(tx_ring, skb2, &hdr_param);
+			if (ret2)
+				dev_kfree_skb_any(skb2);
+		}
+
+	} else {
+		ret = nbl_tx_map(tx_ring, skb, &hdr_param);
+	}
+
+	return ret;
+}
+
+static void nbl_res_txrx_kick_rx_ring(void *priv, u16 index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+	struct nbl_notify_param notify_param = {0};
+	struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index);
+
+	notify_param.notify_qid = rx_ring->notify_qid;
+	notify_param.tail_ptr = rx_ring->tail_ptr;
+	phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), &notify_param);
+}
+
+static int nbl_res_txring_is_invalid(struct nbl_resource_mgt *res_mgt,
+				     struct seq_file *m, int index)
+{
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_res_tx_ring *tx_ring;
+	u8 ring_num = txrx_mgt->tx_ring_num;
+
+	if (index >= ring_num) {
+		seq_printf(m, "Invalid tx index %d, max ring num is
%d\n", index, ring_num); + return -EINVAL; + } + + tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, index); + if (!tx_ring || !tx_ring->valid) { + seq_puts(m, "Ring doesn't exist, wrong index or the netdev might be stopped\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_res_rxring_is_invalid(struct nbl_resource_mgt *res_mgt, + struct seq_file *m, int index) +{ + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *rx_ring; + u8 ring_num = txrx_mgt->rx_ring_num; + + if (index >= ring_num) { + seq_printf(m, "Invalid rx index %d, max ring num is %d\n", index, ring_num); + return -EINVAL; + } + + rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index); + if (!rx_ring || !rx_ring->valid) { + seq_puts(m, "Ring doesn't exist, wrong index or the netdev might be stopped\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_res_rx_dump_ring(struct nbl_resource_mgt *res_mgt, struct seq_file *m, int index) +{ + struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index); + struct nbl_ring_desc *desc; + int i; + + if (nbl_res_rxring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "queue_index %d desc_num %d used_wrap_counter 0x%x avail_used_flags 0x%x\n", + ring->queue_index, ring->desc_num, + ring->used_wrap_counter, ring->avail_used_flags); + seq_printf(m, "ntu 0x%x, ntc 0x%x, tail_ptr 0x%x\n", + ring->next_to_use, ring->next_to_clean, ring->tail_ptr); + seq_printf(m, "desc dma 0x%llx, HZ %u\n", ring->dma, HZ); + + seq_puts(m, "desc:\n"); + for (i = 0; i < ring->desc_num; i++) { + desc = ring->desc + i; + seq_printf(m, "desc id %d, addr 0x%llx len %d flag 0x%x\n", + desc->id, desc->addr, desc->len, desc->flags); + } + + return 0; +} + +static int nbl_res_tx_dump_ring(struct nbl_resource_mgt *res_mgt, struct seq_file *m, int index) +{ + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, index); + struct nbl_ring_desc *desc; + u32 total_header_len; + u32 desc_len; + int i; + + if (nbl_res_txring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "queue_index %d desc_num %d used_wrap_counter 0x%x avail_used_flags 0x%x\n", + ring->queue_index, ring->desc_num, + ring->used_wrap_counter, ring->avail_used_flags); + seq_printf(m, "ntu 0x%x, ntc 0x%x tail_ptr 0x%x\n", + ring->next_to_use, ring->next_to_clean, ring->tail_ptr); + seq_printf(m, "desc dma 0x%llx, HZ %u\n", ring->dma, HZ); + seq_printf(m, "tx_skb_free %llu\n", ring->tx_stats.tx_skb_free); + + seq_puts(m, "desc:\n"); + for (i = 0; i < ring->desc_num; i++) { + desc = ring->desc + i; + total_header_len = desc->len >> NBL_TX_TOTAL_HEADERLEN_SHIFT; + desc_len = desc->len & 0xFFFFFF; + seq_printf(m, "desc %d: id/gso_size %d, addr 0x%llx len %d header_len %d flag 0x%x\n", + i, desc->id, desc->addr, desc_len, total_header_len, desc->flags); + } + + return 0; +} + +static int nbl_res_txrx_dump_ring(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + if (is_tx) + return nbl_res_tx_dump_ring(res_mgt, m, index); + else + return nbl_res_rx_dump_ring(res_mgt, m, index); +} + +static int nbl_res_tx_dump_ring_stats(struct nbl_resource_mgt *res_mgt, + struct seq_file *m, int index) +{ + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, index); + + if (nbl_res_txring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "pkts: %lld, bytes: %lld, descs: %lld\n", + ring->stats.packets, ring->stats.bytes, ring->stats.descs); + seq_printf(m, "tso_pkts: %lld, tso_bytes: %lld, 
tx_checksum_pkts: %lld\n", + ring->tx_stats.tso_packets, ring->tx_stats.tso_bytes, + ring->tx_stats.tx_csum_packets); + seq_printf(m, "tx_busy: %lld, tx_dma_busy: %lld\n", + ring->tx_stats.tx_busy, ring->tx_stats.tx_dma_busy); + seq_printf(m, "tx_multicast_pkts: %lld, tx_unicast_pkts: %lld\n", + ring->tx_stats.tx_multicast_packets, + ring->tx_stats.tx_unicast_packets); + seq_printf(m, "tx_skb_free: %lld, tx_desc_addr_err: %lld, tx_desc_len_err: %lld\n", + ring->tx_stats.tx_skb_free, ring->tx_stats.tx_desc_addr_err_cnt, + ring->tx_stats.tx_desc_len_err_cnt); + return 0; +} + +static int nbl_res_rx_dump_ring_stats(struct nbl_resource_mgt *res_mgt, + struct seq_file *m, int index) +{ + struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index); + + if (nbl_res_rxring_is_invalid(res_mgt, m, index)) + return 0; + + seq_printf(m, "rx_checksum_pkts: %lld, rx_checksum_errors: %lld\n", + ring->rx_stats.rx_csum_packets, ring->rx_stats.rx_csum_errors); + seq_printf(m, "rx_multicast_pkts: %lld, rx_unicast_pkts: %lld\n", + ring->rx_stats.rx_multicast_packets, + ring->rx_stats.rx_unicast_packets); + seq_printf(m, "rx_desc_addr_err: %lld\n", + ring->rx_stats.rx_desc_addr_err_cnt); + seq_printf(m, "rx_alloc_buf_err_cnt: %lld\n", + ring->rx_stats.rx_alloc_buf_err_cnt); + + return 0; +} + +static int nbl_res_txrx_dump_ring_stats(void *priv, struct seq_file *m, bool is_tx, int index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + + if (is_tx) + return nbl_res_tx_dump_ring_stats(res_mgt, m, index); + else + return nbl_res_rx_dump_ring_stats(res_mgt, m, index); +} + +static struct napi_struct *nbl_res_txrx_get_vector_napi(void *priv, u16 index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + + if (!txrx_mgt->vectors || index >= txrx_mgt->rx_ring_num) { + nbl_err(common, NBL_DEBUG_RESOURCE, "vectors not allocated\n"); + return NULL; + } + + return &txrx_mgt->vectors[index]->napi; +} + +static void nbl_res_txrx_set_vector_info(void *priv, u8 *irq_enable_base, + u32 irq_data, u16 index, bool mask_en) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt); + struct nbl_txrx_mgt *txrx_mgt = res_mgt->txrx_mgt; + + if (!txrx_mgt->vectors || index >= txrx_mgt->rx_ring_num) { + nbl_err(common, NBL_DEBUG_RESOURCE, "vectors not allocated\n"); + return; + } + + txrx_mgt->vectors[index]->irq_enable_base = irq_enable_base; + txrx_mgt->vectors[index]->irq_data = irq_data; + txrx_mgt->vectors[index]->net_msix_mask_en = mask_en; +} + +static void nbl_res_get_pt_ops(void *priv, struct nbl_resource_pt_ops *pt_ops) +{ + pt_ops->start_xmit = nbl_res_txrx_start_xmit; + pt_ops->rep_xmit = nbl_res_txrx_rep_xmit; + pt_ops->self_test_xmit = nbl_res_txrx_self_test_start_xmit; + pt_ops->napi_poll = nbl_res_napi_poll; +} + +static u32 nbl_res_txrx_get_tx_headroom(void *priv) +{ + return sizeof(union nbl_tx_extend_head); +} + +static void nbl_res_txrx_get_queue_stats(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct u64_stats_sync *syncp; + struct nbl_queue_stats *stats; + unsigned int start; + + if (is_tx) { + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, queue_id); + + syncp = &ring->syncp; + stats = &ring->stats; + } else { + struct nbl_res_rx_ring 
*ring = NBL_RES_MGT_TO_RX_RING(res_mgt, queue_id); + + syncp = &ring->syncp; + stats = &ring->stats; + } + + do { + start = u64_stats_fetch_begin(syncp); + memcpy(queue_stats, stats, sizeof(*stats)); + } while (u64_stats_fetch_retry(syncp, start)); +} + +static void nbl_res_txrx_get_net_stats(void *priv, struct nbl_stats *net_stats) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + int i; + u64 bytes = 0, packets = 0; + u64 tso_packets = 0, tso_bytes = 0; + u64 tx_csum_packets = 0; + u64 rx_csum_packets = 0, rx_csum_errors = 0; + u64 tx_multicast_packets = 0, tx_unicast_packets = 0; + u64 rx_multicast_packets = 0, rx_unicast_packets = 0; +#ifdef CONFIG_TLS_DEVICE + u64 tls_encrypted_packets = 0; + u64 tls_encrypted_bytes = 0; + u64 tls_ooo_packets = 0; + u64 tls_decrypted_packets = 0; + u64 tls_resync_req_num = 0; +#endif + u64 tx_busy = 0, tx_dma_busy = 0; + u64 tx_desc_addr_err_cnt = 0; + u64 tx_desc_len_err_cnt = 0; + u64 rx_desc_addr_err_cnt = 0; + u64 rx_alloc_buf_err_cnt = 0; + u64 rx_cache_reuse = 0; + u64 rx_cache_full = 0; + u64 rx_cache_empty = 0; + u64 rx_cache_busy = 0; + u64 rx_cache_waive = 0; + u64 tx_skb_free = 0; + unsigned int start; + + rcu_read_lock(); + for (i = 0; i < txrx_mgt->rx_ring_num; i++) { + struct nbl_res_rx_ring *ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i); + + do { + start = u64_stats_fetch_begin(&ring->syncp); + bytes += ring->stats.bytes; + packets += ring->stats.packets; + rx_csum_packets += ring->rx_stats.rx_csum_packets; + rx_csum_errors += ring->rx_stats.rx_csum_errors; + rx_multicast_packets += ring->rx_stats.rx_multicast_packets; + rx_unicast_packets += ring->rx_stats.rx_unicast_packets; + rx_desc_addr_err_cnt += ring->rx_stats.rx_desc_addr_err_cnt; + rx_alloc_buf_err_cnt += ring->rx_stats.rx_alloc_buf_err_cnt; + rx_cache_reuse += ring->rx_stats.rx_cache_reuse; + rx_cache_full += ring->rx_stats.rx_cache_full; + rx_cache_empty += ring->rx_stats.rx_cache_empty; + rx_cache_busy += ring->rx_stats.rx_cache_busy; + rx_cache_waive += ring->rx_stats.rx_cache_waive; +#ifdef CONFIG_TLS_DEVICE + tls_decrypted_packets += ring->rx_stats.tls_decrypted_packets; + tls_resync_req_num += ring->rx_stats.tls_resync_req_num; +#endif + } while (u64_stats_fetch_retry(&ring->syncp, start)); + } + + net_stats->rx_packets = packets; + net_stats->rx_bytes = bytes; + + net_stats->rx_csum_packets = rx_csum_packets; + net_stats->rx_csum_errors = rx_csum_errors; + net_stats->rx_multicast_packets = rx_multicast_packets; + net_stats->rx_unicast_packets = rx_unicast_packets; +#ifdef CONFIG_TLS_DEVICE + net_stats->tls_decrypted_packets = tls_decrypted_packets; + net_stats->tls_resync_req_num = tls_resync_req_num; +#endif + + bytes = 0; + packets = 0; + + for (i = 0; i < txrx_mgt->tx_ring_num; i++) { + struct nbl_res_tx_ring *ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i); + + do { + start = u64_stats_fetch_begin(&ring->syncp); + bytes += ring->stats.bytes; + packets += ring->stats.packets; + tso_packets += ring->tx_stats.tso_packets; + tso_bytes += ring->tx_stats.tso_bytes; + tx_csum_packets += ring->tx_stats.tx_csum_packets; + tx_busy += ring->tx_stats.tx_busy; + tx_dma_busy += ring->tx_stats.tx_dma_busy; + tx_multicast_packets += ring->tx_stats.tx_multicast_packets; + tx_unicast_packets += ring->tx_stats.tx_unicast_packets; + tx_skb_free += ring->tx_stats.tx_skb_free; + tx_desc_addr_err_cnt += ring->tx_stats.tx_desc_addr_err_cnt; + tx_desc_len_err_cnt += ring->tx_stats.tx_desc_len_err_cnt; +#ifdef 
CONFIG_TLS_DEVICE + tls_encrypted_packets += ring->tx_stats.tls_encrypted_packets; + tls_encrypted_bytes += ring->tx_stats.tls_encrypted_bytes; + tls_ooo_packets += ring->tx_stats.tls_ooo_packets; +#endif + } while (u64_stats_fetch_retry(&ring->syncp, start)); + } + + rcu_read_unlock(); + + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + net_stats->tso_packets = tso_packets; + net_stats->tso_bytes = tso_bytes; + net_stats->tx_csum_packets = tx_csum_packets; + net_stats->tx_busy = tx_busy; + net_stats->tx_dma_busy = tx_dma_busy; + net_stats->tx_multicast_packets = tx_multicast_packets; + net_stats->tx_unicast_packets = tx_unicast_packets; + net_stats->tx_skb_free = tx_skb_free; + net_stats->tx_desc_addr_err_cnt = tx_desc_addr_err_cnt; + net_stats->tx_desc_len_err_cnt = tx_desc_len_err_cnt; + net_stats->rx_desc_addr_err_cnt = rx_desc_addr_err_cnt; + net_stats->rx_alloc_buf_err_cnt = rx_alloc_buf_err_cnt; + net_stats->rx_cache_reuse = rx_cache_reuse; + net_stats->rx_cache_full = rx_cache_full; + net_stats->rx_cache_empty = rx_cache_empty; + net_stats->rx_cache_busy = rx_cache_busy; + net_stats->rx_cache_waive = rx_cache_waive; +#ifdef CONFIG_TLS_DEVICE + net_stats->tls_encrypted_packets = tls_encrypted_packets; + net_stats->tls_encrypted_bytes = tls_encrypted_bytes; + net_stats->tls_ooo_packets = tls_ooo_packets; +#endif +} + +static u16 nbl_res_txrx_get_max_desc_num(void) +{ + return NBL_MAX_DESC_NUM; +} + +static u16 nbl_res_txrx_get_min_desc_num(void) +{ + return NBL_MIN_DESC_NUM; +} + +static u16 nbl_res_txrx_get_tx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *ring = txrx_mgt->tx_rings[ring_index]; + + return ring->desc_num; +} + +static u16 nbl_res_txrx_get_rx_desc_num(void *priv, u32 ring_index) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *ring = txrx_mgt->rx_rings[ring_index]; + + return ring->desc_num; +} + +static void nbl_res_txrx_set_tx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_tx_ring *ring = txrx_mgt->tx_rings[ring_index]; + + ring->desc_num = desc_num; +} + +static void nbl_res_txrx_set_rx_desc_num(void *priv, u32 ring_index, u16 desc_num) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + struct nbl_res_rx_ring *ring = txrx_mgt->rx_rings[ring_index]; + + ring->desc_num = desc_num; +} + +static struct sk_buff *nbl_fetch_rx_buffer_lb_test(struct nbl_res_rx_ring *rx_ring, + const struct nbl_ring_desc *rx_desc, + u16 *num_buffers) +{ + struct nbl_rx_buffer *rx_buf; + struct sk_buff *skb; + const struct page *page; + const void *page_addr; + struct nbl_rx_extend_head *hdr; + u32 size = 256; + + rx_buf = nbl_get_rx_buf(rx_ring); + page = rx_buf->di->page; + prefetchw(page); + + page_addr = page_address(page) + rx_buf->offset; + prefetch(page_addr); + + skb = alloc_skb(size, GFP_KERNEL); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + /* get number of buffers */ + hdr = (struct nbl_rx_extend_head *)page_addr; + *num_buffers = le16_to_cpu(hdr->num_buffers); + nbl_rx_csum(rx_ring, skb, hdr); + + memcpy(__skb_put(skb, 
size), page_addr + sizeof(*hdr), ALIGN(size, sizeof(long)));
+
+	nbl_put_rx_buf(rx_ring, rx_buf);
+
+	return skb;
+}
+
+static struct sk_buff *nbl_res_txrx_clean_rx_lb_test(void *priv, u32 ring_index)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_res_rx_ring *rx_ring = txrx_mgt->rx_rings[ring_index];
+	struct nbl_ring_desc *rx_desc;
+	struct sk_buff *skb;
+	u16 num_buffers = 0;
+	u16 cleaned_count = nbl_unused_rx_desc_count(rx_ring);
+
+	if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1))) {
+		nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1)));
+		cleaned_count = 0;
+	}
+
+	rx_desc = NBL_RX_DESC(rx_ring, rx_ring->next_to_clean);
+	if (!nbl_ring_desc_used(rx_desc, rx_ring->used_wrap_counter))
+		return NULL;
+
+	/* rmb for desc used */
+	rmb();
+
+	skb = nbl_fetch_rx_buffer_lb_test(rx_ring, rx_desc, &num_buffers);
+	if (!skb)
+		return NULL;
+
+	cleaned_count++;
+
+	if (num_buffers > 1)
+		nbl_err(common, NBL_DEBUG_RESOURCE, "More than one desc in lb rx, not supported\n");
+
+	if (cleaned_count & (~(NBL_MAX_BATCH_DESC - 1)))
+		nbl_alloc_rx_bufs(rx_ring, cleaned_count & (~(NBL_MAX_BATCH_DESC - 1)));
+
+	return skb;
+}
+
+static int nbl_res_txrx_cfg_duppkt_info(void *priv, struct nbl_lag_member_list_param *param)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_txrx_bond_info *bond_info = &txrx_mgt->bond_info;
+	int i = 0;
+
+	if (!param->duppkt_enable) {
+		memset(bond_info, 0, sizeof(*bond_info));
+		return 0;
+	} else if (param->lag_num > 1) {
+		for (i = 0; i < param->lag_num && i < NBL_LAG_MAX_NUM; i++)
+			bond_info->eth_id[i] = param->member_list[i].eth_id;
+		bond_info->bond_enable = 1;
+		bond_info->lag_id = param->lag_id;
+	}
+
+	return 0;
+}
+
+static int
+nbl_res_queue_stop_abnormal_sw_queue(void *priv, u16 local_queue_id, int type)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_res_vector *vector = NULL;
+	struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, local_queue_id);
+
+	if (type != NBL_TX)
+		return 0;
+
+	/* tx_ring may be absent for this queue id; check before any deref */
+	if (!tx_ring || !tx_ring->valid)
+		return -EINVAL;
+
+	if (!nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], local_queue_id))
+		vector = NBL_RES_MGT_TO_VECTOR(res_mgt, local_queue_id);
+
+	if (vector && !vector->started)
+		return -EINVAL;
+
+	if (vector) {
+		vector->started = false;
+		napi_synchronize(&vector->napi);
+		netif_stop_subqueue(tx_ring->netdev, local_queue_id);
+	}
+
+	return 0;
+}
+
+static dma_addr_t nbl_res_txrx_restore_abnormal_ring(void *priv, int ring_index, int type)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_res_vector *vector = NULL;
+	struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index);
+	struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index);
+
+	if (tx_ring && !nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], ring_index))
+		vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index);
+
+	switch (type) {
+	case NBL_TX:
+		if (tx_ring && tx_ring->valid) {
+			nbl_res_txrx_stop_tx_ring(res_mgt, ring_index);
+			return nbl_res_txrx_start_tx_ring(res_mgt, ring_index);
+		} else {
+			return (dma_addr_t)NULL;
+		}
+		break;
+	case NBL_RX:
+		if (rx_ring && rx_ring->valid) {
+			nbl_res_txrx_stop_rx_ring(res_mgt, ring_index);
+			return nbl_res_txrx_start_rx_ring(res_mgt, ring_index, true);
+		} else {
+			return (dma_addr_t)NULL;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return (dma_addr_t)NULL;
+}
+
+static int nbl_res_txrx_restart_abnormal_ring(void *priv, int ring_index, int type)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_res_tx_ring *tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, ring_index);
+	struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, ring_index);
+	struct nbl_res_vector *vector = NULL;
+	int ret = 0;
+
+	if (tx_ring && !nbl_txrx_within_vsi(&tx_ring->vsi_info[NBL_VSI_XDP], ring_index))
+		vector = NBL_RES_MGT_TO_VECTOR(res_mgt, ring_index);
+
+	switch (type) {
+	case NBL_TX:
+		if (tx_ring && tx_ring->valid) {
+			writel(tx_ring->notify_qid, tx_ring->notify_addr);
+			netif_start_subqueue(tx_ring->netdev, ring_index);
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case NBL_RX:
+		if (rx_ring && rx_ring->valid)
+			nbl_res_txrx_kick_rx_ring(res_mgt, ring_index);
+		else
+			ret = -EINVAL;
+		break;
+	default:
+		break;
+	}
+
+	if (vector)
+		vector->started = true;
+
+	return ret;
+}
+
+static void nbl_res_txrx_set_xdp_prog(void *priv, void *prog)
+{
+	int i;
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_txrx_mgt *txrx_mgt = NBL_RES_MGT_TO_TXRX_MGT(res_mgt);
+	struct nbl_res_rx_ring *rx_ring;
+	struct nbl_res_tx_ring *tx_ring;
+
+	for (i = 0; i < txrx_mgt->xdp_ring_num; i++) {
+		rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, i);
+		if (!rx_ring)
+			continue;
+
+		WRITE_ONCE(rx_ring->xdp_prog, prog);
+	}
+
+	for (i = 0; i < txrx_mgt->xdp_ring_num; i++) {
+		tx_ring = NBL_RES_MGT_TO_TX_RING(res_mgt, i + txrx_mgt->xdp_ring_offset);
+		if (!tx_ring)
+			continue;
+
+		WRITE_ONCE(tx_ring->xdp_prog, prog);
+	}
+}
+
+/* NBL_TXRX_SET_OPS(ops_name, func)
+ *
+ * Use X macros to keep the ops-table setup and removal paths free of
+ * duplicated assignment code.
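+ *
+ * Each expansion of NBL_TXRX_OPS_TBL below re-defines NBL_TXRX_SET_OPS
+ * first, so the same table can both install and clear every op.  A
+ * minimal sketch of the pattern on a made-up two-op structure (names
+ * here are illustrative, not driver symbols):
+ *
+ *	struct demo_ops { int (*open)(void); int (*close)(void); };
+ *
+ *	#define DEMO_OPS_TBL \
+ *	do { \
+ *		DEMO_SET_OPS(open, demo_open); \
+ *		DEMO_SET_OPS(close, demo_close); \
+ *	} while (0)
+ *
+ *	#define DEMO_SET_OPS(name, func) (ops->name = (func))
+ *	DEMO_OPS_TBL;
+ *	#undef DEMO_SET_OPS
+ *
+ * Adding an op is then a one-line change to the table, and the setup
+ * and remove paths cannot drift out of sync.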
+ */ +#define NBL_TXRX_OPS_TBL \ +do { \ + NBL_TXRX_SET_OPS(get_resource_pt_ops, nbl_res_get_pt_ops); \ + NBL_TXRX_SET_OPS(alloc_rings, nbl_res_txrx_alloc_rings); \ + NBL_TXRX_SET_OPS(remove_rings, nbl_res_txrx_remove_rings); \ + NBL_TXRX_SET_OPS(start_tx_ring, nbl_res_txrx_start_tx_ring); \ + NBL_TXRX_SET_OPS(stop_tx_ring, nbl_res_txrx_stop_tx_ring); \ + NBL_TXRX_SET_OPS(start_rx_ring, nbl_res_txrx_start_rx_ring); \ + NBL_TXRX_SET_OPS(stop_rx_ring, nbl_res_txrx_stop_rx_ring); \ + NBL_TXRX_SET_OPS(kick_rx_ring, nbl_res_txrx_kick_rx_ring); \ + NBL_TXRX_SET_OPS(dump_ring, nbl_res_txrx_dump_ring); \ + NBL_TXRX_SET_OPS(dump_ring_stats, nbl_res_txrx_dump_ring_stats); \ + NBL_TXRX_SET_OPS(get_vector_napi, nbl_res_txrx_get_vector_napi); \ + NBL_TXRX_SET_OPS(set_vector_info, nbl_res_txrx_set_vector_info); \ + NBL_TXRX_SET_OPS(get_tx_headroom, nbl_res_txrx_get_tx_headroom); \ + NBL_TXRX_SET_OPS(get_queue_stats, nbl_res_txrx_get_queue_stats); \ + NBL_TXRX_SET_OPS(get_net_stats, nbl_res_txrx_get_net_stats); \ + NBL_TXRX_SET_OPS(get_max_desc_num, nbl_res_txrx_get_max_desc_num); \ + NBL_TXRX_SET_OPS(get_min_desc_num, nbl_res_txrx_get_min_desc_num); \ + NBL_TXRX_SET_OPS(get_tx_desc_num, nbl_res_txrx_get_tx_desc_num); \ + NBL_TXRX_SET_OPS(get_rx_desc_num, nbl_res_txrx_get_rx_desc_num); \ + NBL_TXRX_SET_OPS(set_tx_desc_num, nbl_res_txrx_set_tx_desc_num); \ + NBL_TXRX_SET_OPS(set_rx_desc_num, nbl_res_txrx_set_rx_desc_num); \ + NBL_TXRX_SET_OPS(clean_rx_lb_test, nbl_res_txrx_clean_rx_lb_test); \ + NBL_TXRX_SET_OPS(cfg_duppkt_info, nbl_res_txrx_cfg_duppkt_info); \ + NBL_TXRX_SET_OPS(stop_abnormal_sw_queue, nbl_res_queue_stop_abnormal_sw_queue); \ + NBL_TXRX_SET_OPS(restore_abnormal_ring, nbl_res_txrx_restore_abnormal_ring); \ + NBL_TXRX_SET_OPS(restart_abnormal_ring, nbl_res_txrx_restart_abnormal_ring); \ + NBL_TXRX_SET_OPS(register_vsi_ring, nbl_txrx_register_vsi_ring); \ + NBL_TXRX_SET_OPS(cfg_txrx_vlan, nbl_res_txrx_cfg_txrx_vlan); \ + NBL_TXRX_SET_OPS(set_rings_xdp_prog, nbl_res_txrx_set_xdp_prog); \ + NBL_TXRX_SET_OPS(register_xdp_rxq, nbl_res_txrx_register_xdp_rxq); \ + NBL_TXRX_SET_OPS(unregister_xdp_rxq, nbl_res_txrx_unregister_xdp_rxq); \ +} while (0) + +/* Structure starts here, adding an op should not modify anything below */ +static int nbl_txrx_setup_mgt(struct device *dev, struct nbl_txrx_mgt **txrx_mgt) +{ + *txrx_mgt = devm_kzalloc(dev, sizeof(struct nbl_txrx_mgt), GFP_KERNEL); + if (!*txrx_mgt) + return -ENOMEM; + + return 0; +} + +static void nbl_txrx_remove_mgt(struct device *dev, struct nbl_txrx_mgt **txrx_mgt) +{ + devm_kfree(dev, *txrx_mgt); + *txrx_mgt = NULL; +} + +int nbl_txrx_mgt_start(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_txrx_mgt **txrx_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + txrx_mgt = &NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + + return nbl_txrx_setup_mgt(dev, txrx_mgt); +} + +void nbl_txrx_mgt_stop(struct nbl_resource_mgt *res_mgt) +{ + struct device *dev; + struct nbl_txrx_mgt **txrx_mgt; + + dev = NBL_RES_MGT_TO_DEV(res_mgt); + txrx_mgt = &NBL_RES_MGT_TO_TXRX_MGT(res_mgt); + + if (!(*txrx_mgt)) + return; + + nbl_txrx_remove_mgt(dev, txrx_mgt); +} + +int nbl_txrx_setup_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_TXRX_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = func; ; } while (0) + NBL_TXRX_OPS_TBL; +#undef NBL_TXRX_SET_OPS + + return 0; +} + +void nbl_txrx_remove_ops(struct nbl_resource_ops *res_ops) +{ +#define NBL_TXRX_SET_OPS(name, func) do {res_ops->NBL_NAME(name) = NULL; ; } while (0) + NBL_TXRX_OPS_TBL; +#undef 
NBL_TXRX_SET_OPS +} diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..22a186f30ef08c2300f9b9e5a82aec9805ec9887 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_txrx.h @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_TXRX_H_ +#define _NBL_TXRX_H_ + +#include "nbl_resource.h" + +#define NBL_RING_TO_COMMON(ring) ((ring)->common) +#define NBL_RING_TO_DEV(ring) ((ring)->dma_dev) +#define NBL_RING_TO_DMA_DEV(ring) ((ring)->dma_dev) + +#define NBL_MIN_DESC_NUM 128 +#define NBL_MAX_DESC_NUM 32768 + +#define NBL_PACKED_DESC_F_NEXT 1 +#define NBL_PACKED_DESC_F_WRITE 2 + +#define DEFAULT_MAX_PF_QUEUE_PAIRS_NUM 16 +#define DEFAULT_MAX_VF_QUEUE_PAIRS_NUM 2 + +#define NBL_PACKED_DESC_F_AVAIL 7 +#define NBL_PACKED_DESC_F_USED 15 + +#define NBL_TX_DESC(tx_ring, i) (&(((tx_ring)->desc)[i])) +#define NBL_RX_DESC(rx_ring, i) (&(((rx_ring)->desc)[i])) +#define NBL_TX_BUF(tx_ring, i) (&(((tx_ring)->tx_bufs)[i])) +#define NBL_RX_BUF(rx_ring, i) (&(((rx_ring)->rx_bufs)[i])) + +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +#define NBL_TX_POLL_WEIGHT 256 + +#define NBL_RX_BUF_256 256 +#define NBL_RX_HDR_SIZE NBL_RX_BUF_256 +#define NBL_RX_BUF_WRITE 16 +#define NBL_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD - NBL_BUFFER_HDR_LEN) +#define NBL_XDP_RX_HARD_BUFF (NBL_RX_PAD + NBL_BUFFER_HDR_LEN) + +#define NBL_TXD_DATALEN_BITS 16 +#define NBL_TXD_DATALEN_MAX BIT(NBL_TXD_DATALEN_BITS) + +#define MAX_DESC_NUM_PER_PKT (32) + +#define NBL_RX_BUFSZ (2048) +#define NBL_RX_BUFSZ_ORDER (11) + +#define NBL_BUFFER_HDR_LEN (sizeof(struct nbl_rx_extend_head)) + +#define NBL_ETH_FRAME_MIN_SIZE 60 + +#define NBL_TX_TSO_MSS_MIN (256) +#define NBL_TX_TSO_MSS_MAX (16383) +#define NBL_TX_TSO_L2L3L4_HDR_LEN_MIN (42) +#define NBL_TX_TSO_L2L3L4_HDR_LEN_MAX (128) +#define NBL_TX_CHECKSUM_OFFLOAD_L2L3L4_HDR_LEN_MAX (255) +#define IP_VERSION_V4 (4) +#define NBL_TX_FLAGS_TSO BIT(0) + +#define NBL_TX_TOTAL_HEADERLEN_SHIFT 24 + +#define NBL_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define NBL_RX_PAGE_PER_FRAGS (PAGE_SIZE >> NBL_RX_BUFSZ_ORDER) + +#define NBL_KTLS_INIT_PAD_LEN 28 +#define NBL_KTLS_SYNC_PKT_LEN 30 +#define NBL_KTLS_PER_CELL_LEN 4096 +#define NBL_KTLS_MAX_CELL_LEN 6144 + +/* TX inner IP header type */ +enum nbl_tx_iipt { + NBL_TX_IIPT_NONE = 0x0, + NBL_TX_IIPT_IPV6 = 0x1, + NBL_TX_IIPT_IPV4 = 0x2, + NBL_TX_IIPT_RSV = 0x3 +}; + +/* TX L4 packet type */ +enum nbl_tx_l4t { + NBL_TX_L4T_NONE = 0x0, + NBL_TX_L4T_TCP = 0x1, + NBL_TX_L4T_UDP = 0x2, + NBL_TX_L4T_RSV = 0x3 +}; + +struct nbl_tx_hdr_param { + u8 l4s_pbrac_mode; + u8 l4s_hdl_ind; + u8 l4s_sync_ind; + u8 tso; + u16 l4s_sid; + u16 mss; + u8 mac_len; + u8 ip_len; + u8 l4_len; + u8 l4_type; + u8 inner_ip_type; + u8 l3_csum_en; + u8 l4_csum_en; + u16 total_hlen; + u16 dport_id:10; + u16 fwd:2; + u16 dport:3; + u16 rss_lag_en:1; +}; + +union nbl_tx_extend_head { + struct { + /* DW0 */ + u32 mac_len :5; + u32 ip_len :5; + u32 l4_len :4; + u32 l4_type :2; + u32 inner_ip_type :2; + u32 external_ip_type :2; + u32 external_ip_len :5; + u32 l4_tunnel_type :2; + u32 l4_tunnel_len :5; + /* DW1 */ + u32 l4s_sid :10; + u32 l4s_sync_ind :1; + u32 l4s_redun_ind :1; + u32 l4s_redun_head_ind :1; + u32 l4s_hdl_ind :1; + u32 l4s_pbrac_mode :1; + u32 rsv0 :2; + u32 mss :14; + u32 tso :1; + /* DW2 */ + /* if dport = 
NBL_TX_DPORT_ETH; dport_info = 0 + * if dport = NBL_TX_DPORT_HOST; dport_info = host queue id + * if dport = NBL_TX_DPORT_ECPU; dport_info = ecpu queue_id + */ + u32 dport_info :11; + /* if dport = NBL_TX_DPORT_ETH; dport_id[3:0] = eth port id, dport_id[9:4] = lag id + * if dport = NBL_TX_DPORT_HOST; dport_id[9:0] = host vsi_id + * if dport = NBL_TX_DPORT_ECPU; dport_id[9:0] = ecpu vsi_id + */ + u32 dport_id :10; +#define NBL_TX_DPORT_ID_LAG_OFFSET (4) + u32 dport :3; +#define NBL_TX_DPORT_ETH (0) +#define NBL_TX_DPORT_HOST (1) +#define NBL_TX_DPORT_ECPU (2) +#define NBL_TX_DPORT_EMP (3) +#define NBL_TX_DPORT_BMC (4) + u32 fwd :2; +#define NBL_TX_FWD_TYPE_DROP (0) +#define NBL_TX_FWD_TYPE_NORMAL (1) +#define NBL_TX_FWD_TYPE_RSV (2) +#define NBL_TX_FWD_TYPE_CPU_ASSIGNED (3) + u32 rss_lag_en :1; + u32 l4_csum_en :1; + u32 l3_csum_en :1; + u32 rsv1 :3; + }; + struct bootis_hdr { + /* DW0 */ + u32 mac_len :5; + u32 ip_len :5; + u32 l4_len :4; + u32 l4_type :2; + u32 inner_ip_type :2; + u32 external_ip_type :2; + u32 external_ip_len :5; + u32 l4_tunnel_type :2; + u32 l4_tunnel_len :5; + /* DW1 */ + u32 l4s_sid :10; + u32 inner_l3_cs :1; + u32 inner_l4_cs :1; + u32 dport :3; + u32 tag_idx :2; + u32 mss :14; + u32 tso :1; + /* DW2 */ + u32 dport_info :11; + u32 dport_id :12; + u32 tag_en :1; + u32 fwd :2; + u32 rss_lag_en :1; + u32 l4_csum_en :1; + u32 l3_csum_en :1; + u32 rsv1 :3; + } bootis; +}; + +struct nbl_rx_extend_head { + /* DW0 */ + /* 0x0:eth, 0x1:host, 0x2:ecpu, 0x3:emp, 0x4:bcm */ + uint32_t sport :3; + uint32_t dport_info :11; + /* sport = 0, sport_id[3:0] = eth id, + * sport = 1, sport_id[9:0] = host vsi_id, + * sport = 2, sport_id[9:0] = ecpu vsi_id, + */ + uint32_t sport_id :10; + /* 0x0:drop, 0x1:normal, 0x2:cpu upcall */ + uint32_t fwd :2; + uint32_t rsv0 :6; + /* DW1 */ + uint32_t error_code :6; + uint32_t ptype :10; + uint32_t profile_id :4; + uint32_t checksum_status :1; + uint32_t rsv1 :1; + uint32_t l4s_sid :10; + /* DW2 */ + uint32_t rsv3 :2; + uint32_t l4s_hdl_ind :1; + uint32_t l4s_tcp_offset :14; + uint32_t l4s_resync_ind :1; + uint32_t l4s_check_ind :1; + uint32_t l4s_dec_ind :1; + uint32_t rsv2 :4; + uint32_t num_buffers :8; +} __packed; + +struct nbl_ktls_init_payload { + /* DW0 */ + u16 initial:1; + u16 rsv1:7; + u16 sync:1; + u16 rsv2:7; + u16 sid:10; + u16 rsv3:6; + /* DW1 */ + u16 rsv4; + u16 rsv5; + /* DWX */ + u8 rec_num[NBL_KTLS_REC_LEN]; + u8 iv[NBL_KTLS_IV_LEN]; + u8 pad[NBL_KTLS_INIT_PAD_LEN]; +}; + +struct nbl_ktls_sync_payload { + /* DW0 */ + u16 initial:1; + u16 rsv1:7; + u16 sync:1; + u16 rsv2:7; + u16 sid:10; + u16 rsv3:6; + /* DW1 */ + u16 rsv4; + u16 rsv5; + /* DWX */ + u8 rec_num[NBL_KTLS_REC_LEN]; + __be16 redlen; + u8 redata[NBL_KTLS_MAX_CELL_LEN]; +}; + +struct nbl_ktls_init_packet { + union nbl_tx_extend_head pkthdr; + struct nbl_ktls_init_payload init_payload; +}; + +struct nbl_ktls_sync_packet { + union nbl_tx_extend_head pkthdr; + struct nbl_ktls_sync_payload sync_payload; +}; + +enum nbl_ktls_sync_retval { + NBL_KTLS_SYNC_DONE, + NBL_KTLS_SYNC_SKIP_NO_DATA, + NBL_KTLS_SYNC_FAIL, +}; + +struct nbl_tx_resync_info { + u64 rec_num; + u32 resync_len; + u32 nr_frags; + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +#define NBL_XDP_PASS 0 +#define NBL_XDP_CONSUMED BIT(0) +#define NBL_XDP_TX BIT(1) +#define NBL_XDP_REDIRECT BIT(2) +#define NBL_XDP_ABORTED BIT(3) +#define NBL_XDP_DROP BIT(4) + +struct nbl_xdp_output { + u16 desc_done_num; + bool xdp_tx_act; + bool xdp_redirect_act; + bool xdp_drop; + bool multicast; + bool xdp_oversize; + u8 resv; + u64 
bytes; +}; + +DECLARE_STATIC_KEY_FALSE(nbl_xdp_locking_key); + +static inline u16 nbl_unused_rx_desc_count(struct nbl_res_rx_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->desc_num) + ntc - ntu - 1; +} + +static inline u16 nbl_unused_tx_desc_count(struct nbl_res_tx_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->desc_num) + ntc - ntu - 1; +} + +static inline bool nbl_ktls_device_offload(struct sk_buff *skb) +{ +#ifdef CONFIG_TLS_DEVICE + return tls_is_skb_tx_device_offloaded(skb); +#else + return false; +#endif +} + +static inline void nbl_ktls_bigint_decrement(u8 *data, int len) +{ + int i; + + for (i = len - 1; i >= 0; i--) { + if (data[i] == 0) { + data[i] = 0xFF; + } else { + --data[i]; + break; + } + } +} + +static inline +struct nbl_res_tx_ring *nbl_res_txrx_select_xdp_ring(struct nbl_txrx_mgt *txrx_mgt) +{ + int ring_idx; + int cpu_id = smp_processor_id(); + struct nbl_res_tx_ring *xdp_ring; + + if (!txrx_mgt->xdp_ring_num) + return NULL; + + if (static_key_enabled(&nbl_xdp_locking_key)) + ring_idx = cpu_id % txrx_mgt->xdp_ring_num; + else + ring_idx = cpu_id; + + xdp_ring = txrx_mgt->tx_rings[ring_idx + txrx_mgt->xdp_ring_offset]; + return xdp_ring; +} + +static inline bool nbl_res_txrx_is_xdp_ring(struct nbl_res_tx_ring *ring) +{ + return READ_ONCE(ring->xdp_prog) ? true : false; +} + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c new file mode 100644 index 0000000000000000000000000000000000000000..a1d3adf2f2fa974e8945f019fbfb454bd3048436 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#include "nbl_vsi.h" + +static int nbl_res_set_promisc_mode(void *priv, u16 vsi_id, u16 mode) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + u16 pf_id = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + u16 eth_id = nbl_res_vsi_id_to_eth_id(res_mgt, vsi_id); + + if (pf_id >= NBL_RES_MGT_TO_PF_NUM(res_mgt)) + return -EINVAL; + + phy_ops->set_promisc_mode(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, eth_id, mode); + + return 0; +} + +static int nbl_res_set_spoof_check_addr(void *priv, u16 vsi_id, u8 *mac) +{ + u16 func_id; + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + /* if pf has cfg vf-mac, and the vf has active. it can change spoof mac. */ + if (!is_zero_ether_addr(vsi_info->mac_info[func_id].mac) && + nbl_res_check_func_active_by_queue(res_mgt, func_id)) { + return 0; + } + + return phy_ops->set_spoof_check_addr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vsi_id, mac); +} + +static int nbl_res_set_vf_spoof_check(void *priv, u16 vsi_id, int vfid, u8 enable) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + int pfid = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + u16 vf_vsi = vfid == -1 ? 
vsi_id : nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, + NBL_VSI_DATA); + + return phy_ops->set_spoof_check_enable(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), vf_vsi, enable); +} + +static u16 nbl_res_get_vf_function_id(void *priv, u16 vsi_id, int vfid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 vf_vsi; + int pfid = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + vf_vsi = vfid == -1 ? vsi_id : nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + + return nbl_res_vsi_id_to_func_id(res_mgt, vf_vsi); +} + +static u16 nbl_res_get_vf_vsi_id(void *priv, u16 vsi_id, int vfid) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + u16 vf_vsi; + int pfid = nbl_res_vsi_id_to_pf_id(res_mgt, vsi_id); + + vf_vsi = vfid == -1 ? vsi_id : nbl_res_pfvfid_to_vsi_id(res_mgt, pfid, vfid, NBL_VSI_DATA); + return vf_vsi; +} + +static int nbl_res_vsi_init_chip_module(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_queue_mgt *queue_mgt; + struct nbl_phy_ops *phy_ops; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + queue_mgt = NBL_RES_MGT_TO_QUEUE_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + ret = phy_ops->init_chip_module(NBL_RES_MGT_TO_PHY_PRIV(res_mgt), + res_mgt->resource_info->board_info.eth_speed, + res_mgt->resource_info->board_info.eth_num); + + return ret; +} + +static int nbl_res_vsi_init(void *priv) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_vsi_mgt *vsi_mgt; + struct nbl_phy_ops *phy_ops; + int ret = 0; + + if (!res_mgt) + return -EINVAL; + + vsi_mgt = NBL_RES_MGT_TO_VSI_MGT(res_mgt); + phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt); + + /* TODO: unnecessary? */ + + return ret; +} + +static void nbl_res_get_phy_caps(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps) +{ + /*TODO need to get it through adminq*/ + phy_caps->speed = 0xFF; + phy_caps->fec_ability = BIT(ETHTOOL_FEC_RS_BIT) | BIT(ETHTOOL_FEC_BASER_BIT); + phy_caps->pause_param = 0x3; +} + +static void nbl_res_register_func_mac(void *priv, u8 *mac, u16 func_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + ether_addr_copy(vsi_info->mac_info[func_id].mac, mac); +} + +static int nbl_res_register_func_link_forced(void *priv, u16 func_id, u8 link_forced, + bool *should_notify) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + + resource_info->link_forced_info[func_id] = link_forced; + *should_notify = test_bit(func_id, resource_info->func_bitmap); + + return 0; +} + +static int nbl_res_get_link_forced(void *priv, u16 vsi_id) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + u16 func_id = nbl_res_vsi_id_to_func_id(res_mgt, vsi_id); + + return resource_info->link_forced_info[func_id]; +} + +static int nbl_res_register_func_vlan(void *priv, u16 func_id, + u16 vlan_tci, u16 vlan_proto, bool *should_notify) +{ + struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv; + struct nbl_resource_info *resource_info = NBL_RES_MGT_TO_RES_INFO(res_mgt); + struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt); + + vsi_info->mac_info[func_id].vlan_proto = vlan_proto; + vsi_info->mac_info[func_id].vlan_tci = vlan_tci; + *should_notify = test_bit(func_id, 
resource_info->func_bitmap);
+
+	return 0;
+}
+
+static int nbl_res_register_rate(void *priv, u16 func_id, int rate)
+{
+	struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+	struct nbl_vsi_info *vsi_info = NBL_RES_MGT_TO_VSI_INFO(res_mgt);
+
+	vsi_info->mac_info[func_id].rate = rate;
+
+	return 0;
+}
+
+/* NBL_VSI_SET_OPS(ops_name, func)
+ *
+ * Use X macros to keep the ops-table setup and removal paths free of
+ * duplicated assignment code.
+ */
+#define NBL_VSI_OPS_TBL							\
+do {									\
+	NBL_VSI_SET_OPS(init_chip_module, nbl_res_vsi_init_chip_module);	\
+	NBL_VSI_SET_OPS(vsi_init, nbl_res_vsi_init);			\
+	NBL_VSI_SET_OPS(set_promisc_mode, nbl_res_set_promisc_mode);	\
+	NBL_VSI_SET_OPS(set_spoof_check_addr, nbl_res_set_spoof_check_addr);	\
+	NBL_VSI_SET_OPS(set_vf_spoof_check, nbl_res_set_vf_spoof_check);	\
+	NBL_VSI_SET_OPS(get_phy_caps, nbl_res_get_phy_caps);		\
+	NBL_VSI_SET_OPS(get_vf_function_id, nbl_res_get_vf_function_id);	\
+	NBL_VSI_SET_OPS(get_vf_vsi_id, nbl_res_get_vf_vsi_id);		\
+	NBL_VSI_SET_OPS(register_func_mac, nbl_res_register_func_mac);	\
+	NBL_VSI_SET_OPS(register_func_link_forced, nbl_res_register_func_link_forced);	\
+	NBL_VSI_SET_OPS(register_func_vlan, nbl_res_register_func_vlan);	\
+	NBL_VSI_SET_OPS(get_link_forced, nbl_res_get_link_forced);	\
+	NBL_VSI_SET_OPS(register_func_rate, nbl_res_register_rate);	\
+} while (0)
+
+/* Structure starts here, adding an op should not modify anything below */
+static int nbl_vsi_setup_mgt(struct device *dev, struct nbl_vsi_mgt **vsi_mgt)
+{
+	*vsi_mgt = devm_kzalloc(dev, sizeof(struct nbl_vsi_mgt), GFP_KERNEL);
+	if (!*vsi_mgt)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void nbl_vsi_remove_mgt(struct device *dev, struct nbl_vsi_mgt **vsi_mgt)
+{
+	devm_kfree(dev, *vsi_mgt);
+	*vsi_mgt = NULL;
+}
+
+int nbl_vsi_mgt_start(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_vsi_mgt **vsi_mgt;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	vsi_mgt = &NBL_RES_MGT_TO_VSI_MGT(res_mgt);
+
+	return nbl_vsi_setup_mgt(dev, vsi_mgt);
+}
+
+void nbl_vsi_mgt_stop(struct nbl_resource_mgt *res_mgt)
+{
+	struct device *dev;
+	struct nbl_vsi_mgt **vsi_mgt;
+
+	dev = NBL_RES_MGT_TO_DEV(res_mgt);
+	vsi_mgt = &NBL_RES_MGT_TO_VSI_MGT(res_mgt);
+
+	if (!(*vsi_mgt))
+		return;
+
+	nbl_vsi_remove_mgt(dev, vsi_mgt);
+}
+
+int nbl_vsi_setup_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_VSI_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = func; } while (0)
+	NBL_VSI_OPS_TBL;
+#undef NBL_VSI_SET_OPS
+
+	return 0;
+}
+
+void nbl_vsi_remove_ops(struct nbl_resource_ops *res_ops)
+{
+#define NBL_VSI_SET_OPS(name, func) do { res_ops->NBL_NAME(name) = NULL; } while (0)
+	NBL_VSI_OPS_TBL;
+#undef NBL_VSI_SET_OPS
+}
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h
new file mode 100644
index 0000000000000000000000000000000000000000..50be586cd06c8a29832b03192ce172046b068299
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_hw/nbl_vsi.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan + */ + +#ifndef _NBL_VSI_H_ +#define _NBL_VSI_H_ + +#include "nbl_resource.h" + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..7b19d0a471829c24191f5d1b750d3ce7be7b7628 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_channel.h @@ -0,0 +1,1252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_DEF_CHANNEL_H_ +#define _NBL_DEF_CHANNEL_H_ + +#include "nbl_include.h" + +#define NBL_CHAN_OPS_TBL_TO_OPS(chan_ops_tbl) ((chan_ops_tbl)->ops) +#define NBL_CHAN_OPS_TBL_TO_PRIV(chan_ops_tbl) ((chan_ops_tbl)->priv) + +#define NBL_CHAN_SEND(chan_send, dst_id, mesg_type, \ + argument, arg_length, response, resp_length, need_ack) \ +do { \ + typeof(chan_send) *__chan_send = &(chan_send); \ + __chan_send->dstid = (dst_id); \ + __chan_send->msg_type = (mesg_type); \ + __chan_send->arg = (argument); \ + __chan_send->arg_len = (arg_length); \ + __chan_send->resp = (response); \ + __chan_send->resp_len = (resp_length); \ + __chan_send->ack = (need_ack); \ +} while (0) + +#define NBL_CHAN_ACK(chan_ack, dst_id, mesg_type, msg_id, err_code, ack_data, data_length) \ +do { \ + typeof(chan_ack) *__chan_ack = &(chan_ack); \ + __chan_ack->dstid = (dst_id); \ + __chan_ack->msg_type = (mesg_type); \ + __chan_ack->msgid = (msg_id); \ + __chan_ack->err = (err_code); \ + __chan_ack->data = (ack_data); \ + __chan_ack->data_len = (data_length); \ +} while (0) + +typedef void (*nbl_chan_resp)(void *, u16, u16, void *, u32); + +enum { + NBL_CHAN_RESP_OK, + NBL_CHAN_RESP_ERR, +}; + +enum nbl_chan_msg_type { + NBL_CHAN_MSG_ACK, + NBL_CHAN_MSG_ADD_MACVLAN, + NBL_CHAN_MSG_DEL_MACVLAN, + NBL_CHAN_MSG_ADD_MULTI_RULE, + NBL_CHAN_MSG_DEL_MULTI_RULE, + NBL_CHAN_MSG_SETUP_MULTI_GROUP, + NBL_CHAN_MSG_REMOVE_MULTI_GROUP, + NBL_CHAN_MSG_REGISTER_NET, + NBL_CHAN_MSG_UNREGISTER_NET, + NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, + NBL_CHAN_MSG_FREE_TXRX_QUEUES, + NBL_CHAN_MSG_SETUP_QUEUE, + NBL_CHAN_MSG_REMOVE_ALL_QUEUES, + NBL_CHAN_MSG_CFG_DSCH, + NBL_CHAN_MSG_SETUP_CQS, + NBL_CHAN_MSG_REMOVE_CQS, + NBL_CHAN_MSG_CFG_QDISC_MQPRIO, + NBL_CHAN_MSG_CONFIGURE_MSIX_MAP, + NBL_CHAN_MSG_DESTROY_MSIX_MAP, + NBL_CHAN_MSG_MAILBOX_ENABLE_IRQ, + NBL_CHAN_MSG_GET_GLOBAL_VECTOR, + NBL_CHAN_MSG_GET_VSI_ID, + NBL_CHAN_MSG_SET_PROSISC_MODE, + NBL_CHAN_MSG_GET_FIRMWARE_VERSION, + NBL_CHAN_MSG_GET_QUEUE_ERR_STATS, + NBL_CHAN_MSG_GET_COALESCE, + NBL_CHAN_MSG_SET_COALESCE, + NBL_CHAN_MSG_SET_SPOOF_CHECK_ADDR, + NBL_CHAN_MSG_SET_VF_SPOOF_CHECK, + NBL_CHAN_MSG_GET_RXFH_INDIR_SIZE, + NBL_CHAN_MSG_GET_RXFH_INDIR, + NBL_CHAN_MSG_GET_RXFH_RSS_KEY, + NBL_CHAN_MSG_GET_RXFH_RSS_ALG_SEL, + NBL_CHAN_MSG_GET_PHY_CAPS, + NBL_CHAN_MSG_GET_PHY_STATE, + NBL_CHAN_MSG_REGISTER_RDMA, + NBL_CHAN_MSG_UNREGISTER_RDMA, + NBL_CHAN_MSG_GET_REAL_HW_ADDR, + NBL_CHAN_MSG_GET_REAL_BDF, + NBL_CHAN_MSG_GRC_PROCESS, + NBL_CHAN_MSG_SET_SFP_STATE, + NBL_CHAN_MSG_SET_ETH_LOOPBACK, + NBL_CHAN_MSG_CHECK_ACTIVE_VF, + NBL_CHAN_MSG_GET_PRODUCT_FLEX_CAP, + NBL_CHAN_MSG_ALLOC_KTLS_TX_INDEX, + NBL_CHAN_MSG_FREE_KTLS_TX_INDEX, + NBL_CHAN_MSG_CFG_KTLS_TX_KEYMAT, + NBL_CHAN_MSG_ALLOC_KTLS_RX_INDEX, + NBL_CHAN_MSG_FREE_KTLS_RX_INDEX, + NBL_CHAN_MSG_CFG_KTLS_RX_KEYMAT, + NBL_CHAN_MSG_CFG_KTLS_RX_RECORD, + NBL_CHAN_MSG_ADD_KTLS_RX_FLOW, + NBL_CHAN_MSG_DEL_KTLS_RX_FLOW, + NBL_CHAN_MSG_ALLOC_IPSEC_TX_INDEX, + 
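+	/*
+	 * The IPsec index messages below mirror the kTLS ones above and
+	 * appear to follow the same lifecycle: allocate an index, configure
+	 * the SAD entry against it, add/delete flows referencing it, and
+	 * finally free the index.  (Lifecycle inferred from the parameter
+	 * structs later in this header, not spelled out in the patch.)
+	 */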
NBL_CHAN_MSG_FREE_IPSEC_TX_INDEX, + NBL_CHAN_MSG_ALLOC_IPSEC_RX_INDEX, + NBL_CHAN_MSG_FREE_IPSEC_RX_INDEX, + NBL_CHAN_MSG_CFG_IPSEC_TX_SAD, + NBL_CHAN_MSG_CFG_IPSEC_RX_SAD, + NBL_CHAN_MSG_ADD_IPSEC_TX_FLOW, + NBL_CHAN_MSG_DEL_IPSEC_TX_FLOW, + NBL_CHAN_MSG_ADD_IPSEC_RX_FLOW, + NBL_CHAN_MSG_DEL_IPSEC_RX_FLOW, + NBL_CHAN_MSG_NOTIFY_IPSEC_HARD_EXPIRE, + NBL_CHAN_MSG_GET_MBX_IRQ_NUM, + NBL_CHAN_MSG_CLEAR_FLOW, + NBL_CHAN_MSG_CLEAR_QUEUE, + NBL_CHAN_MSG_GET_ETH_ID, + NBL_CHAN_MSG_SET_OFFLOAD_STATUS, + + NBL_CHAN_MSG_INIT_OFLD, + NBL_CHAN_MSG_INIT_CMDQ, + NBL_CHAN_MSG_DESTROY_CMDQ, + NBL_CHAN_MSG_RESET_CMDQ, + NBL_CHAN_MSG_INIT_FLOW, + NBL_CHAN_MSG_DEINIT_FLOW, + NBL_CHAN_MSG_OFFLOAD_FLOW_RULE, + NBL_CHAN_MSG_GET_ACL_SWITCH, + NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, + NBL_CHAN_MSG_INIT_REP, + NBL_CHAN_MSG_GET_LINE_RATE_INFO, + + NBL_CHAN_MSG_REGISTER_NET_REP, + NBL_CHAN_MSG_UNREGISTER_NET_REP, + NBL_CHAN_MSG_REGISTER_ETH_REP, + NBL_CHAN_MSG_UNREGISTER_ETH_REP, + NBL_CHAN_MSG_REGISTER_UPCALL_PORT, + NBL_CHAN_MSG_UNREGISTER_UPCALL_PORT, + NBL_CHAN_MSG_GET_PORT_STATE, + NBL_CHAN_MSG_SET_PORT_ADVERTISING, + NBL_CHAN_MSG_GET_MODULE_INFO, + NBL_CHAN_MSG_GET_MODULE_EEPROM, + NBL_CHAN_MSG_GET_LINK_STATE, + NBL_CHAN_MSG_NOTIFY_LINK_STATE, + + NBL_CHAN_MSG_GET_QUEUE_CXT, + NBL_CHAN_MSG_CFG_LOG, + NBL_CHAN_MSG_INIT_VDPAQ, + NBL_CHAN_MSG_DESTROY_VDPAQ, + NBL_CHAN_GET_UPCALL_PORT, + NBL_CHAN_MSG_NOTIFY_ETH_REP_LINK_STATE, + NBL_CHAN_MSG_SET_ETH_MAC_ADDR, + NBL_CHAN_MSG_GET_FUNCTION_ID, + NBL_CHAN_MSG_GET_CHIP_TEMPERATURE, + + NBL_CHAN_MSG_DISABLE_PHY_FLOW, + NBL_CHAN_MSG_ENABLE_PHY_FLOW, + NBL_CHAN_MSG_SET_UPCALL_RULE, + NBL_CHAN_MSG_UNSET_UPCALL_RULE, + + NBL_CHAN_MSG_GET_REG_DUMP, + NBL_CHAN_MSG_GET_REG_DUMP_LEN, + + NBL_CHAN_MSG_CFG_LAG_HASH_ALGORITHM, + NBL_CHAN_MSG_CFG_LAG_MEMBER_FWD, + NBL_CHAN_MSG_CFG_LAG_MEMBER_LIST, + NBL_CHAN_MSG_CFG_LAG_MEMBER_UP_ATTR, + NBL_CHAN_MSG_ADD_LAG_FLOW, + NBL_CHAN_MSG_DEL_LAG_FLOW, + + NBL_CHAN_MSG_SWITCHDEV_INIT_CMDQ, + NBL_CHAN_MSG_SWITCHDEV_DEINIT_CMDQ, + NBL_CHAN_MSG_SET_TC_FLOW_INFO, + NBL_CHAN_MSG_UNSET_TC_FLOW_INFO, + NBL_CHAN_MSG_INIT_ACL, + NBL_CHAN_MSG_UNINIT_ACL, + + NBL_CHAN_MSG_CFG_LAG_MCC, + + NBL_CHAN_MSG_REGISTER_VSI2Q, + NBL_CHAN_MSG_SETUP_Q2VSI, + NBL_CHAN_MSG_REMOVE_Q2VSI, + NBL_CHAN_MSG_SETUP_RSS, + NBL_CHAN_MSG_REMOVE_RSS, + NBL_CHAN_MSG_GET_REP_QUEUE_INFO, + NBL_CHAN_MSG_CTRL_PORT_LED, + NBL_CHAN_MSG_NWAY_RESET, + NBL_CHAN_MSG_SET_INTL_SUPPRESS_LEVEL, + NBL_CHAN_MSG_GET_ETH_STATS, + NBL_CHAN_MSG_GET_MODULE_TEMPERATURE, + NBL_CHAN_MSG_GET_BOARD_INFO, + + NBL_CHAN_MSG_GET_P4_USED, + NBL_CHAN_MSG_GET_VF_BASE_VSI_ID, + + NBL_CHAN_MSG_ADD_LLDP_FLOW, + NBL_CHAN_MSG_DEL_LLDP_FLOW, + + NBL_CHAN_MSG_CFG_ETH_BOND_INFO, + NBL_CHAN_MSG_CFG_DUPPKT_MCC, + + NBL_CHAN_MSG_ADD_ND_UPCALL_FLOW, + NBL_CHAN_MSG_DEL_ND_UPCALL_FLOW, + + NBL_CHAN_MSG_GET_BOARD_ID, + + NBL_CHAN_MSG_SET_SHAPING_DPORT_VLD, + NBL_CHAN_MSG_SET_DPORT_FC_TH_VLD, + + NBL_CHAN_MSG_REGISTER_RDMA_BOND, + NBL_CHAN_MSG_UNREGISTER_RDMA_BOND, + + NBL_CHAN_MSG_RESTORE_NETDEV_QUEUE, + NBL_CHAN_MSG_RESTART_NETDEV_QUEUE, + NBL_CHAN_MSG_RESTORE_HW_QUEUE, + + NBL_CHAN_MSG_KEEP_ALIVE, + + NBL_CHAN_MSG_GET_BASE_MAC_ADDR, + + NBL_CHAN_MSG_CFG_BOND_SHAPING, + NBL_CHAN_MSG_CFG_BGID_BACK_PRESSURE, + + NBL_CHAN_MSG_ALLOC_KT_BLOCK, + NBL_CHAN_MSG_FREE_KT_BLOCK, + + NBL_CHAN_MSG_GET_USER_QUEUE_INFO, + NBL_CHAN_MSG_GET_ETH_BOND_INFO, + + NBL_CHAN_MSG_CLEAR_ACCEL_FLOW, + NBL_CHAN_MSG_SET_BRIDGE_MODE, + + NBL_CHAN_MSG_GET_VF_FUNCTION_ID, + NBL_CHAN_MSG_NOTIFY_LINK_FORCED, + + NBL_CHAN_MSG_SET_PMD_DEBUG, + + 
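+	/*
+	 * A minimal sketch of how one of these IDs travels through the
+	 * channel, using the NBL_CHAN_SEND() helper defined above.  The
+	 * caller-side names (chan_ops, chan_priv, dest_func_id) are
+	 * placeholders, not driver symbols:
+	 *
+	 *	struct nbl_chan_send_info send = { 0 };
+	 *	struct nbl_chan_param_get_vsi_id param = { .vsi_id = 0, .type = 0 };
+	 *	u16 vsi_id = 0;
+	 *
+	 *	NBL_CHAN_SEND(send, dest_func_id, NBL_CHAN_MSG_GET_VSI_ID,
+	 *		      &param, sizeof(param), &vsi_id, sizeof(vsi_id), 1);
+	 *	ret = chan_ops->send_msg(chan_priv, &send);
+	 *
+	 * The receiver dispatches on msg_type via its register_msg() table
+	 * and answers with NBL_CHAN_ACK() carrying the same msg_type.
+	 */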
NBL_CHAN_MSG_REGISTER_FUNC_MAC, + NBL_CHAN_MSG_SET_TX_RATE, + + NBL_CHAN_MSG_REGISTER_FUNC_LINK_FORCED, + NBL_CHAN_MSG_GET_LINK_FORCED, + + NBL_CHAN_MSG_REGISTER_FUNC_VLAN, + + NBL_CHAN_MSG_GET_FD_FLOW, + NBL_CHAN_MSG_GET_FD_FLOW_CNT, + NBL_CHAN_MSG_GET_FD_FLOW_ALL, + NBL_CHAN_MSG_GET_FD_FLOW_MAX, + NBL_CHAN_MSG_REPLACE_FD_FLOW, + NBL_CHAN_MSG_REMOVE_FD_FLOW, + NBL_CHAN_MSG_CFG_FD_FLOW_STATE, + + NBL_CHAN_MSG_REGISTER_FUNC_RATE, + NBL_CHAN_MSG_NOTIFY_VLAN, + NBL_CHAN_MSG_GET_XDP_QUEUE_INFO, + + NBL_CHAN_MSG_STOP_ABNORMAL_SW_QUEUE, + NBL_CHAN_MSG_STOP_ABNORMAL_HW_QUEUE, + NBL_CHAN_MSG_NOTIFY_RESET_EVENT, + NBL_CHAN_MSG_ACK_RESET_EVENT, + NBL_CHAN_MSG_GET_VF_VSI_ID, + + NBL_CHAN_MSG_CONFIGURE_QOS, + NBL_CHAN_MSG_GET_PFC_BUFFER_SIZE, + NBL_CHAN_MSG_SET_PFC_BUFFER_SIZE, + NBL_CHAN_MSG_GET_VF_STATS, + + /* mailbox msg end */ + NBL_CHAN_MSG_MAILBOX_MAX, + + /* adminq msg */ + NBL_CHAN_MSG_ADMINQ_GET_EMP_VERSION = 0x8101, /* Deprecated, should not be used */ + NBL_CHAN_MSG_ADMINQ_GET_NVM_VERSION = 0x8102, + NBL_CHAN_MSG_ADMINQ_REBOOT = 0x8104, + NBL_CHAN_MSG_ADMINQ_FLR_NOTIFY = 0x8105, + NBL_CHAN_MSG_ADMINQ_NOTIFY_FW_RESET = 0x8106, + NBL_CHAN_MSG_ADMINQ_LOAD_P4 = 0x8107, + NBL_CHAN_MSG_ADMINQ_LOAD_P4_DEFAULT = 0x8108, + NBL_CHAN_MSG_ADMINQ_EXT_ALERT = 0x8109, + NBL_CHAN_MSG_ADMINQ_FLASH_ERASE = 0x8201, + NBL_CHAN_MSG_ADMINQ_FLASH_READ = 0x8202, + NBL_CHAN_MSG_ADMINQ_FLASH_WRITE = 0x8203, + NBL_CHAN_MSG_ADMINQ_FLASH_ACTIVATE = 0x8204, + NBL_CHAN_MSG_ADMINQ_RESOURCE_WRITE = 0x8205, + NBL_CHAN_MSG_ADMINQ_RESOURCE_READ = 0x8206, + NBL_CHAN_MSG_ADMINQ_GET_NVM_BANK_INDEX = 0x820B, + NBL_CHAN_MSG_ADMINQ_VERIFY_NVM_BANK = 0x820C, + NBL_CHAN_MSG_ADMINQ_FLASH_LOCK = 0x820D, + NBL_CHAN_MSG_ADMINQ_FLASH_UNLOCK = 0x820E, + NBL_CHAN_MSG_ADMINQ_MANAGE_PORT_ATTRIBUTES = 0x8300, + NBL_CHAN_MSG_ADMINQ_PORT_NOTIFY = 0x8301, + NBL_CHAN_MSG_ADMINQ_GET_MODULE_EEPROM = 0x8302, + NBL_CHAN_MSG_ADMINQ_GET_ETH_STATS = 0x8303, + /* TODO: new kernel and ethtool support show fec stats */ + NBL_CHAN_MSG_ADMINQ_GET_FEC_STATS = 0x408, + NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_WRITE = 0x8F01, + NBL_CHAN_MSG_ADMINQ_EMP_CONSOLE_READ = 0x8F02, + + NBL_CHAN_MSG_MAX, +}; + +#define NBL_CHAN_ADMINQ_FUNCTION_ID (0xFFFF) + +struct nbl_chan_vsi_qid_info { + u16 vsi_id; + u16 local_qid; +}; + +enum nbl_chan_state { + NBL_CHAN_INTERRUPT_READY, + NBL_CHAN_RESETTING, + NBL_CHAN_ABNORMAL, + NBL_CHAN_STATE_NBITS +}; + +struct nbl_chan_param_add_macvlan { + u8 mac[ETH_ALEN]; + u16 vlan; + u16 vsi; +}; + +struct nbl_chan_param_del_macvlan { + u8 mac[ETH_ALEN]; + u16 vlan; + u16 vsi; +}; + +struct nbl_chan_param_register_net_info { + u16 pf_bdf; + u64 vf_bar_start; + u64 vf_bar_size; + u16 total_vfs; + u16 offset; + u16 stride; + u64 pf_bar_start; +}; + +struct nbl_chan_param_alloc_txrx_queues { + u16 vsi_id; + u16 queue_num; +}; + +struct nbl_chan_param_register_vsi2q { + u16 vsi_index; + u16 vsi_id; + u16 queue_offset; + u16 queue_num; +}; + +struct nbl_chan_param_setup_queue { + struct nbl_txrx_queue_param queue_param; + bool is_tx; +}; + +struct nbl_chan_param_cfg_dsch { + u16 vsi_id; + bool vld; +}; + +struct nbl_chan_param_setup_cqs { + u16 vsi_id; + u16 real_qps; +}; + +struct nbl_chan_param_set_promisc_mode { + u16 vsi_id; + u16 mode; +}; + +struct nbl_chan_param_cfg_msix_map { + u16 num_net_msix; + u16 num_others_msix; + u16 msix_mask_en; +}; + +struct nbl_chan_param_enable_mailbox_irq { + u16 vector_id; + bool enable_msix; +}; + +struct nbl_chan_param_get_global_vector { + u16 vsi_id; + u16 vector_id; +}; + +struct nbl_chan_param_get_vsi_id { + 
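+	/*
+	 * Payload of NBL_CHAN_MSG_GET_VSI_ID; 'type' selects which of the
+	 * function's VSIs is being asked for (interpretation inferred from
+	 * the message name, not spelled out in this patch).
+	 */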
u16 vsi_id; + u16 type; +}; + +struct nbl_chan_param_get_eth_id { + u16 vsi_id; + u8 eth_mode; + u8 eth_id; + u8 logic_eth_id; +}; + +struct nbl_chan_param_get_queue_info { + u16 queue_num; + u16 queue_size; +}; + +struct nbl_chan_param_set_eth_loopback { + u32 eth_port_id; + u32 enable; +}; + +struct nbl_chan_param_get_queue_err_stats { + u8 queue_id; + bool is_tx; +}; + +struct nbl_chan_param_set_coalesce { + u16 local_vector_id; + u16 vector_num; + u16 rx_max_coalesced_frames; + u16 rx_coalesce_usecs; +}; + +struct nbl_chan_param_set_spoof_check_addr { + u16 vsi_id; + u8 mac[ETH_ALEN]; +}; + +struct nbl_chan_param_set_vf_spoof_check { + u16 vsi_id; + u16 vf_id; + bool enable; +}; + +struct nbl_chan_param_get_rxfh_indir { + u16 vsi_id; + u32 rxfh_indir_size; +}; + +struct nbl_chan_result_get_real_bdf { + u8 bus; + u8 dev; + u8 function; +}; + +struct nbl_chan_param_set_upcall { + u16 vsi_id; + u8 eth_id; +}; + +struct nbl_chan_param_set_func_vld { + u8 eth_id; + bool vld; +}; + +struct nbl_chan_param_nvm_version_resp { + char magic[8]; /* "M181FWV0" */ + u32 version; /* major << 16 | minor << 8 | revision */ + u32 build_date; /* 0x20231231 - 2023.12.31 */ + u32 build_time; /* 0x00123456 - 12:34:56 */ + u32 build_hash; /* git commit hash */ + u32 rsv[2]; +}; + +struct nbl_chan_param_flash_read { + u32 bank_id; + u32 offset; + u32 len; +#define NBL_CHAN_FLASH_READ_LEN 0x800 +}; + +struct nbl_chan_param_flash_erase { + u32 bank_id; + u32 offset; + u32 len; +#define NBL_CHAN_FLASH_ERASE_LEN 0x1000 +}; + +struct nbl_chan_resource_write_param { + u32 resid; + u32 offset; + u32 len; + u8 data[]; +}; + +struct nbl_chan_resource_read_param { + u32 resid; + u32 offset; + u32 len; +}; + +struct nbl_chan_param_flash_write { + u32 bank_id; + u32 offset; + u32 len; +#define NBL_CHAN_FLASH_WRITE_LEN 0x800 + u8 data[NBL_CHAN_FLASH_WRITE_LEN]; +}; + +struct nbl_chan_param_load_p4 { + u8 name[NBL_P4_SECTION_NAME_LEN]; + u32 addr; + u32 size; + u32 section_index; + u32 section_offset; + u32 load_start; + u32 load_end; + u8 data[]; +}; + +struct nbl_chan_result_flash_activate { + u32 err_code; + u32 reset_flag; +}; + +struct nbl_chan_param_set_sfp_state { + u8 eth_id; + u8 state; +}; + +struct nbl_chan_param_get_module_eeprom { + u8 eth_id; + struct ethtool_eeprom eeprom; +}; + +struct nbl_chan_param_module_eeprom_info { + u8 eth_id; + u8 i2c_address; + u8 page; + u8 bank; + u32 write:1; + u32 rsvd:31; + u16 offset; + u16 length; +#define NBL_MODULE_EEPRO_WRITE_MAX_LEN (4) + u8 data[NBL_MODULE_EEPRO_WRITE_MAX_LEN]; +}; + +struct nbl_chan_param_eth_rep_notify_link_state { + u8 eth_id; + u8 link_state; +}; + +struct nbl_chan_cfg_ktls_keymat { + u32 index; + u8 mode; +#define NBL_CHAN_SALT_LEN 4 +#define NBL_CHAN_KEY_LEN 32 + u8 salt[NBL_CHAN_SALT_LEN]; + u8 key[NBL_CHAN_KEY_LEN]; + u8 key_len; +}; + +struct nbl_chan_cfg_ktls_record { + bool init; + u32 index; + u32 tcp_sn; + u64 rec_num; +}; + +struct nbl_chan_cfg_ktls_flow { + u32 index; + u32 vsi; +#define NBL_CHAN_KTLS_FLOW_LEN 12 + u32 data[NBL_CHAN_KTLS_FLOW_LEN]; +}; + +struct nbl_chan_ipsec_index { + int index; + struct nbl_ipsec_cfg_info cfg_info; +}; + +struct nbl_chan_cfg_ipsec_sad { + u32 index; + struct nbl_ipsec_sa_entry sa_entry; +}; + +struct nbl_chan_cfg_ipsec_flow { + u32 index; + u32 vsi; +#define NBL_CHAN_IPSEC_FLOW_LEN 12 + u32 data[NBL_CHAN_IPSEC_FLOW_LEN]; +}; + +/* for PMD driver */ +struct nbl_chan_param_get_rep_vsi_id { + u16 pf_id; + u16 vf_id; +}; + +struct nbl_chan_param_register_net_rep { + u16 pf_id; + u16 vf_id; +}; + +struct 
nbl_chan_param_set_eth_mac_addr { + u8 mac[ETH_ALEN]; + u8 eth_id; +}; + +struct nbl_chan_cmdq_init_info { + u64 pa; + u32 len; + u16 vsi_id; + u16 bdf_num; +}; + +struct nbl_chan_rep_cfg_info { + u16 vsi_id; + u8 inner_type; + u8 outer_type; + u8 rep_type; +}; + +struct nbl_flow_prf_data { + u16 pp_id; + u16 prf_id; +}; + +struct nbl_flow_prf_upcall_info { + u32 item_cnt; +#define NBL_MAX_PP_NUM 64 + struct nbl_flow_prf_data prf_data[NBL_MAX_PP_NUM]; +}; + +struct nbl_acl_cfg_param { + u32 acl_enable:1; + u32 acl_key_width:9; + u32 acl_key_cap:16; + u32 acl_tcam_idx:4; + u32 acl_stage:1; + u32 loop_en:1; +#define NBL_ACL_TCAM_CFG_NUM 4 +#define NBL_ACL_AD_CFG_NUM 4 + u32 tcam_cfg[NBL_ACL_TCAM_CFG_NUM]; + u32 action_cfg[NBL_ACL_AD_CFG_NUM]; +}; + +struct nbl_chan_flow_init_info { + u8 acl_switch; + u16 vsi_id; + u16 acl_loop_en; +#define NBL_ACL_CFG_CNT 2 + struct nbl_acl_cfg_param acl_cfg[NBL_ACL_CFG_CNT]; + struct nbl_flow_prf_upcall_info flow_cfg; +}; + +#pragma pack(1) + +struct nbl_chan_regs_info { + union { + u16 depth; + struct { + u16 ram_id:5; + u16 s_depth:11; + }; + }; + u16 data_len:6; /* align to u32 */ + u16 tbl_name:7; + u16 mode:3; + u32 data[]; +}; + +struct nbl_chan_bulk_regs_info { + u32 item_cnt:9; + u32 rsv:7; + u32 data_len:16; /* align to u32 */ + u32 data[]; +}; + +#pragma pack() + +struct nbl_chan_param_get_queue_cxt { + u16 vsi_id; + u16 local_queue; +}; + +struct nbl_chan_param_cfg_log { + u16 vsi_id; + u16 qps; + bool vld; +}; + +struct nbl_chan_vdpaq_init_info { + u64 pa; + u32 size; +}; + +struct nbl_chan_param_cfg_lag_hash_algorithm { + u16 eth_id; + u16 lag_id; + enum netdev_lag_hash hash_type; +}; + +struct nbl_chan_param_cfg_lag_member_fwd { + u16 eth_id; + u16 lag_id; + u8 fwd; +}; + +struct nbl_chan_param_cfg_lag_member_up_attr { + u16 eth_id; + u16 lag_id; + bool enable; +}; + +struct nbl_chan_param_cfg_lag_mcc { + u16 eth_id; + u16 lag_id; + bool enable; +}; + +struct nbl_chan_param_cfg_bond_shaping { + u8 eth_id; + bool enable; +}; + +struct nbl_chan_param_cfg_bgid_back_pressure { + u8 main_eth_id; + u8 other_eth_id; + bool enable; +}; + +struct nbl_chan_param_ctrl_port_led { + u32 eth_id; + enum nbl_led_reg_ctrl led_status; +}; + +struct nbl_chan_param_set_intr_suppress_level { + u16 local_vector_id; + u16 vector_num; + u16 level; +}; + +struct nbl_chan_param_get_private_stat_data { + u32 eth_id; + u32 data_len; +}; + +struct nbl_chan_param_get_hwmon { + u32 senser_id; + enum nbl_hwmon_type type; +}; + +struct nbl_chan_param_nd_upcall { + u16 vsi_id; + bool for_pmd; +}; + +struct nbl_chan_param_restore_queue { + u16 local_queue_id; + int type; +}; + +struct nbl_chan_param_restart_queue { + u16 local_queue_id; + int type; +}; + +struct nbl_chan_param_restore_hw_queue { + u16 vsi_id; + u16 local_queue_id; + dma_addr_t dma; + int type; +}; + +struct nbl_chan_param_stop_abnormal_sw_queue { + u16 local_queue_id; + int type; +}; + +struct nbl_chan_param_stop_abnormal_hw_queue { + u16 vsi_id; + u16 local_queue_id; + int type; +}; + +struct nbl_chan_param_get_vf_func_id { + u16 vsi_id; + int vf_id; +}; + +struct nbl_chan_param_get_vf_vsi_id { + u16 vsi_id; + int vf_id; +}; + +struct nbl_chan_param_register_func_mac { + u16 func_id; + u8 mac[ETH_ALEN]; +}; + +struct nbl_chan_param_register_vlan { + u16 func_id; + u16 vlan_tci; + u16 vlan_proto; +}; + +struct nbl_chan_param_set_tx_rate { + u16 func_id; + int tx_rate; +}; + +struct nbl_chan_param_register_func_link_forced { + u16 func_id; + u8 link_forced; + bool should_notify; +}; + +struct 
nbl_chan_param_notify_link_state { + u8 link_state; + u32 link_speed; +}; + +struct nbl_register_net_param { + u16 pf_bdf; + u64 vf_bar_start; + u64 vf_bar_size; + u16 total_vfs; + u16 offset; + u16 stride; + u64 pf_bar_start; +}; + +struct nbl_register_net_result { + u16 tx_queue_num; + u16 rx_queue_num; + u16 queue_size; + u16 rdma_enable; + + u64 hw_features; + u64 features; + + u16 max_mtu; + u16 queue_offset; + + u8 mac[ETH_ALEN]; + u16 vlan_proto; + u16 vlan_tci; + u32 rate; +}; + +#define NBL_CHAN_FDIR_FLOW_RULE_SIZE 1024 +enum nbl_chan_fdir_flow_type { + NBL_CHAN_FDIR_FLOW_FULL, /* for DPDK isolate flow */ + NBL_CHAN_FDIR_FLOW_ETHER, + NBL_CHAN_FDIR_FLOW_IPv4, + NBL_CHAN_FDIR_FLOW_IPv6, + NBL_CHAN_FDIR_FLOW_TCP_IPv4, + NBL_CHAN_FDIR_FLOW_TCP_IPv6, + NBL_CHAN_FDIR_FLOW_UDP_IPv4, + NBL_CHAN_FDIR_FLOW_UDP_IPv6, + NBL_CHAN_FDIR_FLOW_MAX_TYPE, +}; + +enum nbl_chan_fdir_rule_type { + NBL_CHAN_FDIR_RULE_NORMAL, + NBL_CHAN_FDIR_RULE_ISOLATE, + NBL_CHAN_FDIR_RULE_MAX, +}; + +enum nbl_chan_fdir_component_type { + NBL_CHAN_FDIR_KEY_SRC_MAC, + NBL_CHAN_FDIR_KEY_DST_MAC, + NBL_CHAN_FDIR_KEY_PROTO, + NBL_CHAN_FDIR_KEY_SRC_IPv4, + NBL_CHAN_FDIR_KEY_DST_IPv4, + NBL_CHAN_FDIR_KEY_L4PROTO, + NBL_CHAN_FDIR_KEY_SRC_IPv6, + NBL_CHAN_FDIR_KEY_DST_IPv6, + NBL_CHAN_FDIR_KEY_SPORT, + NBL_CHAN_FDIR_KEY_DPORT, + NBL_CHAN_FDIR_KEY_UDF, + NBL_CHAN_FDIR_ACTION_QUEUE, + NBL_CHAN_FDIR_ACTION_VSI +}; + +enum { + NBL_FD_STATE_OFF = 0, + NBL_FD_STATE_ON, + NBL_FD_STATE_FLUSH, + NBL_FD_STATE_MAX, +}; + +struct nbl_chan_param_fdir_replace { + enum nbl_chan_fdir_flow_type flow_type; + enum nbl_chan_fdir_rule_type rule_type; + u32 base_length; + u32 vsi; + u32 location; + u16 vf; + u16 ring; + u16 dport; + u16 global_queue_id; + bool order; + u32 tlv_length; + u8 tlv[]; +}; + +#define NBL_CHAN_FDIR_FLOW_TLV_SIZE (1024 - sizeof(struct nbl_chan_param_fdir_replace)) +#define NBL_CHAN_FDIR_TLV_HEADER_LEN 4 + +struct nbl_chan_param_fdir_del { + enum nbl_chan_fdir_rule_type rule_type; + u32 location; + u16 vsi; +}; + +struct nbl_chan_param_fdir_flowcnt { + enum nbl_chan_fdir_rule_type rule_type; + u16 vsi; +}; + +struct nbl_chan_param_get_fd_flow { + u32 location; + enum nbl_chan_fdir_rule_type rule_type; + u16 vsi_id; +}; + +#define NBL_CHAN_GET_FD_LOCS_MAX 512 +struct nbl_chan_param_get_fd_flow_all { + enum nbl_chan_fdir_rule_type rule_type; + u16 start; + u16 num; + u16 vsi_id; +}; + +struct nbl_chan_result_get_fd_flow_all { + u32 rule_locs[NBL_CHAN_GET_FD_LOCS_MAX]; +}; + +struct nbl_chan_param_config_fd_flow_state { + enum nbl_chan_fdir_rule_type rule_type; + u16 vsi_id; + u16 state; +}; + +struct nbl_lag_mem_list_info { + u16 vsi_id; + u8 eth_id; + bool active; +}; + +struct nbl_lag_member_list_param { + struct net_device *bond_netdev; + u16 lag_num; + u16 lag_id; + /* port_list only contains ports that are active */ + u8 port_list[NBL_LAG_MAX_PORTS]; + /* member_list always contains all registered member */ + struct nbl_lag_mem_list_info member_list[NBL_LAG_MAX_PORTS]; + bool duppkt_enable; +}; + +struct nbl_queue_err_stats { + u16 dvn_pkt_drop_cnt; + u32 uvn_stat_pkt_drop; +}; + +struct nbl_rdma_register_param { + bool has_rdma; + u32 mem_type; + int intr_num; + int id; +}; + +struct nbl_phy_caps { + u32 speed; /* enum nbl_eth_speed */ + u32 fec_ability; + u32 pause_param; /* bit0 tx, bit1 rx */ +}; + +struct nbl_fc_info { + u32 rx_pause; + u32 tx_pause; +}; + +/* for pmd driver */ +struct nbl_register_net_rep_result { + u16 vsi_id; + u16 func_id; +}; + +/* emp to ctrl dev notify */ +struct nbl_port_notify { + u32 id; 
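+	/* 'id' is the port being reported on; 'speed' below is in 10 Mbps
+	 * units, so e.g. a 25G link arrives as 2500 and a 100G link as 10000.
+	 */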
+ u32 speed; /* in 10 Mbps units */ + u8 link_state:1; /* 0:down, 1:up */ + u8 module_inplace:1; /* 0: not inplace, 1:inplace */ + u8 revd0:6; + u8 flow_ctrl; /* enum nbl_flow_ctrl */ + u8 fec; /* enum nbl_port_fec */ + u8 active_lanes; + u8 rsvd1[4]; + u64 advertising; /* enum nbl_port_cap */ + u64 lp_advertising; /* enum nbl_port_cap */ +}; + +#define NBL_EMP_ALERT_DATA_MAX_SIZE 64 +struct nbl_chan_param_emp_alert_event { + u16 type; + u16 len; + u8 data[NBL_EMP_ALERT_DATA_MAX_SIZE]; +}; + +struct nbl_port_state { + u64 port_caps; + u64 port_advertising; + u64 port_lp_advertising; + u32 link_speed; + u8 active_fc; + u8 active_fec; /* enum nbl_port_fec */ + u8 link_state; + u8 module_inplace; + u8 port_type; /* enum nbl_port_type */ + u8 port_max_rate; /* enum nbl_port_max_rate */ + u8 fw_port_max_speed; /* enum nbl_fw_port_speed */ + u8 module_repluged; +}; + +struct nbl_port_advertising { + u8 eth_id; + u64 speed_advert; + u8 active_fc; + u8 active_fec; /* enum nbl_port_fec */ + u8 autoneg; +}; + +struct nbl_port_key { + u32 id; /* port id */ + u32 subop; /* 1: read, 2: write */ + u64 data[]; /* [47:0]: data, [55:48]: rsvd, [63:56]: key */ +}; + +struct nbl_eth_link_info { + u8 link_status; + u32 link_speed; +}; + +struct nbl_board_port_info { + u8 eth_num; + u8 eth_speed; + u8 p4_version; + u8 rsv[5]; +}; + +struct nbl_bond_port_info { + u16 vsi_id; + u8 eth_id; + u8 is_active; +}; + +struct nbl_bond_info { + struct nbl_bond_port_info port[NBL_LAG_MAX_PORTS]; + u8 lag_id; + u8 mem_num; +}; + +struct nbl_bond_param { + struct nbl_bond_info info[NBL_LAG_MAX_NUM]; + u8 lag_num; +}; + +/* to support channel req and response use different driver version, + * to define the struct to same with struct ethtool_coalesce + */ +struct nbl_chan_param_get_coalesce { + u32 cmd; + u32 rx_coalesce_usecs; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + u32 tx_coalesce_usecs; + u32 tx_max_coalesced_frames; + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + u32 stats_block_coalesce_usecs; + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + u32 rate_sample_interval; +}; + +enum nbl_fw_reset_type { + NBL_FW_HIGH_TEMP_RESET, + NBL_FW_RESET_TYPE_MAX, +}; + +struct nbl_chan_param_notify_fw_reset_info { + u16 type; /* enum nbl_fw_reset_type */ + u16 len; + u16 data[]; +}; + +struct nbl_chan_param_configure_qos { + u8 eth_id; + u8 trust; + u8 pfc[NBL_MAX_PFC_PRIORITIES]; + u8 dscp2prio_map[NBL_DSCP_MAX]; +}; + +struct nbl_chan_param_set_pfc_buffer_size { + u8 eth_id; + u8 prio; + int xoff; + int xon; +}; + +struct nbl_chan_param_get_pfc_buffer_size { + u8 eth_id; + u8 prio; +}; + +struct nbl_chan_param_get_pfc_buffer_size_resp { + int xoff; + int xon; +}; + +struct nbl_chan_send_info { + void *arg; + size_t arg_len; + void *resp; + size_t resp_len; + u16 dstid; + u16 msg_type; + u16 ack; + u16 ack_len; +}; + +struct nbl_chan_ack_info { + void *data; + int err; + u32 data_len; + u16 dstid; + u16 msg_type; + u16 msgid; +}; + +enum nbl_channel_type { + NBL_CHAN_TYPE_MAILBOX, + NBL_CHAN_TYPE_ADMINQ, + NBL_CHAN_TYPE_MAX +}; + +#define NBL_LINE_RATE_INFO_LENGTH (3) +struct nbl_rep_line_rate_info { + u16 vsi_id; + u16 func_id; + u32 
data[NBL_LINE_RATE_INFO_LENGTH]; +}; + +struct nbl_channel_ops { + int (*send_msg)(void *priv, struct nbl_chan_send_info *chan_send); + int (*send_ack)(void *priv, struct nbl_chan_ack_info *chan_ack); + int (*register_msg)(void *priv, u16 msg_type, nbl_chan_resp func, void *callback_priv); + int (*cfg_chan_qinfo_map_table)(void *priv, u8 chan_type); + bool (*check_queue_exist)(void *priv, u8 chan_type); + int (*setup_queue)(void *priv, u8 chan_type); + int (*set_listener_info)(void *priv, void *shm_ring, struct eventfd_ctx *eventfd); + int (*set_listener_msgtype)(void *priv, int msgtype); + void (*clear_listener_info)(void *priv); + int (*teardown_queue)(void *priv, u8 chan_type); + void (*clean_queue_subtask)(void *priv, u8 chan_type); + int (*dump_txq)(void *priv, struct seq_file *m, u8 type); + int (*dump_rxq)(void *priv, struct seq_file *m, u8 type); + u32 (*get_adminq_tx_buf_size)(void *priv); + int (*init_cmdq)(struct device *dev, void *priv); + int (*deinit_cmdq)(struct device *dev, void *priv, u8 inst_id); + int (*send_cmd)(void *priv, const void *hdr, void *cmd); + int (*setup_keepalive)(void *priv, u16 dest_id, u8 chan_type); + void (*remove_keepalive)(void *priv, u8 chan_type); + void (*register_chan_task)(void *priv, u8 chan_type, struct work_struct *task); + void (*set_queue_state)(void *priv, enum nbl_chan_state state, u8 chan_type, u8 set); +}; + +struct nbl_channel_ops_tbl { + struct nbl_channel_ops *ops; + void *priv; +}; + +int nbl_chan_init_common(void *p, struct nbl_init_param *param); +void nbl_chan_remove_common(void *p); +int nbl_chan_init_bootis(void *p, struct nbl_init_param *param); +void nbl_chan_remove_bootis(void *p); +int nbl_chan_init_virtio(void *p, struct nbl_init_param *param); +void nbl_chan_remove_virtio(void *p); + +enum nbl_cmd_opcode_list { + NBL_CMD_OP_WRITE, + NBL_CMD_OP_READ, + NBL_CMD_OP_SEARCH, + NBL_CMD_OP_DELETE, +}; + +enum nbl_flow_opcode_list { + NBL_OPCODE_QUERY, + NBL_OPCODE_ADD, + NBL_OPCODE_UPDATE, + NBL_OPCODE_DELETE, +}; + +/* command header structure */ +struct nbl_cmd_hdr { + u8 block; + u8 module; + u8 table; + u16 opcode; +}; + +struct nbl_cmd_content { + u32 in_length; + u32 out_length; + u64 in_params; + u64 out_params; + u16 entries; + u32 idx; + u64 in; + u64 out; + void *in_va; + void *out_va; + u32 wait; +}; + +#define NBL_CMDQ_MAX_OP_CODE 16 +/* register block, module and table info */ +enum nbl_flow_opcode { + NBL_FEM_KTAT_WRITE, + NBL_FEM_KTAT_READ, + NBL_FEM_KTAT_SEARCH, + NBL_FEM_HT_WRITE, + NBL_FEM_HT_READ, + NBL_ACL_TCAM_WRITE, + NBL_ACL_TCAM_READ, + NBL_ACL_TCAM_QUERY, + NBL_ACL_FLOWID_READ, + NBL_ACL_STATID_READ, +}; + +#define NBL_BLOCK_PPE 0 +#define NBL_BLOCK_DP 1 +#define NBL_BLOCK_IFC 2 +#define NBL_MODULE_FEM 0 +#define NBL_MODULE_ACL 1 +#define NBL_TABLE_FEM_KTAT 0 +#define NBL_TABLE_FEM_HT 1 +#define NBL_TABLE_ACL_TCAM 0 +#define NBL_TABLE_ACL_FLOWID 1 +#define NBL_TABLE_ACL_STATID 2 + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h new file mode 100644 index 0000000000000000000000000000000000000000..fb91ebfc6c40c9341d2d3706209e64b3d2624319 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_common.h @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
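+ *
+ * Shared helpers for the nbl layers: CRC16 variants, TCAM truth-value
+ * conversion, hash folding, and debug-mask aware logging wrappers.
+ *
+ * Illustrative use of the CRC16 helpers defined below (the key buffer
+ * is made up for the example):
+ *
+ *	u8 key[4] = { 0x12, 0x34, 0x56, 0x78 };
+ *	u16 sig = NBL_CRC16_CCITT(key, sizeof(key));
+ *
+ * All four variants funnel into nbl_calc_crc16(), differing only in
+ * polynomial, init value, bit reflection and final XOR.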
+ * Author: Bennie Yan + */ + +#ifndef _NBL_DEF_COMMON_H_ +#define _NBL_DEF_COMMON_H_ + +#include "nbl_include.h" +#include +#include + +#define NBL_OK 0 +#define NBL_CONTINUE 1 +#define NBL_FAIL -1 + +#define NBL_HASH_CFT_MAX 4 +#define NBL_HASH_CFT_AVL 2 + +#define NBL_CRC16_CCITT(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 1, 0x0000) +#define NBL_CRC16_CCITT_FALSE(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0xFFFF, 0, 0x0000) +#define NBL_CRC16_XMODEM(data, size) \ + nbl_calc_crc16(data, size, 0x1021, 0x0000, 0, 0x0000) +#define NBL_CRC16_IBM(data, size) \ + nbl_calc_crc16(data, size, 0x8005, 0x0000, 1, 0x0000) + +static inline void nbl_tcam_truth_value_convert(u64 *data, u64 *mask) +{ + u64 tcam_x = 0; + u64 tcam_y = 0; + + tcam_x = *data & ~(*mask); + tcam_y = ~(*data) & ~(*mask); + + *data = tcam_x; + *mask = tcam_y; +} + +static inline u8 nbl_invert_uint8(const u8 data) +{ + u8 i, result = 0; + + for (i = 0; i < 8; i++) { + if (data & (1 << i)) + result |= 1 << (7 - i); + } + + return result; +} + +static inline u16 nbl_invert_uint16(const u16 data) +{ + u16 i, result = 0; + + for (i = 0; i < 16; i++) { + if (data & (1 << i)) + result |= 1 << (15 - i); + } + + return result; +} + +static inline u16 nbl_calc_crc16(const u8 *data, u32 size, u16 crc_poly, + u16 init_value, u8 ref_flag, u16 xorout) +{ + u16 crc_reg = init_value, tmp = 0; + u8 j, byte = 0; + + while (size--) { + byte = *(data++); + if (ref_flag) + byte = nbl_invert_uint8(byte); + crc_reg ^= byte << 8; + for (j = 0; j < 8; j++) { + tmp = crc_reg & 0x8000; + crc_reg <<= 1; + if (tmp) + crc_reg ^= crc_poly; + } + } + + if (ref_flag) + crc_reg = nbl_invert_uint16(crc_reg); + + crc_reg = crc_reg ^ xorout; + return crc_reg; +} + +static inline u16 nbl_hash_transfer(u16 hash, u16 power, u16 depth) +{ + u16 temp = 0; + u16 val = 0; + u32 val2 = 0; + u16 off = 16 - power; + + temp = (hash >> power); + val = hash << off; + val = val >> off; + + if (depth == 0) { + val = temp + val; + val = val << off; + val = val >> off; + } else { + val2 = val; + val2 *= depth; + val2 = val2 >> power; + val = (u16)val2; + } + + return val; +} + +/* debug masks - set these bits in adapter->debug_mask to control output */ +enum nbl_debug_mask { + /* BIT0~BIT30 use to define adapter debug_mask */ + NBL_DEBUG_MAIN = 0x00000001, + NBL_DEBUG_COMMON = 0x00000002, + NBL_DEBUG_DEBUGFS = 0x00000004, + NBL_DEBUG_PHY = 0x00000008, + NBL_DEBUG_FLOW = 0x00000010, + NBL_DEBUG_RESOURCE = 0x00000020, + NBL_DEBUG_QUEUE = 0x00000040, + NBL_DEBUG_INTR = 0x00000080, + NBL_DEBUG_ADMINQ = 0x00000100, + NBL_DEBUG_DEVLINK = 0x00000200, + NBL_DEBUG_ACCEL = 0x00000400, + NBL_DEBUG_MBX = 0x00000800, + NBL_DEBUG_ST = 0x00001000, + NBL_DEBUG_VSI = 0x00002000, + NBL_DEBUG_CUSTOMIZED_P4 = 0x00004000, + + /* BIT31 use to distinguish netif debug level or adapter debug_mask */ + NBL_DEBUG_USER = 0x80000000, + + /* Means turn on all adapter debug_mask */ + NBL_DEBUG_ALL = 0xFFFFFFFF +}; + +#define nbl_err(common, lvl, fmt, ...) \ +do { \ + typeof(common) _common = (common); \ + if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \ + dev_err(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \ +} while (0) + +#define nbl_warn(common, lvl, fmt, ...) \ +do { \ + typeof(common) _common = (common); \ + if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \ + dev_warn(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \ +} while (0) + +#define nbl_info(common, lvl, fmt, ...) 
\
+do { \
+	typeof(common) _common = (common); \
+	if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \
+		dev_info(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define nbl_debug(common, lvl, fmt, ...) \
+do { \
+	typeof(common) _common = (common); \
+	if (((lvl) & NBL_COMMON_TO_DEBUG_LVL(_common))) \
+		dev_dbg(NBL_COMMON_TO_DEV(_common), fmt, ##__VA_ARGS__); \
+} while (0)
+
+static void __maybe_unused nbl_printk(struct device *dev, int level, const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
+		      "Level %d is out of range, set to default level\n", level))
+		level = LOGLEVEL_DEFAULT;
+
+	va_start(args, format);
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	dev_printk_emit(level, dev, "%s %s: %pV", dev_driver_string(dev), dev_name(dev),
+			&vaf);
+	va_end(args);
+}
+
+/* supports the LOGLEVEL_EMERG/LOGLEVEL_CRIT log levels as well */
+#define nbl_log(common, level, format, ...) \
+do { \
+	typeof(common) _common = (common); \
+	nbl_printk(NBL_COMMON_TO_DEV(_common), level, format, ##__VA_ARGS__); \
+} while (0)
+
+#define NBL_COMMON_TO_PDEV(common) ((common)->pdev)
+#define NBL_COMMON_TO_DEV(common) ((common)->dev)
+#define NBL_COMMON_TO_DMA_DEV(common) ((common)->dma_dev)
+#define NBL_COMMON_TO_VSI_ID(common) ((common)->vsi_id)
+#define NBL_COMMON_TO_ETH_ID(common) ((common)->eth_id)
+#define NBL_COMMON_TO_ETH_MODE(common) ((common)->eth_mode)
+#define NBL_COMMON_TO_DEBUG_LVL(common) ((common)->debug_lvl)
+#define NBL_COMMON_TO_VF_CAP(common) ((common)->is_vf)
+#define NBL_COMMON_TO_PCI_USING_DAC(common) ((common)->pci_using_dac)
+#define NBL_COMMON_TO_MGT_PF(common) ((common)->mgt_pf)
+#define NBL_COMMON_TO_PCI_FUNC_ID(common) ((common)->function)
+#define NBL_COMMON_TO_BOARD_ID(common) ((common)->board_id)
+#define NBL_COMMON_TO_LOGIC_ETH_ID(common) ((common)->logic_eth_id)
+
+#define NBL_ONE_ETHERNET_PORT (1)
+#define NBL_TWO_ETHERNET_PORT (2)
+#define NBL_FOUR_ETHERNET_PORT (4)
+#define NBL_TWO_ETHERNET_VSI_ID_GAP (512)
+#define NBL_FOUR_ETHERNET_VSI_ID_GAP (256)
+#define NBL_VSI_ID_GAP(mode) ((mode) == NBL_FOUR_ETHERNET_PORT ? \
+			      NBL_FOUR_ETHERNET_VSI_ID_GAP : \
+			      NBL_TWO_ETHERNET_VSI_ID_GAP)
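+
+/* Sketch of how the logging wrappers above are used (illustrative
+ * only): output is gated by the per-adapter debug mask, so the
+ * matching enum nbl_debug_mask bit must be set in debug_lvl.
+ *
+ *	common->debug_lvl |= NBL_DEBUG_QUEUE;
+ *	nbl_info(common, NBL_DEBUG_QUEUE, "queue %u ready\n", qid);
+ */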
+
+#define NBL_BOOTIS_ECPU_ETH0_FUNCTION (2)
+#define NBL_BOOTIS_ECPU_ETH1_FUNCTION (3)
+#define NBL_BOOTIS_ECPU_ETH0_VSI (2020)
+#define NBL_BOOTIS_ECPU_ETH1_VSI (2021)
+
+#define NBL_REP_FILL_EXT_HDR (1)
+#define NBL_PF_FILL_EXT_HDR (2)
+
+#define NBL_SKB_FILL_VSI_ID_OFF (32)
+#define NBL_SKB_FILL_EXT_HDR_OFF (34)
+#define NBL_INVALID_QUEUE_ID (0xFFFF)
+
+#define NBL_INDEX_SIZE_MAX (512 * 1024) /* maximum index size */
+
+#define NBL_INDEX_TBL_KEY_INIT(key, dev_arg, start_index_arg, index_size_arg, key_size_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->dev = dev_arg; \
+	__key->start_index = start_index_arg; \
+	__key->index_size = index_size_arg; \
+	__key->key_size = key_size_arg; \
+} while (0)
+
+struct nbl_common_info {
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct device *dma_dev;
+	u32 debug_lvl;
+	u32 msg_enable;
+	u16 vsi_id;
+	u8 eth_id;
+	u8 logic_eth_id;
+	u8 eth_mode;
+	u8 is_vf;
+
+	u8 function;
+	u8 devid;
+	u8 bus;
+
+	u16 mgt_pf;
+	u8 board_id;
+
+	bool pci_using_dac;
+	u8 tc_inst_id; /* for tc flow and cmdq */
+
+	enum nbl_product_type product_type;
+};
+
+struct nbl_netdev_rep_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct device *dev,
+			struct nbl_netdev_rep_attr *attr, char *buf);
+	ssize_t (*store)(struct device *dev,
+			 struct nbl_netdev_rep_attr *attr, const char *buf, size_t len);
+	int rep_id;
+};
+
+struct nbl_index_tbl_key {
+	struct device *dev;
+	u32 start_index;
+	u32 index_size; /* the available indexes are [start_index, start_index + index_size) */
+	u32 key_size;
+};
+
+struct nbl_index_key_extra {
+	u32 index_num;
+	/* begin_idx % begin_idx_multiple == 0; e.g. a value of 2 forces begin_idx to be even */
+	u32 begin_idx_multiple;
+	/* true: do not allocate a new node; index_num and begin_idx_multiple are ignored */
+	bool not_alloc_new_node;
+};
+
+#define NBL_INDEX_EXTRA_KEY_INIT(key, idx_num_arg, multiple_arg, not_alloc_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->index_num = idx_num_arg; \
+	__key->begin_idx_multiple = multiple_arg; \
+	__key->not_alloc_new_node = not_alloc_arg; \
+} while (0)
+
+struct nbl_index_tbl_del_key {
+	void *action_priv;
+	void (*action_func)(void *priv, int index, void *data);
+};
+
+#define NBL_INDEX_TBL_DEL_KEY_INIT(key, priv_arg, act_func_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->action_priv = priv_arg; \
+	__key->action_func = act_func_arg; \
+} while (0)
+
+struct nbl_index_tbl_scan_key {
+	bool del;
+	u8 resv[3];
+	void *action_priv;
+	void (*action_func)(void *priv, int index, void *data);
+};
+
+#define NBL_INDEX_TBL_SCAN_KEY_INIT(key, del_arg, priv_arg, act_func_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->del = del_arg; \
+	memset(__key->resv, 0, sizeof(__key->resv)); \
+	__key->action_priv = priv_arg; \
+	__key->action_func = act_func_arg; \
+} while (0)
+
+struct nbl_hash_tbl_key {
+	struct device *dev;
+	u16 key_size;
+	u16 data_size; /* excludes the key and the internal node bookkeeping */
+	u16 bucket_size;
+	u8 lock_need; /* true: safe for multi-threaded operation */
+	u8 resv;
+};
+
+#define NBL_HASH_TBL_KEY_INIT(key, dev_arg, key_size_arg, data_size_arg, bucket_size_arg, \
+			      lock_need_args) \
+do { \
+	typeof(key) __key = key; \
+	__key->dev = dev_arg; \
+	__key->key_size = key_size_arg; \
+	__key->data_size = data_size_arg; \
+	__key->bucket_size = bucket_size_arg; \
+	__key->lock_need = lock_need_args; \
+	__key->resv = 0; \
+} while (0)
+
+enum nbl_hash_tbl_op_type {
+	NBL_HASH_TBL_OP_SHOW = 0,
+	NBL_HASH_TBL_OP_DELETE,
+};
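+
+/* Illustrative sketch of the index-table helpers (the init macros are
+ * above, the functions are declared later in this header; the locals
+ * here are assumptions, not driver code):
+ *
+ *	struct nbl_index_tbl_key tbl_key;
+ *	struct nbl_index_key_extra extra;
+ *	void *tbl;
+ *	int idx;
+ *
+ *	NBL_INDEX_TBL_KEY_INIT(&tbl_key, dev, 0, 1024, sizeof(u32));
+ *	tbl = nbl_common_init_index_table(&tbl_key);
+ *	NBL_INDEX_EXTRA_KEY_INIT(&extra, 1, 1, false);
+ *	idx = nbl_common_get_index(tbl, &key, &extra);
+ *	...
+ *	nbl_common_free_index(tbl, &key);
+ */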
+
+struct nbl_hash_tbl_del_key {
+	void *action_priv;
+	void (*action_func)(void *priv, void *key, void *data);
+};
+
+#define NBL_HASH_TBL_DEL_KEY_INIT(key, priv_arg, act_func_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->action_priv = priv_arg; \
+	__key->action_func = act_func_arg; \
+} while (0)
+
+struct nbl_hash_tbl_scan_key {
+	enum nbl_hash_tbl_op_type op_type;
+	void *match_condition;
+	/* match_func must return 0 for a node that matches the condition */
+	int (*match_func)(void *condition, void *key, void *data);
+	void *action_priv;
+	void (*action_func)(void *priv, void *key, void *data);
+};
+
+#define NBL_HASH_TBL_SCAN_KEY_INIT(key, op_type_arg, con_arg, match_func_arg, priv_arg, \
+				   act_func_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->op_type = op_type_arg; \
+	__key->match_condition = con_arg; \
+	__key->match_func = match_func_arg; \
+	__key->action_priv = priv_arg; \
+	__key->action_func = act_func_arg; \
+} while (0)
+
+struct nbl_hash_xy_tbl_key {
+	struct device *dev;
+	u16 x_axis_key_size;
+	u16 y_axis_key_size; /* y_axis_key_len = key_len - x_axis_key_len */
+	u16 data_size; /* excludes the key and the internal node bookkeeping */
+	u16 bucket_size;
+	u16 x_axis_bucket_size;
+	u16 y_axis_bucket_size;
+	u8 lock_need; /* true: safe for multi-threaded operation */
+	u8 resv[3];
+};
+
+#define NBL_HASH_XY_TBL_KEY_INIT(key, dev_arg, x_key_size_arg, y_key_size_arg, data_size_arg, \
+				 bucket_size_args, x_bucket_size_arg, y_bucket_size_arg, \
+				 lock_need_args) \
+do { \
+	typeof(key) __key = key; \
+	__key->dev = dev_arg; \
+	__key->x_axis_key_size = x_key_size_arg; \
+	__key->y_axis_key_size = y_key_size_arg; \
+	__key->data_size = data_size_arg; \
+	__key->bucket_size = bucket_size_args; \
+	__key->x_axis_bucket_size = x_bucket_size_arg; \
+	__key->y_axis_bucket_size = y_bucket_size_arg; \
+	__key->lock_need = lock_need_args; \
+	memset(__key->resv, 0, sizeof(__key->resv)); \
+} while (0)
+
+enum nbl_hash_xy_tbl_scan_type {
+	NBL_HASH_TBL_ALL_SCAN = 0,
+	NBL_HASH_TBL_X_AXIS_SCAN,
+	NBL_HASH_TBL_Y_AXIS_SCAN,
+};
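+
+/* Illustrative sketch of the single-key hash table (the key/data sizes
+ * and locals are assumptions; the functions are declared later in this
+ * header, and a NULL out_data is assumed to be tolerated):
+ *
+ *	struct nbl_hash_tbl_key tbl_key;
+ *	void *tbl, *entry;
+ *
+ *	NBL_HASH_TBL_KEY_INIT(&tbl_key, dev, sizeof(u32), sizeof(u64), 64, false);
+ *	tbl = nbl_common_init_hash_table(&tbl_key);
+ *	nbl_common_alloc_hash_node(tbl, &key, &data, NULL);
+ *	entry = nbl_common_get_hash_node(tbl, &key);
+ */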
+
+/**
+ * Use of the "only_query_exist" member:
+ * when true, only check whether a matching node exists.  For example,
+ * with x_axis: mac and y_axis: vlan, to ask whether the table holds a
+ * given "mac", fill struct nbl_hash_xy_tbl_scan_key as follows:
+ *	op_type = NBL_HASH_TBL_OP_SHOW;
+ *	scan_type = NBL_HASH_TBL_X_AXIS_SCAN;
+ *	only_query_exist = true;
+ *	x_key = the mac_addr;
+ *	y_key = NULL;
+ *	match_func = NULL;
+ *	action_func = NULL;
+ */
+struct nbl_hash_xy_tbl_scan_key {
+	enum nbl_hash_tbl_op_type op_type;
+	enum nbl_hash_xy_tbl_scan_type scan_type;
+	bool only_query_exist;
+	u8 resv[3];
+	void *x_key;
+	void *y_key;
+	void *match_condition;
+	/* match_func must return 0 for a node that matches the condition */
+	int (*match_func)(void *condition, void *x_key, void *y_key, void *data);
+	void *action_priv;
+	void (*action_func)(void *priv, void *x_key, void *y_key, void *data);
+};
+
+#define NBL_HASH_XY_TBL_SCAN_KEY_INIT(key, op_type_arg, scan_type_arg, query_flag_arg, \
+				      x_key_arg, y_key_arg, con_arg, match_func_arg, \
+				      priv_arg, act_func_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->op_type = op_type_arg; \
+	__key->scan_type = scan_type_arg; \
+	__key->only_query_exist = query_flag_arg; \
+	memset(__key->resv, 0, sizeof(__key->resv)); \
+	__key->x_key = x_key_arg; \
+	__key->y_key = y_key_arg; \
+	__key->match_condition = con_arg; \
+	__key->match_func = match_func_arg; \
+	__key->action_priv = priv_arg; \
+	__key->action_func = act_func_arg; \
+} while (0)
+
+struct nbl_hash_xy_tbl_del_key {
+	void *action_priv;
+	void (*action_func)(void *priv, void *x_key, void *y_key, void *data);
+};
+
+#define NBL_HASH_XY_TBL_DEL_KEY_INIT(key, priv_arg, act_func_arg) \
+do { \
+	typeof(key) __key = key; \
+	__key->action_priv = priv_arg; \
+	__key->action_func = act_func_arg; \
+} while (0)
+
+void nbl_convert_mac(u8 *mac, u8 *reverse_mac);
+
+void nbl_common_queue_work(struct work_struct *task, bool ctrl_task, bool singlethread);
+void nbl_common_queue_work_rdma(struct work_struct *task, bool singlethread);
+void nbl_common_queue_delayed_work(struct delayed_work *task, u32 msec,
+				   bool ctrl_task, bool singlethread);
+void nbl_common_queue_delayed_work_keepalive(struct delayed_work *task, u32 msec);
+void nbl_common_release_task(struct work_struct *task);
+void nbl_common_alloc_task(struct work_struct *task, void *func);
+void nbl_common_release_delayed_task(struct delayed_work *task);
+void nbl_common_alloc_delayed_task(struct delayed_work *task, void *func);
+void nbl_common_flush_task(struct work_struct *task);
+
+void nbl_common_destroy_wq(void);
+int nbl_common_create_wq(void);
+
+void nbl_debugfs_func_init(void *p, struct nbl_init_param *param);
+void nbl_debugfs_func_remove(void *p);
+
+int nbl_dma_iommu_change_translate(struct nbl_common_info *common);
+void nbl_dma_iommu_exit_translate(struct nbl_common_info *common);
+bool nbl_dma_iommu_status(struct pci_dev *pdev);
+bool nbl_dma_remap_status(struct pci_dev *pdev, u64 *dma_limit);
+void nbl_net_addr_rep_attr(struct nbl_netdev_rep_attr *rep_attr, int rep_id);
+u32 nbl_common_pf_id_subtraction_mgtpf_id(struct nbl_common_info *common, u32 pf_id);
+void *nbl_common_init_index_table(struct nbl_index_tbl_key *key);
+void nbl_common_remove_index_table(void *priv, struct nbl_index_tbl_del_key *key);
+void nbl_common_scan_index_table(void *priv, struct nbl_index_tbl_scan_key *key);
+int nbl_common_get_index(void *priv, void *key, struct nbl_index_key_extra *extra_key);
+int nbl_common_get_index_with_data(void *priv, void *key, struct nbl_index_key_extra *extra_key,
+				   void *data, u32 data_size, void **output_data);
+int
nbl_common_alloc_index(void *priv, void *key, struct nbl_index_key_extra *extra_key, + void *data, u32 data_size, void **output_data); +void nbl_common_free_index(void *priv, void *key); +int nbl_common_find_available_idx(unsigned long *addr, u32 size, u32 idx_num, u32 multiple); + +/* ---- EVENT-NOTIFIER ---- */ +enum nbl_event_type { + NBL_EVENT_RDMA_BOND_UPDATE = 0, + NBL_EVENT_OFFLOAD_STATUS_CHANGED, + NBL_EVENT_LINK_STATE_UPDATE, + NBL_EVENT_DEV_MODE_SWITCH, + NBL_EVENT_ACL_STATE_UPDATE, + NBL_EVENT_NETDEV_STATE_CHANGE, + NBL_EVENT_RESET_EVENT, + NBL_EVENT_QUEUE_ALLOC, + NBL_EVENT_MAX, +}; + +struct nbl_event_callback { + int (*callback)(u16 type, void *event_data, void *callback_data); + void *callback_data; +}; + +enum nbl_rdma_subevent_type { + NBL_SUBEVENT_CREATE_ADEV = 1, + NBL_SUBEVENT_RELEASE_ADEV, + NBL_SUBEVENT_CREATE_BOND_ADEV, + NBL_SUBEVENT_RELEASE_BOND_ADEV, + NBL_SUBEVENT_UPDATE_BOND_MEMBER, + NBL_SUBEVENT_MAX, +}; + +struct nbl_event_rdma_bond_update { + enum nbl_rdma_subevent_type subevent; + struct nbl_lag_member_list_param param; +}; + +struct nbl_event_offload_status_data { + u16 pf_vsi_id; + bool status; +}; + +enum nbl_dev_mode_switch_op { + NBL_DEV_KERNEL_TO_USER, + NBL_DEV_USER_TO_KERNEL, + NBL_DEV_SET_USER_PROMISC_MODE, +}; + +struct nbl_event_dev_mode_switch_data { + int op; + int ret; + bool promosic; +}; + +struct nbl_event_acl_state_update_data { + bool is_offload; +}; + +struct nbl_event_queue_update_data { + u16 func_id; + u16 ring_num; + u16 *map; +}; + +typedef int (*handle_tlv)(u16 type, u16 length, u8 *val, void *data); + +void nbl_event_notify(enum nbl_event_type type, void *event_data, u16 src_vsi_id, u16 board_id); +int nbl_event_register(enum nbl_event_type type, struct nbl_event_callback *callback, + u16 src_vsi_id, u16 board_id); +void nbl_event_unregister(enum nbl_event_type type, struct nbl_event_callback *callback, + u16 src_vsi_id, u16 board_id); +int nbl_event_init(void); +void nbl_event_remove(void); + +void *nbl_common_init_hash_table(struct nbl_hash_tbl_key *key); +void nbl_common_remove_hash_table(void *priv, struct nbl_hash_tbl_del_key *key); +int nbl_common_alloc_hash_node(void *priv, void *key, void *data, void **out_data); +void *nbl_common_get_hash_node(void *priv, void *key); +void nbl_common_free_hash_node(void *priv, void *key); +void nbl_common_scan_hash_node(void *priv, struct nbl_hash_tbl_scan_key *key); +u16 nbl_common_get_hash_node_num(void *priv); + +void *nbl_common_init_hash_xy_table(struct nbl_hash_xy_tbl_key *key); +void nbl_common_remove_hash_xy_table(void *priv, struct nbl_hash_xy_tbl_del_key *key); +int nbl_common_alloc_hash_xy_node(void *priv, void *x_key, void *y_key, void *data); +void *nbl_common_get_hash_xy_node(void *priv, void *x_key, void *y_key); +void nbl_common_free_hash_xy_node(void *priv, void *x_key, void *y_key); +u16 nbl_common_scan_hash_xy_node(void *priv, struct nbl_hash_xy_tbl_scan_key *key); +u16 nbl_common_get_hash_xy_node_num(void *priv); + +void nbl_flow_direct_parse_tlv_data(u8 *tlv, u32 length, handle_tlv callback, void *data); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..5f7af46fe2993c223651a0ae00ddc84e48fb143e --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dev.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_DEF_DEV_H_ +#define _NBL_DEF_DEV_H_ + +#include "nbl_include.h" + +#define NBL_DEV_OPS_TBL_TO_OPS(dev_ops_tbl) ((dev_ops_tbl)->ops) +#define NBL_DEV_OPS_TBL_TO_PRIV(dev_ops_tbl) ((dev_ops_tbl)->priv) + +struct nbl_dev_ops { +}; + +struct nbl_dev_ops_tbl { + struct nbl_dev_ops *ops; + void *priv; +}; + +int nbl_dev_init(void *p, struct nbl_init_param *param); +void nbl_dev_remove(void *p); +int nbl_dev_start(void *p, struct nbl_init_param *param); +void nbl_dev_stop(void *p); +int nbl_dev_init_emp_class(void); +void nbl_dev_destroy_emp_class(void); + +void nbl_dev_user_module_init(void); +void nbl_dev_user_module_destroy(void); +int nbl_dev_create_rep(void *p, int num_vfs); +int nbl_dev_destroy_rep(void *p); + +int nbl_dev_setup_vf_config(void *p, int num_vfs); +void nbl_dev_remove_vf_config(void *p); +int nbl_dev_resume(void *p); +int nbl_dev_suspend(void *p); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..587ecea46a4caea2700222eadfd9c61f09d666c3 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_dispatch.h @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_DEF_DISPATCH_H_ +#define _NBL_DEF_DISPATCH_H_ + +#include "nbl_include.h" + +#define NBL_DISP_OPS_TBL_TO_OPS(disp_ops_tbl) ((disp_ops_tbl)->ops) +#define NBL_DISP_OPS_TBL_TO_PRIV(disp_ops_tbl) ((disp_ops_tbl)->priv) + +enum { + NBL_DISP_CTRL_LVL_NEVER = 0, + NBL_DISP_CTRL_LVL_MGT, + NBL_DISP_CTRL_LVL_NET, + NBL_DISP_CTRL_LVL_ALWAYS, + NBL_DISP_CTRL_LVL_MAX, +}; + +struct nbl_dispatch_ops { + int (*init_chip_module)(void *priv); + void (*get_resource_pt_ops)(void *priv, struct nbl_resource_pt_ops *pt_ops); + int (*queue_init)(void *priv); + int (*vsi_init)(void *priv); + int (*configure_msix_map)(void *priv, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en); + int (*destroy_msix_map)(void *priv); + int (*enable_mailbox_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_abnormal_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); + u16 (*get_global_vector)(void *priv, u16 vsi_id, u16 local_vector_id); + u16 (*get_msix_entry_id)(void *priv, u16 vsi_id, u16 local_vector_id); + u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 senser_id); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_hwmon_type type); + + int (*get_mbx_irq_num)(void *priv); + int (*get_adminq_irq_num)(void *priv); + int (*get_abnormal_irq_num)(void *priv); + int (*alloc_rings)(void *priv, struct net_device *netdev, struct nbl_ring_param *param); + void (*remove_rings)(void *priv); + dma_addr_t (*start_tx_ring)(void *priv, u8 ring_index); + void (*stop_tx_ring)(void *priv, u8 ring_index); + dma_addr_t (*start_rx_ring)(void *priv, u8 ring_index, bool use_napi); + void (*stop_rx_ring)(void *priv, u8 ring_index); + void (*kick_rx_ring)(void *priv, u16 index); + void (*set_rings_xdp_prog)(void *priv, void *prog); + int (*register_xdp_rxq)(void *priv, u8 ring_index); + void (*unregister_xdp_rxq)(void *priv, u8 ring_index); + int (*dump_ring)(void *priv, struct seq_file *m, bool is_tx, int index); + int (*dump_ring_stats)(void *priv, struct seq_file *m, bool is_tx, int index); + struct napi_struct 
*(*get_vector_napi)(void *priv, u16 index); + void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, + u16 index, bool mask_en); + int (*register_net)(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result); + void (*register_vsi_ring)(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num); + int (*unregister_net)(void *priv); + int (*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num); + void (*free_txrx_queues)(void *priv, u16 vsi_id); + int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + void (*remove_all_queues)(void *priv, u16 vsi_id); + int (*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num); + int (*setup_q2vsi)(void *priv, u16 vsi_id); + void (*remove_q2vsi)(void *priv, u16 vsi_id); + int (*setup_rss)(void *priv, u16 vsi_id); + void (*remove_rss)(void *priv, u16 vsi_id); + int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps); + void (*remove_cqs)(void *priv, u16 vsi_id); + int (*cfg_qdisc_mqprio)(void *priv, struct nbl_tc_qidsc_param *param); + void (*clear_queues)(void *priv, u16 vsi_id); + int (*check_offload_status)(void *priv, bool *is_down); + u16 (*get_vsi_global_qid)(void *priv, u16 vsi_id, u16 local_qid); + u16 (*get_local_queue_id)(void *priv, u16 vsi_id, u16 global_queue_id); + u16 (*get_vsi_global_queue_id)(void *priv, u16 vsi_id, u16 local_qid); + + u8* (*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); + int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); + int (*set_vf_spoof_check)(void *priv, u16 vsi_id, int vfid, u8 enable); + void (*get_base_mac_addr)(void *priv, u8 *mac); + + int (*add_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + void (*del_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + int (*add_lag_flow)(void *priv, u16 vsi); + void (*del_lag_flow)(void *priv, u16 vsi); + int (*add_lldp_flow)(void *priv, u16 vsi); + void (*del_lldp_flow)(void *priv, u16 vsi); + int (*add_multi_rule)(void *priv, u16 vsi); + void (*del_multi_rule)(void *priv, u16 vsi); + int (*setup_multi_group)(void *priv); + void (*remove_multi_group)(void *priv); + void (*clear_accel_flow)(void *priv, u16 vsi_id); + void (*clear_flow)(void *priv, u16 vsi_id); + void (*dump_flow)(void *priv, struct seq_file *m); + + u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id); + int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + u32 (*get_tx_headroom)(void *priv); + void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); + void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); + void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + void (*set_eswitch_mode)(void *priv, u16 switch_mode); + u16 (*get_eswitch_mode)(void *priv); + int (*alloc_rep_data)(void *priv, int num_vfs, u16 vf_base_vsi_id); + void (*free_rep_data)(void *priv); + void (*set_rep_netdev_info)(void *priv, void *rep_data); + void (*unset_rep_netdev_info)(void *priv); + struct net_device *(*get_rep_netdev_info)(void *priv, u16 rep_data_index); + int (*disable_phy_flow)(void *priv, u8 eth_id); + int (*enable_phy_flow)(void *priv, u8 eth_id); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 eth_id, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 
eth_id); + void (*set_shaping_dport_vld)(void *priv, u8 eth_id, bool vld); + void (*set_dport_fc_th_vld)(void *priv, u8 eth_id, bool vld); + void (*get_rep_stats)(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx); + u16 (*get_rep_index)(void *priv, u16 vsi_id); + + void (*get_firmware_version)(void *priv, char *firmware_verion, u8 max_len); + int (*get_driver_info)(void *priv, struct nbl_driver_info *driver_info); + void (*get_queue_stats)(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx); + int (*get_queue_err_stats)(void *priv, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, bool is_tx); + void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + void (*get_private_stat_len)(void *priv, u32 *len); + void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data, u32 data_len); + void (*fill_private_stat_strings)(void *priv, u8 *strings); + u16 (*get_max_desc_num)(void *priv); + u16 (*get_min_desc_num)(void *priv); + u16 (*get_tx_desc_num)(void *priv, u32 ring_index); + u16 (*get_rx_desc_num)(void *priv, u32 ring_index); + void (*set_tx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*set_rx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*get_coalesce)(void *priv, u16 vector_id, struct nbl_chan_param_get_coalesce *ec); + void (*set_coalesce)(void *priv, u16 vector_id, u16 num_net_msix, u16 pnum, u16 rate); + u16 (*get_intr_suppress_level)(void *priv, u64 rate, u16 last_level); + void (*set_intr_suppress_level)(void *priv, u16 vector_id, + u16 num_net_msix, u16 level); + void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); + void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir, u32 indir_size); + void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); + void (*get_rxfh_rss_key)(void *priv, u8 *rss_key, u32 rss_key_size); + void (*get_rxfh_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + int (*get_port_attributes)(void *priv); + int (*enable_port)(void *priv, bool enable); + void (*init_port)(void *priv); + int (*cfg_eth_bond_info)(void *priv, struct nbl_lag_member_list_param *param); + int (*get_eth_bond_info)(void *priv, struct nbl_bond_param *param); + void (*recv_port_notify)(void *priv); + int (*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); + int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); + int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); + int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); + int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); + int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + int (*nway_reset)(void *priv, u8 eth_id); + void (*adapt_desc_gother)(void *priv); + void (*flr_clear_net)(void *priv, u16 vfid); + void (*flr_clear_queues)(void *priv, u16 vfid); + void (*flr_clear_accel_flow)(void *priv, u16 vfid); + void (*flr_clear_flows)(void *priv, u16 vfid); + void (*flr_clear_interrupt)(void *priv, u16 vfid); + void (*flr_clear_accel)(void *priv, u16 vfid); + void (*flr_clear_rdma)(void *priv, u16 vfid); + u16 (*covert_vfid_to_vsi_id)(void *priv, u16 vfid); + void (*unmask_all_interrupts)(void *priv); + void (*keep_alive)(void *priv); + void (*cfg_eth_bond_event)(void 
*priv, bool enable); + int (*set_bridge_mode)(void *priv, u16 bmode); + void (*cfg_txrx_vlan)(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index); + + void (*setup_rdma_id)(void *priv); + void (*remove_rdma_id)(void *priv); + void (*register_rdma)(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param); + void (*unregister_rdma)(void *priv, u16 vsi_id); + void (*register_rdma_bond)(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param); + void (*unregister_rdma_bond)(void *priv, u16 lag_id); + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); + u16 (*get_function_id)(void *priv, u16 vsi_id); + void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + int (*enable_lag_protocol)(void *priv, u16 eth_id, bool lag_en); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_lag_mcc)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_duppkt_info)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_duppkt_mcc)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable); + + bool (*check_fw_heartbeat)(void *priv); + bool (*check_fw_reset)(void *priv); + int (*flash_lock)(void *priv); + int (*flash_unlock)(void *priv); + int (*flash_prepare)(void *priv); + int (*flash_image)(void *priv, u32 module, const u8 *data, size_t len); + int (*flash_activate)(void *priv); + void (*get_phy_caps)(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps); + int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); + int (*set_eth_loopback)(void *priv, u8 enable); + struct sk_buff *(*clean_rx_lb_test)(void *priv, u32 ring_index); + int (*passthrough_fw_cmd)(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result); + int (*update_ring_num)(void *priv); + int (*update_rdma_cap)(void *priv); + int (*update_rdma_mem_type)(void *priv); + u16 (*get_rdma_cap_num)(void *priv); + int (*set_ring_num)(void *priv, struct nbl_fw_cmd_net_ring_num_param *param); + + u32 (*check_active_vf)(void *priv); + int (*get_board_id)(void *priv); + + void (*get_reg_dump)(void *priv, u32 *data, u32 len); + int (*get_reg_dump_len)(void *priv); + + u32 (*get_adminq_tx_buf_size)(void *priv); + int (*emp_console_write)(void *priv, char *buf, size_t count); + bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); + bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + int (*alloc_ktls_tx_index)(void *priv, u16 vsi); + void (*free_ktls_tx_index)(void *priv, u32 index); + void (*cfg_ktls_tx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + int (*alloc_ktls_rx_index)(void *priv, u16 vsi); + void (*free_ktls_rx_index)(void *priv, u32 index); + void (*cfg_ktls_rx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_record)(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init); + int (*add_ktls_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ktls_rx_flow)(void *priv, u32 index); + + 
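+	/* Callers are expected to reach these hooks through the accessor
+	 * macros defined at the top of this header; a minimal illustrative
+	 * sketch (the locals are assumptions, not driver code):
+	 *
+	 *	struct nbl_dispatch_ops *ops = NBL_DISP_OPS_TBL_TO_OPS(disp_tbl);
+	 *	void *priv = NBL_DISP_OPS_TBL_TO_PRIV(disp_tbl);
+	 *
+	 *	if (ops->setup_rss)
+	 *		ops->setup_rss(priv, vsi_id);
+	 */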
int (*alloc_ipsec_tx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_tx_index)(void *priv, u32 index); + int (*alloc_ipsec_rx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_rx_index)(void *priv, u32 index); + void (*cfg_ipsec_tx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + void (*cfg_ipsec_rx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + int (*add_ipsec_tx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_tx_flow)(void *priv, u32 index); + int (*add_ipsec_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_rx_flow)(void *priv, u32 index); + bool (*check_ipsec_status)(void *priv); + u32 (*get_dipsec_lft_info)(void *priv); + void (*handle_dipsec_soft_expire)(void *priv, u32 index); + void (*handle_dipsec_hard_expire)(void *priv, u32 index); + u32 (*get_uipsec_lft_info)(void *priv); + void (*handle_uipsec_soft_expire)(void *priv, u32 index); + void (*handle_uipsec_hard_expire)(void *priv, u32 index); + + void (*dummy_func)(void *priv); + + void (*configure_virtio_dev_msix)(void *priv, u16 vector); + void (*configure_rdma_msix_off)(void *priv, u16 vector); + void (*configure_virtio_dev_ready)(void *priv); + + int (*switchdev_init_cmdq)(void *priv); + int (*switchdev_deinit_cmdq)(void *priv); + int (*add_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*del_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*flow_index_lookup)(void *priv, struct nbl_flow_index_key key); + + bool (*tc_tun_encap_lookup)(void *priv, struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param); + int (*tc_tun_encap_del)(void *priv, struct nbl_encap_key *key); + int (*tc_tun_encap_add)(void *priv, struct nbl_rule_action *action); + + int (*set_tc_flow_info)(void *priv); + int (*unset_tc_flow_info)(void *priv); + int (*get_tc_flow_info)(void *priv); + int (*query_tc_stats)(void *priv, struct nbl_stats_param *param); + + u32 (*get_p4_version)(void *priv); + int (*get_p4_info)(void *priv, char *verify_code); + int (*load_p4)(void *priv, struct nbl_load_p4_param *param); + int (*load_p4_default)(void *priv); + int (*get_p4_used)(void *priv); + int (*set_p4_used)(void *priv, int p4_type); + u16 (*get_vf_base_vsi_id)(void *priv, u16 pf_id); + + int (*add_nd_upcall_flow)(void *priv, u16 vsi_id, bool for_pmd); + void (*del_nd_upcall_flow)(void *priv); + + dma_addr_t (*restore_abnormal_ring)(void *priv, int ring_index, int type); + int (*restart_abnormal_ring)(void *priv, int ring_index, int type); + int (*restore_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type); + int (*stop_abnormal_sw_queue)(void *priv, u16 local_queue_id, int type); + int (*stop_abnormal_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, int type); + u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); + u16 (*get_vf_vsi_id)(void *priv, u16 vsi_id, int vf_id); + int (*set_pmd_debug)(void *priv, bool pmd_debug); + + void (*register_func_mac)(void *priv, u8 *mac, u16 func_id); + int (*register_func_vlan)(void *priv, u16 func_id, u16 vlan_tci, + u16 vlan_proto, bool *should_notify); + int (*register_func_rate)(void *priv, u16 func_id, int rate); + int (*register_func_link_forced)(void *priv, u16 func_id, u8 link_forced, + bool *should_notify); + int (*get_link_forced)(void *priv, u16 vsi_id); + int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate); + + void (*get_driver_version)(void *priv, char *ver, int len); + + int (*get_fd_flow)(void 
*priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd); + int (*get_fd_flow_cnt)(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id); + int (*config_fd_flow_state)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state); + int (*get_fd_flow_all)(void *priv, struct nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs); + int (*get_fd_flow_max)(void *priv); + + int (*replace_fd_flow)(void *priv, struct nbl_chan_param_fdir_replace *info); + int (*remove_fd_flow)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id); + + void (*cfg_fd_update_event)(void *priv, bool enable); + void (*dump_fd_flow)(void *priv, struct seq_file *m); + + void (*get_xdp_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); + void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); + int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); +}; + +struct nbl_dispatch_ops_tbl { + struct nbl_dispatch_ops *ops; + void *priv; +}; + +int nbl_disp_init(void *p, struct nbl_init_param *param); +void nbl_disp_remove(void *p); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..8440d82f92db9bb7e4943594c7c4f50f99df74fe --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_phy.h @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_DEF_PHY_H_ +#define _NBL_DEF_PHY_H_ + +#include "nbl_include.h" + +#define NBL_PHY_OPS_TBL_TO_OPS(phy_ops_tbl) ((phy_ops_tbl)->ops) +#define NBL_PHY_OPS_TBL_TO_PRIV(phy_ops_tbl) ((phy_ops_tbl)->priv) + +struct nbl_phy_ops { + int (*init_chip_module)(void *priv, u8 eth_speed, u8 eth_num); + int (*get_firmware_version)(void *priv, char *firmware_verion); + int (*flow_init)(void *priv); + int (*init_qid_map_table)(void *priv); + int (*set_qid_map_table)(void *priv, void *data, int qid_map_select); + int (*set_qid_map_ready)(void *priv, bool ready); + int (*cfg_ipro_queue_tbl)(void *priv, u16 queue_id, u16 vsi_id, u8 enable); + int (*cfg_ipro_dn_sport_tbl)(void *priv, u16 vsi_id, u16 dst_eth_id, u16 bmode, bool binit); + int (*set_vnet_queue_info)(void *priv, struct nbl_vnet_queue_info_param *param, + u16 queue_id); + int (*clear_vnet_queue_info)(void *priv, u16 queue_id); + int (*cfg_vnet_qinfo_log)(void *priv, u16 queue_id, bool vld); + int (*reset_dvn_cfg)(void *priv, u16 queue_id); + int (*reset_uvn_cfg)(void *priv, u16 queue_id); + int (*restore_dvn_context)(void *priv, u16 queue_id, u16 split, u16 last_avail_index); + int (*restore_uvn_context)(void *priv, u16 queue_id, u16 split, u16 last_avail_index); + int (*get_tx_queue_cfg)(void *priv, void *data, u16 queue_id); + int (*get_rx_queue_cfg)(void *priv, void *data, u16 queue_id); + int (*cfg_tx_queue)(void *priv, void *data, u16 queue_id); + int (*cfg_rx_queue)(void *priv, void *data, u16 queue_id); + bool (*check_q2tc)(void *priv, u16 queue_id); + int (*cfg_q2tc_netid)(void *priv, u16 queue_id, u16 netid, u16 vld); + int (*cfg_q2tc_tcid)(void *priv, u16 queue_id, u16 tcid); + int (*set_tc_wgt)(void *priv, u16 func_id, u8 *weight, u16 num_tc); + int (*set_tc_spwrr)(void *priv, u16 func_id, u8 spwrr); + int (*set_shaping)(void *priv, u16 func_id, u64 total_tx_rate, u8 vld, bool active); + void (*active_shaping)(void *priv, u16 func_id); + void (*deactive_shaping)(void *priv, u16 func_id); + int (*cfg_dsch_net_to_group)(void *priv, u16 func_id, u16 group_id, u16 vld); + int (*cfg_dsch_group_to_port)(void *priv, u16 group_id, u16 dport, u16 vld); + int (*init_epro_rss_key)(void *priv); + void (*read_rss_key)(void *priv, u8 *rss_key); + void (*read_rss_indir)(void *priv, u16 vsi_id, u32 *rss_indir, + u16 rss_ret_base, u16 rss_entry_size); + void (*get_rss_alg_sel)(void *priv, u8 eth_id, u8 *rss_alg_sel); + int (*init_epro_vpt_tbl)(void *priv, u16 vsi_id); + int (*set_epro_rss_default)(void *priv, u16 vsi_id); + int (*cfg_epro_rss_ret)(void *priv, u32 index, u8 size_type, u32 q_num, u16 *queue_list); + int (*set_epro_rss_pt)(void *priv, u16 vsi_id, u16 rss_ret_base, u16 rss_entry_size); + int (*clear_epro_rss_pt)(void *priv, u16 vsi_id); + int (*disable_dvn)(void *priv, u16 queue_id); + int (*disable_uvn)(void *priv, u16 queue_id); + int (*lso_dsch_drain)(void *priv, u16 queue_id); + int (*rsc_cache_drain)(void *priv, u16 queue_id); + u16 (*save_dvn_ctx)(void *priv, u16 queue_id, u16 split); + u16 (*save_uvn_ctx)(void *priv, u16 queue_id, u16 split, u16 queue_size); + void (*get_rx_queue_err_stats)(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats); + void (*get_tx_queue_err_stats)(void *priv, u16 queue_id, + struct nbl_queue_err_stats *queue_err_stats); + void (*setup_queue_switch)(void *priv, u16 eth_id); + void (*init_pfc)(void *priv, u8 ether_ports); + int (*cfg_phy_flow)(void *priv, u16 vsi_id, u16 count, u8 eth_id, bool status); + u32 (*get_chip_temperature)(void *priv, 
enum nbl_hwmon_type type, u32 senser_id); + int (*cfg_eth_port_priority_replace)(void *priv, u8 eth_id, bool status); + + int (*cfg_epro_vpt_tbl)(void *priv, u16 vsi_id); + void (*set_promisc_mode)(void *priv, u16 vsi_id, u16 eth_id, u16 mode); + void (*configure_msix_map)(void *priv, u16 func_id, bool valid, dma_addr_t dma_addr, + u8 bus, u8 devid, u8 function); + void (*configure_msix_info)(void *priv, u16 func_id, bool valid, u16 interrupt_id, + u8 bus, u8 devid, u8 function, bool net_msix_mask_en); + void (*get_msix_resource)(void *priv, u16 func_id, u16 *msix_base, u16 *msix_max); + void (*get_coalesce)(void *priv, u16 interrupt_id, u16 *pnum, u16 *rate); + void (*set_coalesce)(void *priv, u16 interrupt_id, u16 pnum, u16 rate); + + void (*update_mailbox_queue_tail_ptr)(void *priv, u16 tail_ptr, u8 txrx); + void (*config_mailbox_rxq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*config_mailbox_txq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*stop_mailbox_rxq)(void *priv); + void (*stop_mailbox_txq)(void *priv); + u16 (*get_mailbox_rx_tail_ptr)(void *priv); + bool (*check_mailbox_dma_err)(void *priv, bool tx); + u32 (*get_host_pf_mask)(void *priv); + u32 (*get_host_pf_fid)(void *priv, u8 func_id); + void (*cfg_mailbox_qinfo)(void *priv, u16 func_id, u16 bus, u16 devid, u16 function); + void (*enable_mailbox_irq)(void *priv, u16 func_id, bool enable_msix, u16 global_vector_id); + void (*enable_abnormal_irq)(void *priv, bool enable_msix, u16 global_vector_id); + void (*enable_msix_irq)(void *priv, u16 global_vector_id); + u8 *(*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); + void (*config_adminq_rxq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*config_adminq_txq)(void *priv, dma_addr_t dma_addr, int size_bwid); + void (*stop_adminq_rxq)(void *priv); + void (*stop_adminq_txq)(void *priv); + void (*cfg_adminq_qinfo)(void *priv, u16 bus, u16 devid, u16 function); + void (*enable_adminq_irq)(void *priv, bool enable_msix, u16 global_vector_id); + void (*update_adminq_queue_tail_ptr)(void *priv, u16 tail_ptr, u8 txrx); + u16 (*get_adminq_rx_tail_ptr)(void *priv); + bool (*check_adminq_dma_err)(void *priv, bool tx); + + void (*update_tail_ptr)(void *priv, struct nbl_notify_param *param); + u8* (*get_tail_ptr)(void *priv); + + int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); + int (*set_spoof_check_enable)(void *priv, u16 vsi_id, u8 enable); + + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + int (*enable_lag_protocol)(void *priv, u16 eth_id, void *data); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); + int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_lag_mcc)(void *priv, u16 mcc_id, u16 action); + bool (*get_lag_fwd)(void *priv, u16 eth_id); + + int (*cfg_bond_shaping)(void *priv, u8 eth_id, u8 speed, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, + bool enable, u8 speed); + + void (*clear_acl)(void *priv); + int (*set_fd_udf)(void *priv, u8 lxmode, u8 offset); + int (*clear_fd_udf)(void *priv); + int (*set_fd_tcam_cfg_default)(void *priv); + int (*set_fd_tcam_cfg_lite)(void *priv); + int (*set_fd_tcam_cfg_full)(void *priv); + int (*set_fd_tcam_ram)(void 
*priv, struct nbl_acl_tcam_param *data, + struct nbl_acl_tcam_param *mask, u16 ram_index, u32 depth_index); + int (*set_fd_action_ram)(void *priv, u32 action, u16 ram_index, u32 depth_index); + void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); + enum nbl_hw_status (*get_hw_status)(void *priv); + + /* For leonis */ + int (*set_ht)(void *priv, u16 hash, u16 hash_other, u8 ht_table, + u8 bucket, u32 key_index, u8 valid); + int (*set_kt)(void *priv, u8 *key, u32 key_index, u8 key_type); + int (*search_key)(void *priv, u8 *key, u8 key_type); + int (*add_tcam)(void *priv, u32 index, u8 *key, u32 *action, u8 key_type, u8 pp_type); + void (*del_tcam)(void *priv, u32 index, u8 key_type, u8 pp_type); + int (*add_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 action); + void (*del_mcc)(void *priv, u16 mcc_id, u16 prev_mcc_id, u16 next_mcc_id); + int (*add_tnl_encap)(void *priv, const u8 encap_buf[], u16 encap_idx, + union nbl_flow_encap_offset_tbl_u encap_idx_info); + void (*del_tnl_encap)(void *priv, u16 encap_idx); + int (*init_fem)(void *priv); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 idx, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 idx); + void (*set_shaping_dport_vld)(void *priv, u8 eth_id, bool vld); + void (*set_dport_fc_th_vld)(void *priv, u8 eth_id, bool vld); + void (*cfg_ktls_tx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_record)(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init); + int (*init_acl_stats)(void *priv); + + void (*cfg_dipsec_nat)(void *priv, u16 sport); + void (*cfg_dipsec_sad_iv)(void *priv, u32 index, u64 iv); + void (*cfg_dipsec_sad_esn)(void *priv, u32 index, u32 sn, u32 esn, u8 wrap_en, u8 enable); + void (*cfg_dipsec_sad_lifetime)(void *priv, u32 index, u32 lft_cnt, + u32 lft_diff, u8 limit_enable, u8 limit_type); + void (*cfg_dipsec_sad_crypto)(void *priv, u32 index, u32 *key, u32 salt, + u32 crypto_type, u8 tunnel_mode, u8 icv_len); + void (*cfg_dipsec_sad_encap)(void *priv, u32 index, u8 nat_flag, + u16 dport, u32 spi, u32 *ip_data); + u32 (*read_dipsec_status)(void *priv); + u32 (*reset_dipsec_status)(void *priv); + u32 (*read_dipsec_lft_info)(void *priv); + void (*cfg_dipsec_lft_info)(void *priv, u32 index, u32 lifetime_diff, + u32 flag_wen, u32 msb_wen); + void (*init_dprbac)(void *priv); + void (*cfg_uipsec_nat)(void *priv, u8 nat_flag, u16 dport); + void (*cfg_uipsec_sad_esn)(void *priv, u32 index, u32 sn, u32 esn, u8 overlap, u8 enable); + void (*cfg_uipsec_sad_lifetime)(void *priv, u32 index, u32 lft_cnt, + u32 lft_diff, u8 limit_enable, u8 limit_type); + void (*cfg_uipsec_sad_crypto)(void *priv, u32 index, u32 *key, u32 salt, + u32 crypto_type, u8 tunnel_mode, u8 icv_len); + void (*cfg_uipsec_sad_window)(void *priv, u32 index, u8 window_en, u8 option); + void (*cfg_uipsec_em_tcam)(void *priv, u16 tcam_index, u32 *data); + void (*cfg_uipsec_em_ad)(void *priv, u16 tcam_index, u32 index); + void (*clear_uipsec_tcam_ad)(void *priv, u16 tcam_index); + void (*cfg_uipsec_em_ht)(void *priv, u32 index, u16 ht_table, u16 ht_index, + u16 ht_other_index, u16 ht_bucket); + void (*cfg_uipsec_em_kt)(void *priv, u32 index, u32 *data); + void (*clear_uipsec_ht_kt)(void *priv, u32 index, u16 ht_table, + u16 ht_index, u16 ht_bucket); + u32 (*read_uipsec_status)(void *priv); + u32 (*reset_uipsec_status)(void *priv); + u32 
(*read_uipsec_lft_info)(void *priv); + void (*cfg_uipsec_lft_info)(void *priv, u32 index, u32 lifetime_diff, + u32 flag_wen, u32 msb_wen); + void (*init_uprbac)(void *priv); + + u32 (*get_fw_ping)(void *priv); + void (*set_fw_ping)(void *priv, u32 ping); + u32 (*get_fw_pong)(void *priv); + void (*set_fw_pong)(void *priv, u32 pong); + + int (*init_vdpaq)(void *priv, u16 func_id, u16 bdf, u64 pa, u32 size); + void (*destroy_vdpaq)(void *priv); + + void (*get_reg_dump)(void *priv, u32 *data, u32 len); + int (*get_reg_dump_len)(void *priv); + int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnomal_info); + u32 (*get_uvn_desc_entry_stats)(void *priv); + void (*set_uvn_desc_wr_timeout)(void *priv, u16 timeout); + void (*set_tc_kgen_cvlan_zero)(void *priv); + void (*unset_tc_kgen_cvlan)(void *priv); + void (*set_ped_tab_vsi_type)(void *priv, u32 port_id, u16 eth_proto); + void (*load_p4)(void *priv, u32 addr, u32 size, u8 *data); + void (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + void (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); + + /* For bootis */ + int (*add_mv_tbl)(void *priv, u16 vsi, const void *key, const void *act, u16 result_idx); + int (*del_mv_tbl)(void *priv, const void *key); + int (*cfg_rss_alg)(void *priv, u16 vsi, const void *param); + void (*cfg_padpt_txrx_enable)(void *priv, bool tx_enable, bool rx_enable); + int (*init_port)(void *priv); + int (*init_fec)(void *priv); + int (*setup_loopback)(void *priv, u32 eth_id, u32 enable); + bool (*sfp_is_present)(void *priv, u32 eth_id); + int (*read_i2c)(void *priv, u32 eth_id, u16 slave_addr, + u8 channel, u8 read_byte, u8 addr, u32 *rdata); + int (*get_eth_mac_address)(void *priv, u32 eth_id, u8 *mac_addr); + int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + + /* for board cfg */ + u32 (*get_fw_eth_num)(void *priv); + u32 (*get_fw_eth_map)(void *priv); + void (*get_board_info)(void *priv, struct nbl_board_port_info *board); + u32 (*get_quirks)(void *priv); + + /* for userspace */ + int (*init_offload_fwd)(void *priv, u16 vsi_id); + int (*init_cmdq)(void *priv, void *data, u16 func_id); + int (*reset_cmdq)(void *priv); + int (*destroy_cmdq)(void *priv); + void (*update_cmdq_tail)(void *priv, u32 doorbell); + int (*init_rep)(void *priv, u16 vsi_id, u8 inner_type, + u8 outer_type, u8 rep_type); + int (*init_flow)(void *priv, void *data); + int (*deinit_flow)(void *priv); + int (*offload_flow_rule)(void *priv, void *data); + int (*get_flow_acl_switch)(void *priv, u8 *acl_enable); + void (*get_line_rate_info)(void *priv, void *data, void *result); + void (*set_eth_stats_snapshot)(void *priv, u32 eth_id, u8 snapshot); + void (*get_eth_ip_reg)(void *priv, u32 eth_id, u64 addr_off, u32 *data); + int (*set_eth_fec_mode)(void *priv, u32 eth_id, enum nbl_port_mode mode); + void (*clear_profile_table_action)(void *priv); + + /* For virtio */ + void (*get_common_cfg)(void *priv, u32 offset, void *buf, u32 len); + void (*set_common_cfg)(void *priv, u32 offset, void *buf, u32 len); + void (*get_device_cfg)(void *priv, u32 offset, void *buf, u32 len); + void (*set_device_cfg)(void *priv, u32 offset, void *buf, u32 len); + bool (*get_rdma_capability)(void *priv); +}; + +struct nbl_phy_ops_tbl { + struct nbl_phy_ops *ops; + void *priv; +}; + +int nbl_phy_init_leonis(void *p, struct nbl_init_param *param); +void nbl_phy_remove_leonis(void 
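+/* The phy ops table follows the same accessor pattern as the other ops
+ * tables; an illustrative sketch (the locals are assumptions):
+ *
+ *	struct nbl_phy_ops *phy_ops = NBL_PHY_OPS_TBL_TO_OPS(phy_tbl);
+ *	struct nbl_board_port_info board;
+ *
+ *	phy_ops->get_board_info(NBL_PHY_OPS_TBL_TO_PRIV(phy_tbl), &board);
+ */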
*p); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h new file mode 100644 index 0000000000000000000000000000000000000000..5fa6a50c13bb28c8d34abcec4f363c493c3c3e38 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_resource.h @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Bennie Yan + */ + +#ifndef _NBL_DEF_RESOURCE_H_ +#define _NBL_DEF_RESOURCE_H_ + +#include "nbl_include.h" + +#define NBL_RES_OPS_TBL_TO_OPS(res_ops_tbl) ((res_ops_tbl)->ops) +#define NBL_RES_OPS_TBL_TO_PRIV(res_ops_tbl) ((res_ops_tbl)->priv) + +struct nbl_resource_pt_ops { + netdev_tx_t (*start_xmit)(struct sk_buff *skb, struct net_device *netdev); + netdev_tx_t (*rep_xmit)(struct sk_buff *skb, struct net_device *netdev); + netdev_tx_t (*self_test_xmit)(struct sk_buff *skb, struct net_device *netdev); + int (*napi_poll)(struct napi_struct *napi, int budget); + int (*xdp_xmit)(struct net_device *netdev, int n, struct xdp_frame **frame, u32 flags); +}; + +struct nbl_resource_ops { + int (*init_chip_module)(void *priv); + void (*get_resource_pt_ops)(void *priv, struct nbl_resource_pt_ops *pt_ops); + int (*queue_init)(void *priv); + int (*vsi_init)(void *priv); + int (*configure_msix_map)(void *priv, u16 func_id, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en); + int (*destroy_msix_map)(void *priv, u16 func_id); + int (*enable_mailbox_irq)(void *priv, u16 func_id, u16 vector_id, bool enable_msix); + int (*enable_abnormal_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); + u16 (*get_global_vector)(void *priv, u16 vsi_id, u16 local_vector_id); + u16 (*get_msix_entry_id)(void *priv, u16 vsi_id, u16 local_vector_id); + u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 senser_id); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_hwmon_type type); + int (*get_mbx_irq_num)(void *priv); + int (*get_adminq_irq_num)(void *priv); + int (*get_abnormal_irq_num)(void *priv); + + int (*alloc_rings)(void *priv, struct net_device *netdev, struct nbl_ring_param *param); + void (*remove_rings)(void *priv); + dma_addr_t (*start_tx_ring)(void *priv, u8 ring_index); + void (*stop_tx_ring)(void *priv, u8 ring_index); + dma_addr_t (*start_rx_ring)(void *priv, u8 ring_index, bool use_napi); + void (*stop_rx_ring)(void *priv, u8 ring_index); + void (*update_rx_ring)(void *priv, u16 index); + void (*kick_rx_ring)(void *priv, u16 index); + int (*dump_ring)(void *priv, struct seq_file *m, bool is_tx, int index); + int (*dump_ring_stats)(void *priv, struct seq_file *m, bool is_tx, int index); + void (*set_rings_xdp_prog)(void *priv, void *prog); + int (*register_xdp_rxq)(void *priv, u8 ring_index); + void (*unregister_xdp_rxq)(void *priv, u8 ring_index); + struct napi_struct *(*get_vector_napi)(void *priv, u16 index); + void (*set_vector_info)(void *priv, u8 *irq_enable_base, u32 irq_data, + u16 index, bool mask_en); + void (*register_vsi_ring)(void *priv, u16 vsi_index, u16 ring_offset, u16 ring_num); + int (*register_net)(void *priv, u16 func_id, + struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result); + int (*unregister_net)(void *priv, u16 func_id); + int (*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num); + void (*free_txrx_queues)(void *priv, u16 vsi_id); + int 
(*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id, + u16 queue_offset, u16 queue_num); + int (*setup_q2vsi)(void *priv, u16 vsi_id); + void (*remove_q2vsi)(void *priv, u16 vsi_id); + int (*setup_rss)(void *priv, u16 vsi_id); + void (*remove_rss)(void *priv, u16 vsi_id); + int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx); + void (*remove_all_queues)(void *priv, u16 vsi_id); + int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld); + int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps); + void (*remove_cqs)(void *priv, u16 vsi_id); + int (*cfg_qdisc_mqprio)(void *priv, struct nbl_tc_qidsc_param *param); + void (*clear_queues)(void *priv, u16 vsi_id); + int (*check_offload_status)(void *priv, bool *is_down); + u16 (*get_local_queue_id)(void *priv, u16 vsi_id, u16 global_queue_id); + u16 (*get_global_queue_id)(void *priv, u16 vsi_id, u16 local_queue_id); + + u8* (*get_msix_irq_enable_info)(void *priv, u16 global_vector_id, u32 *irq_data); + + int (*set_spoof_check_addr)(void *priv, u16 vsi_id, u8 *mac); + int (*set_vf_spoof_check)(void *priv, u16 vsi_id, int vfid, u8 enable); + void (*get_base_mac_addr)(void *priv, u8 *mac); + + int (*add_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + void (*del_macvlan)(void *priv, u8 *mac, u16 vlan, u16 vsi); + int (*add_lag_flow)(void *priv, u16 vsi); + void (*del_lag_flow)(void *priv, u16 vsi); + int (*add_lldp_flow)(void *priv, u16 vsi); + void (*del_lldp_flow)(void *priv, u16 vsi); + int (*add_multi_rule)(void *priv, u16 vsi); + void (*del_multi_rule)(void *priv, u16 vsi); + int (*setup_multi_group)(void *priv); + void (*remove_multi_group)(void *priv); + void (*clear_accel_flow)(void *priv, u16 vsi_id); + void (*clear_flow)(void *priv, u16 vsi_id); + void (*dump_flow)(void *priv, struct seq_file *m); + + u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id); + int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode); + u32 (*get_tx_headroom)(void *priv); + void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); + void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); + void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + + void (*set_eswitch_mode)(void *priv, u16 switch_mode); + u16 (*get_eswitch_mode)(void *priv); + int (*alloc_rep_data)(void *priv, int num_vfs, u16 vf_base_vsi_id); + void (*free_rep_data)(void *priv); + void (*set_rep_netdev_info)(void *priv, void *rep_data); + void (*unset_rep_netdev_info)(void *priv); + struct net_device *(*get_rep_netdev_info)(void *priv, u16 rep_data_index); + int (*disable_phy_flow)(void *priv, u8 eth_id); + int (*enable_phy_flow)(void *priv, u8 eth_id); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 eth_id, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 eth_id); + void (*set_shaping_dport_vld)(void *priv, u8 eth_id, bool vld); + void (*set_dport_fc_th_vld)(void *priv, u8 eth_id, bool vld); + void (*get_rep_stats)(void *priv, u16 rep_vsi_id, + struct nbl_rep_stats *rep_stats, bool is_tx); + u16 (*get_rep_index)(void *priv, u16 vsi_id); + void (*get_queue_stats)(void *priv, u8 queue_id, + struct nbl_queue_stats *queue_stats, bool is_tx); + int (*get_queue_err_stats)(void *priv, u16 func_id, u8 queue_id, + struct nbl_queue_err_stats *queue_err_stats, bool is_tx); + void (*get_net_stats)(void *priv, struct nbl_stats *queue_stats); + void 
(*get_private_stat_len)(void *priv, u32 *len); + void (*get_private_stat_data)(void *priv, u32 eth_id, u64 *data); + void (*fill_private_stat_strings)(void *priv, u8 *strings); + u16 (*get_max_desc_num)(void); + u16 (*get_min_desc_num)(void); + u16 (*get_tx_desc_num)(void *priv, u32 ring_index); + u16 (*get_rx_desc_num)(void *priv, u32 ring_index); + void (*set_tx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*set_rx_desc_num)(void *priv, u32 ring_index, u16 desc_num); + void (*get_coalesce)(void *priv, u16 func_id, u16 vector_id, + struct nbl_chan_param_get_coalesce *ec); + void (*set_coalesce)(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 pnum, u16 rate); + u16 (*get_intr_suppress_level)(void *priv, u64 rate, u16 last_level); + void (*set_intr_suppress_level)(void *priv, u16 func_id, u16 vector_id, + u16 num_net_msix, u16 level); + void (*get_rxfh_indir_size)(void *priv, u16 vsi_id, u32 *rxfh_indir_size); + void (*get_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir); + void (*get_rxfh_rss_key_size)(void *priv, u32 *rxfh_rss_key_size); + void (*get_rxfh_rss_key)(void *priv, u8 *rss_key); + void (*get_rss_alg_sel)(void *priv, u8 *alg_sel, u8 eth_id); + int (*get_firmware_version)(void *priv, char *firmware_version); + int (*get_driver_info)(void *priv, struct nbl_driver_info *driver_info); + int (*nway_reset)(void *priv, u8 eth_id); + void (*cfg_txrx_vlan)(void *priv, u16 vlan_tci, u16 vlan_proto, u8 vsi_index); + + void (*setup_rdma_id)(void *priv); + void (*remove_rdma_id)(void *priv); + void (*register_rdma)(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param); + void (*unregister_rdma)(void *priv, u16 vsi_id); + void (*register_rdma_bond)(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param); + void (*unregister_rdma_bond)(void *priv, u16 lag_id); + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); + u16 (*get_function_id)(void *priv, u16 vsi_id); + void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + + int (*enable_lag_protocol)(void *priv, u16 eth_id, bool lag_en); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_duppkt_info)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_duppkt_mcc)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_mcc)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable); + + int (*init_port)(void *priv); + int (*get_port_attributes)(void *priv); + int (*update_ring_num)(void *priv); + int (*update_rdma_cap)(void *priv); + int (*update_rdma_mem_type)(void *priv); + u16 (*get_rdma_cap_num)(void *priv); + int (*set_ring_num)(void *priv, struct nbl_fw_cmd_net_ring_num_param *param); + int (*enable_port)(void *priv, bool enable); + int (*cfg_eth_bond_info)(void *priv, struct nbl_lag_member_list_param *param); + int (*get_eth_bond_info)(void *priv, struct nbl_bond_param *param); + void (*cfg_eth_bond_event)(void *priv, bool enable); + void (*recv_port_notify)(void *priv, void *data); + int 
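+	/*
+	 * Illustrative carrier-state sketch (assumption; the layout of struct
+	 * nbl_eth_link_info is not shown here, so "link_status" is a
+	 * hypothetical member): a watchdog can mirror the firmware's link view
+	 * through the get_link_state op just below:
+	 *
+	 *	struct nbl_eth_link_info info = {};
+	 *
+	 *	if (!ops->get_link_state(res_priv, eth_id, &info) && info.link_status)
+	 *		netif_carrier_on(netdev);
+	 *	else
+	 *		netif_carrier_off(netdev);
+	 */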
(*get_port_state)(void *priv, u8 eth_id, struct nbl_port_state *port_state); + int (*set_port_advertising)(void *priv, struct nbl_port_advertising *port_advertising); + int (*get_module_info)(void *priv, u8 eth_id, struct ethtool_modinfo *info); + int (*get_module_eeprom)(void *priv, u8 eth_id, struct ethtool_eeprom *eeprom, u8 *data); + int (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info); + int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); + int (*process_abnormal_event)(void *priv, struct nbl_abnormal_event_info *abnormal_info); + int (*ctrl_port_led)(void *priv, u8 eth_id, enum nbl_led_reg_ctrl led_ctrl, u32 *led_reg); + void (*adapt_desc_gother)(void *priv); + void (*flr_clear_net)(void *priv, u16 vfid); + void (*flr_clear_queues)(void *priv, u16 vfid); + void (*flr_clear_accel_flow)(void *priv, u16 vfid); + void (*flr_clear_flows)(void *priv, u16 vfid); + void (*flr_clear_interrupt)(void *priv, u16 vfid); + void (*flr_clear_accel)(void *priv, u16 vfid); + void (*flr_clear_rdma)(void *priv, u16 vfid); + u16 (*covert_vfid_to_vsi_id)(void *priv, u16 vfid); + void (*unmask_all_interrupts)(void *priv); + int (*set_bridge_mode)(void *priv, u16 func_id, u16 bmode); + u16 (*get_vf_function_id)(void *priv, u16 vsi_id, int vf_id); + u16 (*get_vf_vsi_id)(void *priv, u16 vsi_id, int vf_id); + + bool (*check_fw_heartbeat)(void *priv); + bool (*check_fw_reset)(void *priv); + int (*flash_lock)(void *priv); + int (*flash_unlock)(void *priv); + int (*flash_prepare)(void *priv); + int (*flash_image)(void *priv, u32 module, const u8 *data, size_t len); + int (*flash_activate)(void *priv); + void (*get_phy_caps)(void *priv, u8 eth_id, struct nbl_phy_caps *phy_caps); + int (*set_sfp_state)(void *priv, u8 eth_id, u8 state); + int (*setup_loopback)(void *priv, u32 eth_id, u32 enable); + struct sk_buff *(*clean_rx_lb_test)(void *priv, u32 ring_index); + int (*passthrough_fw_cmd)(void *priv, struct nbl_passthrough_fw_cmd_param *param, + struct nbl_passthrough_fw_cmd_param *result); + + u32 (*check_active_vf)(void *priv, u16 func_id); + int (*get_board_id)(void *priv); + + void (*get_reg_dump)(void *priv, u32 *data, u32 len); + int (*get_reg_dump_len)(void *priv); + + bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); + bool (*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); + int (*alloc_ktls_tx_index)(void *priv, u16 vsi); + void (*free_ktls_tx_index)(void *priv, u32 index); + void (*cfg_ktls_tx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + int (*alloc_ktls_rx_index)(void *priv, u16 vsi); + void (*free_ktls_rx_index)(void *priv, u32 index); + void (*cfg_ktls_rx_keymat)(void *priv, u32 index, u8 mode, u8 *salt, u8 *key, u8 key_len); + void (*cfg_ktls_rx_record)(void *priv, u32 index, u32 tcp_sn, u64 rec_num, bool init); + int (*add_ktls_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ktls_rx_flow)(void *priv, u32 index); + + int (*alloc_ipsec_tx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_tx_index)(void *priv, u32 index); + int (*alloc_ipsec_rx_index)(void *priv, struct nbl_ipsec_cfg_info *cfg_info); + void (*free_ipsec_rx_index)(void *priv, u32 index); + void (*cfg_ipsec_tx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + void (*cfg_ipsec_rx_sad)(void *priv, u32 index, struct nbl_ipsec_sa_entry *sa_entry); + int (*add_ipsec_tx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_tx_flow)(void *priv, u32 index); + int 
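+	/*
+	 * Illustrative kTLS lifecycle sketch (assumption, not taken from the
+	 * driver): the index ops above pair up as alloc -> configure -> free,
+	 * e.g. for a TX context with hypothetical mode/salt/key locals:
+	 *
+	 *	int idx = ops->alloc_ktls_tx_index(res_priv, vsi);
+	 *
+	 *	if (idx >= 0) {
+	 *		ops->cfg_ktls_tx_keymat(res_priv, idx, mode, salt, key, key_len);
+	 *		...
+	 *		ops->free_ktls_tx_index(res_priv, idx);
+	 *	}
+	 */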
(*add_ipsec_rx_flow)(void *priv, u32 index, u32 *data, u16 vsi); + void (*del_ipsec_rx_flow)(void *priv, u32 index); + bool (*check_ipsec_status)(void *priv); + u32 (*get_dipsec_lft_info)(void *priv); + void (*handle_dipsec_soft_expire)(void *priv, u32 index); + void (*handle_dipsec_hard_expire)(void *priv, u32 index); + u32 (*get_uipsec_lft_info)(void *priv); + void (*handle_uipsec_soft_expire)(void *priv, u32 index); + void (*handle_uipsec_hard_expire)(void *priv, u32 index); + + dma_addr_t (*restore_abnormal_ring)(void *priv, int ring_index, int type); + int (*restart_abnormal_ring)(void *priv, int ring_index, int type); + int (*restore_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, + dma_addr_t dma, int type); + int (*stop_abnormal_sw_queue)(void *priv, u16 local_queue_id, int type); + int (*stop_abnormal_hw_queue)(void *priv, u16 vsi_id, u16 local_queue_id, int type); + + void (*register_func_mac)(void *priv, u8 *mac, u16 func_id); + int (*register_func_link_forced)(void *priv, u16 func_id, u8 link_forced, + bool *should_notify); + int (*register_func_vlan)(void *priv, u16 func_id, + u16 vlan_tci, u16 vlan_proto, bool *should_notify); + int (*register_func_rate)(void *priv, u16 func_id, int rate); + int (*get_link_forced)(void *priv, u16 vsi_id); + int (*set_tx_rate)(void *priv, u16 func_id, int tx_rate); + + void (*get_driver_version)(void *priv, char *ver, int len); + + int (*get_fd_flow)(void *priv, u16 vsi_id, u32 location, + enum nbl_chan_fdir_rule_type rule_type, + struct nbl_chan_param_fdir_replace *cmd); + int (*get_fd_flow_cnt)(void *priv, enum nbl_chan_fdir_rule_type rule_type, u16 vsi_id); + int (*config_fd_flow_state)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u16 vsi_id, u16 state); + int (*get_fd_flow_all)(void *priv, struct nbl_chan_param_get_fd_flow_all *param, + u32 *rule_locs); + int (*get_fd_flow_max)(void *priv); + + int (*replace_fd_flow)(void *priv, struct nbl_chan_param_fdir_replace *info); + int (*remove_fd_flow)(void *priv, enum nbl_chan_fdir_rule_type rule_type, + u32 loc, u16 vsi_id); + void (*dump_fd_flow)(void *priv, struct seq_file *m); + void (*cfg_fd_update_event)(void *priv, bool enable); + + /* for vdpa driver */ + int (*cfg_queue_log)(void *priv, u16 vsi_id, u16 qps, bool vld); + u16 (*get_queue_ctx)(void *priv, u16 vsi_id, u16 qid); + int (*init_vdpaq)(void *priv, u16 func_id, u64 pa, u32 size); + void (*destroy_vdpaq)(void *priv); + int (*get_upcall_port)(void *priv, u16 *bdf); + + /* for pmd driver */ + void (*register_net_rep)(void *priv, u16 pf_id, u16 vf_id, + struct nbl_register_net_rep_result *result); + void (*unregister_net_rep)(void *priv, u16 vsi_id); + void (*register_eth_rep)(void *priv, u8 eth_id); + void (*unregister_eth_rep)(void *priv, u8 eth_id); + u16 (*get_vsi_global_queue_id)(void *priv, u16 vsi_id, u16 local_qid); + void (*get_line_rate_info)(void *priv, void *data, void *result); + int (*register_upcall_port)(void *priv, u16 func_id); + void (*unregister_upcall_port)(void *priv, u16 func_id); + void (*set_offload_status)(void *priv, u16 func_id); + void (*init_offload_fwd)(void *priv, u16 vsi_id); + int (*add_nd_upcall_flow)(void *priv, u16 vsi_id, bool mode); + void (*del_nd_upcall_flow)(void *priv); + void (*init_cmdq)(void *priv, void *data, u16 func_id); + void (*reset_cmdq)(void *priv); + void (*destroy_cmdq)(void *priv); + void (*init_rep)(void *priv, u16 vsi_id, u8 inner_type, + u8 outer_type, u8 rep_type); + void (*init_flow)(void *priv, void *param); + void (*deinit_flow)(void *priv); + void 
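+	/*
+	 * Illustrative recovery sketch (an assumption based purely on the names
+	 * of the abnormal-ring ops earlier in this table; the real ordering
+	 * lives in the .c files): stop the queue pair, restore the ring, then
+	 * restart it:
+	 *
+	 *	ops->stop_abnormal_sw_queue(res_priv, local_qid, type);
+	 *	ops->stop_abnormal_hw_queue(res_priv, vsi_id, local_qid, type);
+	 *	dma = ops->restore_abnormal_ring(res_priv, ring_index, type);
+	 *	ops->restore_hw_queue(res_priv, vsi_id, local_qid, dma, type);
+	 *	ops->restart_abnormal_ring(res_priv, ring_index, type);
+	 */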
(*offload_flow_rule)(void *priv, void *data); + void (*get_flow_acl_switch)(void *priv, u8 *acl_enable); + void (*get_board_info)(void *priv, struct nbl_board_port_info *board_info); + + /* For virtio */ + void (*configure_virtio_dev_msix)(void *priv, u16 vector); + void (*configure_rdma_msix_off)(void *priv, u16 vector); + void (*configure_virtio_dev_ready)(void *priv); + + int (*switchdev_init_cmdq)(void *priv); + int (*switchdev_deinit_cmdq)(void *priv, u8 index); + int (*add_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*del_tc_flow)(void *priv, struct nbl_tc_flow_param *param); + int (*flow_index_lookup)(void *priv, struct nbl_flow_index_key key); + + bool (*tc_tun_encap_lookup)(void *priv, struct nbl_rule_action *rule_act, + struct nbl_tc_flow_param *param); + int (*tc_tun_encap_del)(void *priv, struct nbl_encap_key *key); + int (*tc_tun_encap_add)(void *priv, struct nbl_rule_action *action); + + int (*set_tc_flow_info)(void *priv); + int (*unset_tc_flow_info)(void *priv); + int (*get_tc_flow_info)(void *priv); + int (*query_tc_stats)(void *priv, struct nbl_stats_param *param); + + u32 (*get_p4_version)(void *priv); + int (*get_p4_info)(void *priv, char *verify_code); + int (*load_p4)(void *priv, struct nbl_load_p4_param *param); + int (*load_p4_default)(void *priv); + int (*get_p4_used)(void *priv); + int (*set_p4_used)(void *priv, int p4_type); + + u16 (*get_vf_base_vsi_id)(void *priv, u16 pf_id); + + int (*set_pmd_debug)(void *priv, bool pmd_debug); + + void (*get_xdp_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); + void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); + int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + int (*set_eth_pfc)(void *priv, u8 eth_id, u8 *pfc); + int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); +}; + +struct nbl_resource_ops_tbl { + struct nbl_resource_ops *ops; + void *priv; +}; + +int nbl_res_init_leonis(void *p, struct nbl_init_param *param); +void nbl_res_remove_leonis(void *p); +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h new file mode 100644 index 0000000000000000000000000000000000000000..c7ba4b56d7e859769e28fbca98dc5905c158af17 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_def_service.h @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_DEF_SERVICE_H_ +#define _NBL_DEF_SERVICE_H_ + +#include "nbl_include.h" + +#define NBL_SERV_OPS_TBL_TO_OPS(serv_ops_tbl) ((serv_ops_tbl)->ops) +#define NBL_SERV_OPS_TBL_TO_PRIV(serv_ops_tbl) ((serv_ops_tbl)->priv) + +struct nbl_service_traffic_switch { + u16 normal_vsi; + u16 sync_other_vsi; + u16 async_other_vsi; + bool promisc; + bool has_lacp; + bool has_lldp; +}; + +struct nbl_service_ops { + int (*init_chip_factory)(void *priv); + int (*destroy_chip_factory)(void *p); + int (*init_chip)(void *p); + int (*destroy_chip)(void *p); + int (*configure_msix_map)(void *p, u16 num_net_msix, u16 num_others_msix, + bool net_msix_mask_en); + int (*destroy_msix_map)(void *priv); + int (*enable_mailbox_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_abnormal_irq)(void *p, u16 vector_id, bool enable_msix); + int (*enable_adminq_irq)(void *p, u16 vector_id, bool enable_msix); + int (*request_net_irq)(void *priv, struct nbl_msix_info_param *msix_info); + void (*free_net_irq)(void *priv, struct nbl_msix_info_param *msix_info); + u16 (*get_global_vector)(void *priv, u16 local_vector_id); + u16 (*get_msix_entry_id)(void *priv, u16 local_vector_id); + void (*get_common_irq_num)(void *priv, struct nbl_common_irq_num *irq_num); + void (*get_ctrl_irq_num)(void *priv, struct nbl_ctrl_irq_num *irq_num); + int (*get_port_attributes)(void *p); + int (*update_template_config)(void *priv); + int (*enable_port)(void *p, bool enable); + void (*init_port)(void *priv); + void (*set_netdev_carrier_state)(void *p, struct net_device *netdev, u8 link_state); + + int (*vsi_open)(void *priv, struct net_device *netdev, u16 vsi_index, + u16 real_qps, bool use_napi); + int (*vsi_stop)(void *priv, u16 vsi_index); + int (*switch_traffic_default_dest)(void *priv, struct nbl_service_traffic_switch *info); + int (*config_fd_flow_state)(void *priv, enum nbl_chan_fdir_rule_type type, u32 state); + + int (*netdev_open)(struct net_device *netdev); + int (*netdev_stop)(struct net_device *netdev); + netdev_tx_t (*start_xmit)(struct sk_buff *skb, struct net_device *netdev); + int (*change_mtu)(struct net_device *netdev, int new_mtu); + void (*get_stats64)(struct net_device *netdev, struct rtnl_link_stats64 *stats); + void (*set_rx_mode)(struct net_device *dev); + void (*change_rx_flags)(struct net_device *dev, int flag); + int (*set_mac)(struct net_device *dev, void *p); + int (*rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*set_features)(struct net_device *dev, netdev_features_t features); + netdev_features_t (*features_check)(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features); + int (*setup_tc)(struct net_device *dev, enum tc_setup_type type, void *type_data); + int (*set_vf_spoofchk)(struct net_device *netdev, int vf_id, bool ena); + void (*tx_timeout)(struct net_device *netdev, u32 txqueue); + int (*bridge_setlink)(struct net_device *netdev, struct nlmsghdr *nlh, + u16 flags, struct netlink_ext_ack *extack); + + int (*bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags); + int (*set_vf_link_state)(struct net_device *dev, int vf_id, int link_state); + int (*set_vf_mac)(struct net_device *netdev, int vf_id, u8 *mac); + int (*set_vf_rate)(struct net_device *netdev, int vf_id, int min_rate, int max_rate); + int (*set_vf_vlan)(struct net_device *dev, int vf_id, u16 vlan, u8 pri, __be16 proto); + int 
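+	/*
+	 * Wiring sketch (assumption): the callbacks in this block deliberately
+	 * use net_device_ops signatures, so the netdev layer can forward to
+	 * them through thin wrappers; "nbl_ndo_open" and "serv_ops" below are
+	 * hypothetical names:
+	 *
+	 *	static int nbl_ndo_open(struct net_device *netdev)
+	 *	{
+	 *		return serv_ops->netdev_open(netdev);
+	 *	}
+	 */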
(*get_vf_config)(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi); + u16 (*select_queue)(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev); + int (*get_phys_port_name)(struct net_device *dev, char *name, size_t len); + int (*get_port_parent_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + + int (*register_net)(void *priv, struct nbl_register_net_param *register_param, + struct nbl_register_net_result *register_result); + int (*unregister_net)(void *priv); + int (*setup_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num, u16 net_vector_id); + void (*remove_txrx_queues)(void *priv, u16 vsi_id); + int (*register_vsi_info)(void *priv, struct nbl_vsi_param *vsi_param); + int (*init_tx_rate)(void *priv, u16 vsi_id); + int (*setup_q2vsi)(void *priv, u16 vsi_id); + void (*remove_q2vsi)(void *priv, u16 vsi_id); + int (*setup_rss)(void *priv, u16 vsi_id); + void (*remove_rss)(void *priv, u16 vsi_id); + int (*check_offload_status)(void *priv); + u32 (*get_chip_temperature)(void *priv, enum nbl_hwmon_type type, u32 sensor_id); + int (*get_module_temperature)(void *priv, u8 eth_id, enum nbl_hwmon_type type); + + int (*alloc_rings)(void *priv, struct net_device *dev, struct nbl_ring_param *param); + void (*cpu_affinity_init)(void *priv, u16 rings_num); + void (*free_rings)(void *priv); + int (*enable_napis)(void *priv, u16 vsi_index); + void (*disable_napis)(void *priv, u16 vsi_index); + void (*set_mask_en)(void *priv, bool enable); + int (*start_net_flow)(void *priv, struct net_device *dev, u16 vsi_id, u16 vid); + void (*stop_net_flow)(void *priv, u16 vsi_id); + int (*set_lldp_flow)(void *priv, u16 vsi_id); + void (*remove_lldp_flow)(void *priv, u16 vsi_id); + int (*start_mgt_flow)(void *priv); + void (*stop_mgt_flow)(void *priv); + u32 (*get_tx_headroom)(void *priv); + int (*set_spoof_check_addr)(void *priv, u8 *mac); + + u16 (*get_vsi_id)(void *priv, u16 func_id, u16 type); + void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id, u8 *logic_eth_id); + void (*debugfs_init)(void *priv); + void (*debugfs_netops_create)(void *priv, u16 tx_queue_num, u16 rx_queue_num); + void (*debugfs_ctrlops_create)(void *priv); + void (*debugfs_exit)(void *priv); + int (*setup_net_resource_mgt)(void *priv, struct net_device *dev, + u16 vlan_proto, u16 vlan_tci, u32 rate); + void (*remove_net_resource_mgt)(void *priv); + int (*enable_lag_protocol)(void *priv, u16 eth_id, bool lag_en); + int (*cfg_lag_hash_algorithm)(void *priv, u16 eth_id, u16 lag_id, + enum netdev_lag_hash hash_type); + int (*cfg_lag_member_fwd)(void *priv, u16 eth_id, u16 lag_id, u8 fwd); + int (*cfg_lag_member_list)(void *priv, struct nbl_lag_member_list_param *param); + int (*cfg_lag_member_up_attr)(void *priv, u16 eth_id, u16 lag_id, bool enable); + int (*cfg_bond_shaping)(void *priv, u8 eth_id, bool enable); + void (*cfg_bgid_back_pressure)(void *priv, u8 main_eth_id, u8 other_eth_id, bool enable); + void (*set_sfp_state)(void *priv, struct net_device *netdev, u8 eth_id, + bool open, bool is_force); + int (*get_board_id)(void *priv); + void (*cfg_eth_bond_event)(void *priv, bool enable); + + /* rep associated */ + int (*rep_netdev_open)(struct net_device *netdev); + int (*rep_netdev_stop)(struct net_device *netdev); + netdev_tx_t (*rep_start_xmit)(struct sk_buff *skb, struct net_device *netdev); + void (*rep_get_stats64)(struct net_device *netdev, struct rtnl_link_stats64 *stats); + void (*rep_set_rx_mode)(struct net_device *dev); + int (*rep_set_mac)(struct net_device *dev, void 
*p); + int (*rep_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*rep_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); + int (*rep_setup_tc)(struct net_device *dev, enum tc_setup_type type, void *type_data); + int (*rep_get_phys_port_name)(struct net_device *dev, char *name, size_t len); + int (*rep_get_port_parent_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + void (*get_rep_feature)(void *priv, struct nbl_register_net_result *register_result); + void (*get_rep_queue_num)(void *priv, u8 *base_queue_id, u8 *rep_queue_num); + int (*alloc_rep_queue_mgt)(void *priv, struct net_device *netdev); + void (*get_rep_queue_info)(void *priv, u16 *queue_num, u16 *queue_size); + void (*get_user_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + int (*free_rep_queue_mgt)(void *priv); + void (*set_eswitch_mode)(void *priv, u16 switch_mode); + u16 (*get_eswitch_mode)(void *priv); + int (*alloc_rep_data)(void *priv, int num_vfs, u16 vf_base_vsi_id); + void (*free_rep_data)(void *priv); + void (*set_rep_netdev_info)(void *priv, void *rep_data); + void (*unset_rep_netdev_info)(void *priv); + int (*disable_phy_flow)(void *priv, u8 eth_id); + int (*enable_phy_flow)(void *priv, u8 eth_id); + void (*init_acl)(void *priv); + void (*uninit_acl)(void *priv); + int (*set_upcall_rule)(void *priv, u8 eth_id, u16 vsi_id); + int (*unset_upcall_rule)(void *priv, u8 eth_id); + int (*switchdev_init_cmdq)(void *priv); + int (*switchdev_deinit_cmdq)(void *priv); + int (*set_tc_flow_info)(void *priv); + int (*unset_tc_flow_info)(void *priv); + int (*get_tc_flow_info)(void *priv); + int (*register_indr_dev_tc_offload)(void *priv, struct net_device *netdev); + void (*unregister_indr_dev_tc_offload)(void *priv, struct net_device *netdev); + void (*set_lag_info)(void *priv, struct net_device *bond_netdev, u8 lag_id); + void (*unset_lag_info)(void *priv); + void (*set_netdev_ops)(void *priv, const struct net_device_ops *net_device_ops, bool is_pf); + + /* ethtool */ + void (*get_drvinfo)(struct net_device *netdev, struct ethtool_drvinfo *drvinfo); + int (*get_module_eeprom)(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data); + int (*get_module_info)(struct net_device *netdev, struct ethtool_modinfo *info); + int (*get_eeprom_length)(struct net_device *netdev); + int (*get_eeprom)(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes); + void (*get_strings)(struct net_device *netdev, u32 stringset, u8 *data); + int (*get_sset_count)(struct net_device *netdev, int sset); + void (*get_ethtool_stats)(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); + void (*get_channels)(struct net_device *netdev, struct ethtool_channels *channels); + int (*set_channels)(struct net_device *netdev, struct ethtool_channels *channels); + u32 (*get_link)(struct net_device *netdev); + int (*get_ksettings)(struct net_device *netdev, struct ethtool_link_ksettings *cmd); + int (*set_ksettings)(struct net_device *netdev, const struct ethtool_link_ksettings *cmd); + void (*get_ringparam)(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack); + int (*set_ringparam)(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *k_ringparam, + struct netlink_ext_ack *extack); + + int (*get_coalesce)(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct 
netlink_ext_ack *extack); + int (*set_coalesce)(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack); + + int (*get_rxnfc)(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs); + int (*set_rxnfc)(struct net_device *netdev, struct ethtool_rxnfc *cmd); + u32 (*get_rxfh_indir_size)(struct net_device *netdev); + u32 (*get_rxfh_key_size)(struct net_device *netdev); + int (*get_rxfh)(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); + u32 (*get_msglevel)(struct net_device *netdev); + void (*set_msglevel)(struct net_device *netdev, u32 msglevel); + int (*get_regs_len)(struct net_device *netdev); + void (*get_ethtool_dump_regs)(struct net_device *netdev, + struct ethtool_regs *regs, void *p); + int (*get_per_queue_coalesce)(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec); + int (*set_per_queue_coalesce)(struct net_device *netdev, + u32 q_num, struct ethtool_coalesce *ec); + void (*self_test)(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data); + u32 (*get_priv_flags)(struct net_device *netdev); + int (*set_priv_flags)(struct net_device *netdev, u32 priv_flags); + int (*set_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); + void (*get_pause_param)(struct net_device *netdev, struct ethtool_pauseparam *param); + int (*set_fec_param)(struct net_device *netdev, struct ethtool_fecparam *fec); + int (*get_fec_param)(struct net_device *netdev, struct ethtool_fecparam *fec); + int (*get_ts_info)(struct net_device *netdev, struct ethtool_ts_info *ts_info); + int (*set_phys_id)(struct net_device *netdev, enum ethtool_phys_id_state state); + int (*nway_reset)(struct net_device *netdev); + void (*get_rep_strings)(struct net_device *netdev, u32 stringset, u8 *data); + int (*get_rep_sset_count)(struct net_device *netdev, int sset); + void (*get_rep_ethtool_stats)(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); + + u16 (*get_rdma_cap_num)(void *priv); + void (*setup_rdma_id)(void *priv); + void (*remove_rdma_id)(void *priv); + void (*register_rdma)(void *priv, u16 vsi_id, struct nbl_rdma_register_param *param); + void (*unregister_rdma)(void *priv, u16 vsi_id); + void (*register_rdma_bond)(void *priv, struct nbl_lag_member_list_param *list_param, + struct nbl_rdma_register_param *register_param); + void (*unregister_rdma_bond)(void *priv, u16 lag_id); + u8 __iomem * (*get_hw_addr)(void *priv, size_t *size); + u64 (*get_real_hw_addr)(void *priv, u16 vsi_id); + u16 (*get_function_id)(void *priv, u16 vsi_id); + void (*get_real_bdf)(void *priv, u16 vsi_id, u8 *bus, u8 *dev, u8 *function); + int (*set_eth_mac_addr)(void *priv, u8 *mac, u8 eth_id); + int (*process_abnormal_event)(void *priv); + void (*adapt_desc_gother)(void *priv); + void (*process_flr)(void *priv, u16 vfid); + u16 (*covert_vfid_to_vsi_id)(void *priv, u16 vfid); + void (*recovery_abnormal)(void *priv); + void (*keep_alive)(void *priv); + + int (*get_devlink_info)(struct devlink *devlink, struct devlink_info_req *req, + struct netlink_ext_ack *extack); + int (*update_devlink_flash)(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack); + u32 (*get_adminq_tx_buf_size)(void *priv); + int (*emp_console_write)(void *priv, char *buf, size_t count); + bool (*check_fw_heartbeat)(void *priv); + bool (*check_fw_reset)(void *priv); + + bool (*get_product_flex_cap)(void *priv, enum nbl_flex_cap_type cap_type); + bool 
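+	/*
+	 * Illustrative capability-gating sketch (assumption, including the
+	 * guess that check_fw_heartbeat() returns true while the firmware is
+	 * alive): optional background work is expected to be gated on the
+	 * product capability bits queried through the two ops around this
+	 * comment, e.g.:
+	 *
+	 *	if (serv_ops->get_product_fix_cap(serv_priv, NBL_TASK_FW_HB_CAP) &&
+	 *	    !serv_ops->check_fw_heartbeat(serv_priv))
+	 *		... firmware stopped responding, trigger recovery ...
+	 */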
(*get_product_fix_cap)(void *priv, enum nbl_fix_cap_type cap_type); +#ifdef CONFIG_TLS_DEVICE + int (*add_tls_dev)(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn); + void (*del_tls_dev)(struct net_device *netdev, struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction); + int (*resync_tls_dev)(struct net_device *netdev, struct sock *sk, + u32 tcp_seq, u8 *rec_num, + enum tls_offload_ctx_dir direction); + int (*add_xdo_dev_state)(struct xfrm_state *x, struct netlink_ext_ack *extack); + void (*delete_xdo_dev_state)(struct xfrm_state *x); + void (*free_xdo_dev_state)(struct xfrm_state *x); + bool (*xdo_dev_offload_ok)(struct sk_buff *skb, struct xfrm_state *x); + void (*xdo_dev_state_advance_esn)(struct xfrm_state *x); + bool (*check_ipsec_status)(void *priv); + void (*handle_ipsec_event)(void *priv); +#endif + void (*configure_virtio_dev_msix)(void *priv, u16 vector); + void (*configure_rdma_msix_off)(void *priv, u16 vector); + void (*configure_virtio_dev_ready)(void *priv); + + int (*setup_st)(void *priv, void *st_table_param); + void (*remove_st)(void *priv, void *st_table_param); + u16 (*get_vf_base_vsi_id)(void *priv, u16 func_id); + int (*setup_vf_config)(void *priv, int num_vfs, bool is_flush); + void (*remove_vf_config)(void *priv); + int (*setup_vf_resource)(void *priv, int num_vfs); + void (*remove_vf_resource)(void *priv); + void (*cfg_fd_update_event)(void *priv, bool enable); + + void (*get_xdp_queue_info)(void *priv, u16 *queue_num, u16 *queue_size, u16 vsi_id); + int (*set_xdp)(struct net_device *netdev, struct netdev_bpf *xdp); + void (*set_hw_status)(void *priv, enum nbl_hw_status hw_status); + void (*get_active_func_bitmaps)(void *priv, unsigned long *bitmap, int max_func); + int (*configure_qos)(void *priv, u8 eth_id, u8 *pfc, u8 trust, u8 *dscp2prio_map); + int (*get_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int *xoff, int *xon); + int (*set_pfc_buffer_size)(void *priv, u8 eth_id, u8 prio, int xoff, int xon); +}; + +struct nbl_service_ops_tbl { + struct nbl_resource_pt_ops pt_ops; + struct nbl_service_ops *ops; + void *priv; +}; + +int nbl_serv_init(void *priv, struct nbl_init_param *param); +void nbl_serv_remove(void *priv); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h new file mode 100644 index 0000000000000000000000000000000000000000..b81fa26728bfc52cd4dd319a4c8042c8a075ec5a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_include.h @@ -0,0 +1,1380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Bennie Yan + */ + +#ifndef _NBL_INCLUDE_H_ +#define _NBL_INCLUDE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_TLS_DEVICE +#include +#endif +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* ------ Basic definitions ------- */ +#define NBL_DRIVER_NAME "nbl_core" +#define NBL_REP_DRIVER_NAME "nbl_rep" +/* "product NO-V NO.R NO.B NO.SP NO" + * product NO define: + * 1 reserve for develop branch + * 2 df200 + * 3 ASIC snic + * 4 x4 + */ +#define NBL_DRIVER_VERSION "1-1.1.100.0" + +#define NBL_DRIVER_DEV_MAX 8 + +#define NBL_PAIR_ID_GET_TX(id) ((id) * 2 + 1) +#define NBL_PAIR_ID_GET_RX(id) ((id) * 2) + +#define NBL_MAX_PF 8 + +#define NBL_IPV6_ADDR_LEN_AS_U8 16 + +#define NBL_P4_NAME_LEN 64 + +#define NBL_FLOW_INDEX_BYTE_LEN 8 + +#define NBL_RATE_MBPS_100G (100000) +#define NBL_RATE_MBPS_25G (25000) + +#define NBL_NEXT_ID(id, max) ({ typeof(id) _id = (id); ((_id) == (max) ? 0 : (_id) + 1); }) +#define NBL_IPV6_U32LEN 4 + +/* macro for counter */ +#define NBL_FLOW_COUNT_NUM 8 +#define NBL_COUNTER_MAX_STAT_ID 2048 +/* counter_id + stat_id */ +#define NBL_COUNTER_MAX_ID (128 * 1024) + +#define NBL_TC_MCC_MEMBER_MAX 16 + +#define NBL_IP_VERSION_V4 4 +#define NBL_IP_VERSION_V6 6 +#define NBL_MAX_FUNC (520) + +/* key element: key flag bitmap */ +#define NBL_FLOW_KEY_TABLE_IDX_FLAG BIT_ULL(0) +#define NBL_FLOW_KEY_INPORT8_FLAG BIT_ULL(1) +#define NBL_FLOW_KEY_INPORT4_FLAG BIT_ULL(39) +#define NBL_FLOW_KEY_INPORT2_FLAG BIT_ULL(40) // error +#define NBL_FLOW_KEY_INPORT2L_FLAG BIT_ULL(41) // error +#define NBL_FLOW_KEY_T_DIPV4_FLAG BIT_ULL(2) +#define NBL_FLOW_KEY_T_DIPV6_FLAG BIT_ULL(3) +#define NBL_FLOW_KEY_T_OPT_DATA_FLAG BIT_ULL(4) +#define NBL_FLOW_KEY_T_VNI_FLAG BIT_ULL(5) +#define NBL_FLOW_KEY_T_DSTMAC_FLAG BIT_ULL(6) // error +#define NBL_FLOW_KEY_T_SRCMAC_FLAG BIT_ULL(7) // error +#define NBL_FLOW_KEY_T_SVLAN_FLAG BIT_ULL(8) // error +#define NBL_FLOW_KEY_T_CVLAN_FLAG BIT_ULL(9) // error +#define NBL_FLOW_KEY_T_ETHERTYPE_FLAG BIT_ULL(10) // error +#define NBL_FLOW_KEY_T_SRCPORT_FLAG BIT_ULL(11) +#define NBL_FLOW_KEY_T_DSTPORT_FLAG BIT_ULL(12) +#define NBL_FLOW_KEY_T_NPROTO_FLAG BIT_ULL(13) // delete +#define NBL_FLOW_KEY_T_OPT_CLASS_FLAG BIT_ULL(14) +#define NBL_FLOW_KEY_T_PROTOCOL_FLAG BIT_ULL(15) +#define NBL_FLOW_KEY_T_TCPSTAT_FLAG BIT_ULL(16) // delete +#define NBL_FLOW_KEY_T_TOS_FLAG BIT_ULL(17) +#define NBL_FLOW_KEY_T_TTL_FLAG BIT_ULL(18) +#define NBL_FLOW_KEY_SIPV4_FLAG BIT_ULL(19) +#define NBL_FLOW_KEY_SIPV6_FLAG BIT_ULL(20) +#define NBL_FLOW_KEY_DIPV4_FLAG BIT_ULL(21) +#define NBL_FLOW_KEY_DIPV6_FLAG BIT_ULL(22) +#define NBL_FLOW_KEY_DSTMAC_FLAG BIT_ULL(23) +#define NBL_FLOW_KEY_SRCMAC_FLAG BIT_ULL(24) +#define NBL_FLOW_KEY_SVLAN_FLAG BIT_ULL(25) +#define NBL_FLOW_KEY_CVLAN_FLAG BIT_ULL(26) +#define NBL_FLOW_KEY_ETHERTYPE_FLAG BIT_ULL(27) +#define NBL_FLOW_KEY_SRCPORT_FLAG BIT_ULL(28) +#define NBL_FLOW_KEY_ICMP_TYPE_FLAG BIT_ULL(28) +#define NBL_FLOW_KEY_DSTPORT_FLAG BIT_ULL(29) +#define NBL_FLOW_KEY_ICMP_CODE_FLAG BIT_ULL(29) +#define NBL_FLOW_KEY_ARP_OP_FLAG BIT_ULL(30) // error +#define NBL_FLOW_KEY_ICMPV6_TYPE_FLAG BIT_ULL(31) // error +#define NBL_FLOW_KEY_PROTOCOL_FLAG BIT_ULL(32) +#define NBL_FLOW_KEY_TCPSTAT_FLAG BIT_ULL(33) +#define 
NBL_FLOW_KEY_TOS_FLAG BIT_ULL(34) +#define NBL_FLOW_KEY_DSCP_FLAG BIT_ULL(34) +#define NBL_FLOW_KEY_TTL_FLAG BIT_ULL(35) +#define NBL_FLOW_KEY_HOPLIMIT_FLAG BIT_ULL(35) +#define NBL_FLOW_KEY_RDMA_ACK_SEQ_FLAG BIT_ULL(36) // error +#define NBL_FLOW_KEY_RDMA_QPN_FLAG BIT_ULL(37) // error +#define NBL_FLOW_KEY_RDMA_OP_FLAG BIT_ULL(38) // error +#define NBL_FLOW_KEY_EXEHASH_FLAG BIT_ULL(43) +#define NBL_FLOW_KEY_DPHASH_FLAG BIT_ULL(44) +#define NBL_FLOW_KEY_RECIRC_FLAG BIT_ULL(63) + +/* action flag */ +#define NBL_FLOW_ACTION_METADATA_FLAG BIT_ULL(1) +#define NBL_FLOW_ACTION_DROP BIT_ULL(2) +#define NBL_FLOW_ACTION_REDIRECT BIT_ULL(3) +#define NBL_FLOW_ACTION_MIRRED BIT_ULL(4) +#define NBL_FLOW_ACTION_TUNNEL_ENCAP BIT_ULL(5) +#define NBL_FLOW_ACTION_TUNNEL_DECAP BIT_ULL(6) +#define NBL_FLOW_ACTION_COUNTER BIT_ULL(7) +#define NBL_FLOW_ACTION_SET_IPV4_SRC_IP BIT_ULL(8) +#define NBL_FLOW_ACTION_SET_IPV4_DST_IP BIT_ULL(9) +#define NBL_FLOW_ACTION_SET_IPV6_SRC_IP BIT_ULL(10) +#define NBL_FLOW_ACTION_SET_IPV6_DST_IP BIT_ULL(11) +#define NBL_FLOW_ACTION_SET_SRC_MAC BIT_ULL(12) +#define NBL_FLOW_ACTION_SET_DST_MAC BIT_ULL(13) +#define NBL_FLOW_ACTION_SET_SRC_PORT BIT_ULL(14) +#define NBL_FLOW_ACTION_SET_DST_PORT BIT_ULL(15) +#define NBL_FLOW_ACTION_SET_TTL BIT_ULL(16) +#define NBL_FLOW_ACTION_SET_IPV4_DSCP BIT_ULL(17) +#define NBL_FLOW_ACTION_SET_IPV6_DSCP BIT_ULL(18) +#define NBL_FLOW_ACTION_RSS BIT_ULL(19) +#define NBL_FLOW_ACTION_QUEUE BIT_ULL(20) +#define NBL_FLOW_ACTION_MARK BIT_ULL(21) +#define NBL_FLOW_ACTION_PUSH_INNER_VLAN BIT_ULL(22) +#define NBL_FLOW_ACTION_PUSH_OUTER_VLAN BIT_ULL(23) +#define NBL_FLOW_ACTION_POP_INNER_VLAN BIT_ULL(24) +#define NBL_FLOW_ACTION_POP_OUTER_VLAN BIT_ULL(25) +#define NBL_FLOW_ACTION_REPLACE_INNER_VLAN BIT_ULL(26) +#define NBL_FLOW_ACTION_REPLACE_SINGLE_INNER_VLAN BIT_ULL(27) +#define NBL_FLOW_ACTION_REPLACE_OUTER_VLAN BIT_ULL(28) +#define NBL_FLOW_ACTION_PHY_PORT BIT_ULL(29) +#define NBL_FLOW_ACTION_PORT_ID BIT_ULL(30) +#define NBL_FLOW_ACTION_INGRESS BIT_ULL(31) +#define NBL_FLOW_ACTION_EGRESS BIT_ULL(32) +#define NBL_FLOW_ACTION_IPV4 BIT_ULL(33) +#define NBL_FLOW_ACTION_IPV6 BIT_ULL(34) +#define NBL_FLOW_ACTION_CAR BIT_ULL(35) +#define NBL_FLOW_ACTION_MCC BIT_ULL(36) +#define NBL_FLOW_ACTION_MIRRED_ENCAP BIT_ULL(37) +#define NBL_FLOW_ACTION_META_RECIRC BIT_ULL(38) +#define NBL_FLOW_ACTION_STAT BIT_ULL(39) +#define NBL_ACTION_FLAG_OFFSET_MAX BIT_ULL(40) + +extern struct list_head lag_resource_head; +extern struct mutex nbl_lag_mutex; + +#define SET_DEV_MIN_MTU(netdev, mtu) ((netdev)->min_mtu = (mtu)) +#define SET_DEV_MAX_MTU(netdev, mtu) ((netdev)->max_mtu = (mtu)) + +#define NBL_USER_DEV_SHMMSGRING_SIZE (PAGE_SIZE) +#define NBL_USER_DEV_SHMMSGBUF_SIZE (NBL_USER_DEV_SHMMSGRING_SIZE - 8) + +/* Used for macros to pass checkpatch */ +#define NBL_NAME(x) x + +#define NBL_SET_INTR_COALESCE(param, tx_usecs, tx_max_frames, rx_usecs, rx_max_frames) \ +do { \ + typeof(param) __param = param; \ + __param->tx_coalesce_usecs = tx_usecs; \ + __param->tx_max_coalesced_frames = tx_max_frames; \ + __param->rx_coalesce_usecs = rx_usecs; \ + __param->rx_max_coalesced_frames = rx_max_frames; \ +} while (0) + +enum nbl_product_type { + NBL_LEONIS_TYPE, + NBL_BOOTIS_TYPE, + NBL_VIRTIO_TYPE, + NBL_PRODUCT_MAX, +}; + +enum nbl_flex_cap_type { + NBL_DUMP_FLOW_CAP, + NBL_DUMP_FD_CAP, + NBL_SECURITY_ACCEL_CAP, + NBL_FLEX_CAP_NBITS +}; + +enum nbl_fix_cap_type { + NBL_TASK_OFFLOAD_NETWORK_CAP, + NBL_TASK_FW_HB_CAP, + NBL_TASK_FW_RESET_CAP, + NBL_TASK_CLEAN_ADMINDQ_CAP, + 
NBL_TASK_CLEAN_MAILBOX_CAP, + NBL_TASK_IPSEC_AGE_CAP, + NBL_VIRTIO_CAP, + NBL_ETH_SUPPORT_NRZ_RS_FEC_544, + NBL_RESTOOL_CAP, + NBL_HWMON_TEMP_CAP, + NBL_ITR_DYNAMIC, + NBL_TASK_ADAPT_DESC_GOTHER, + NBL_P4_CAP, + NBL_PROCESS_FLR_CAP, + NBL_RECOVERY_ABNORMAL_STATUS, + NBL_TASK_KEEP_ALIVE, + NBL_PMD_DEBUG, + NBL_XDP_CAP, + NBL_TASK_RESET_CAP, + NBL_TASK_RESET_CTRL_CAP, + NBL_QOS_SYSFS_CAP, + NBL_FIX_CAP_NBITS +}; + +enum nbl_bootis_port_id { + NBL_PORT_ETH0 = 0, + NBL_PORT_ETH1, + NBL_PORT_MAX, +}; + +enum nbl_sfp_module_state { + NBL_SFP_MODULE_OFF, + NBL_SFP_MODULE_ON, +}; + +enum { + NBL_VSI_DATA = 0, /* default vsi in kernel or independent dpdk */ + NBL_VSI_CTRL, + NBL_VSI_USER, /* dpdk used vsi in coexist dpdk */ + NBL_VSI_XDP, + NBL_VSI_MAX, +}; + +enum { + NBL_P4_DEFAULT = 0, + NBL_P4_TYPE_MAX, +}; + +enum { + NBL_TX = 0, + NBL_RX, +}; + +enum nbl_hw_status { + NBL_HW_NOMAL, + NBL_HW_FATAL_ERR, /* Most HW modules (excluding PCIe/EMP) are not working normally */ + NBL_HW_STATUS_MAX, +}; + +enum nbl_reset_event { + NBL_HW_FATAL_ERR_EVENT, /* Most HW modules (excluding PCIe/EMP) are not working normally */ + NBL_HW_MAX_EVENT +}; + +/* ------ Params that go through multiple layers ------ */ +struct nbl_driver_info { +#define NBL_DRIVER_VERSION_LEN_MAX (32) + char driver_version[NBL_DRIVER_VERSION_LEN_MAX]; +}; + +struct nbl_func_caps { + u32 has_ctrl:1; + u32 has_net:1; + u32 is_vf:1; + u32 is_nic:1; + u32 is_blk:1; + u32 has_user:1; + u32 support_lag:1; + u32 has_grc:1; + u32 has_factory_ctrl:1; + u32 rsv:23; +}; + +struct nbl_init_param { + struct nbl_func_caps caps; + enum nbl_product_type product_type; + bool is_rep; + bool pci_using_dac; +}; + +struct nbl_txrx_queue_param { + u16 vsi_id; + u64 dma; + u64 avail; + u64 used; + u16 desc_num; + u16 local_queue_id; + u16 intr_en; + u16 intr_mask; + u16 global_vector_id; + u16 half_offload_en; + u16 split; + u16 extend_header; + u16 cxt; + u16 rxcsum; +}; + +struct nbl_tc_qidsc_info { + u16 count; + u16 offset; + u32 pad; + u64 max_tx_rate; +}; + +#define NBL_MAX_TC_NUM (8) +struct nbl_tc_qidsc_param { + struct nbl_tc_qidsc_info info[NBL_MAX_TC_NUM]; + bool enable; + u16 num_tc; + u16 origin_qps; + u16 vsi_id; + u8 gravity; +}; + +struct nbl_qid_map_table { + u32 local_qid; + u32 notify_addr_l; + u32 notify_addr_h; + u32 global_qid; + u32 ctrlq_flag; +}; + +struct nbl_qid_map_param { + struct nbl_qid_map_table *qid_map; + u16 start; + u16 len; +}; + +struct nbl_ecpu_qid_map_param { + u8 valid; + u16 table_id; + u16 max_qid; + u16 base_qid; + u16 device_type; + u64 notify_addr; +}; + +struct nbl_rss_alg_param { + u8 hash_field_type_v4; + u8 hash_field_type_v6; + u8 hash_field_mask_dport; + u8 hash_field_mask_sport; + u8 hash_field_mask_dip; + u8 hash_field_mask_sip; + u8 hash_alg_type; +}; + +struct nbl_vnet_queue_info_param { + u32 function_id; + u32 device_id; + u32 bus_id; + u32 msix_idx; + u32 msix_idx_valid; + u32 valid; +}; + +struct nbl_queue_cfg_param { + /* queue args */ + u64 desc; + u64 avail; + u64 used; + u16 size; + u16 extend_header; + u16 split; + u16 last_avail_idx; + u16 global_queue_id; + + /* interrupt args */ + u16 global_vector; + u16 intr_en; + u16 intr_mask; + + /* dvn args */ + u16 tx; + + /* uvn args */ + u16 rxcsum; + u16 half_offload_en; +}; + +struct nbl_msix_info_param { + u16 msix_num; + struct msix_entry *msix_entries; +}; + +struct nbl_queue_stats { + u64 packets; + u64 bytes; + u64 descs; +}; + +struct nbl_rep_stats { + u64 packets; + u64 bytes; + u64 dropped; +}; + +struct nbl_tx_queue_stats { + u64 tso_packets; + u64 tso_bytes; + u64 
tx_csum_packets; + u64 tx_busy; + u64 tx_dma_busy; + u64 tx_multicast_packets; + u64 tx_unicast_packets; + u64 tx_skb_free; + u64 tx_desc_addr_err_cnt; + u64 tx_desc_len_err_cnt; +#ifdef CONFIG_TLS_DEVICE + u64 tls_encrypted_packets; + u64 tls_encrypted_bytes; + u64 tls_ooo_packets; +#endif +}; + +struct nbl_rx_queue_stats { + u64 rx_csum_packets; + u64 rx_csum_errors; + u64 rx_multicast_packets; + u64 rx_unicast_packets; + u64 rx_desc_addr_err_cnt; + u64 rx_alloc_buf_err_cnt; + u64 rx_cache_reuse; + u64 rx_cache_full; + u64 rx_cache_empty; + u64 rx_cache_busy; + u64 rx_cache_waive; +#ifdef CONFIG_TLS_DEVICE + u64 tls_decrypted_packets; + u64 tls_resync_req_num; +#endif +}; + +struct nbl_stats { + /* for toe stats */ + u64 tso_packets; + u64 tso_bytes; + u64 tx_csum_packets; + u64 rx_csum_packets; + u64 rx_csum_errors; + u64 tx_busy; + u64 tx_dma_busy; + u64 tx_multicast_packets; + u64 tx_unicast_packets; +#ifdef CONFIG_TLS_DEVICE + u64 tls_encrypted_packets; + u64 tls_encrypted_bytes; + u64 tls_ooo_packets; + u64 tls_decrypted_packets; + u64 tls_resync_req_num; +#endif + u64 rx_multicast_packets; + u64 rx_unicast_packets; + u64 tx_skb_free; + u64 tx_desc_addr_err_cnt; + u64 tx_desc_len_err_cnt; + u64 rx_desc_addr_err_cnt; + u64 rx_alloc_buf_err_cnt; + u64 rx_cache_reuse; + u64 rx_cache_full; + u64 rx_cache_empty; + u64 rx_cache_busy; + u64 rx_cache_waive; + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; +}; + +struct nbl_priv_stats { + u64 total_dvn_pkt_drop_cnt; + u64 total_uvn_stat_pkt_drop; +}; + +struct nbl_notify_param { + u16 notify_qid; + u16 tail_ptr; +}; + +#define NBL_LAG_MAX_PORTS 2 +#define NBL_LAG_VALID_PORTS 2 +#define NBL_LAG_MAX_NUM 2 +#define NBL_LAG_MAX_RESOURCE_NUM NBL_DRIVER_DEV_MAX + +struct nbl_lag_member { + struct netdev_lag_lower_state_info lower_state; + struct notifier_block notify_block; + struct netdev_net_notifier netdevice_nn; + struct list_head mem_list_node; + struct net_device *netdev; + bool is_bond_adev; + u16 vsi_id; + u8 lag_id; + u8 eth_id; + u8 logic_eth_id; + u8 bonded; +}; + +struct nbl_enable_lag_param { + bool enable; + u16 pa_ext_type_tbl_id; + u16 flow_tbl_id; + u16 upcall_queue; +}; + +enum nbl_eth_speed { + LINK_SPEED_100M = 0, + LINK_SPEED_1000M = 1, + LINK_SPEED_5G = 2, + LINK_SPEEP_10G = 3, + LINK_SPEED_25G = 4, + LINK_SPEED_50G = 5, + LINK_SPEED_100G = 6, + LINK_SPEED_200G = 7 +}; + +#define NBL_KTLS_IV_LEN 8 +#define NBL_KTLS_REC_LEN 8 + +struct nbl_ktls_offload_context_tx { + u32 index; + u32 expected_tcp; + u8 iv[NBL_KTLS_IV_LEN]; + u8 rec_num[NBL_KTLS_REC_LEN]; + bool ctx_post_pending; + struct tls_offload_context_tx *tx_ctx; +}; + +struct nbl_ktls_offload_context_rx { + u32 index; + u32 tcp_seq; + u8 rec_num[NBL_KTLS_REC_LEN]; +}; + +struct aes_gcm_keymat { + u8 crypto_type; + u32 salt; + u32 icv_len; +#define NBL_IPSEC_KEY_LEN 8 + u32 aes_key[NBL_IPSEC_KEY_LEN]; + u64 seq_iv; +}; + +struct nbl_accel_esp_xfrm_attrs { + u8 is_ipv6; + u8 nat_flag; + u8 tunnel_mode; + u16 sport; + u16 dport; + u32 spi; + xfrm_address_t saddr; + xfrm_address_t daddr; + struct aes_gcm_keymat aes_gcm; +}; + +struct nbl_ipsec_esn_state { + u32 sn; + u32 esn; + u8 wrap_en : 1; + u8 overlap : 1; + u8 enable : 1; + u8 window_en : 1; + u8 option : 2; +}; + +struct nbl_sa_search_key { + u16 family; + u32 mark; + __be32 spi; + xfrm_address_t daddr; +}; + +struct nbl_ipsec_cfg_info { + struct nbl_sa_search_key sa_key; + bool vld; + + u32 lft_cnt; + u32 lft_diff; + u32 hard_round; + u32 soft_round; + u32 hard_remain; + u32 soft_remain; + + u16 
vsi; + u8 limit_type; + u8 limit_enable; + u64 hard_limit; + u64 soft_limit; +}; + +struct nbl_ipsec_sa_entry { + struct nbl_ipsec_cfg_info cfg_info; + struct nbl_ipsec_esn_state esn_state; + struct nbl_accel_esp_xfrm_attrs attrs; + u32 index; +}; + +union nbl_ipsec_lft_info { + u32 data; + struct { + u32 soft_sad_index : 11; + u32 soft_vld :1; + u32 rsv1 : 4; + u32 hard_sad_index : 11; + u32 hard_vld :1; + u32 rsv2 : 4; + }; +}; + +struct nbl_common_irq_num { + int mbx_irq_num; +}; + +struct nbl_ctrl_irq_num { + int adminq_irq_num; + int abnormal_irq_num; +}; + +#define NBL_PORT_KEY_ILLEGAL 0x0 +#define NBL_PORT_KEY_CAPABILITIES 0x1 +#define NBL_PORT_KEY_ENABLE 0x2 /* BIT(0): NBL_PORT_FLAG_ENABLE_NOTIFY */ +#define NBL_PORT_KEY_DISABLE 0x3 +#define NBL_PORT_KEY_ADVERT 0x4 +#define NBL_PORT_KEY_LOOPBACK 0x5 /* 0: disable eth loopback, 1: enable eth loopback */ +#define NBL_PORT_KEY_MODULE_SWITCH 0x6 /* 0: sfp off, 1: sfp on */ +#define NBL_PORT_KEY_MAC_ADDRESS 0x7 +#define NBL_PORT_KRY_LED_BLINK 0x8 +#define NBL_PORT_KEY_RESTORE_DEFAULTE_CFG 11 +#define NBL_PORT_KEY_SET_PFC_CFG 12 + +enum { + NBL_PORT_SUBOP_READ = 1, + NBL_PORT_SUBOP_WRITE = 2, +}; + +#define NBL_PORT_FLAG_ENABLE_NOTIFY BIT(0) +#define NBL_PORT_ENABLE_LOOPBACK 1 +#define NBL_PORT_DISABLE_LOOPBCK 0 +#define NBL_PORT_SFP_ON 1 +#define NBL_PORT_SFP_OFF 0 +#define NBL_PORT_KEY_KEY_SHIFT 56 +#define NBL_PORT_KEY_DATA_MASK 0xFFFFFFFFFFFF + +enum nbl_flow_ctrl { + NBL_PORT_TX_PAUSE = 0x1, + NBL_PORT_RX_PAUSE = 0x2, + NBL_PORT_TXRX_PAUSE_OFF = 0x4, /* used for ethtool, means ethtool close tx and rx pause */ +}; + +enum nbl_port_fec { + NBL_PORT_FEC_OFF = 1, + NBL_PORT_FEC_RS = 2, + NBL_PORT_FEC_BASER = 3, + NBL_PORT_FEC_AUTO = 4, /* ethtool may set Auto mode, used for PF mailbox msg*/ +}; + +enum nbl_port_autoneg { + NBL_PORT_AUTONEG_DISABLE = 0x1, + NBL_PORT_AUTONEG_ENABLE = 0x2, +}; + +enum nbl_port_type { + NBL_PORT_TYPE_UNKNOWN = 0, + NBL_PORT_TYPE_FIBRE, + NBL_PORT_TYPE_COPPER, +}; + +enum nbl_port_max_rate { + NBL_PORT_MAX_RATE_UNKNOWN = 0, + NBL_PORT_MAX_RATE_1G, + NBL_PORT_MAX_RATE_10G, + NBL_PORT_MAX_RATE_25G, + NBL_PORT_MAX_RATE_100G, + NBL_PORT_MAX_RATE_100G_PAM4, +}; + +enum nbl_port_mode { + NBL_PORT_NRZ_NORSFEC, + NBL_PORT_NRZ_544, + NBL_PORT_NRZ_528, + NBL_PORT_PAM4_544, + NBL_PORT_MODE_MAX, +}; + +enum nbl_led_reg_ctrl { + NBL_LED_REG_ACTIVE, + NBL_LED_REG_ON, + NBL_LED_REG_OFF, + NBL_LED_REG_INACTIVE, +}; + +#define NBL_PORT_CAP_AUTONEG_MASK (BIT(NBL_PORT_CAP_AUTONEG)) +#define NBL_PORT_CAP_FEC_MASK \ + (BIT(NBL_PORT_CAP_FEC_OFF) | BIT(NBL_PORT_CAP_FEC_RS) | BIT(NBL_PORT_CAP_FEC_BASER)) +#define NBL_PORT_CAP_PAUSE_MASK (BIT(NBL_PORT_CAP_TX_PAUSE) | BIT(NBL_PORT_CAP_RX_PAUSE)) +#define NBL_PORT_CAP_SPEED_1G_MASK\ + (BIT(NBL_PORT_CAP_1000BASE_T) | BIT(NBL_PORT_CAP_1000BASE_X)) +#define NBL_PORT_CAP_SPEED_10G_MASK\ + (BIT(NBL_PORT_CAP_10GBASE_T) | BIT(NBL_PORT_CAP_10GBASE_KR) | BIT(NBL_PORT_CAP_10GBASE_SR)) +#define NBL_PORT_CAP_SPEED_25G_MASK \ + (BIT(NBL_PORT_CAP_25GBASE_KR) | BIT(NBL_PORT_CAP_25GBASE_SR) |\ + BIT(NBL_PORT_CAP_25GBASE_CR) | BIT(NBL_PORT_CAP_25G_AUI)) +#define NBL_PORT_CAP_SPEED_50G_MASK \ + (BIT(NBL_PORT_CAP_50GBASE_KR2) | BIT(NBL_PORT_CAP_50GBASE_SR2) |\ + BIT(NBL_PORT_CAP_50GBASE_CR2) | BIT(NBL_PORT_CAP_50G_AUI2) |\ + BIT(NBL_PORT_CAP_50GBASE_KR_PAM4) | BIT(NBL_PORT_CAP_50GBASE_SR_PAM4) |\ + BIT(NBL_PORT_CAP_50GBASE_CR_PAM4) | BIT(NBL_PORT_CAP_50G_AUI_PAM4)) +#define NBL_PORT_CAP_SPEED_100G_MASK \ + (BIT(NBL_PORT_CAP_100GBASE_KR4) | BIT(NBL_PORT_CAP_100GBASE_SR4) |\ + 
BIT(NBL_PORT_CAP_100GBASE_CR4) | BIT(NBL_PORT_CAP_100G_AUI4) |\ + BIT(NBL_PORT_CAP_100G_CAUI4) | BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4) |\ + BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4) | BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4) |\ + BIT(NBL_PORT_CAP_100G_AUI2_PAM4)) +#define NBL_PORT_CAP_SPEED_MASK \ + (NBL_PORT_CAP_SPEED_1G_MASK | NBL_PORT_CAP_SPEED_10G_MASK |\ + NBL_PORT_CAP_SPEED_25G_MASK | NBL_PORT_CAP_SPEED_50G_MASK |\ + NBL_PORT_CAP_SPEED_100G_MASK) +#define NBL_PORT_CAP_PAM4_MASK\ + (BIT(NBL_PORT_CAP_50GBASE_KR_PAM4) | BIT(NBL_PORT_CAP_50GBASE_SR_PAM4) |\ + BIT(NBL_PORT_CAP_50GBASE_CR_PAM4) | BIT(NBL_PORT_CAP_50G_AUI_PAM4) |\ + BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4) | BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4) |\ + BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4) | BIT(NBL_PORT_CAP_100G_AUI2_PAM4)) +#define NBL_ETH_1G_DEFAULT_FEC_MODE NBL_PORT_FEC_OFF +#define NBL_ETH_10G_DEFAULT_FEC_MODE NBL_PORT_FEC_OFF +#define NBL_ETH_25G_DEFAULT_FEC_MODE NBL_PORT_FEC_RS +#define NBL_ETH_100G_DEFAULT_FEC_MODE NBL_PORT_FEC_RS + +enum nbl_port_cap { + NBL_PORT_CAP_TX_PAUSE, + NBL_PORT_CAP_RX_PAUSE, + NBL_PORT_CAP_AUTONEG, + NBL_PORT_CAP_FEC_NONE, + NBL_PORT_CAP_FEC_OFF = NBL_PORT_CAP_FEC_NONE, + NBL_PORT_CAP_FEC_RS, + NBL_PORT_CAP_FEC_BASER, + NBL_PORT_CAP_1000BASE_T, + NBL_PORT_CAP_1000BASE_X, + NBL_PORT_CAP_10GBASE_T, + NBL_PORT_CAP_10GBASE_KR, + NBL_PORT_CAP_10GBASE_SR, + NBL_PORT_CAP_25GBASE_KR, + NBL_PORT_CAP_25GBASE_SR, + NBL_PORT_CAP_25GBASE_CR, + NBL_PORT_CAP_25G_AUI, + NBL_PORT_CAP_50GBASE_KR2, + NBL_PORT_CAP_50GBASE_SR2, + NBL_PORT_CAP_50GBASE_CR2, + NBL_PORT_CAP_50G_AUI2, + NBL_PORT_CAP_50GBASE_KR_PAM4, + NBL_PORT_CAP_50GBASE_SR_PAM4, + NBL_PORT_CAP_50GBASE_CR_PAM4, + NBL_PORT_CAP_50G_AUI_PAM4, + NBL_PORT_CAP_100GBASE_KR4, + NBL_PORT_CAP_100GBASE_SR4, + NBL_PORT_CAP_100GBASE_CR4, + NBL_PORT_CAP_100G_AUI4, + NBL_PORT_CAP_100G_CAUI4, + NBL_PORT_CAP_100GBASE_KR2_PAM4, + NBL_PORT_CAP_100GBASE_SR2_PAM4, + NBL_PORT_CAP_100GBASE_CR2_PAM4, + NBL_PORT_CAP_100G_AUI2_PAM4, + NBL_PORT_CAP_FEC_AUTONEG, + NBL_PORT_CAP_MAX +}; + +enum nbl_fw_port_speed { + NBL_FW_PORT_SPEED_10G, + NBL_FW_PORT_SPEED_25G, + NBL_FW_PORT_SPEED_50G, + NBL_FW_PORT_SPEED_100G, +}; + +#define PASSTHROUGH_FW_CMD_DATA_LEN (3072) +struct nbl_passthrough_fw_cmd_param { + u16 opcode; + u16 errcode; + u16 in_size; + u16 out_size; + u8 data[PASSTHROUGH_FW_CMD_DATA_LEN]; +}; + +#define NBL_NET_RING_NUM_CMD_LEN (520) +struct nbl_fw_cmd_net_ring_num_param { + u16 pf_def_max_net_qp_num; + u16 vf_def_max_net_qp_num; + u16 net_max_qp_num[NBL_NET_RING_NUM_CMD_LEN]; +}; + +#define NBL_RDMA_CAP_CMD_LEN (65) +struct nbl_fw_cmd_rdma_cap_param { + u32 valid; + u8 rdma_func_bitmaps[NBL_RDMA_CAP_CMD_LEN]; + u8 rsv[7]; +}; + +#define NBL_RDMA_MEM_TYPE_MAX (2) +struct nbl_fw_cmd_rdma_mem_type_param { + u32 mem_type; +}; + +#define NBL_VF_NUM_CMD_LEN (8) +struct nbl_fw_cmd_vf_num_param { + u32 valid; + u16 vf_max_num[NBL_VF_NUM_CMD_LEN]; +}; + +#define NBL_ST_INFO_NAME_LEN (64) +#define NBL_ST_INFO_NETDEV_MAX (8) +#define NBL_ST_INFO_RESERVED_LEN (376) +struct nbl_st_info_param { + u8 version; + u8 bus; + u8 devid; + u8 function; + u16 domain; + u16 rsv0; + char driver_name[NBL_ST_INFO_NAME_LEN]; + char driver_ver[NBL_ST_INFO_NAME_LEN]; + char netdev_name[NBL_ST_INFO_NETDEV_MAX][NBL_ST_INFO_NAME_LEN]; + u8 rsv[NBL_ST_INFO_RESERVED_LEN]; +} __packed; + +static inline u64 nbl_speed_to_link_mode(unsigned int speed, u8 autoneg) +{ + u64 link_mode = 0; + int speed_support = 0; + + switch (speed) { + case SPEED_100000: + link_mode |= BIT(NBL_PORT_CAP_100GBASE_KR4) | 
BIT(NBL_PORT_CAP_100GBASE_SR4) | + BIT(NBL_PORT_CAP_100GBASE_CR4) | BIT(NBL_PORT_CAP_100G_AUI4) | + BIT(NBL_PORT_CAP_100G_CAUI4) | BIT(NBL_PORT_CAP_100GBASE_KR2_PAM4) | + BIT(NBL_PORT_CAP_100GBASE_SR2_PAM4) | BIT(NBL_PORT_CAP_100GBASE_CR2_PAM4) | + BIT(NBL_PORT_CAP_100G_AUI2_PAM4); + fallthrough; + case SPEED_50000: + link_mode |= BIT(NBL_PORT_CAP_50GBASE_KR2) | BIT(NBL_PORT_CAP_50GBASE_SR2) | + BIT(NBL_PORT_CAP_50GBASE_CR2) | BIT(NBL_PORT_CAP_50G_AUI2) | + BIT(NBL_PORT_CAP_50GBASE_KR_PAM4) | BIT(NBL_PORT_CAP_50GBASE_SR_PAM4) | + BIT(NBL_PORT_CAP_50GBASE_CR_PAM4) | BIT(NBL_PORT_CAP_50G_AUI_PAM4); + fallthrough; + case SPEED_25000: + link_mode |= BIT(NBL_PORT_CAP_25GBASE_KR) | BIT(NBL_PORT_CAP_25GBASE_SR) | + BIT(NBL_PORT_CAP_25GBASE_CR) | BIT(NBL_PORT_CAP_25G_AUI); + fallthrough; + case SPEED_10000: + link_mode |= BIT(NBL_PORT_CAP_10GBASE_T) | BIT(NBL_PORT_CAP_10GBASE_KR) | + BIT(NBL_PORT_CAP_10GBASE_SR); + fallthrough; + case SPEED_1000: + link_mode |= BIT(NBL_PORT_CAP_1000BASE_T) | BIT(NBL_PORT_CAP_1000BASE_X); + speed_support = 1; + } + + if (autoneg && speed_support) + link_mode |= BIT(NBL_PORT_CAP_AUTONEG); + + return link_mode; +} + +#define NBL_DEFINE_NAME_WITH_WIDTH_CHECK(_struct, _size) \ +_struct; \ +static inline int nbl_##_struct##_size_is_not_equal_to_define(void) \ +{ \ + int check[((sizeof(_struct) * 8) == (_size)) ? 1 : -1]; \ + return check[0]; \ +} + +#define nbl_list_entry_is_head(pos, head, member) \ + (&pos->member == (head)) + +/** + * nbl_list_is_first - tests whether @list is the first entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int nbl_list_is_first(const struct list_head *list, + const struct list_head *head) +{ + return list->prev == head; +} + +/** + * nbl_list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int nbl_list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * nbl_list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int nbl_list_empty(const struct list_head *head) +{ + return READ_ONCE(head->next) == head; +} + +/** + * nbl_read_poll_timeout - Periodically poll an address until a condition is + * met or a timeout occurs + * @op: accessor function (takes @args as its arguments) + * @val: Variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.rst). + * @timeout_us: Timeout in us, 0 means never timeout + * @sleep_before_read: if it is true, sleep @sleep_us before read. + * @args: arguments for @op poll + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @args is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. + * + * When available, you'll probably want to use one of the specialized + * macros defined below rather than this macro directly. + */ +#define nbl_read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ + sleep_before_read, args...) 
\ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __sleep_us = (sleep_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + might_sleep_if((__sleep_us) != 0); \ + if (sleep_before_read && __sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + for (;;) { \ + (val) = op(args); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(args); \ + break; \ + } \ + if (__sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + +#define NBL_OPS_CALL(func, para) \ + ({ typeof(func) _func = (func); \ + (!_func) ? 0 : _func para; }) + +enum { + NBL_TC_PORT_TYPE_INVALID = 0, + NBL_TC_PORT_TYPE_VSI, + NBL_TC_PORT_TYPE_ETH, + NBL_TC_PORT_TYPE_BOND, +}; + +struct nbl_tc_port { + u32 id; + u8 type; +}; + +enum nbl_cmd_status { + NBL_CMDQ_SUCCESS = 0, + /* failed establishing cmd */ + NBL_CMDQ_PARAM_ERR = -1, + NBL_CMDQ_NOT_SUPP = -3, + NBL_CMDQ_NO_MEMORY = -4, + NBL_CMDQ_NOT_READY = -5, + NBL_CMDQ_UNDONE = -6, + /* failed sending cmd */ + NBL_CMDQ_CQ_ERR = -100, + NBL_CMDQ_CQ_FULL = -102, + NBL_CMDQ_CQ_NOT_READY = -103, + NBL_CMDQ_CQ_ERR_PARAMS = -104, + NBL_CMDQ_CQ_ERR_BUFFER = -105, + /* failed executing cmd */ + NBL_CMDQ_FAILED = -200, + NBL_CMDQ_NOBUF_ERR = -201, + NBL_CMDQ_TIMEOUT_ERR = -202, + NBL_CMDQ_NOHIT_ERR = -203, + NBL_CMDQ_RESEND_FAIL = -204, + NBL_CMDQ_RESET_FAIL = -205, + NBL_CMDQ_NEED_RESEND = -206, + NBL_CMDQ_NEED_RESET = -207, +}; + +struct nbl_fdir_l2 { + u8 dst_mac[ETH_ALEN]; /* dest MAC address */ + u8 src_mac[ETH_ALEN]; /* src MAC address */ + u16 ether_type; /* for NON_IP_L2 */ +}; + +struct nbl_fdir_l4 { + u16 dst_port; + u16 src_port; + u8 tcp_flag; +}; + +struct nbl_fdir_l3 { + union { + u32 addr; + u8 v6_addr[NBL_IPV6_ADDR_LEN_AS_U8]; + } src_ip, dst_ip; + + u8 ip_ver; + u8 tos; + u8 ttl; + u8 proto; +}; + +struct nbl_tc_fdir_tnl { + u32 flags; + u32 vni; +}; + +struct nbl_port_mcc { + u16 dport_id; + u8 port_type; +}; + +#define NBL_VLAN_TYPE_ETH_BASE 1027 +#define NBL_VLAN_TPID_VALUE 0x8100 +#define NBL_QINQ_TPID_VALUE 0x88A8 +struct nbl_vlan { + u16 vlan_tag; + u16 eth_proto; + u32 port_id; + u8 port_type; +}; + +/* encap info */ +#define NBL_FLOW_ACTION_ENCAP_TOTAL_LEN 128 +#define NBL_FLOW_ACTION_ENCAP_OFFSET_LEN 9 +#define NBL_FLOW_ACTION_ENCAP_HALF_LEN 45 +#define NBL_FLOW_ACTION_ENCAP_MAX_LEN 90 + +struct nbl_encap_key { + struct ip_tunnel_key ip_tun_key; + void *tc_tunnel; +}; + +struct nbl_encap_entry { + struct nbl_encap_key key; + unsigned char hw_dst[ETH_ALEN]; + + struct net_device *out_dev; + u8 encap_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN]; + u16 encap_size; + u16 encap_idx; + u32 vni; + u32 ref_cnt; +}; + +union nbl_flow_encap_offset_tbl_u { + struct nbl_flow_encap_offset_tbl { + u16 phid3_offset:7; + u16 phid2_offset:7; + u16 l4_ck_mod:3; + u16 l3_ck_en:1; + u16 len_offset1:7; + u16 len_en1:1; + u16 len_offset0:7; + u16 len_en0:1; + u16 dscp_offset:10; + u16 vlan_offset:7; + u16 vni_offset:7; + u16 sport_offset:7; + u16 tnl_len:7; + } __packed info; +#define NBL_FLOW_ENCAP_OFFSET_TBL_WIDTH (sizeof(struct nbl_flow_encap_offset_tbl) \ + / sizeof(u32)) + u32 data[NBL_FLOW_ENCAP_OFFSET_TBL_WIDTH]; +} __packed; + +struct nbl_rule_action { + u64 flag; /* action flag, eg:set ipv4 src/redirect */ + u32 drop_flag; /* drop or forward */ + u32 counter_id; + u32 port_id; + u8 port_type; + u8 action_cnt; /* different action type total cnt */ + + u8 next_stg_sel; + + u8 dscp; /* set dscp */ + /* set ops */ + struct nbl_fdir_l4 
+
+enum {
+	NBL_TC_PORT_TYPE_INVALID = 0,
+	NBL_TC_PORT_TYPE_VSI,
+	NBL_TC_PORT_TYPE_ETH,
+	NBL_TC_PORT_TYPE_BOND,
+};
+
+struct nbl_tc_port {
+	u32 id;
+	u8 type;
+};
+
+enum nbl_cmd_status {
+	NBL_CMDQ_SUCCESS = 0,
+	/* failed establishing cmd */
+	NBL_CMDQ_PARAM_ERR = -1,
+	NBL_CMDQ_NOT_SUPP = -3,
+	NBL_CMDQ_NO_MEMORY = -4,
+	NBL_CMDQ_NOT_READY = -5,
+	NBL_CMDQ_UNDONE = -6,
+	/* failed sending cmd */
+	NBL_CMDQ_CQ_ERR = -100,
+	NBL_CMDQ_CQ_FULL = -102,
+	NBL_CMDQ_CQ_NOT_READY = -103,
+	NBL_CMDQ_CQ_ERR_PARAMS = -104,
+	NBL_CMDQ_CQ_ERR_BUFFER = -105,
+	/* failed executing cmd */
+	NBL_CMDQ_FAILED = -200,
+	NBL_CMDQ_NOBUF_ERR = -201,
+	NBL_CMDQ_TIMEOUT_ERR = -202,
+	NBL_CMDQ_NOHIT_ERR = -203,
+	NBL_CMDQ_RESEND_FAIL = -204,
+	NBL_CMDQ_RESET_FAIL = -205,
+	NBL_CMDQ_NEED_RESEND = -206,
+	NBL_CMDQ_NEED_RESET = -207,
+};
+
+struct nbl_fdir_l2 {
+	u8 dst_mac[ETH_ALEN];	/* dest MAC address */
+	u8 src_mac[ETH_ALEN];	/* src MAC address */
+	u16 ether_type;		/* for NON_IP_L2 */
+};
+
+struct nbl_fdir_l4 {
+	u16 dst_port;
+	u16 src_port;
+	u8 tcp_flag;
+};
+
+struct nbl_fdir_l3 {
+	union {
+		u32 addr;
+		u8 v6_addr[NBL_IPV6_ADDR_LEN_AS_U8];
+	} src_ip, dst_ip;
+
+	u8 ip_ver;
+	u8 tos;
+	u8 ttl;
+	u8 proto;
+};
+
+struct nbl_tc_fdir_tnl {
+	u32 flags;
+	u32 vni;
+};
+
+struct nbl_port_mcc {
+	u16 dport_id;
+	u8 port_type;
+};
+
+#define NBL_VLAN_TYPE_ETH_BASE 1027
+#define NBL_VLAN_TPID_VALUE 0x8100
+#define NBL_QINQ_TPID_VALUE 0x88A8
+struct nbl_vlan {
+	u16 vlan_tag;
+	u16 eth_proto;
+	u32 port_id;
+	u8 port_type;
+};
+
+/* encap info */
+#define NBL_FLOW_ACTION_ENCAP_TOTAL_LEN 128
+#define NBL_FLOW_ACTION_ENCAP_OFFSET_LEN 9
+#define NBL_FLOW_ACTION_ENCAP_HALF_LEN 45
+#define NBL_FLOW_ACTION_ENCAP_MAX_LEN 90
+
+struct nbl_encap_key {
+	struct ip_tunnel_key ip_tun_key;
+	void *tc_tunnel;
+};
+
+struct nbl_encap_entry {
+	struct nbl_encap_key key;
+	unsigned char hw_dst[ETH_ALEN];
+
+	struct net_device *out_dev;
+	u8 encap_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN];
+	u16 encap_size;
+	u16 encap_idx;
+	u32 vni;
+	u32 ref_cnt;
+};
+
+union nbl_flow_encap_offset_tbl_u {
+	struct nbl_flow_encap_offset_tbl {
+		u16 phid3_offset:7;
+		u16 phid2_offset:7;
+		u16 l4_ck_mod:3;
+		u16 l3_ck_en:1;
+		u16 len_offset1:7;
+		u16 len_en1:1;
+		u16 len_offset0:7;
+		u16 len_en0:1;
+		u16 dscp_offset:10;
+		u16 vlan_offset:7;
+		u16 vni_offset:7;
+		u16 sport_offset:7;
+		u16 tnl_len:7;
+	} __packed info;
+#define NBL_FLOW_ENCAP_OFFSET_TBL_WIDTH (sizeof(struct nbl_flow_encap_offset_tbl) \
+					 / sizeof(u32))
+	u32 data[NBL_FLOW_ENCAP_OFFSET_TBL_WIDTH];
+} __packed;
+
+struct nbl_rule_action {
+	u64 flag;	/* action flag, e.g. set ipv4 src/redirect */
+	u32 drop_flag;	/* drop or forward */
+	u32 counter_id;
+	u32 port_id;
+	u8 port_type;
+	u8 action_cnt;	/* different action type total cnt */
+
+	u8 next_stg_sel;
+
+	u8 dscp;	/* set dscp */
+	/* set ops */
+	struct nbl_fdir_l4 l4_outer;
+	struct nbl_fdir_l2 l2_data_outer;
+	struct nbl_fdir_l3 ip_outer;
+	u8 lag_id;
+	struct nbl_port_mcc port_mcc[NBL_TC_MCC_MEMBER_MAX];
+	u16 mcc_cnt;
+	struct nbl_vlan vlan;
+	struct ip_tunnel_info *tunnel;
+	struct nbl_encap_key encap_key;
+	union nbl_flow_encap_offset_tbl_u encap_idx_info;
+	u32 vni;
+	u8 encap_buf[NBL_FLOW_ACTION_ENCAP_TOTAL_LEN];
+	u16 encap_size;
+	u16 encap_idx;
+	bool encap_parse_ok;
+	struct net_device *in_port;
+	struct net_device *tc_tun_encap_out_dev;
+};
+
+struct nbl_fdir_fltr {
+	struct nbl_fdir_l2 l2_data_outer;
+	struct nbl_fdir_l2 l2_mask_outer;
+	struct nbl_fdir_l2 l2_data;
+	struct nbl_fdir_l2 l2_mask;
+
+	struct nbl_fdir_l3 ip;
+	struct nbl_fdir_l3 ip_mask;
+	struct nbl_fdir_l3 ip_outer;
+	struct nbl_fdir_l3 ip_mask_outer;
+
+	struct nbl_fdir_l4 l4;
+	struct nbl_fdir_l4 l4_mask;
+	struct nbl_fdir_l4 l4_outer;
+	struct nbl_fdir_l4 l4_mask_outer;
+
+	struct nbl_tc_fdir_tnl tnl;
+	struct nbl_tc_fdir_tnl tnl_mask;
+
+	u16 svlan_type;
+	u16 svlan_tag;
+	u16 cvlan_type;
+	u16 cvlan_tag;
+	u16 svlan_mask;
+	u16 cvlan_mask;
+	u32 tnl_flag:1;
+	u32 tnl_cnt:1;
+	u32 vlan_cnt:2;
+	u32 metadata:16;
+	u32 acl_flow:1;
+	u32 dir:1;
+	u32 rsv:1;
+
+	u8 lag_id;
+	u16 port;
+	bool is_cvlan;
+};
+
+/**
+ * struct nbl_flow_pattern_conf - flow key info parsed from a TC pattern
+ * @input: stores the key info extracted from the pattern
+ * @input_set: stores the key flags used to derive the ptype
+ */
+struct nbl_flow_pattern_conf {
+	struct nbl_fdir_fltr input;
+	struct net_device *input_dev;
+	u8 flow_send;
+	u8 graph_idx;
+	u16 pp_flag;
+	u64 input_set;
+	u64 key_flag;
+};
+
+struct nbl_flow_index_key {
+	union {
+		u64 cookie;
+		u8 data[NBL_FLOW_INDEX_BYTE_LEN];
+	};
+};
+
+struct nbl_tc_flow_param {
+	struct nbl_tc_port in;
+	struct nbl_tc_port out;
+	struct nbl_tc_port mirror_out;
+	struct nbl_flow_pattern_conf filter;
+	struct nbl_rule_action act;
+	struct nbl_flow_index_key key;
+	struct ip_tunnel_info *tunnel;
+	bool encap;
+	struct nbl_common_info *common;
+	struct nbl_service_mgt *serv_mgt;
+};
+
+struct nbl_stats_param {
+	struct flow_cls_offload *f;
+};
+
+enum nbl_hwmon_type {
+	NBL_HWMON_TEMP_INPUT,
+	NBL_HWMON_TEMP_MAX,
+	NBL_HWMON_TEMP_CRIT,
+	NBL_HWMON_TEMP_HIGHEST,
+	NBL_HWMON_TEMP_TYPE_MAX,
+};
+
+struct nbl_load_p4_param {
+#define NBL_P4_SECTION_NAME_LEN 32
+	u8 name[NBL_P4_SECTION_NAME_LEN];
+	u32 addr;
+	u32 size;
+	u16 section_index;
+	u16 section_offset;
+	u8 *data;
+	bool start;
+	bool end;
+};
+
+#define NBL_ACL_TCAM_KEY_LEN 5
+#define NBL_ACL_TCAM_KEY_MAX 16
+
+struct nbl_acl_tcam_key_param {
+	u8 data[NBL_ACL_TCAM_KEY_LEN];
+} __packed;
+
+struct nbl_acl_tcam_param {
+	union nbl_acl_tcam_info {
+		struct nbl_acl_tcam_key_param key[NBL_ACL_TCAM_KEY_MAX];
+		u8 data[NBL_ACL_TCAM_KEY_LEN * NBL_ACL_TCAM_KEY_MAX];
+	} info;
+	u8 len;
+};
+
+enum {
+	NBL_NETIF_F_SG_BIT,			/* Scatter/gather IO. */
+	NBL_NETIF_F_IP_CSUM_BIT,		/* Can checksum TCP/UDP over IPv4. */
+	NBL_NETIF_F_HW_CSUM_BIT,		/* Can checksum all the packets. */
+	NBL_NETIF_F_IPV6_CSUM_BIT,		/* Can checksum TCP/UDP over IPV6 */
+	NBL_NETIF_F_HIGHDMA_BIT,		/* Can DMA to high memory. */
+	NBL_NETIF_F_HW_VLAN_CTAG_TX_BIT,	/* Transmit VLAN CTAG HW acceleration */
+	NBL_NETIF_F_HW_VLAN_CTAG_RX_BIT,	/* Receive VLAN CTAG HW acceleration */
+	NBL_NETIF_F_HW_VLAN_CTAG_FILTER_BIT,	/* Receive filtering on VLAN CTAGs */
+	NBL_NETIF_F_TSO_BIT,			/* ... TCPv4 segmentation */
+	NBL_NETIF_F_GSO_ROBUST_BIT,		/* ... ->SKB_GSO_DODGY */
+	NBL_NETIF_F_TSO6_BIT,			/* ... TCPv6 segmentation */
+	NBL_NETIF_F_GSO_GRE_BIT,		/* ... GRE with TSO */
+	NBL_NETIF_F_GSO_GRE_CSUM_BIT,		/* ... GRE with csum with TSO */
+	NBL_NETIF_F_GSO_UDP_TUNNEL_BIT,		/* ... UDP TUNNEL with TSO */
+	NBL_NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,	/* ... UDP TUNNEL with TSO & CSUM */
+	NBL_NETIF_F_GSO_PARTIAL_BIT,		/* ... Only segment inner-most L4
+						 * in hardware and all other
+						 * headers in software.
+						 */
+	NBL_NETIF_F_GSO_UDP_L4_BIT,		/* ... UDP payload GSO (not UFO) */
+	NBL_NETIF_F_SCTP_CRC_BIT,		/* SCTP checksum offload */
+	NBL_NETIF_F_NTUPLE_BIT,			/* N-tuple filters supported */
+	NBL_NETIF_F_RXHASH_BIT,			/* Receive hashing offload */
+	NBL_NETIF_F_RXCSUM_BIT,			/* Receive checksumming offload */
+	NBL_NETIF_F_HW_VLAN_STAG_TX_BIT,	/* Transmit VLAN STAG HW acceleration */
+	NBL_NETIF_F_HW_VLAN_STAG_RX_BIT,	/* Receive VLAN STAG HW acceleration */
+	NBL_NETIF_F_HW_VLAN_STAG_FILTER_BIT,	/* Receive filtering on VLAN STAGs */
+	NBL_NETIF_F_HW_TC_BIT,			/* Offload TC infrastructure */
+	NBL_FEATURES_COUNT
+};
+
+static const netdev_features_t nbl_netdev_features[] = {
+	[NBL_NETIF_F_SG_BIT] = NETIF_F_SG,
+	[NBL_NETIF_F_IP_CSUM_BIT] = NETIF_F_IP_CSUM,
+	[NBL_NETIF_F_IPV6_CSUM_BIT] = NETIF_F_IPV6_CSUM,
+	[NBL_NETIF_F_HIGHDMA_BIT] = NETIF_F_HIGHDMA,
+	[NBL_NETIF_F_HW_VLAN_CTAG_TX_BIT] = NETIF_F_HW_VLAN_CTAG_TX,
+	[NBL_NETIF_F_HW_VLAN_CTAG_RX_BIT] = NETIF_F_HW_VLAN_CTAG_RX,
+	[NBL_NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = NETIF_F_HW_VLAN_CTAG_FILTER,
+	[NBL_NETIF_F_TSO_BIT] = NETIF_F_TSO,
+	[NBL_NETIF_F_GSO_ROBUST_BIT] = NETIF_F_GSO_ROBUST,
+	[NBL_NETIF_F_TSO6_BIT] = NETIF_F_TSO6,
+	[NBL_NETIF_F_GSO_GRE_BIT] = NETIF_F_GSO_GRE,
+	[NBL_NETIF_F_GSO_GRE_CSUM_BIT] = NETIF_F_GSO_GRE_CSUM,
+	[NBL_NETIF_F_GSO_UDP_TUNNEL_BIT] = NETIF_F_GSO_UDP_TUNNEL,
+	[NBL_NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = NETIF_F_GSO_UDP_TUNNEL_CSUM,
+	[NBL_NETIF_F_GSO_PARTIAL_BIT] = NETIF_F_GSO_PARTIAL,
+	[NBL_NETIF_F_GSO_UDP_L4_BIT] = NETIF_F_GSO_UDP_L4,
+	[NBL_NETIF_F_SCTP_CRC_BIT] = NETIF_F_SCTP_CRC,
+	[NBL_NETIF_F_NTUPLE_BIT] = NETIF_F_NTUPLE,
+	[NBL_NETIF_F_RXHASH_BIT] = NETIF_F_RXHASH,
+	[NBL_NETIF_F_RXCSUM_BIT] = NETIF_F_RXCSUM,
+	[NBL_NETIF_F_HW_VLAN_STAG_TX_BIT] = NETIF_F_HW_VLAN_STAG_TX,
+	[NBL_NETIF_F_HW_VLAN_STAG_RX_BIT] = NETIF_F_HW_VLAN_STAG_RX,
+	[NBL_NETIF_F_HW_VLAN_STAG_FILTER_BIT] = NETIF_F_HW_VLAN_STAG_FILTER,
+	[NBL_NETIF_F_HW_TC_BIT] = NETIF_F_HW_TC,
+};
+
+#define NBL_FEATURE(name) (1 << (NBL_##name##_BIT))
+#define NBL_FEATURE_TEST_BIT(val, loc) (((val) >> (loc)) & 0x1)
+
+static inline netdev_features_t nbl_features_to_netdev_features(u64 features)
+{
+	netdev_features_t netdev_features = 0;
+	int i = 0;
+
+	for (i = 0; i < NBL_FEATURES_COUNT; i++) {
+		if (NBL_FEATURE_TEST_BIT(features, i))
+			netdev_features |= nbl_netdev_features[i];
+	}
+
+	return netdev_features;
+}
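+
+/*
+ * Sketch of how the feature bitmap above is consumed (the capability source
+ * is hypothetical): the device reports its offloads as a plain u64 of
+ * NBL_NETIF_F_*_BIT flags, which is translated into the kernel's
+ * netdev_features_t before netdev registration.
+ *
+ *	u64 caps = NBL_FEATURE(NETIF_F_SG) | NBL_FEATURE(NETIF_F_TSO) |
+ *		   NBL_FEATURE(NETIF_F_RXCSUM);
+ *
+ *	netdev->hw_features |= nbl_features_to_netdev_features(caps);
+ */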
+
+enum nbl_abnormal_event_module {
+	NBL_ABNORMAL_EVENT_DVN = 0,
+	NBL_ABNORMAL_EVENT_UVN,
+	NBL_ABNORMAL_EVENT_MAX,
+};
+
+struct nbl_abnormal_details {
+	bool abnormal;
+	u16 qid;
+	u16 vsi_id;
+};
+
+struct nbl_abnormal_event_info {
+	struct nbl_abnormal_details details[NBL_ABNORMAL_EVENT_MAX];
+	u32 other_abnormal_info;
+};
+
+enum nbl_performance_mode {
+	NBL_QUIRKS_NO_TOE,
+	NBL_QUIRKS_UVN_PREFETCH_ALIGN,
+};
+
+extern int performance_mode;
+
+struct nbl_vsi_param {
+	u16 vsi_id;
+	u16 queue_offset;
+	u16 queue_num;
+	u8 index;
+};
+
+struct nbl_ring_param {
+	u16 tx_ring_num;
+	u16 rx_ring_num;
+	u16 xdp_ring_offset;	/* the XDP vsi queues share vectors with the data vsi queues */
+	u16 queue_size;
+};
+
+enum nbl_trust_mode {
+	NBL_TRUST_MODE_8021P,
+	NBL_TRUST_MODE_DSCP
+};
+
+#define NBL_MAX_PFC_PRIORITIES 8
+#define NBL_DSCP_MAX 64
+
+extern int loongarch_low_version;
+#define NBL_LOONGSON64_VF_MAX_QUEUE_NUM 2
+#define NBL_LOONGSON64_MAX_QUEUE_NUM 8
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c9a4ec160230dd083b935d70749933a0d984095
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_include/nbl_product_base.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#ifndef _NBL_DEF_PRODUCT_BASE_H_
+#define _NBL_DEF_PRODUCT_BASE_H_
+
+#include "nbl_include.h"
+
+struct nbl_product_base_ops {
+	int (*phy_init)(void *p, struct nbl_init_param *param);
+	void (*phy_remove)(void *p);
+	int (*res_init)(void *p, struct nbl_init_param *param);
+	void (*res_remove)(void *p);
+	int (*chan_init)(void *p, struct nbl_init_param *param);
+	void (*chan_remove)(void *p);
+};
+
+#endif
diff --git a/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c
new file mode 100644
index 0000000000000000000000000000000000000000..20a719e44e02fec6e43ffc48eeeea92fb2b49ed3
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/nbl/nbl_main.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Bennie Yan
+ */
+
+#include
+#include "nbl_core.h"
+
+static struct nbl_software_tool_table nbl_st_table;
+static struct dentry *nbl_debugfs_root;
+
+/* global cmdq and tc flow related structures */
+static struct nbl_tc_insts_info g_tc_insts[NBL_TC_FLOW_INST_COUNT] = { { 0 } };
+
+static struct nbl_product_base_ops nbl_product_base_ops[NBL_PRODUCT_MAX] = {
+	{
+		.phy_init = nbl_phy_init_leonis,
+		.phy_remove = nbl_phy_remove_leonis,
+		.res_init = nbl_res_init_leonis,
+		.res_remove = nbl_res_remove_leonis,
+		.chan_init = nbl_chan_init_common,
+		.chan_remove = nbl_chan_remove_common,
+	},
+};
+
+static char *nblst_cdevnode(const struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "nblst/%s", dev_name(dev));
+}
+
+int nbl_core_start(struct nbl_adapter *adapter, struct nbl_init_param *param)
+{
+	int ret = 0;
+
+	ret = nbl_dev_start(adapter, param);
+	return ret;
+}
+
+void nbl_core_stop(struct nbl_adapter *adapter)
+{
+	nbl_dev_stop(adapter);
+}
+
+static void nbl_core_setup_product_ops(struct nbl_adapter *adapter, struct nbl_init_param *param,
+				       struct nbl_product_base_ops **product_base_ops)
+{
+	adapter->product_base_ops = &nbl_product_base_ops[param->product_type];
+	*product_base_ops = adapter->product_base_ops;
+}
+
+struct nbl_adapter *nbl_core_init(struct pci_dev *pdev, struct nbl_init_param *param)
+{
+	struct nbl_adapter *adapter;
+	struct nbl_common_info *common;
+	struct nbl_product_base_ops *product_base_ops;
+	int ret = 0;
+
+	if (!pdev)
+		return NULL;
+
+	adapter = devm_kzalloc(&pdev->dev, sizeof(struct nbl_adapter), GFP_KERNEL);
+	if (!adapter)
+		return NULL;
+
+	adapter->pdev = pdev;
+	common = NBL_ADAPTER_TO_COMMON(adapter);
+
+	NBL_COMMON_TO_PDEV(common) = pdev;
+	NBL_COMMON_TO_DEV(common) = &pdev->dev;
+	NBL_COMMON_TO_DMA_DEV(common) = &pdev->dev;
+	NBL_COMMON_TO_DEBUG_LVL(common) |= NBL_DEBUG_ALL;
+	NBL_COMMON_TO_VF_CAP(common) = param->caps.is_vf;
+	NBL_COMMON_TO_PCI_USING_DAC(common) = param->pci_using_dac;
+	NBL_COMMON_TO_PCI_FUNC_ID(common) = PCI_FUNC(pdev->devfn);
+	common->devid = PCI_SLOT(pdev->devfn);
+	common->bus = pdev->bus->number;
+	common->tc_inst_id = NBL_TC_FLOW_INST_COUNT;
+	common->product_type = param->product_type;
+
+	memcpy(&adapter->init_param, param, sizeof(adapter->init_param));
+
+	nbl_core_setup_product_ops(adapter, param, &product_base_ops);
+
+	/* each product's phy/chan/res layers differ greatly, so call its own init ops */
+	ret = product_base_ops->phy_init(adapter, param);
+	if (ret)
+		goto phy_init_fail;
+
+	ret = product_base_ops->chan_init(adapter, param);
+	if (ret)
+		goto chan_init_fail;
+
+	ret = product_base_ops->res_init(adapter, param);
+	if (ret)
+		goto res_init_fail;
+
+	ret = nbl_disp_init(adapter, param);
+	if (ret)
+		goto disp_init_fail;
+
+	ret = nbl_serv_init(adapter, param);
+	if (ret)
+		goto serv_init_fail;
+
+	ret = nbl_dev_init(adapter, param);
+	if (ret)
+		goto dev_init_fail;
+
+	nbl_debugfs_func_init(adapter, param);
+
+	return adapter;
+
+dev_init_fail:
+	nbl_serv_remove(adapter);
+serv_init_fail:
+	nbl_disp_remove(adapter);
+disp_init_fail:
+	product_base_ops->res_remove(adapter);
+res_init_fail:
+	product_base_ops->chan_remove(adapter);
+chan_init_fail:
+	product_base_ops->phy_remove(adapter);
+phy_init_fail:
+	devm_kfree(&pdev->dev, adapter);
+	return NULL;
+}
+
+void nbl_core_remove(struct nbl_adapter *adapter)
+{
+	struct device *dev;
+	struct nbl_product_base_ops *product_base_ops;
+
+	if (!adapter)
+		return;
+
+	dev = NBL_ADAPTER_TO_DEV(adapter);
+	product_base_ops = NBL_ADAPTER_TO_RPDUCT_BASE_OPS(adapter);
+
+	nbl_debugfs_func_remove(adapter);
+	nbl_dev_remove(adapter);
+	nbl_serv_remove(adapter);
+	nbl_disp_remove(adapter);
+	product_base_ops->res_remove(adapter);
+	product_base_ops->chan_remove(adapter);
+	product_base_ops->phy_remove(adapter);
+	devm_kfree(dev, adapter);
+}
+
+void nbl_tc_set_cmdq_info(int (*send_cmdq)(void *, const void *, void *),
+			  void *priv, u8 index)
+{
+	g_tc_insts[index].send_cmdq = send_cmdq;
+	g_tc_insts[index].chan_mgt = priv;
+}
+
+void nbl_tc_unset_cmdq_info(u8 index)
+{
+	g_tc_insts[index].send_cmdq = NULL;
+	g_tc_insts[index].chan_mgt = NULL;
+	g_tc_insts[index].locked = 0;
+}
+
+void nbl_tc_set_flow_info(void *priv, u8 index)
+{
+	g_tc_insts[index].tc_flow_mgt = priv;
+}
+
+void nbl_tc_unset_flow_info(u8 index)
+{
+	g_tc_insts[index].tc_flow_mgt = NULL;
+}
+
+void *nbl_tc_get_flow_info(u8 index)
+{
+	return g_tc_insts[index].tc_flow_mgt;
+}
+
+u8 nbl_tc_alloc_inst_id(void)
+{
+	u8 inst_id = 0;
+
+	spin_lock(&nbl_tc_flow_inst_lock);
+	for (inst_id = 0; inst_id < NBL_TC_FLOW_INST_COUNT; inst_id++)
+		if (!g_tc_insts[inst_id].locked) {
+			g_tc_insts[inst_id].locked = 1;
+			spin_unlock(&nbl_tc_flow_inst_lock);
+			return inst_id;
+		}
+
+	/* return invalid index */
+	spin_unlock(&nbl_tc_flow_inst_lock);
+	return NBL_TC_FLOW_INST_COUNT;
+}
+
+int nbl_tc_call_inst_cmdq(u8 inst_id, const void *hdr, void *cmd)
+{
+	void *priv = NULL;
+
+	if (!g_tc_insts[inst_id].chan_mgt || !g_tc_insts[inst_id].send_cmdq)
+		return NBL_CMDQ_NOT_READY;
+
+	priv = g_tc_insts[inst_id].chan_mgt;
+	return g_tc_insts[inst_id].send_cmdq(priv, hdr, cmd);
+}
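+
+/*
+ * Typical tc-flow instance lifecycle, as a sketch (the callers live in the
+ * tc flow and channel layers; variable names here are illustrative):
+ *
+ *	u8 inst = nbl_tc_alloc_inst_id();
+ *	if (inst == NBL_TC_FLOW_INST_COUNT)
+ *		return -EBUSY;			(no free slot)
+ *	nbl_tc_set_cmdq_info(send_fn, chan_mgt, inst);
+ *	nbl_tc_set_flow_info(tc_flow_mgt, inst);
+ *	...
+ *	ret = nbl_tc_call_inst_cmdq(inst, &hdr, &cmd);
+ *	...
+ *	nbl_tc_unset_flow_info(inst);
+ *	nbl_tc_unset_cmdq_info(inst);		(also releases the slot)
+ */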
+int nbl_st_init(struct nbl_software_tool_table *st_table)
+{
+	dev_t devid;
+	int ret = 0;
+
+	ret = alloc_chrdev_region(&devid, 0, NBL_ST_MAX_DEVICE_NUM, "nblst");
+	if (ret < 0)
+		return ret;
+
+	st_table->major = MAJOR(devid);
+	st_table->devno = devid;
+
+	st_table->cls = class_create("nblst_cls");
+	if (IS_ERR(st_table->cls)) {
+		unregister_chrdev(st_table->major, "nblst");
+		unregister_chrdev_region(st_table->devno, NBL_ST_MAX_DEVICE_NUM);
+		return -EBUSY;
+	}
+	st_table->cls->devnode = nblst_cdevnode;
+
+	return ret;
+}
+
+void nbl_st_remove(struct nbl_software_tool_table *st_table)
+{
+	class_destroy(st_table->cls);
+	unregister_chrdev(st_table->major, "nblst");
+	unregister_chrdev_region(st_table->devno, NBL_ST_MAX_DEVICE_NUM);
+}
+
+struct nbl_software_tool_table *nbl_get_st_table(void)
+{
+	return &nbl_st_table;
+}
+
+static void nbl_debugfs_init(void)
+{
+	nbl_debugfs_root = debugfs_create_dir(NBL_DRIVER_NAME, NULL);
+}
+
+static void nbl_debugfs_remove(void)
+{
+	debugfs_remove_recursive(nbl_debugfs_root);
+	nbl_debugfs_root = NULL;
+}
+
+struct dentry *nbl_get_debugfs_root(void)
+{
+	return nbl_debugfs_root;
+}
+
+static void nbl_get_func_param(struct pci_dev *pdev, kernel_ulong_t driver_data,
+			       struct nbl_init_param *param)
+{
+	param->caps.has_ctrl = NBL_CAP_IS_CTRL(driver_data);
+	param->caps.has_net = NBL_CAP_IS_NET(driver_data);
+	param->caps.is_vf = NBL_CAP_IS_VF(driver_data);
+	param->caps.support_lag = NBL_CAP_SUPPORT_LAG(driver_data);
+	param->caps.has_user = NBL_CAP_IS_USER(driver_data);
+	param->caps.has_grc = NBL_CAP_IS_GRC(driver_data);
+	param->caps.is_blk = NBL_CAP_IS_BLK(driver_data);
+	param->caps.is_nic = NBL_CAP_IS_NIC(driver_data);
+	param->caps.has_factory_ctrl = NBL_CAP_IS_FACTORY_CTRL(driver_data);
+
+	if (NBL_CAP_IS_LEONIS(driver_data))
+		param->product_type = NBL_LEONIS_TYPE;
+	if (NBL_CAP_IS_BOOTIS(driver_data))
+		param->product_type = NBL_BOOTIS_TYPE;
+	if (NBL_CAP_IS_VIRTIO(driver_data))
+		param->product_type = NBL_VIRTIO_TYPE;
+
+	/*
+	 * On Leonis only PF0 has the ctrl capability, but PF0's PCIe device_id
+	 * is the same as the other PFs', so handle it specially here.
+	 */
+	if (param->product_type == NBL_LEONIS_TYPE && !param->caps.is_vf &&
+	    (PCI_FUNC(pdev->devfn) == 0) && !param->caps.has_factory_ctrl) {
+		param->caps.has_ctrl = 1;
+		param->caps.has_grc = 1;
+	}
+
+	if (param->caps.has_ctrl && param->caps.has_factory_ctrl) {
+		dev_err(&pdev->dev, "Do not support ctrl & factory_ctrl simultaneously, skip ctrl");
+		memset(&param->caps, 0, sizeof(param->caps));
+		param->caps.has_factory_ctrl = true;
+	}
+}
+
+static int nbl_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *id)
+{
+	struct device *dev = &pdev->dev;
+	struct nbl_adapter *adapter = NULL;
+	struct nbl_init_param param = {{0}};
+	int err;
+
+	dev_info(dev, "nbl probe\n");
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	param.pci_using_dac = true;
+	nbl_get_func_param(pdev, id->driver_data, &param);
+
+	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_info(dev, "Configure DMA 64 bit mask failed, err = %d\n", err);
+		param.pci_using_dac = false;
+		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(dev, "Configure DMA 32 bit mask failed, err = %d\n", err);
+			goto configure_dma_err;
+		}
+	}
+
+	pci_set_master(pdev);
+
+	pci_save_state(pdev);
+
+	if (param.caps.is_blk) {
+		dev_info(dev, "nbl_virtio_blk probe OK\n");
+		return NBL_OK;
+	}
+
+	adapter = nbl_core_init(pdev, &param);
+	if (!adapter) {
+		dev_err(dev, "Nbl adapter init fail\n");
+		err = -EAGAIN;
+		goto adapter_init_err;
+	}
+
+	pci_set_drvdata(pdev, adapter);
+
+	err = nbl_core_start(adapter, &param);
+	if (err)
+		goto core_start_err;
+
+	dev_info(dev, "nbl probe finished\n");
+
+	return 0;
+
+core_start_err:
+	nbl_core_remove(adapter);
+adapter_init_err:
+	pci_clear_master(pdev);
+configure_dma_err:
+	pci_disable_device(pdev);
+	return err;
+}
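+
+/*
+ * VF lifecycle sketch: SR-IOV is driven from sysfs, e.g.
+ *
+ *	echo 4 > /sys/bus/pci/devices/0000:82:00.0/sriov_numvfs
+ *
+ * which invokes nbl_sriov_configure(pdev, 4) below to enable the VFs and
+ * create their representors; writing 0 tears the representors and the VF
+ * config back down. (The PCI address is only an example.)
+ */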
+static void nbl_remove(struct pci_dev *pdev)
+{
+	struct nbl_adapter *adapter = pci_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "nbl remove\n");
+	pci_disable_sriov(pdev);
+
+	nbl_core_stop(adapter);
+	nbl_core_remove(adapter);
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+
+	dev_info(&pdev->dev, "nbl remove OK!\n");
+}
+
+static void nbl_shutdown(struct pci_dev *pdev)
+{
+	struct nbl_adapter *adapter = pci_get_drvdata(pdev);
+
+	if (!NBL_COMMON_TO_VF_CAP(NBL_ADAPTER_TO_COMMON(adapter)))
+		nbl_remove(pdev);
+
+	dev_info(&pdev->dev, "nbl shutdown OK\n");
+}
+
+static __maybe_unused int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct nbl_adapter *adapter = pci_get_drvdata(pdev);
+	int err;
+
+	if (!num_vfs) {
+		pci_disable_sriov(pdev);
+		if (!adapter)
+			return 0;
+
+		nbl_dev_remove_vf_config(adapter);
+
+		err = nbl_dev_destroy_rep(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "nbl destroy repr dev failed %d!\n", err);
+			return err;
+		}
+		return 0;
+	}
+
+	err = pci_enable_sriov(pdev, num_vfs);
+	if (err) {
+		dev_err(&pdev->dev, "nbl enable sriov failed %d!\n", err);
+		return err;
+	}
+
+	err = nbl_dev_create_rep(adapter, num_vfs);
+	if (err) {
+		dev_err(&pdev->dev, "nbl create repr dev failed %d!\n", err);
+		pci_disable_sriov(pdev);
+		return err;
+	}
+
+	err = nbl_dev_setup_vf_config(adapter, num_vfs);
+	if (err) {
+		dev_err(&pdev->dev, "nbl setup vf config failed %d!\n", err);
+		pci_disable_sriov(pdev);
+		nbl_dev_destroy_rep(adapter);
+		return err;
+	}
+
+	return num_vfs;
+}
+
+#define NBL_VENDOR_ID (0x1F0F)
+
+/*
+ * Leonis DeviceID
+ * 0x3400-0x3402 reserved for internal test
+ * 0x3403-0x3413 for snic v3r1 product
+ */
+#define NBL_DEVICE_ID_LEONIS_FACTORY (0x3400)
+#define NBL_DEVICE_ID_LEONIS_PF (0x3401)
+#define NBL_DEVICE_ID_LEONIS_VF (0x3402)
+#define NBL_DEVICE_ID_M18110 (0x3403)
+#define NBL_DEVICE_ID_M18110_LX (0x3404)
+#define NBL_DEVICE_ID_M18110_BASE_T (0x3405)
+#define NBL_DEVICE_ID_M18110_LX_BASE_T (0x3406)
+#define NBL_DEVICE_ID_M18110_OCP (0x3407)
+#define NBL_DEVICE_ID_M18110_LX_OCP (0x3408)
+#define NBL_DEVICE_ID_M18110_BASE_T_OCP (0x3409)
+#define NBL_DEVICE_ID_M18110_LX_BASE_T_OCP (0x340a)
+#define NBL_DEVICE_ID_M18120 (0x340b)
+#define NBL_DEVICE_ID_M18120_LX (0x340c)
+#define NBL_DEVICE_ID_M18120_BASE_T (0x340d)
+#define NBL_DEVICE_ID_M18120_LX_BASE_T (0x340e)
+#define NBL_DEVICE_ID_M18120_OCP (0x340f)
+#define NBL_DEVICE_ID_M18120_LX_OCP (0x3410)
+#define NBL_DEVICE_ID_M18120_BASE_T_OCP (0x3411)
+#define NBL_DEVICE_ID_M18120_LX_BASE_T_OCP (0x3412)
+#define NBL_DEVICE_ID_M18100_VF (0x3413)
+
+#define NBL_BOOTIS_DEVICE_ID_1226 (0x1226)
+#define NBL_BOOTIS_DEVICE_ID_1227 (0x1227)
+#define NBL_DF200_VDPA_NET_ID (0x1041)
+#define NBL_DF200_VDPA_BLK_ID (0x1042)
+#define NBL_DF200_RDMA_NET_ID (0x1043)
+
+static const struct pci_device_id nbl_id_table[] = {
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_LEONIS_FACTORY), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_FACTORY_CTRL_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_LEONIS_PF), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_LEONIS_VF), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_VF_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_BASE_T), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_BASE_T), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_BASE_T_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18110_LX_BASE_T_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_BASE_T), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_BASE_T), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_BASE_T_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18120_LX_BASE_T_OCP), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) | NBL_CAP_SET_BIT(NBL_CAP_HAS_USER_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_SUPPORT_LAG_BIT) },
+	{ PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_M18100_VF), .driver_data =
+		NBL_CAP_SET_BIT(NBL_CAP_HAS_NET_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_VF_BIT) |
+		NBL_CAP_SET_BIT(NBL_CAP_IS_NIC_BIT) | NBL_CAP_SET_BIT(NBL_CAP_IS_LEONIS_BIT) },
+	/* required as sentinel */
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, nbl_id_table);
+
+static int __maybe_unused nbl_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct nbl_adapter *adapter = pci_get_drvdata(pdev);
+
+	return nbl_dev_suspend(adapter);
+}
+
+static int __maybe_unused nbl_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct nbl_adapter *adapter = pci_get_drvdata(pdev);
+
+	return nbl_dev_resume(adapter);
+}
+
+static SIMPLE_DEV_PM_OPS(nbl_pm_ops, nbl_suspend, nbl_resume);
+static struct pci_driver nbl_driver = {
+	.name = NBL_DRIVER_NAME,
+	.id_table = nbl_id_table,
+	.probe = nbl_probe,
+	.remove = nbl_remove,
+	.shutdown = nbl_shutdown,
+	.sriov_configure = nbl_sriov_configure,
+	.driver.pm = &nbl_pm_ops,
+};
+
+static int __init nbl_module_init(void)
+{
+	int status;
+
+	nbl_dev_user_module_init();
+	status = nbl_common_create_wq();
+	if (status) {
+		pr_err("Failed to create wq, err = %d\n", status);
+		goto wq_create_failed;
+	}
+
+	mutex_init(&nbl_lag_mutex);
+	spin_lock_init(&nbl_tc_flow_inst_lock);
+	INIT_LIST_HEAD(&lag_resource_head);
+
+	nbl_st_init(nbl_get_st_table());
+	nbl_debugfs_init();
+
+	nbl_event_init();
+
+	status = pci_register_driver(&nbl_driver);
+	if (status) {
+		pr_err("Failed to register PCI driver, err = %d\n", status);
+		goto pci_register_driver_failed;
+	}
+
+	return 0;
+
+pci_register_driver_failed:
+	nbl_event_remove();
+	nbl_st_remove(nbl_get_st_table());
+	nbl_debugfs_remove();
+	nbl_common_destroy_wq();
+wq_create_failed:
+	nbl_dev_user_module_destroy();
+	return status;
+}
+
+static void __exit nbl_module_exit(void)
+{
+	pci_unregister_driver(&nbl_driver);
+
+	nbl_st_remove(nbl_get_st_table());
+
+	nbl_common_destroy_wq();
+	nbl_dev_user_module_destroy();
+
+	nbl_debugfs_remove();
+
+	nbl_event_remove();
+
+	pr_info("nbl module unloaded\n");
+}
+
+module_init(nbl_module_init);
+module_exit(nbl_module_exit);
+MODULE_LICENSE("GPL");
+
+#define NBL_FW_PATH "nbl/"
+#define NBL_FW_SNIC_PATH NBL_FW_PATH "snic_v3r1/"
+#define NBL_FW_TUNNEL_TOE_P4 NBL_FW_SNIC_PATH
+
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "nbl_single_tunnel_toe_enhance.elf");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "nbl_dual_tunnel_toe_enhance.elf");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "nbl_quad_tunnel_toe_enhance.elf");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_single_port_p4_hg");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_dual_port_p4_hg");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_quad_port_p4_hg");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_single_port_p4_lg");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_dual_port_p4_lg");
+MODULE_FIRMWARE(NBL_FW_SNIC_PATH "m181xx_quad_port_p4_lg");
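+
+/*
+ * Load-time sketch: after `modprobe nbl_core`, nbl_module_init() registers
+ * the PCI driver and nbl_probe() runs once per matching function. The P4
+ * and TOE images declared above are expected to be fetched on demand via
+ * the standard firmware loader from /lib/firmware/nbl/snic_v3r1/, per the
+ * NBL_FW_SNIC_PATH prefix.
+ */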