diff --git a/Documentation/networking/device_drivers/ethernet/nebula-matrix/m1600.rst b/Documentation/networking/device_drivers/ethernet/nebula-matrix/m1600.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7896c60202b6ee49edb776256a22d9f93a064342
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/nebula-matrix/m1600.rst
@@ -0,0 +1,55 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================================
+Linux Base Driver for Nebula-matrix M1600-NIC family
+============================================================
+
+Overview:
+=========
+M1600-NIC is a series of network interface cards for the data center area.
+
+The driver supports a link speed of 10GbE.
+
+M1600 devices support SR-IOV. This driver is used for both the Physical
+Function (PF) and the Virtual Function (VF).
+
+M1600 devices support an MSI-X interrupt vector for each Tx/Rx queue and
+interrupt moderation.
+
+M1600 devices also support various offload features such as checksum offload
+and Receive-Side Scaling (RSS).
+
+
+Supported PCI vendor ID/device IDs:
+===================================
+
+1f0f:1600 - M1600-NIC PF
+1f0f:1601 - M1600-NIC VF
+
+ethtool support
+===============
+
+Obtain basic information about the network card:
+  ethtool -i enp130s0f0
+
+Get network card ring parameters:
+  ethtool -g enp130s0f0
+
+Set the ring parameters:
+  ethtool -G enp130s0f0 rx 1024 tx 1024
+
+View statistics:
+  ethtool -S enp130s0f0
+
+View optical module information:
+  ethtool -m enp130s0f0
+
+Support
+=======
+
+For more information about M1600-NIC, please visit the following URL:
+https://www.nebula-matrix.com/
+
+If an issue is identified with the released source code on the supported kernel
+with a supported adapter, email the specific information related to the issue
+to open@nebula-matrix.com.
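A quick usage sketch tying the documentation above together: the module name m1600 comes from the Kconfig entry added below, the 1f0f vendor ID from the table above, and the interface name is only an example:

  modprobe m1600
  lspci -d 1f0f:
  ethtool -i enp130s0f0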
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index f934557ff7650cb48bda909ad77805e20b8b2609..ab44be9b9b99aff87b8e80220356fc41ede53616 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2893,6 +2893,8 @@ CONFIG_SMSC9420=m
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_NET_VENDOR_NEBULA_MATRIX=y
+CONFIG_M1600=m
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
 # CONFIG_NET_SB1000 is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 11323cdc33014d7be4ffb98b86e8fce77704a594..31e74b83eca8fddccb2f2017bd0c3e303c8443c5 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2864,6 +2864,8 @@ CONFIG_SFC_MCDI_LOGGING=y
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_NET_VENDOR_NEBULA_MATRIX=y
+CONFIG_M1600=m
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
 # CONFIG_NET_SB1000 is not set
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6998a8cb3faadeebae2d620a3a47d5996fc146b4..92989a31b95f6ad381fc26969911c234ec84c76d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -183,5 +183,6 @@ source "drivers/net/ethernet/via/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/nebula-matrix/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 6a7d68ea63ed08ede187bbb3caf8186cdf03dd8f..5e945a97cd879211ed3b03b76e7da23e067986c7 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -96,3 +96,4 @@ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
 obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
 obj-$(CONFIG_NET_VENDOR_NETSWIFT) += netswift/
+obj-$(CONFIG_NET_VENDOR_NEBULA_MATRIX) += nebula-matrix/
diff --git a/drivers/net/ethernet/nebula-matrix/Kconfig b/drivers/net/ethernet/nebula-matrix/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..906d3362e49dcd9425a6a9be6263b7b2113cc3fc
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Nebula-matrix network device configuration
+#
+
+config NET_VENDOR_NEBULA_MATRIX
+	bool "Nebula-matrix devices"
+	default y
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Nebula-matrix cards. If you say Y, you will be
+	  asked for your specific card in the following questions.
+
+if NET_VENDOR_NEBULA_MATRIX
+
+config M1600
+	tristate "Nebula-matrix Ethernet Controller m1600 Family support"
+	depends on PCI
+	depends on ARM64 || X86_64
+	default m
+	help
+	  This driver supports the Nebula-matrix Ethernet Controller m1600 family
+	  of devices. For more information about this product, go to the product
+	  description page for the smart NIC:
+
+
+
+	  More specific information on configuring the driver is in
+	  .
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called m1600.
+ +endif # NET_VENDOR_NEBULA_MATRIX diff --git a/drivers/net/ethernet/nebula-matrix/Makefile b/drivers/net/ethernet/nebula-matrix/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..44e2c66ea433c1d304440ff108a00bae64cc5297 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Nebula-matrix network device drivers. +# + +obj-$(CONFIG_M1600) += m1600/ diff --git a/drivers/net/ethernet/nebula-matrix/m1600/Makefile b/drivers/net/ethernet/nebula-matrix/m1600/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..17906016d384a791f4ac78101efe5c62147f7052 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022 Nebula Matrix Limited. +# Author: Monte Song + +ccflags-y += -DCONFIG_NBL_DEBUGFS + +obj-$(CONFIG_M1600) += m1600.o + +m1600-y += main.o \ + ethtool.o \ + common.o \ + interrupt.o \ + txrx.o \ + mailbox.o \ + debug.o \ + hwmon.o \ + macvlan.o \ + sriov.o + diff --git a/drivers/net/ethernet/nebula-matrix/m1600/common.c b/drivers/net/ethernet/nebula-matrix/m1600/common.c new file mode 100644 index 0000000000000000000000000000000000000000..73011d14379ece867b5857ba98023cd58d2cd219 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/common.c @@ -0,0 +1,1651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#include +#include +#include + +#include "hw.h" +#include "common.h" +#include "txrx.h" +#include "mailbox.h" + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif + +void nbl_af_configure_fc_cplh_up_th(struct nbl_hw *hw) +{ + wr32(hw, NBL_FC_CPLH_UP_TH_REG_ADDR, NBL_FC_CPLH_UP_TH_B8); +} + +void nbl_firmware_init(struct nbl_hw *hw) +{ + u32 init_status; + u32 i = 0; + + do { + init_status = rd32(hw, NBL_GREG_DYNAMIC_INIT_REG); + i++; + if (i % 10 == 0) + pr_warn("Tried %u times already, but firmware has not been initialized yet\n", + i); + } while (init_status != NBL_DYNAMIC_INIT_DONE); +} + +static void nbl_af_capture_broadcast_packets(struct nbl_hw *hw) +{ + struct nbl_pcmrt_action pcmrt_action; + struct nbl_pcmrt_mask pcmrt_mask; + struct nbl_pcmrt_key pcmrt_key; + unsigned int slot = NBL_PCMRT_BROADCAST_SLOT; + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + pcmrt_key.dmac_type = NBL_PCMRT_DMAC_BROADCAST; + pcmrt_key.valid = 1; + + memset(&pcmrt_mask, 0, sizeof(pcmrt_mask)); + pcmrt_mask.dmac_mask = 0; + pcmrt_mask.etype_mask = 1; + pcmrt_mask.ip_protocol_mask = 1; + pcmrt_mask.dport_mask = 1; + pcmrt_mask.tcp_ctrl_bits_mask = 1; + pcmrt_mask.up_down_mask = 1; + + rd32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + pcmrt_action.action_bitmap &= ~(NBL_PCMRT_ACTION_MASK << (slot * NBL_PCMRT_ACTION_BIT_LEN)); + pcmrt_action.action_bitmap |= ((u64)NBL_PCMRT_ACTION_CAPTURE) << + (slot * NBL_PCMRT_ACTION_BIT_LEN); + + wr32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + wr32_for_each(hw, NBL_PA_PCMRT_MASK_REG_ARR(slot), (u32 *)&pcmrt_mask, sizeof(pcmrt_mask)); + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +static void nbl_af_capture_multicast_packets(struct nbl_hw *hw) +{ + struct nbl_pcmrt_action pcmrt_action; + struct nbl_pcmrt_mask pcmrt_mask; + struct nbl_pcmrt_key pcmrt_key; + unsigned int slot = NBL_PCMRT_MULTICAST_SLOT; + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + 
pcmrt_key.dmac_type = NBL_PCMRT_DMAC_MULTICAST; + pcmrt_key.valid = 1; + + memset(&pcmrt_mask, 0, sizeof(pcmrt_mask)); + pcmrt_mask.dmac_mask = 0; + pcmrt_mask.etype_mask = 1; + pcmrt_mask.ip_protocol_mask = 1; + pcmrt_mask.dport_mask = 1; + pcmrt_mask.tcp_ctrl_bits_mask = 1; + pcmrt_mask.up_down_mask = 1; + + rd32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + pcmrt_action.action_bitmap &= ~(NBL_PCMRT_ACTION_MASK << + (slot * NBL_PCMRT_ACTION_BIT_LEN)); + pcmrt_action.action_bitmap |= ((u64)NBL_PCMRT_ACTION_CAPTURE) << + (slot * NBL_PCMRT_ACTION_BIT_LEN); + + wr32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + wr32_for_each(hw, NBL_PA_PCMRT_MASK_REG_ARR(slot), (u32 *)&pcmrt_mask, sizeof(pcmrt_mask)); + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +static void nbl_af_capture_lacp_packets(struct nbl_hw *hw) +{ + u32 etype_ext; + struct nbl_pcmrt_action pcmrt_action; + struct nbl_pcmrt_mask pcmrt_mask; + struct nbl_pcmrt_key pcmrt_key; + unsigned int etype_ext_slot = NBL_ETYPE_EXT_LACP_SLOT; + unsigned int index; + unsigned int offset; + unsigned int slot = NBL_PCMRT_LACP_SLOT; + + index = etype_ext_slot / NBL_ETYPE_EXTS_PER_REG; + offset = etype_ext_slot % NBL_ETYPE_EXTS_PER_REG; + etype_ext = rd32(hw, NBL_PA_ETYPE_EXT_REG_ARR(index)); + etype_ext &= ~(NBL_ETYPE_EXT_MASK << (offset * NBL_ETYPE_EXT_BIT_LEN)); + etype_ext |= ETH_P_SLOW << (offset * NBL_ETYPE_EXT_BIT_LEN); + wr32(hw, NBL_PA_ETYPE_EXT_REG_ARR(index), etype_ext); + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + pcmrt_key.etype_type = NBL_PCMRT_ETYPE_EXT_BASE + etype_ext_slot; + pcmrt_key.valid = 1; + + memset(&pcmrt_mask, 0, sizeof(pcmrt_mask)); + pcmrt_mask.dmac_mask = 1; + pcmrt_mask.etype_mask = 0; + pcmrt_mask.ip_protocol_mask = 1; + pcmrt_mask.dport_mask = 1; + pcmrt_mask.tcp_ctrl_bits_mask = 1; + pcmrt_mask.up_down_mask = 1; + + rd32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + pcmrt_action.action_bitmap &= ~(NBL_PCMRT_ACTION_MASK << (slot * NBL_PCMRT_ACTION_BIT_LEN)); + pcmrt_action.action_bitmap |= ((u64)NBL_PCMRT_ACTION_CAPTURE) << + (slot * NBL_PCMRT_ACTION_BIT_LEN); + + wr32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + wr32_for_each(hw, NBL_PA_PCMRT_MASK_REG_ARR(slot), (u32 *)&pcmrt_mask, sizeof(pcmrt_mask)); + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +static void nbl_af_capture_lldp_packets(struct nbl_hw *hw) +{ + u32 etype_ext; + struct nbl_pcmrt_action pcmrt_action; + struct nbl_pcmrt_mask pcmrt_mask; + struct nbl_pcmrt_key pcmrt_key; + unsigned int etype_ext_slot = NBL_ETYPE_EXT_LLDP_SLOT; + unsigned int index; + unsigned int offset; + unsigned int slot = NBL_PCMRT_LLDP_SLOT; + + index = etype_ext_slot / NBL_ETYPE_EXTS_PER_REG; + offset = etype_ext_slot % NBL_ETYPE_EXTS_PER_REG; + etype_ext = rd32(hw, NBL_PA_ETYPE_EXT_REG_ARR(index)); + etype_ext &= ~(NBL_ETYPE_EXT_MASK << (offset * NBL_ETYPE_EXT_BIT_LEN)); + etype_ext |= ETH_P_LLDP << (offset * NBL_ETYPE_EXT_BIT_LEN); + wr32(hw, NBL_PA_ETYPE_EXT_REG_ARR(index), etype_ext); + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + pcmrt_key.etype_type = NBL_PCMRT_ETYPE_EXT_BASE + etype_ext_slot; + pcmrt_key.valid = 1; + + memset(&pcmrt_mask, 0, sizeof(pcmrt_mask)); + pcmrt_mask.dmac_mask = 1; + pcmrt_mask.etype_mask = 0; + pcmrt_mask.ip_protocol_mask = 1; + pcmrt_mask.dport_mask = 1; + pcmrt_mask.tcp_ctrl_bits_mask = 1; + 
pcmrt_mask.up_down_mask = 1; + + rd32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + pcmrt_action.action_bitmap &= ~(NBL_PCMRT_ACTION_MASK << (slot * NBL_PCMRT_ACTION_BIT_LEN)); + pcmrt_action.action_bitmap |= ((u64)NBL_PCMRT_ACTION_CAPTURE) << + (slot * NBL_PCMRT_ACTION_BIT_LEN); + + wr32_for_each(hw, NBL_PA_PCMRT_ACTION_REG, (u32 *)&pcmrt_action, sizeof(pcmrt_action)); + wr32_for_each(hw, NBL_PA_PCMRT_MASK_REG_ARR(slot), (u32 *)&pcmrt_mask, sizeof(pcmrt_mask)); + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +static void nbl_af_clear_capture_broadcast_packets_conf(struct nbl_hw *hw) +{ + struct nbl_pcmrt_key pcmrt_key; + unsigned int slot = NBL_PCMRT_BROADCAST_SLOT; + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + pcmrt_key.valid = 0; + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +static void nbl_af_clear_capture_multicast_packets_conf(struct nbl_hw *hw) +{ + struct nbl_pcmrt_key pcmrt_key; + unsigned int slot = NBL_PCMRT_MULTICAST_SLOT; + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + pcmrt_key.valid = 0; + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +static void nbl_af_clear_capture_lacp_packets_conf(struct nbl_hw *hw) +{ + struct nbl_pcmrt_key pcmrt_key; + unsigned int slot = NBL_PCMRT_LACP_SLOT; + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + pcmrt_key.valid = 0; + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +static void nbl_af_clear_capture_lldp_packets_conf(struct nbl_hw *hw) +{ + struct nbl_pcmrt_key pcmrt_key; + unsigned int slot = NBL_PCMRT_LLDP_SLOT; + + memset(&pcmrt_key, 0, sizeof(pcmrt_key)); + pcmrt_key.valid = 0; + wr32_for_each(hw, NBL_PA_PCMRT_KEY_REG_ARR(slot), (u32 *)&pcmrt_key, sizeof(pcmrt_key)); +} + +void nbl_af_configure_captured_packets(struct nbl_hw *hw) +{ + nbl_af_capture_broadcast_packets(hw); + nbl_af_capture_multicast_packets(hw); + nbl_af_capture_lacp_packets(hw); + nbl_af_capture_lldp_packets(hw); +} + +void nbl_af_clear_captured_packets_conf(struct nbl_hw *hw) +{ + nbl_af_clear_capture_broadcast_packets_conf(hw); + nbl_af_clear_capture_multicast_packets_conf(hw); + nbl_af_clear_capture_lacp_packets_conf(hw); + nbl_af_clear_capture_lldp_packets_conf(hw); +} + +u32 nbl_af_get_firmware_version(struct nbl_hw *hw) +{ + return rd32(hw, NBL_GREG_DYNAMIC_VERSION_REG); +} + +int nbl_af_res_mng_init(struct nbl_hw *hw) +{ + struct nbl_af_res_info *af_res; + struct nbl_qid_map invalid_qid_map; + struct nbl_func_res *func_res; + u16 i; + + af_res = kmalloc(sizeof(*af_res), GFP_KERNEL); + if (!af_res) + return -ENOMEM; + + spin_lock_init(&af_res->func_res_lock); + bitmap_zero(af_res->interrupt_bitmap, NBL_MAX_INTERRUPT); + bitmap_zero(af_res->txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); + + af_res->qid_map_ready = 0; + af_res->qid_map_select = NBL_MASTER_QID_MAP_TABLE; + + memset(&invalid_qid_map, 0, sizeof(invalid_qid_map)); + invalid_qid_map.local_qid = 0x1F; + invalid_qid_map.notify_addr_l = 0x7FFFFFF; + invalid_qid_map.notify_addr_h = 0xFFFF; + invalid_qid_map.global_qid = 0x7F; + invalid_qid_map.rsv = 0x1FF; + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) + af_res->qid_map_table[i] = invalid_qid_map; + + memset(af_res->res_record, 0, sizeof(af_res->res_record)); + + for (i = 0; i < NBL_ETH_PORT_NUM; i++) { + atomic_set(&af_res->eth_port_tx_refcount[i], 0); + atomic_set(&af_res->eth_port_rx_refcount[i], 0); + } + + for (i = 0; i < 
NBL_MAX_FUNC; i++) { + func_res = kmalloc(sizeof(*func_res), GFP_ATOMIC | __GFP_ZERO); + if (!func_res) + goto all_mem_failed; + af_res->res_record[i] = func_res; + } + + hw->af_res = af_res; + return 0; + +all_mem_failed: + for (i = 0; i < NBL_MAX_PF_FUNC; i++) + kfree(af_res->res_record[i]); + kfree(af_res); + return -ENOMEM; +} + +void nbl_af_free_res(struct nbl_hw *hw) +{ + struct nbl_af_res_info *af_res; + struct nbl_func_res *func_res; + u8 i; + + af_res = hw->af_res; + for (i = 0; i < NBL_MAX_FUNC; i++) { + func_res = af_res->res_record[i]; + kfree(func_res); + } + + kfree(af_res); + hw->af_res = NULL; +} + +void nbl_af_compute_bdf(struct nbl_hw *hw, u16 func_id, + u8 *bus, u8 *devid, u8 *function) +{ + u16 af_bdf; + u16 function_bdf; + + af_bdf = (((u16)hw->bus) << 8) | PCI_DEVFN((u16)hw->devid, (u16)hw->function); + function_bdf = af_bdf + func_id; + + if (function_bdf < af_bdf) + pr_alert("Compute BDF number for mailbox function %u error\n", func_id); + + *bus = function_bdf >> 8; + *devid = PCI_SLOT(function_bdf); + *function = PCI_FUNC(function_bdf); +} + +bool nbl_check_golden_version(struct nbl_hw *hw) +{ + struct nbl_dynamic_version version; + + rd32_for_each(hw, NBL_GREG_DYNAMIC_VERSION_REG, (u32 *)&version, + sizeof(version)); + return version.sub_version == NBL_GOLDEN_SUB_VERSION; +} + +static inline u64 nbl_get_qid_map_key(struct nbl_qid_map qid_map) +{ + u64 key; + u64 notify_addr_l; + u64 notify_addr_h; + + notify_addr_l = qid_map.notify_addr_l; + notify_addr_h = qid_map.notify_addr_h; + key = (notify_addr_h << NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN) | notify_addr_l; + + return key; +} + +static void nbl_af_fill_qid_map_table(struct nbl_hw *hw, u16 func_id, u64 notify_addr) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_qid_map qid_map; + struct nbl_queue_table_ready queue_table_ready; + struct nbl_queue_table_select queue_table_select; + unsigned long flags; + u8 *txrx_queues; + u64 key; + u8 qid_map_entries; + u8 qid_map_base; + u8 i; + u8 j; + + spin_lock_irqsave(&af_res->func_res_lock, flags); + + qid_map_base = NBL_QID_MAP_TABLE_ENTRIES; + key = notify_addr >> NBL_QID_MAP_NOTIFY_ADDR_SHIFT; + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + WARN_ON(key == nbl_get_qid_map_key(af_res->qid_map_table[i])); + if (key < nbl_get_qid_map_key(af_res->qid_map_table[i])) { + qid_map_base = i; + break; + } + } + + if (unlikely(qid_map_base == NBL_QID_MAP_TABLE_ENTRIES)) { + pr_alert("Can not insert key corresponding to notify addr %llx\n", notify_addr); + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + return; + } + + qid_map_entries = func_res->num_txrx_queues; + for (i = NBL_QID_MAP_TABLE_ENTRIES - qid_map_entries; i > qid_map_base; i--) + af_res->qid_map_table[i - 1 + qid_map_entries] = af_res->qid_map_table[i - 1]; + + txrx_queues = func_res->txrx_queues; + memset(&qid_map, 0, sizeof(qid_map)); + for (i = 0; i < qid_map_entries; i++) { + qid_map.local_qid = 2 * i + 1; + qid_map.notify_addr_l = key; + qid_map.notify_addr_h = key >> NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN; + qid_map.global_qid = txrx_queues[i]; + af_res->qid_map_table[qid_map_base + i] = qid_map; + } + + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + j = 0; + + do { + wr32_for_each(hw, NBL_PCOMPLETER_QID_MAP_REG_ARR(af_res->qid_map_select, i), + (u32 *)(af_res->qid_map_table + i), sizeof(qid_map)); + udelay(5); + rd32_for_each(hw, NBL_PCOMPLETER_QID_MAP_REG_ARR(af_res->qid_map_select, i), + (u32 *)&qid_map, 
sizeof(qid_map)); + if (likely(!memcmp(&qid_map, af_res->qid_map_table + i, sizeof(qid_map)))) + break; + j++; + } while (j < NBL_REG_WRITE_MAX_TRY_TIMES); + + if (j == NBL_REG_WRITE_MAX_TRY_TIMES) + pr_err("Write to qid map table entry %hhu failed\n", i); + } + + memset(&queue_table_select, 0, sizeof(queue_table_select)); + queue_table_select.select = af_res->qid_map_select; + wr32_and_verify(hw, NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG, *(u32 *)&queue_table_select); + af_res->qid_map_select = !af_res->qid_map_select; + + if (!af_res->qid_map_ready) { + memset(&queue_table_ready, 0, sizeof(queue_table_ready)); + queue_table_ready.ready = 1; + wr32_for_each(hw, NBL_PCOMPLETER_QUEUE_TABLE_READY_REG, + (u32 *)&queue_table_ready, sizeof(queue_table_ready)); + af_res->qid_map_ready = 1; + } + + spin_unlock_irqrestore(&af_res->func_res_lock, flags); +} + +static void nbl_af_remove_qid_map_table(struct nbl_hw *hw, u16 func_id, u64 notify_addr) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_qid_map qid_map; + struct nbl_qid_map invalid_qid_map; + struct nbl_queue_table_ready queue_table_ready; + struct nbl_queue_table_select queue_table_select; + unsigned long flags; + u64 key; + u8 qid_map_entries; + u8 qid_map_base; + u8 i; + u8 j; + + spin_lock_irqsave(&af_res->func_res_lock, flags); + + qid_map_base = NBL_QID_MAP_TABLE_ENTRIES; + key = notify_addr >> NBL_QID_MAP_NOTIFY_ADDR_SHIFT; + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + if (key == nbl_get_qid_map_key(af_res->qid_map_table[i])) { + qid_map_base = i; + break; + } + } + + if (unlikely(qid_map_base == NBL_QID_MAP_TABLE_ENTRIES)) { + pr_alert("Can not find key corresponding to notify addr %llx\n", notify_addr); + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + return; + } + + qid_map_entries = func_res->num_txrx_queues; + invalid_qid_map.local_qid = 0x1F; + invalid_qid_map.notify_addr_l = 0x7FFFFFF; + invalid_qid_map.notify_addr_h = 0xFFFF; + invalid_qid_map.global_qid = 0x7F; + invalid_qid_map.rsv = 0x1FF; + for (i = qid_map_base; i < NBL_QID_MAP_TABLE_ENTRIES - qid_map_entries; i++) + af_res->qid_map_table[i] = af_res->qid_map_table[i + qid_map_entries]; + for (; i < NBL_QID_MAP_TABLE_ENTRIES; i++) + af_res->qid_map_table[i] = invalid_qid_map; + + for (i = 0; i < NBL_QID_MAP_TABLE_ENTRIES; i++) { + j = 0; + + do { + wr32_for_each(hw, NBL_PCOMPLETER_QID_MAP_REG_ARR(af_res->qid_map_select, i), + (u32 *)(af_res->qid_map_table + i), sizeof(qid_map)); + udelay(5); + rd32_for_each(hw, NBL_PCOMPLETER_QID_MAP_REG_ARR(af_res->qid_map_select, i), + (u32 *)&qid_map, sizeof(qid_map)); + if (likely(!memcmp(&qid_map, af_res->qid_map_table + i, sizeof(qid_map)))) + break; + j++; + } while (j < NBL_REG_WRITE_MAX_TRY_TIMES); + + if (j == NBL_REG_WRITE_MAX_TRY_TIMES) + pr_err("Write to qid map table entry %hhu failed when remove entries\n", i); + } + + memset(&queue_table_select, 0, sizeof(queue_table_select)); + queue_table_select.select = af_res->qid_map_select; + wr32_and_verify(hw, NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG, *(u32 *)&queue_table_select); + af_res->qid_map_select = !af_res->qid_map_select; + + if (!func_id) { + WARN_ON(!af_res->qid_map_ready); + memset(&queue_table_ready, 0, sizeof(queue_table_ready)); + queue_table_ready.ready = 0; + wr32_for_each(hw, NBL_PCOMPLETER_QUEUE_TABLE_READY_REG, + (u32 *)&queue_table_ready, sizeof(queue_table_ready)); + af_res->qid_map_ready = 0; + } + + spin_unlock_irqrestore(&af_res->func_res_lock, flags); +} + +int 
nbl_af_configure_func_msix_map(struct nbl_hw *hw, u16 func_id, u16 requested) +{ + struct nbl_adapter *adapter = hw->back; + struct device *dev = &adapter->pdev->dev; + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_msix_map_table *msix_map_table; + struct nbl_msix_map *msix_map_entries; + struct nbl_function_msix_map function_msix_map; + u16 *interrupts; + unsigned long flags; + u16 intr_index; + u16 i; + int err; + + msix_map_table = &func_res->msix_map_table; + msix_map_table->size = sizeof(struct nbl_msix_map) * NBL_MSIX_MAP_TABLE_MAX_ENTRIES; + msix_map_table->base_addr = dma_alloc_coherent(dev, msix_map_table->size, + &msix_map_table->dma, + GFP_ATOMIC | __GFP_ZERO); + if (!msix_map_table->base_addr) { + msix_map_table->size = 0; + return -ENOMEM; + } + + interrupts = kcalloc(requested, sizeof(interrupts[0]), GFP_ATOMIC); + if (!interrupts) { + err = -ENOMEM; + goto alloc_interrupts_err; + } + func_res->num_interrupts = requested; + func_res->interrupts = interrupts; + + spin_lock_irqsave(&af_res->func_res_lock, flags); + + for (i = 0; i < requested; i++) { + intr_index = find_first_zero_bit(af_res->interrupt_bitmap, NBL_MAX_INTERRUPT); + if (intr_index == NBL_MAX_INTERRUPT) { + pr_err("There is no available interrupt left\n"); + err = -EAGAIN; + goto get_interrupt_err; + } + interrupts[i] = intr_index; + set_bit(intr_index, af_res->interrupt_bitmap); + } + + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + + msix_map_entries = msix_map_table->base_addr; + for (i = 0; i < requested; i++) { + msix_map_entries[i].global_msix_index = interrupts[i]; + msix_map_entries[i].valid = 1; + } + + function_msix_map.msix_map_base_addr = msix_map_table->dma; + function_msix_map.function = hw->function; + function_msix_map.devid = hw->devid; + function_msix_map.bus = hw->bus; + function_msix_map.valid = 1; + wr32_for_each(hw, NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(func_id), + (u32 *)&function_msix_map, sizeof(function_msix_map)); + + return 0; + +get_interrupt_err: + while (i--) { + intr_index = interrupts[i]; + clear_bit(intr_index, af_res->interrupt_bitmap); + } + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + + kfree(interrupts); + func_res->num_interrupts = 0; + func_res->interrupts = NULL; + +alloc_interrupts_err: + dma_free_coherent(dev, msix_map_table->size, msix_map_table->base_addr, + msix_map_table->dma); + msix_map_table->size = 0; + msix_map_table->base_addr = NULL; + msix_map_table->dma = 0; + + return err; +} + +void nbl_af_destroy_func_msix_map(struct nbl_hw *hw, u16 func_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_function_msix_map function_msix_map; + struct nbl_msix_map_table *msix_map_table; + struct device *dev = nbl_hw_to_dev(hw); + u16 *interrupts; + u16 intr_num; + unsigned long flags; + u16 i; + + memset(&function_msix_map, 0, sizeof(function_msix_map)); + wr32_for_each(hw, NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(func_id), + (u32 *)&function_msix_map, sizeof(function_msix_map)); + + if (!func_res) + return; + /* NOTICE: DMA memory for msix map table is release when AF is removed + * because there is WARN message if it is released when interrupt + * is disabled. 
+ */ + + intr_num = func_res->num_interrupts; + interrupts = func_res->interrupts; + if (!interrupts) + return; + spin_lock_irqsave(&af_res->func_res_lock, flags); + for (i = 0; i < intr_num; i++) + clear_bit(interrupts[i], af_res->interrupt_bitmap); + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + kfree(interrupts); + func_res->interrupts = NULL; + func_res->num_interrupts = 0; + + WARN_ON(func_res->txrx_queues); + msix_map_table = &func_res->msix_map_table; + WARN_ON(!msix_map_table->base_addr); + dma_free_coherent(dev, msix_map_table->size, msix_map_table->base_addr, + msix_map_table->dma); + msix_map_table->size = 0; + msix_map_table->base_addr = NULL; + msix_map_table->dma = 0; +} + +int nbl_configure_msix_map(struct nbl_hw *hw) +{ + struct nbl_adapter *adapter = hw->back; + int num_cpus; + int needed; + int err; + + num_cpus = num_online_cpus(); + needed = num_cpus > adapter->num_rxq ? adapter->num_rxq : num_cpus; + if (needed <= 0 || needed > U16_MAX - 1) { + pr_err("There are %d cpus online and %d rx queue(s), which is invalid\n", + num_cpus, adapter->num_rxq); + return -EINVAL; + } + + adapter->num_lan_msix = (u16)needed; + adapter->num_q_vectors = adapter->num_lan_msix; + + adapter->num_mailbox_msix = 1; + needed += 1; + + if (is_af(hw)) { + /* An additional interrupt is used by AF protocol packet + * such as ARP packet forward queue. + */ + needed += 1; + err = nbl_af_configure_func_msix_map(hw, 0, (u16)needed); + if (err) { + pr_err("AF configure function msix map table failed\n"); + goto err_out; + } + } else { + err = nbl_mailbox_req_cfg_msix_map_table(hw, (u16)needed); + if (err) { + pr_err("PF %u configure function msix map table failed\n", hw->function); + goto err_out; + } + } + + return 0; + +err_out: + adapter->num_lan_msix = 0; + adapter->num_q_vectors = 0; + adapter->num_mailbox_msix = 0; + return err; +} + +void nbl_destroy_msix_map(struct nbl_hw *hw) +{ + if (is_af(hw)) + nbl_af_destroy_func_msix_map(hw, 0); + else + nbl_mailbox_req_destroy_msix_map_table(hw); +} + +int nbl_af_configure_qid_map(struct nbl_hw *hw, u16 func_id, u8 num_queues, u64 notify_addr) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + unsigned long flags; + u8 queue_index; + u8 *txrx_queues; + u8 i; + int err; + + WARN_ON(!func_res || func_res->num_txrx_queues); + + txrx_queues = kcalloc(num_queues, sizeof(txrx_queues[0]), GFP_ATOMIC); + if (!txrx_queues) + return -ENOMEM; + func_res->num_txrx_queues = num_queues; + func_res->txrx_queues = txrx_queues; + + spin_lock_irqsave(&af_res->func_res_lock, flags); + + for (i = 0; i < num_queues; i++) { + queue_index = find_first_zero_bit(af_res->txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); + if (queue_index == NBL_MAX_TXRX_QUEUE) { + pr_err("There is no available txrx queues left\n"); + err = -EAGAIN; + goto get_txrx_queue_err; + } + txrx_queues[i] = queue_index; + set_bit(queue_index, af_res->txrx_queue_bitmap); + } + + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + + nbl_af_fill_qid_map_table(hw, func_id, notify_addr); + + return 0; + +get_txrx_queue_err: + while (i--) { + queue_index = txrx_queues[i]; + clear_bit(queue_index, af_res->txrx_queue_bitmap); + } + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + + kfree(txrx_queues); + func_res->num_txrx_queues = 0; + func_res->txrx_queues = NULL; + + return err; +} + +void nbl_af_clear_qid_map(struct nbl_hw *hw, u16 func_id, u64 notify_addr) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res 
*func_res = af_res->res_record[func_id]; + unsigned long flags; + u8 queue_index; + u8 num_queues; + u8 *txrx_queues; + u8 i; + + WARN_ON(!func_res || !func_res->num_txrx_queues); + + nbl_af_remove_qid_map_table(hw, func_id, notify_addr); + + num_queues = func_res->num_txrx_queues; + txrx_queues = func_res->txrx_queues; + spin_lock_irqsave(&af_res->func_res_lock, flags); + for (i = 0; i < num_queues; i++) { + queue_index = txrx_queues[i]; + clear_bit(queue_index, af_res->txrx_queue_bitmap); + } + spin_unlock_irqrestore(&af_res->func_res_lock, flags); + + kfree(txrx_queues); + func_res->txrx_queues = NULL; + func_res->num_txrx_queues = 0; +} + +static u64 nbl_read_real_bar_base_addr(struct pci_dev *pdev) +{ + u32 val; + u64 addr; + + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &val); + addr = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + 4, &val); + addr |= ((u64)val << 32); + + return addr; +} + +int nbl_get_vsi_id(struct nbl_hw *hw) +{ + int err; + + if (!is_vf(hw)) { + hw->vsi_id = hw->function; + } else { + err = nbl_mailbox_req_get_vsi_id(hw); + if (err < 0) { + pr_err("Get vsi id failed with error %d\n", err); + return err; + } + hw->vsi_id = (u8)(unsigned int)err; + } + + return 0; +} + +#ifdef CONFIG_PCI_IOV +static u64 nbl_read_real_vf_bar_base_addr(struct pci_dev *pdev) +{ + int pos; + u32 val; + u64 addr; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR, &val); + addr = (u64)(val & PCI_BASE_ADDRESS_MEM_MASK); + + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, &val); + addr |= ((u64)val << 32); + + return addr; +} +#endif + +void nbl_af_register_vf_bar_info(struct nbl_hw *hw, u16 func_id, + u64 vf_bar_start, u64 vf_bar_len) +{ + struct nbl_af_res_info *af_res = hw->af_res; + + af_res->vf_bar_info[func_id].vf_bar_start = vf_bar_start; + af_res->vf_bar_info[func_id].vf_bar_len = vf_bar_len; +} + +#ifdef CONFIG_PCI_IOV +int nbl_register_vf_bar_info(struct nbl_hw *hw) +{ + struct nbl_adapter *adapter; + struct pci_dev *pdev; + u64 vf_bar_len; + u64 vf_bar_start; + struct resource *res; + int err = 0; + + if (is_vf(hw)) + return 0; + + adapter = hw->back; + pdev = adapter->pdev; + + vf_bar_start = nbl_read_real_vf_bar_base_addr(pdev); + res = &pdev->resource[PCI_IOV_RESOURCES]; + vf_bar_len = resource_size(res) / NBL_MAX_VF_PER_PF; + if (is_af(hw)) + nbl_af_register_vf_bar_info(hw, 0, vf_bar_start, vf_bar_len); + else + err = nbl_mailbox_req_register_vf_bar_info(hw, vf_bar_start, vf_bar_len); + + return err; +} +#else +int nbl_register_vf_bar_info(struct nbl_hw *hw) +{ + return 0; +} +#endif + +u64 nbl_af_compute_vf_bar_base_addr(struct nbl_hw *hw, u16 func_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_vf_bar_info *vf_bar_info; + u8 pf_func_id; + u8 vf_offset; + u64 base_addr; + + WARN_ON(func_id < NBL_MAX_PF_FUNC); + pf_func_id = (func_id - NBL_MAX_PF_FUNC) / NBL_MAX_VF_PER_PF; + vf_offset = (func_id - NBL_MAX_PF_FUNC) % NBL_MAX_VF_PER_PF; + vf_bar_info = &af_res->vf_bar_info[pf_func_id]; + base_addr = vf_bar_info->vf_bar_start + vf_bar_info->vf_bar_len * vf_offset; + + return base_addr; +} + +int nbl_configure_notify_addr(struct nbl_hw *hw) +{ + struct nbl_adapter *adapter; + struct pci_dev *pdev; + u64 real_addr; + u64 notify_addr; + u8 num_txq; + u8 num_rxq; + int err = 0; + + adapter = hw->back; + pdev = adapter->pdev; + num_txq = adapter->num_txq; + num_rxq = adapter->num_rxq; + if (num_txq != num_rxq) { + pr_err("The number of TX queues 
must equal to RX queues\n"); + return -EINVAL; + } + + if (!is_vf(hw)) + real_addr = nbl_read_real_bar_base_addr(pdev); + else + err = nbl_mailbox_req_get_vf_bar_base_addr(hw, &real_addr); + + if (err) { + pr_err("Get VF BAR base address failed with error %d\n", err); + return err; + } + + if (is_af(hw)) { + notify_addr = real_addr + NBL_PCOMPLETER_AF_NOTIFY_REG; + if (real_addr <= U32_MAX && notify_addr > U32_MAX) + pr_warn("Maybe we can not successfully kick the doorbell\n"); + /* AF have an additional queue used for + * protocol packet forwarding. + */ + num_rxq += 1; + err = nbl_af_configure_qid_map(hw, 0, num_rxq, notify_addr); + } else { + notify_addr = real_addr; + err = nbl_mailbox_req_cfg_qid_map(hw, num_rxq, notify_addr); + } + + return err; +} + +void nbl_clear_notify_addr(struct nbl_hw *hw) +{ + struct nbl_adapter *adapter; + struct pci_dev *pdev; + u64 real_addr; + u64 notify_addr; + int err = 0; + + adapter = hw->back; + pdev = adapter->pdev; + if (!is_vf(hw)) + real_addr = nbl_read_real_bar_base_addr(pdev); + else + err = nbl_mailbox_req_get_vf_bar_base_addr(hw, &real_addr); + + if (err) { + pr_err("Failed to get VF BAR base address when clear notify address\n"); + return; + } + + if (is_af(hw)) { + notify_addr = real_addr + NBL_PCOMPLETER_AF_NOTIFY_REG; + nbl_af_clear_qid_map(hw, 0, notify_addr); + } else { + notify_addr = real_addr; + nbl_mailbox_req_clear_qid_map(hw, notify_addr); + } +} + +void nbl_af_enable_promisc(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_pro_ctrl ctrl; + + rd32_for_each(hw, NBL_PRO_CTRL_REG, (u32 *)&ctrl, sizeof(ctrl)); + ctrl.mac_mismatch_drop_en &= ~BIT(eth_port_id); + wr32_for_each(hw, NBL_PRO_CTRL_REG, (u32 *)&ctrl, sizeof(ctrl)); +} + +void nbl_af_disable_promisc(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_pro_ctrl ctrl; + + rd32_for_each(hw, NBL_PRO_CTRL_REG, (u32 *)&ctrl, sizeof(ctrl)); + ctrl.mac_mismatch_drop_en |= BIT(eth_port_id); + wr32_for_each(hw, NBL_PRO_CTRL_REG, (u32 *)&ctrl, sizeof(ctrl)); +} + +void nbl_enable_promisc(struct nbl_hw *hw) +{ + struct nbl_adapter *adapter = hw->back; + u8 eth_port_id = hw->eth_port_id; + + if (is_vf(hw)) { + pr_info("VF is not allowed to set promiscuous mode\n"); + return; + } + + if (test_and_set_bit(NBL_PROMISC, adapter->state)) + return; + + if (is_af(hw)) + nbl_af_enable_promisc(hw, eth_port_id); + else + nbl_mailbox_req_enable_promisc(hw, eth_port_id); +} + +void nbl_disable_promisc(struct nbl_hw *hw) +{ + struct nbl_adapter *adapter = hw->back; + u8 eth_port_id = hw->eth_port_id; + + if (is_vf(hw)) { + pr_info("VF is not allowed to set promiscuous mode\n"); + return; + } + + if (!test_and_clear_bit(NBL_PROMISC, adapter->state)) + return; + + if (is_af(hw)) + nbl_af_disable_promisc(hw, eth_port_id); + else + nbl_mailbox_req_disable_promisc(hw, eth_port_id); +} + +void nbl_af_configure_ingress_eth_port_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_ingress_eth_port port_config; + struct nbl_ingress_eth_port_fwd port_fwd_config; + u32 reg; + + memset(&port_config, 0, sizeof(port_config)); + + port_config.default_vlan_en = 1; + port_config.default_vlanid = 0; + + port_config.vlan_check_en = 0; + + port_config.lag = 0; + + port_config.cos_map_mode = NBL_COS_MODE_DEFAULT_ETH_PRI; + port_config.default_pri = 7; + + port_config.veb_num = eth_port_id; + + reg = NBL_PRO_INGRESS_ETH_PORT_REG_ARR(eth_port_id); + wr32_for_each(hw, reg, (u32 *)&port_config, sizeof(port_config)); + + memset(&port_fwd_config, 0, sizeof(port_fwd_config)); + + port_fwd_config.dport = 
NBL_PORT_HOST; + port_fwd_config.dport_id = vsi_id; + port_fwd_config.fwd = NBL_INGRESS_FWD_NORMAL; + + reg = NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id); + wr32_for_each(hw, reg, (u32 *)&port_fwd_config, sizeof(port_fwd_config)); +} + +static void nbl_configure_ingress_eth_port_table(struct nbl_hw *hw) +{ + if (is_af(hw)) + nbl_af_configure_ingress_eth_port_table(hw, hw->eth_port_id, hw->vsi_id); + else + nbl_mailbox_req_cfg_ingress_eth_port_table(hw, hw->eth_port_id, hw->vsi_id); +} + +static void nbl_configure_egress_eth_port_table(struct nbl_hw __maybe_unused *hw) +{ +} + +static void nbl_configure_eth_port_table(struct nbl_hw *hw) +{ + if (is_vf(hw)) + return; + + nbl_configure_ingress_eth_port_table(hw); + nbl_configure_egress_eth_port_table(hw); +} + +void nbl_af_configure_src_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + u8 forward_ring_index = af_res->forward_ring_index; + struct nbl_src_vsi_port src_vsi_port_config; + + memset(&src_vsi_port_config, 0, sizeof(src_vsi_port_config)); + + src_vsi_port_config.default_vlanid = 0; + + src_vsi_port_config.vlan_check_en = 0; + + src_vsi_port_config.cos_map_mode = NBL_SRC_VSI_COS_MODE_DEFAULT_PORT_PRI; + src_vsi_port_config.default_pri = 7; + + if (vsi_id < NBL_MAX_PF_FUNC) { + src_vsi_port_config.mac_lut_en = 0; + } else { + src_vsi_port_config.mac_lut_en = 1; + src_vsi_port_config.forward_queue_id_en = 1; + src_vsi_port_config.forward_queue_id = forward_ring_index; + } + + src_vsi_port_config.lag = 0; + src_vsi_port_config.dport_id = eth_port_id; + + src_vsi_port_config.default_vlan_en = 1; + + src_vsi_port_config.vlan_push_en = 0; + + src_vsi_port_config.veb_num = eth_port_id; + + src_vsi_port_config.catch_vsi_idx = vsi_id; + + src_vsi_port_config.vlanid_match_en = 0; + + src_vsi_port_config.smac_match_en = 0; + + wr32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id), + (u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config)); +} + +static void nbl_configure_src_vsi_table(struct nbl_hw *hw) +{ + if (is_af(hw)) + nbl_af_configure_src_vsi_table(hw, hw->eth_port_id, hw->vsi_id); + else + nbl_mailbox_req_cfg_src_vsi_table(hw, hw->eth_port_id, hw->vsi_id); +} + +void nbl_af_configure_dest_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_dest_vsi_port dest_vsi_port_config; + + memset(&dest_vsi_port_config, 0, sizeof(dest_vsi_port_config)); + + dest_vsi_port_config.vlan_push_cnt = 0; + + dest_vsi_port_config.vsi_en = 1; + + dest_vsi_port_config.pkt_len_chk_en = 0; + + dest_vsi_port_config.pf_id = eth_port_id; + + wr32_for_each(hw, NBL_PRO_DEST_VSI_PORT_REG_ARR(vsi_id), + (u32 *)&dest_vsi_port_config, sizeof(dest_vsi_port_config)); +} + +static void nbl_configure_dest_vsi_table(struct nbl_hw *hw) +{ + if (is_af(hw)) + nbl_af_configure_dest_vsi_table(hw, hw->eth_port_id, hw->vsi_id); + else + nbl_mailbox_req_cfg_dest_vsi_table(hw, hw->eth_port_id, hw->vsi_id); +} + +static void nbl_configure_vsi_table(struct nbl_hw *hw) +{ + nbl_configure_src_vsi_table(hw); + nbl_configure_dest_vsi_table(hw); +} + +void nbl_datapath_init(struct nbl_hw *hw) +{ + struct nbl_adapter *adapter = hw->back; + + set_bit(NBL_PROMISC, adapter->state); + nbl_disable_promisc(hw); + + nbl_configure_eth_port_table(hw); + + nbl_configure_vsi_table(hw); +} + +bool nbl_af_query_link_status(struct nbl_hw *hw, u8 eth_port_id) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_loopback_mode loopback_mode; + struct nbl_eth_rx_stat rx_stat; + enum nbl_eth_speed_mode selected_speed; + 
enum nbl_eth_speed_mode current_speed; + bool link_up; + + rd32_for_each(hw, NBL_ETH_LOOPBACK_MODE_REG(eth_port_id), + (u32 *)&loopback_mode, sizeof(loopback_mode)); + selected_speed = loopback_mode.speed_sel; + current_speed = loopback_mode.speed_stat; + if (selected_speed != current_speed) { + dev_info(dev, "Selected speed %u doest not match current speed %u\n", + selected_speed, current_speed); + return false; + } + if (selected_speed == NBL_ETH_SPEED_MODE_25G) { + dev_info(dev, "25GE speed is not supported\n"); + return false; + } + + rd32_for_each(hw, NBL_ETH_RX_STAT_REG(eth_port_id), (u32 *)&rx_stat, + sizeof(rx_stat)); + if (selected_speed == NBL_ETH_SPEED_MODE_10G) + link_up = !!rx_stat.rx_status; + else + link_up = !!(rx_stat.ge_pcs_pma_status & + (1 << NBL_GE_PCS_PMA_LINK_STATUS_SHIFT)); + + return link_up; +} + +bool nbl_query_link_status(struct nbl_hw *hw) +{ + bool link_up; + + if (is_af(hw)) + link_up = nbl_af_query_link_status(hw, hw->eth_port_id); + else + link_up = nbl_mailbox_req_query_link_status(hw, hw->eth_port_id); + + return link_up; +} + +void nbl_query_link_status_subtask(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + bool link_up; + + if (test_bit(NBL_DOWN, adapter->state)) + return; + + link_up = nbl_query_link_status(hw); + if (link_up == netif_carrier_ok(netdev)) + return; + if (link_up) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); +} + +void nbl_af_init_pkt_len_limit(struct nbl_hw *hw, u8 eth_port_id, + struct nbl_pkt_len_limit pkt_len_limit) +{ + wr32_for_each(hw, NBL_ETH_PKT_LEN_LIMIT(eth_port_id), + (u32 *)&pkt_len_limit, sizeof(pkt_len_limit)); +} + +static void nbl_set_pkt_max_limit(struct nbl_hw *hw) +{ + wr32(hw, NBL_PRO_MAX_PKT_LEN_REG, NBL_URMUX_MAX_PKT_LEN); + wr32(hw, NBL_URMUX_PRO_MAX_PKT_KEN_REG, NBL_URMUX_MAX_PKT_LEN); + wr32(hw, NBL_URMUX_CFG_SYNC_REG, 0); + wr32(hw, NBL_URMUX_CFG_SYNC_REG, 1); +} + +void nbl_init_pkt_len_limit(struct nbl_hw *hw) +{ + struct nbl_pkt_len_limit pkt_len_limit = { 0 }; + + if (is_vf(hw)) + return; + + pkt_len_limit.max_pkt_len = NBL_MAX_FRAME_SIZE; + pkt_len_limit.min_pkt_len = NBL_MIN_FRAME_SIZE; + if (is_af(hw)) { + nbl_af_init_pkt_len_limit(hw, hw->eth_port_id, pkt_len_limit); + nbl_set_pkt_max_limit(hw); + } else { + nbl_mailbox_req_init_pkt_len_limit(hw, hw->eth_port_id, pkt_len_limit); + } +} + +int nbl_af_get_eth_stats(struct nbl_hw *hw, u8 eth_port_id, struct nbl_hw_stats *hw_stats) +{ + u64 value_low; + u64 value_high; + int i; + struct nbl_eth_reset_ctl_and_status eth_reset; + + for (i = 0; i < 3; i++) { + rd32_for_each(hw, NBL_ETH_RESET_REG(eth_port_id), (u32 *)ð_reset, + sizeof(struct nbl_eth_reset_ctl_and_status)); + if (eth_reset.eth_statistics_vld == 1) + break; + usleep_range(100000, 200000); + } + + if (i == 3) { + pr_warn("port %d wait statistics_vld timed out\n", eth_port_id); + return -ETIMEDOUT; + } + + value_low = rd32(hw, NBL_PED_ETH_PAUSE_TX_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_PED_ETH_PAUSE_TX_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_fc_pause = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_frame_error = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_total_good_packets = 
(value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_TOTAL_GOOD_BYTES_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_TOTAL_GOOD_BYTES_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_total_good_bytes = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_bad_fcs = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_UNICAST_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_UNICAST_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_unicast = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_MULTICAST_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_MULTICAST_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_multicast = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_BROADCAST_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_BROADCAST_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_broadcast = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_VLAN_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_VLAN_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_vlan = (value_high << 32) + value_low; + + /* read total stats lastly, ensure total stats is bigger than others */ + value_low = rd32(hw, NBL_ETH_TX_TOTAL_PKT_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_TOTAL_PKT_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_total_packets = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_TOTAL_BYTES_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_TX_TOTAL_BYTES_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->tx_total_bytes = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_PA_ETH_PAUSE_RX_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_PA_ETH_PAUSE_RX_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_fc_pause = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_BADCODE_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_BADCODE_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_bad_code = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_total_good_packets = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_total_good_bytes = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_bad_fcs = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_frame_err = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_UNICAST_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_UNICAST_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_unicast = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_MULTICAST_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_MULTICAST_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_multicast = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_BROADCAST_CNT_L_REG(eth_port_id)); + value_high 
= rd32(hw, NBL_ETH_RX_BROADCAST_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_broadcast = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_VLAN_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_VLAN_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_vlan = (value_high << 32) + value_low; + + /* read total stats lastly, ensure total stats is bigger than others */ + value_low = rd32(hw, NBL_ETH_RX_TOTAL_PKT_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_TOTAL_PKT_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_total_packets = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_TOTAL_BYTES_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_TOTAL_BYTES_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_total_bytes = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_OVERSIZE_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_OVERSIZE_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_oversize = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_RX_UNDERSIZE_CNT_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_ETH_RX_UNDERSIZE_CNT_H_REG(eth_port_id)) & 0xFFFF; + hw_stats->rx_undersize = (value_high << 32) + value_low; + + return 0; +} + +static inline u64 dec_compare48(struct nbl_hw *hw, u64 a, u64 b, char *reg) +{ + if (a >= b) + return (a - b); + + pr_info("Dec compare overflow correction, port: %d, reg: %s\n", + hw->eth_port_id, reg); + return (BIT_ULL(48) - b + a); +} + +static void nbl_correct_eth_stat(struct nbl_hw *hw, u64 *old, u64 *new, char *reg) +{ + u64 value; + + value = *new; + if (((value & 0xFFFFFFFF) == 0xDEADBEEF) || ((value & 0xFFFF00000000) == 0xBEEF00000000)) { + pr_warn("ETH port %d maybe read abnormal value %llx from reg %s\n", hw->eth_port_id, + *new, reg); + *new = *old; + } +} + +static void nbl_correct_eth_stats(struct nbl_hw *hw, struct nbl_hw_stats *hw_stats) +{ + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_total_packets, &hw_stats->tx_total_packets, + "tx_total_packets"); + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_total_good_packets, + &hw_stats->tx_total_good_packets, + "tx_total_good_packets"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_total_packets, &hw_stats->rx_total_packets, + "rx_total_packets"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_total_good_packets, + &hw_stats->rx_total_good_packets, + "rx_total_good_packets"); + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_bad_fcs, &hw_stats->tx_bad_fcs, + "tx_bad_fcs"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_bad_fcs, &hw_stats->rx_bad_fcs, + "rx_bad_fcs"); + + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_total_bytes, &hw_stats->tx_total_bytes, + "tx_total_bytes"); + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_total_good_bytes, &hw_stats->tx_total_good_bytes, + "tx_total_good_bytes"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_total_bytes, &hw_stats->rx_total_bytes, + "rx_total_bytes"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_total_good_bytes, &hw_stats->rx_total_good_bytes, + "rx_total_good_bytes"); + + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_frame_error, &hw_stats->tx_frame_error, + "tx_frame_error"); + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_unicast, &hw_stats->tx_unicast, + "tx_unicast"); + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_multicast, &hw_stats->tx_multicast, + "tx_multicast"); + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_broadcast, &hw_stats->tx_broadcast, + "tx_broadcast"); + nbl_correct_eth_stat(hw, &hw->hw_stats.tx_vlan, &hw_stats->tx_vlan, + "tx_vlan"); + nbl_correct_eth_stat(hw, 
&hw->hw_stats.tx_fc_pause, &hw_stats->tx_fc_pause, + "tx_fc_pause"); + + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_oversize, &hw_stats->rx_oversize, + "rx_oversize"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_undersize, &hw_stats->rx_undersize, + "rx_undersize"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_frame_err, &hw_stats->rx_frame_err, + "rx_frame_err"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_bad_code, &hw_stats->rx_bad_code, + "rx_bad_code"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_unicast, &hw_stats->rx_unicast, + "rx_unicast"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_multicast, &hw_stats->rx_multicast, + "rx_multicast"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_broadcast, &hw_stats->rx_broadcast, + "rx_broadcast"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_vlan, &hw_stats->rx_vlan, + "rx_vlan"); + nbl_correct_eth_stat(hw, &hw->hw_stats.rx_fc_pause, &hw_stats->rx_fc_pause, + "rx_fc_pause"); +} + +void nbl_update_stats_subtask(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_hw_stats hw_stats; + struct net_device *netdev = adapter->netdev; + struct nbl_ring *ring; + u8 ring_count; + u8 ring_index; + u64 tx_busy, tx_linearize, tx_dma_err; + u64 tx_csum_pkts = 0, rx_csum_pkts = 0; + u64 alloc_page_failed; + u64 alloc_skb_failed; + u64 rx_dma_err; + int ret; + + if (test_bit(NBL_DOWN, adapter->state) || + test_bit(NBL_RESETTING, adapter->state)) + return; + + if (is_af(hw)) + ret = nbl_af_get_eth_stats(hw, hw->eth_port_id, &hw_stats); + else + ret = nbl_mailbox_req_get_eth_stats(hw, hw->eth_port_id, &hw_stats); + + if (ret < 0) + memcpy(&hw_stats, &hw->hw_stats, sizeof(hw_stats)); + else + nbl_correct_eth_stats(hw, &hw_stats); + + mutex_lock(&adapter->stats.lock); + adapter->stats.tx_total_packets += dec_compare48(hw, hw_stats.tx_total_packets, + hw->hw_stats.tx_total_packets, + "tx_total_packets"); + adapter->stats.tx_total_good_packets += dec_compare48(hw, hw_stats.tx_total_good_packets, + hw->hw_stats.tx_total_good_packets, + "tx_total_good_packets"); + adapter->stats.tx_bad_fcs += dec_compare48(hw, hw_stats.tx_bad_fcs, + hw->hw_stats.tx_bad_fcs, + "tx_bad_fcs"); + adapter->stats.tx_total_bytes += dec_compare48(hw, hw_stats.tx_total_bytes, + hw->hw_stats.tx_total_bytes, + "tx_total_bytes"); + adapter->stats.tx_total_good_bytes += dec_compare48(hw, hw_stats.tx_total_good_bytes, + hw->hw_stats.tx_total_good_bytes, + "tx_total_good_bytes"); + adapter->stats.tx_frame_error += dec_compare48(hw, hw_stats.tx_frame_error, + hw->hw_stats.tx_frame_error, + "tx_frame_error"); + adapter->stats.tx_unicast += dec_compare48(hw, hw_stats.tx_unicast, + hw->hw_stats.tx_unicast, + "tx_unicast"); + adapter->stats.tx_multicast += dec_compare48(hw, hw_stats.tx_multicast, + hw->hw_stats.tx_multicast, + "tx_multicast"); + adapter->stats.tx_broadcast += dec_compare48(hw, hw_stats.tx_broadcast, + hw->hw_stats.tx_broadcast, + "tx_broadcast"); + adapter->stats.tx_vlan += dec_compare48(hw, hw_stats.tx_vlan, + hw->hw_stats.tx_vlan, + "tx_vlan"); + adapter->stats.tx_fc_pause += dec_compare48(hw, hw_stats.tx_fc_pause, + hw->hw_stats.tx_fc_pause, + "tx_fc_pause"); + + adapter->stats.rx_bad_code += dec_compare48(hw, hw_stats.rx_bad_code, + hw->hw_stats.rx_bad_code, + "rx_bad_code"); + adapter->stats.rx_total_packets += dec_compare48(hw, hw_stats.rx_total_packets, + hw->hw_stats.rx_total_packets, + "rx_total_packets"); + adapter->stats.rx_total_bytes += dec_compare48(hw, hw_stats.rx_total_bytes, + hw->hw_stats.rx_total_bytes, + "rx_total_bytes"); + 
adapter->stats.rx_total_good_packets += dec_compare48(hw, hw_stats.rx_total_good_packets, + hw->hw_stats.rx_total_good_packets, + "rx_total_good_packets"); + adapter->stats.rx_total_good_bytes += dec_compare48(hw, hw_stats.rx_total_good_bytes, + hw->hw_stats.rx_total_good_bytes, + "rx_total_good_bytes"); + adapter->stats.rx_bad_fcs += dec_compare48(hw, hw_stats.rx_bad_fcs, + hw->hw_stats.rx_bad_fcs, + "rx_bad_fcs"); + adapter->stats.rx_frame_err += dec_compare48(hw, hw_stats.rx_frame_err, + hw->hw_stats.rx_frame_err, + "rx_frame_err"); + adapter->stats.rx_unicast += dec_compare48(hw, hw_stats.rx_unicast, + hw->hw_stats.rx_unicast, + "rx_unicast"); + adapter->stats.rx_multicast += dec_compare48(hw, hw_stats.rx_multicast, + hw->hw_stats.rx_multicast, + "rx_multicast"); + adapter->stats.rx_broadcast += dec_compare48(hw, hw_stats.rx_broadcast, + hw->hw_stats.rx_broadcast, + "rx_broadcast"); + adapter->stats.rx_vlan += dec_compare48(hw, hw_stats.rx_vlan, + hw->hw_stats.rx_vlan, + "rx_vlan"); + adapter->stats.rx_oversize += dec_compare48(hw, hw_stats.rx_oversize, + hw->hw_stats.rx_oversize, + "rx_oversize"); + adapter->stats.rx_undersize += dec_compare48(hw, hw_stats.rx_undersize, + hw->hw_stats.rx_undersize, + "rx_undersize"); + adapter->stats.rx_fc_pause += dec_compare48(hw, hw_stats.rx_fc_pause, + hw->hw_stats.rx_fc_pause, + "rx_rc_pause"); + adapter->stats.tx_error_packets = adapter->stats.tx_bad_fcs + + adapter->stats.tx_frame_error; + adapter->stats.rx_error_packets = adapter->stats.rx_frame_err + + adapter->stats.rx_bad_fcs + + adapter->stats.rx_oversize + + adapter->stats.rx_undersize; + + memcpy(&hw->hw_stats, &hw_stats, sizeof(hw_stats)); + + netdev->stats.multicast = adapter->stats.rx_multicast; + netdev->stats.rx_errors = adapter->stats.rx_error_packets; + netdev->stats.tx_errors = adapter->stats.tx_error_packets; + netdev->stats.rx_length_errors = adapter->stats.rx_oversize + + adapter->stats.rx_undersize; + netdev->stats.rx_crc_errors = adapter->stats.rx_bad_fcs; + netdev->stats.rx_frame_errors = adapter->stats.rx_frame_err; + + ring_count = adapter->num_txq; + tx_busy = 0; + tx_linearize = 0; + tx_dma_err = 0; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = READ_ONCE(adapter->tx_rings[ring_index]); + if (!ring) + continue; + tx_busy += ring->tx_stats.tx_busy; + tx_linearize += ring->tx_stats.tx_linearize; + tx_csum_pkts += ring->tx_stats.tx_csum_pkts; + tx_dma_err += ring->tx_stats.tx_dma_err; + } + adapter->stats.tx_busy = tx_busy; + adapter->stats.tx_linearize = tx_linearize; + adapter->stats.tx_csum_pkts = tx_csum_pkts; + adapter->stats.tx_dma_err = tx_dma_err; + + ring_count = adapter->num_rxq; + alloc_page_failed = 0; + alloc_skb_failed = 0; + rx_dma_err = 0; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = READ_ONCE(adapter->rx_rings[ring_index]); + if (!ring) + continue; + rx_csum_pkts += ring->rx_stats.rx_csum_pkts; + alloc_page_failed += ring->rx_stats.alloc_page_failed; + alloc_skb_failed += ring->rx_stats.alloc_skb_failed; + rx_dma_err += ring->rx_stats.rx_dma_err; + } + adapter->stats.rx_csum_pkts = rx_csum_pkts; + adapter->stats.alloc_page_failed = alloc_page_failed; + adapter->stats.alloc_skb_failed = alloc_skb_failed; + adapter->stats.rx_dma_err = rx_dma_err; + + mutex_unlock(&adapter->stats.lock); +} + +void nbl_init_hw_stats(struct nbl_hw *hw) +{ + int ret; + + if (is_af(hw)) + ret = nbl_af_get_eth_stats(hw, hw->eth_port_id, &hw->hw_stats); + else + ret = nbl_mailbox_req_get_eth_stats(hw, hw->eth_port_id, 
&hw->hw_stats); + + if (ret < 0) + pr_err("nbl init hw_stat failed, port: %d\n", hw->eth_port_id); +} + +void nbl_reset_subtask(struct nbl_adapter *adapter) +{ + if (!test_and_clear_bit(NBL_RESET_REQUESTED, adapter->state)) + return; + + rtnl_lock(); + if (test_bit(NBL_DOWN, adapter->state) || + test_bit(NBL_RESETTING, adapter->state)) { + rtnl_unlock(); + return; + } + + adapter->stats.tx_timeout++; + + nbl_do_reset(adapter); + + rtnl_unlock(); +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/common.h b/drivers/net/ethernet/nebula-matrix/m1600/common.h new file mode 100644 index 0000000000000000000000000000000000000000..ddcc214d30188239e754ff5a87aaf10649f33669 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/common.h @@ -0,0 +1,493 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#ifndef _NBL_COMMON_H_ +#define _NBL_COMMON_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "hw.h" + +#define NBL_X4_DRIVER_NAME "m1600" +#define NBL_X4_DRIVER_VERSION "2.1.2" + +#define NBL_MAILBOX_QUEUE_LEN 256 +#define NBL_MAILBOX_BUF_LEN 4096 + +#define NBL_REG_WRITE_MAX_TRY_TIMES 5 + +#define NBL_LED_FLICKER_FREQUENCY (2) + +#define NBL_MAX_JUMBO_FRAME_SIZE (15872) +#define NBL_MAX_FRAME_SIZE (10000) +#define NBL_MIN_FRAME_SIZE (64) +#define NBL_MAX_MTU 9600 +#define NBL_VLAN_HLEN 4 +#define NBL_URMUX_MAX_PKT_LEN 10000 + +#define NBL_MODULE_SPEED_NOT_SUPPORT 0 +#define NBL_MODULE_SPEED_1G BIT(0) +#define NBL_MODULE_SPEED_10G BIT(1) + +struct nbl_mailbox_buf { + void *va; + dma_addr_t pa; + size_t size; +}; + +struct nbl_mailbox_tx_desc { + u16 flags; + u16 srcid; + u16 dstid; + u16 data_len; + u16 buf_len; + u64 buf_addr; + u16 msg_type; + u8 data[16]; + u8 rsv[28]; +} __packed; + +struct nbl_mailbox_rx_desc { + u16 flags; + u32 buf_len; + u16 buf_id; + u64 buf_addr; +} __packed; + +struct nbl_mailbox_ring { + void *desc; + struct nbl_mailbox_buf *buf; + + u16 next_to_use; + u16 tail_ptr; + u16 next_to_clean; + + dma_addr_t dma; +}; + +#define NBL_STRING_NAME_LEN 32 + +struct nbl_mailbox_info { + struct nbl_mailbox_ring txq; + struct nbl_mailbox_ring rxq; + + /* For mailbox txq */ + spinlock_t txq_lock; + + /* For send msg */ + struct mutex send_normal_msg_lock; + int acked; + int ack_err; + unsigned int ack_req_msg_type; + char *ack_data; + u16 ack_data_len; + + u16 num_txq_entries; + u16 num_rxq_entries; + u16 txq_buf_size; + u16 rxq_buf_size; + + char name[NBL_STRING_NAME_LEN]; +}; + +struct nbl_msix_map_table { + struct nbl_msix_map *base_addr; + dma_addr_t dma; + size_t size; +}; + +struct nbl_func_res { + u8 num_txrx_queues; + u8 *txrx_queues; + u16 num_interrupts; + u16 *interrupts; + + struct nbl_msix_map_table msix_map_table; + + u16 macvlan_start_index; + u16 num_macvlan_entries; + u8 eth_port_id; + u8 mac_addr[ETH_ALEN]; + s16 vlan_ids[NBL_PF_MAX_MACVLAN_ENTRIES]; +}; + +enum nbl_func_type { + NBL_X4_AF, + NBL_X4_PF, + NBL_X4_VF, +}; + +struct nbl_fc_info { + u32 rx_pause; + u32 tx_pause; +}; + +struct nbl_hw_stats { + u64 tx_total_packets; + u64 tx_total_good_packets; + u64 rx_total_packets; + u64 rx_total_good_packets; + u64 tx_bad_fcs; + u64 rx_bad_fcs; + + u64 tx_total_bytes; + u64 tx_total_good_bytes; + u64 rx_total_bytes; + u64 rx_total_good_bytes; + + u64 tx_frame_error; + u64 tx_unicast; + u64 tx_multicast; + u64 tx_broadcast; + u64 tx_vlan; + u64 tx_fc_pause; + + u64 rx_oversize; + u64 rx_undersize; + u64 rx_frame_err; + u64 rx_bad_code; + u64 rx_unicast; + 
u64 rx_multicast; + u64 rx_broadcast; + u64 rx_vlan; + u64 rx_fc_pause; +}; + +struct nbl_stats { + /* for nbl status consistent */ + struct mutex lock; + u64 tx_total_packets; + u64 tx_total_good_packets; + u64 tx_total_bytes; + u64 tx_total_good_bytes; + u64 tx_error_packets; + u64 tx_bad_fcs; + u64 tx_frame_error; + u64 tx_unicast; + u64 tx_multicast; + u64 tx_broadcast; + u64 tx_vlan; + u64 tx_fc_pause; + + u64 rx_total_packets; + u64 rx_total_good_packets; + u64 rx_total_bytes; + u64 rx_total_good_bytes; + u64 rx_error_packets; + u64 rx_bad_fcs; + u64 rx_oversize; + u64 rx_undersize; + u64 rx_frame_err; + u64 rx_bad_code; + u64 rx_unicast; + u64 rx_multicast; + u64 rx_broadcast; + u64 rx_vlan; + u64 rx_fc_pause; + + u64 tx_busy; + u64 tx_linearize; + u64 tx_timeout; + u64 tx_csum_pkts; + u64 rx_csum_pkts; + u64 tx_dma_err; + u64 alloc_page_failed; + u64 alloc_skb_failed; + u64 rx_dma_err; + + u64 err_status_reset; + u64 bad_code_reset; +}; + +struct nbl_vf_bar_info { + u64 vf_bar_start; + u64 vf_bar_len; +}; + +struct nbl_af_res_info { + /* For function resource */ + spinlock_t func_res_lock; + DECLARE_BITMAP(interrupt_bitmap, NBL_MAX_INTERRUPT); + DECLARE_BITMAP(txrx_queue_bitmap, NBL_MAX_TXRX_QUEUE); + struct nbl_qid_map qid_map_table[NBL_QID_MAP_TABLE_ENTRIES]; + int qid_map_ready; + int qid_map_select; + struct nbl_func_res *res_record[NBL_MAX_FUNC]; + + struct nbl_vf_bar_info vf_bar_info[NBL_MAX_PF_FUNC]; + + u8 forward_ring_index; + + atomic_t eth_port_tx_refcount[NBL_ETH_PORT_NUM]; + atomic_t eth_port_rx_refcount[NBL_ETH_PORT_NUM]; +}; + +struct nbl_hw { + u8 __iomem *hw_addr; + void *back; + + u8 function; + u8 devid; + u8 bus; + + enum nbl_func_type func_type; + + u8 vsi_id; + u8 eth_port_id; + + u8 __iomem *msix_bar_hw_addr; + + bool module_inplace; + u8 module_support_speed; + + u8 __iomem *mailbox_bar_hw_addr; + struct nbl_mailbox_info mailbox; + + struct nbl_af_res_info *af_res; + + struct nbl_fc_info fc; + + struct nbl_hw_stats hw_stats; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + + /* debugfs */ + struct dentry *nbl_debug_root; + + int debugfs_reg_bar; + long debugfs_reg_offset; + long debugfs_reg_length; +}; + +enum nbl_adapter_state { + NBL_DOWN, + NBL_MAILBOX_READY, + NBL_MAILBOX_EVENT_PENDING, + NBL_RESETTING, + NBL_RESET_REQUESTED, + NBL_PROMISC, + NBL_STATE_NBITS, +}; + +struct nbl_healing_var { + u64 former_bad_code; + int bad_code_increase; + int status_chk_timer; +}; + +struct nbl_adapter { + struct nbl_hw hw; + struct pci_dev *pdev; + struct net_device *netdev; + + u8 num_txq; + u8 num_rxq; + u16 tx_desc_num; + u16 rx_desc_num; + + struct msix_entry *msix_entries; + u16 num_lan_msix; + u16 num_mailbox_msix; + + struct nbl_ring **tx_rings; + struct nbl_ring **rx_rings; + + u16 num_q_vectors; + struct nbl_q_vector **q_vectors; + + DECLARE_BITMAP(state, NBL_STATE_NBITS); + + unsigned long serv_timer_period; + struct timer_list serv_timer; + struct work_struct serv_task1; + struct work_struct serv_task2; + + struct nbl_stats stats; + + struct nbl_healing_var healing_var; + + struct device *hwmon_dev; + + u32 msg_enable; + + u32 flags; +}; + +static inline bool is_af(struct nbl_hw *hw) +{ + return hw->func_type == NBL_X4_AF; +} + +static inline bool is_vf(struct nbl_hw *hw) +{ + return hw->func_type == NBL_X4_VF; +} + +#define nbl_adapter_to_dev(adapter) (&((adapter)->pdev->dev)) +#define nbl_hw_to_dev(hw) nbl_adapter_to_dev((struct nbl_adapter *)((hw)->back)) + +#define wr32(hw, reg, value) writel((value), 
((hw)->hw_addr + (reg))) +#define rd32(hw, reg) readl((hw)->hw_addr + (reg)) +#define wr32_for_each(hw, reg, value, size) \ + do { \ + int __n; \ + for (__n = 0; __n < (size); __n += 4) \ + wr32((hw), (reg) + __n, (value)[__n / 4]); \ + } while (0) +#define rd32_for_each(hw, reg, value, size) \ + do { \ + int __n; \ + for (__n = 0; __n < (size); __n += 4) \ + (value)[__n / 4] = rd32((hw), (reg) + __n); \ + } while (0) +#define wr32_zero_for_each(hw, reg, size) \ + do { \ + int __n; \ + for (__n = 0; __n < (size); __n += 4) \ + wr32((hw), (reg) + __n, 0); \ + } while (0) + +#define NBL_WRITE_VERIFY_MAX_TIMES (5) + +static inline void wr32_and_verify(struct nbl_hw *hw, u64 reg, u32 value) +{ + u32 read_value; + int i = 0; + + while (likely(i < NBL_WRITE_VERIFY_MAX_TIMES)) { + wr32(hw, reg, value); + read_value = rd32(hw, reg); + if (read_value == value) + return; + i++; + } + pr_err("Write to register addr %llx failed\n", reg); +} + +#define mb_wr32(hw, reg, value) writel((value), ((hw)->mailbox_bar_hw_addr + (reg))) +#define mb_rd32(hw, reg) readl((hw)->mailbox_bar_hw_addr + (reg)) +#define mb_wr32_for_each(hw, reg, value, size) \ + do { \ + int __n; \ + for (__n = 0; __n < (size); __n += 4) \ + mb_wr32((hw), (reg) + __n, (value)[__n / 4]); \ + } while (0) +#define mb_rd32_for_each(hw, reg, value, size) \ + do { \ + int __n; \ + for (__n = 0; __n < (size); __n += 4) \ + (value)[__n / 4] = mb_rd32((hw), (reg) + __n); \ + } while (0) + +#define msix_wr32(hw, reg, value) writel((value), ((hw)->msix_bar_hw_addr + (reg))) + +void nbl_service_task1_schedule(struct nbl_adapter *adapter); +void nbl_service_task_schedule(struct nbl_adapter *adapter); + +void nbl_firmware_init(struct nbl_hw *hw); + +void nbl_af_configure_captured_packets(struct nbl_hw *hw); +void nbl_af_clear_captured_packets_conf(struct nbl_hw *hw); + +u32 nbl_af_get_firmware_version(struct nbl_hw *hw); + +int nbl_af_res_mng_init(struct nbl_hw *hw); +void nbl_af_free_res(struct nbl_hw *hw); + +void nbl_af_compute_bdf(struct nbl_hw *hw, u16 func_id, + u8 *bus, u8 *devid, u8 *function); + +bool nbl_check_golden_version(struct nbl_hw *hw); + +int nbl_af_configure_func_msix_map(struct nbl_hw *hw, u16 func_id, u16 requested); +void nbl_af_destroy_func_msix_map(struct nbl_hw *hw, u16 func_id); + +int nbl_configure_msix_map(struct nbl_hw *hw); +void nbl_destroy_msix_map(struct nbl_hw *hw); + +int nbl_af_configure_qid_map(struct nbl_hw *hw, u16 func_id, u8 num_queues, u64 notify_addr); +void nbl_af_clear_qid_map(struct nbl_hw *hw, u16 func_id, u64 notify_addr); + +int nbl_get_vsi_id(struct nbl_hw *hw); + +void nbl_af_register_vf_bar_info(struct nbl_hw *hw, u16 func_id, + u64 vf_bar_start, u64 vf_bar_len); +int nbl_register_vf_bar_info(struct nbl_hw *hw); + +u64 nbl_af_compute_vf_bar_base_addr(struct nbl_hw *hw, u16 func_id); + +int nbl_configure_notify_addr(struct nbl_hw *hw); +void nbl_clear_notify_addr(struct nbl_hw *hw); + +void nbl_af_disable_promisc(struct nbl_hw *hw, u8 eth_port_id); +void nbl_disable_promisc(struct nbl_hw *hw); +void nbl_af_enable_promisc(struct nbl_hw *hw, u8 eth_port_id); +void nbl_enable_promisc(struct nbl_hw *hw); + +void nbl_af_configure_ingress_eth_port_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +void nbl_af_configure_src_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +void nbl_af_configure_dest_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +void nbl_datapath_init(struct nbl_hw *hw); + +int nbl_af_get_board_info(struct nbl_hw *hw, u8 eth_port_id, union nbl_board_info *board_info); 
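+/*
+ * Naming convention used throughout this header: the nbl_af_*() helpers
+ * access hardware registers directly and are only called on the AF (see
+ * is_af()), while the corresponding non-AF paths forward the same request
+ * to the AF over the mailbox via the matching nbl_mailbox_req_*() helpers.
+ */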
+ +bool nbl_af_query_link_status(struct nbl_hw *hw, u8 eth_port_id); +bool nbl_query_link_status(struct nbl_hw *hw); +void nbl_query_link_status_subtask(struct nbl_adapter *adapter); + +void nbl_af_set_pauseparam(struct nbl_hw *hw, u8 eth_port_id, struct nbl_fc_info fc); + +void nbl_af_write_mac_to_logic(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr); +void nbl_write_mac_to_logic(struct nbl_hw *hw, u8 *mac_addr); + +void nbl_af_init_pkt_len_limit(struct nbl_hw *hw, u8 eth_port_id, + struct nbl_pkt_len_limit pkt_len_limit); +void nbl_init_pkt_len_limit(struct nbl_hw *hw); + +int nbl_af_get_eth_stats(struct nbl_hw *hw, u8 eth_port_id, struct nbl_hw_stats *hw_stats); + +void nbl_update_stats_subtask(struct nbl_adapter *adapter); +void nbl_init_hw_stats(struct nbl_hw *hw); + +void nbl_reset_subtask(struct nbl_adapter *adapter); + +int nbl_stop(struct net_device *netdev); +int nbl_open(struct net_device *netdev); + +void nbl_do_reset(struct nbl_adapter *adapter); + +enum NBL_MODULE_INPLACE_STATUS nbl_af_check_module_inplace(struct nbl_hw *hw, u8 eth_port_id); + +int nbl_af_config_module_speed(struct nbl_hw *hw, u8 target_speed, u8 eth_port_id); + +void nbl_set_module_speed(struct nbl_hw *hw, u8 target_speed); + +void nbl_af_configure_fc_cplh_up_th(struct nbl_hw *hw); + +u32 nbl_af_get_rxlos(struct nbl_hw *hw, u8 eth_port_id); + +void nbl_af_reset_eth(struct nbl_hw *hw, u8 eth_port_id); + +#ifdef CONFIG_NBL_DEBUGFS +void nbl_debugfs_init(void); +void nbl_debugfs_exit(void); +void nbl_debugfs_hw_init(struct nbl_hw *hw); +void nbl_debugfs_hw_exit(struct nbl_hw *hw); +#else +static inline void nbl_debugfs_init(void) {} +static inline void nbl_debugfs_exit(void) {} +static inline void nbl_debugfs_hw_init(struct nbl_hw *hw) {} +static inline void nbl_debugfs_hw_exit(struct nbl_hw *hw) {} +#endif + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/debug.c b/drivers/net/ethernet/nebula-matrix/m1600/debug.c new file mode 100644 index 0000000000000000000000000000000000000000..cc6da4736ae9b11bdfb77beaf506a89b75afff87 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/debug.c @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: David Miao + */ + +#ifdef CONFIG_NBL_DEBUGFS +#include +#include +#include +#include +#include +#include +#include + +#include "hw.h" +#include "common.h" +#include "ethtool.h" +#include "interrupt.h" +#include "txrx.h" +#include "mailbox.h" +#include "hwmon.h" + +static struct dentry *nblx4_debug_root; + +#define SINGLE_FOPS_RW(_fops_, _open_, _write_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .write = _write_, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = seq_release, \ + } + +#define SINGLE_FOPS_RO(_fops_, _open_) \ + static const struct file_operations _fops_ = { \ + .open = _open_, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = seq_release, \ + } + +/* dvn */ +static int dvn_seq_show(struct seq_file *m, void *v) +{ + int i, j; + struct nbl_hw *hw = m->private; + struct tx_queue_info q; + struct nbl_tx_queue_stat qs; + + for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) { + rd32_for_each(hw, NBL_DVN_QUEUE_INFO_ARR(i), + (u32 *)&q, sizeof(struct tx_queue_info)); + seq_printf(m, "QueueID: %03d - ", i); + for (j = 0; j < sizeof(struct tx_queue_info) / sizeof(u32); j++) + seq_printf(m, "%08X ", ((u32 *)&q)[j]); + seq_printf(m, "size:%d ", q.log2_size); + seq_printf(m, "vsi_idx:%d ", q.src_vsi_idx); + seq_printf(m, "pri:%d ", q.priority); + seq_printf(m, "en:%d ", q.enable); + seq_printf(m, "tail_ptr:%d ", q.tail_ptr); + seq_printf(m, "head_ptr:%d\n", q.head_ptr); + } + seq_puts(m, "\n"); + + seq_puts(m, "=== statistics ===\n"); + for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) { + rd32_for_each(hw, NBL_DVN_QUEUE_STAT_REG_ARR(i), + (u32 *)&qs, sizeof(struct nbl_tx_queue_stat)); + seq_printf(m, "QueueID: %03d - ", i); + seq_printf(m, "pkt_get: %d ", qs.pkt_get); + seq_printf(m, "pkt_out: %d ", qs.pkt_out); + seq_printf(m, "pkt_drop: %d ", qs.pkt_drop); + seq_printf(m, "sw_notify: %d ", qs.sw_notify); + seq_printf(m, "pkt_dsch: %d ", qs.pkt_dsch); + seq_printf(m, "hd_notify: %d ", qs.hd_notify); + seq_printf(m, "hd_notify_empty: %d\n", qs.hd_notify_empty); + } + + return 0; +} + +static int debugfs_dvn_open(struct inode *inode, struct file *file) +{ + return single_open(file, dvn_seq_show, inode->i_private); +} + +SINGLE_FOPS_RO(dvn_fops, debugfs_dvn_open); + +/* uvn */ +#define TABLE_UVN_ATTR(n, b, l) \ + { .name = n, .base = NBL_UVN_MODULE + (b), .len = l, } +static struct uvn_table { + char *name; + long base; + int len; +} tables[] = { + TABLE_UVN_ATTR("rd_diff_err_state", 0x2000, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_pkt_drop", 0x3000, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_desc_no_available", 0x3200, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_pkt_in_cnt", 0x3400, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_pkt_out_cnt", 0x3600, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_desc_rd_cnt", 0x3800, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_desc_wb_cnt", 0x3A00, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_notify_cnt", 0x3C00, NBL_MAX_TXRX_QUEUE), + TABLE_UVN_ATTR("queue_desc_merge_cnt", 0x3E00, NBL_MAX_TXRX_QUEUE), +}; + +static int uvn_seq_show(struct seq_file *m, void *v) +{ + int i, j; + struct nbl_hw *hw = m->private; + struct rx_queue_info q; + + for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) { + rd32_for_each(hw, NBL_UVN_QUEUE_INFO_ARR(i), + (u32 *)&q, sizeof(struct rx_queue_info)); + seq_printf(m, "QueueID: %03d - ", i); + for (j = 0; j < sizeof(struct rx_queue_info) / sizeof(u32); j++) + seq_printf(m, "%08X ", ((u32 *)&q)[j]); + seq_printf(m, "size:%d ", q.log2_size); + seq_printf(m, "buf_len:%d ", 
q.buf_length_pow); + seq_printf(m, "en:%d ", q.enable); + seq_printf(m, "tail_ptr:%d ", q.tail_ptr); + seq_printf(m, "head_ptr:%d\n", q.head_ptr); + } + seq_puts(m, "\n"); + + #define LINE_RECORD_NUM 8 + for (i = 0; i < ARRAY_SIZE(tables); i++) { + seq_printf(m, "=== %s ===\n", tables[i].name); + for (j = 0; j < tables[i].len; j++) { + if (j % LINE_RECORD_NUM == 0) + seq_printf(m, "QueueID %03d:", j); + seq_printf(m, " %d", rd32(hw, tables[i].base + j * 4)); + if (((j + 1) % LINE_RECORD_NUM == 0) || ((j + 1) == LINE_RECORD_NUM)) + seq_puts(m, "\n"); + } + if ((i + 1) != ARRAY_SIZE(tables)) + seq_puts(m, "\n"); + } + + return 0; +} + +static int debugfs_uvn_open(struct inode *inode, struct file *file) +{ + return single_open(file, uvn_seq_show, inode->i_private); +} + +SINGLE_FOPS_RO(uvn_fops, debugfs_uvn_open); + +/* nic statistics */ +static int nic_statistics_seq_show(struct seq_file *m, void *v) +{ + int epid; + struct nbl_hw *hw = m->private; + + WARN_ON(!is_af(hw)); + + for (epid = 0; epid < 4; epid++) { + seq_printf(m, "======== port %d ========\n", epid); + + /* tx */ + seq_printf(m, "tx_total_packets=%lld\n", + ((u64)rd32(hw, NBL_ETH_TX_TOTAL_PKT_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_TX_TOTAL_PKT_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "tx_total_bytes=%lld\n", + ((u64)rd32(hw, NBL_ETH_TX_TOTAL_BYTES_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_TX_TOTAL_BYTES_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "tx_total_good_packets=%lld\n", + ((u64)rd32(hw, NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "tx_frame_error=%lld\n", + ((u64)rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "tx_bad_fcs=%lld\n", + ((u64)rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_bad_code=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_BADCODE_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_BADCODE_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_puts(m, "-----\n"); + + /* rx */ + seq_printf(m, "rx_total_packets=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_TOTAL_PKT_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_TOTAL_PKT_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_total_bytes=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_TOTAL_BYTES_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_TOTAL_BYTES_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_total_good_packets=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_total_good_bytes=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_frame_err=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_bad_fcs=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_oversize=%lld\n", + ((u64)rd32(hw, NBL_ETH_RX_OVERSIZE_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_OVERSIZE_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + seq_printf(m, "rx_undersize=%lld\n", + ((u64)rd32(hw, 
NBL_ETH_RX_UNDERSIZE_CNT_L_REG(epid)) | + (((u64)rd32(hw, NBL_ETH_RX_UNDERSIZE_CNT_H_REG(epid)) & + 0xFFFF) << 32))); + + if (epid != 3) + seq_puts(m, "\n"); + } + + return 0; +} + +static int debugfs_nic_statistics_open(struct inode *inode, struct file *file) +{ + return single_open(file, nic_statistics_seq_show, inode->i_private); +} + +SINGLE_FOPS_RO(nic_statistics_fops, debugfs_nic_statistics_open); + +/* ring */ +static int ring_seq_show(struct seq_file *m, void *v) +{ + int i, j, n; + struct nbl_rx_desc *rx_desc; + struct nbl_tx_desc *tx_desc; + struct nbl_ring *ring = m->private; + + seq_printf(m, "size=%d\n", ring->size); + seq_printf(m, "dma=0x%llX\n", (unsigned long long)ring->dma); + seq_printf(m, "desc=0x%llX\n", (unsigned long long)ring->desc); + seq_printf(m, "desc_num=%d\n", ring->desc_num); + seq_printf(m, "local_qid=%d\n", ring->local_qid); + seq_printf(m, "queue_index=%d\n", ring->queue_index); + seq_printf(m, "notify_addr=0x%llX\n", + (unsigned long long)ring->notify_addr); + seq_printf(m, "buf_len=%d\n", ring->buf_len); + seq_printf(m, "next_to_use=%d\n", ring->next_to_use); + seq_printf(m, "next_to_clean=%d\n", ring->next_to_clean); + seq_printf(m, "next_to_alloc=%d\n", ring->next_to_alloc); + seq_printf(m, "tail_ptr=%d\n", ring->tail_ptr); + if (!ring->desc) { + seq_puts(m, "[Unallocated]\n"); + return 0; + } + + if (ring->local_qid & 1) { + tx_desc = (struct nbl_tx_desc *)ring->desc; + n = sizeof(struct nbl_tx_desc) / sizeof(u32); + for (i = 0; i < ring->desc_num; i++) { + seq_printf(m, "[desc-%03d]: ", i); + for (j = 0; j < n; j++) + seq_printf(m, "%08X ", ((u32 *)tx_desc)[j]); + seq_printf(m, "dlen:%d ", tx_desc->data_len); + seq_printf(m, "plen:%d ", tx_desc->pkt_len); + seq_printf(m, "dd:%d ", tx_desc->dd); + seq_printf(m, "eop:%d ", tx_desc->eop); + seq_printf(m, "sop:%d ", tx_desc->sop); + seq_printf(m, "fwd:%d ", tx_desc->fwd); + seq_printf(m, "dp:%d ", tx_desc->dport); + seq_printf(m, "dpi:%d ", tx_desc->dport_id); + seq_printf(m, "l3c:%d ", tx_desc->l3_checksum); + seq_printf(m, "l4c:%d ", tx_desc->l4_checksum); + seq_printf(m, "rsslag:%d ", tx_desc->rss_lag); + seq_printf(m, "l3_off:%d\n", tx_desc->l3_start_offset); + tx_desc++; + } + } else { + rx_desc = (struct nbl_rx_desc *)ring->desc; + n = sizeof(struct nbl_rx_desc) / sizeof(u32); + for (i = 0; i < ring->desc_num; i++) { + seq_printf(m, "[desc-%03d]: ", i); + for (j = 0; j < n; j++) + seq_printf(m, "%08X ", ((u32 *)rx_desc)[j]); + seq_printf(m, "dlen:%d ", rx_desc->data_len); + seq_printf(m, "dd:%d ", rx_desc->dd); + seq_printf(m, "eop:%d ", rx_desc->eop); + seq_printf(m, "sop:%d ", rx_desc->sop); + seq_printf(m, "fwd:%d ", rx_desc->fwd); + seq_printf(m, "sp:%d ", rx_desc->sport); + seq_printf(m, "spi:%d ", rx_desc->sport_id); + seq_printf(m, "cks:%d ", rx_desc->checksum_status); + seq_printf(m, "ptype:%d ", rx_desc->ptype); + seq_printf(m, "lag:%d ", rx_desc->lag); + seq_printf(m, "lagid:%d\n", rx_desc->lag_id); + rx_desc++; + } + } + + return 0; +} + +static int debugfs_ring_open(struct inode *inode, struct file *file) +{ + return single_open(file, ring_seq_show, inode->i_private); +} + +SINGLE_FOPS_RO(ring_fops, debugfs_ring_open); + +/* function_msix_map_table */ +static int tables_seq_show(struct seq_file *m, void *v) +{ + int i, j, k; + struct nbl_hw *hw; + struct nbl_adapter *adapter; + struct nbl_function_msix_map function_msix_map; + struct nbl_qid_map qid_map; + struct nbl_msix_entry msix_entry; + struct nbl_msix_info msix_info; + struct nbl_queue_map queue_map; + + hw = m->private; + adapter 
= (struct nbl_adapter *)hw->back; + + seq_puts(m, "===== function_msix_map_table at 0x0013_4000 =====\n"); + for (i = 0; i < NBL_MAX_FUNC; i++) { + struct nbl_func_res *funs_res = hw->af_res->res_record[i]; + + rd32_for_each(hw, NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(i), + (u32 *)&function_msix_map, + sizeof(struct nbl_function_msix_map)); + seq_printf(m, "[%03d] base:0x%llX bus:%d dev:%d func:%d valid:%d\n", + i, + function_msix_map.msix_map_base_addr, + function_msix_map.bus, + function_msix_map.devid, + function_msix_map.function, + function_msix_map.valid); + + if (funs_res) { + seq_printf(m, " queues:%d irqs:%d\n", + funs_res->num_txrx_queues, funs_res->num_interrupts); + + for (j = 0; j < adapter->num_q_vectors + 1; j++) { + seq_printf(m, " [%03d] global_msix_index:%d valid:%d\n", j, + funs_res->msix_map_table.base_addr[j].global_msix_index, + funs_res->msix_map_table.base_addr[j].valid); + } + } + } + seq_puts(m, "\n"); + + for (k = 0; k < 2; k++) { + seq_printf(m, "===== qid_map_table %d at 0x0013_8000 now %d =====\n", + k, rd32(hw, NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG) & 1); + for (i = 0; i < NBL_MAX_TXRX_QUEUE; i++) { + rd32_for_each(hw, NBL_PCOMPLETER_QID_MAP_REG_ARR(k, i), + (u32 *)&qid_map, sizeof(struct nbl_qid_map)); + seq_printf(m, "[%03d] local_qid:%d notify_addr_l:0x%X notify_addr_h:0x%X global_qid:%d notify_addr:0x%llX\n", + i, + qid_map.local_qid, + qid_map.notify_addr_l, + qid_map.notify_addr_h, + qid_map.global_qid, + (((u64)qid_map.notify_addr_h << 27) | + qid_map.notify_addr_l) << 5); + } + seq_puts(m, "\n"); + } + + seq_puts(m, "===== msix_table at 0x0015_4000 =====\n"); + for (i = 0; i < NBL_MAX_INTERRUPT; i++) { + rd32_for_each(hw, NBL_PADPT_MSIX_TABLE_REG_ADDR(i), + (u32 *)&msix_entry, sizeof(struct nbl_msix_entry)); + seq_printf(m, "[%03d] addr:0x%016llX msg_data:%d mask:%d\n", i, + ((u64)msix_entry.upper_address << 32) | msix_entry.lower_address, + msix_entry.message_data, msix_entry.vector_mask); + } + seq_puts(m, "\n"); + + seq_puts(m, "===== msix_info_table at 0x0015_8000 =====\n"); + for (i = 0; i < NBL_MAX_INTERRUPT; i++) { + rd32_for_each(hw, NBL_PADPT_MSIX_INFO_REG_ARR(i), + (u32 *)&msix_info, sizeof(struct nbl_msix_info)); + seq_printf(m, "[%03d] intrl_pnum:%d intrl_rate:%d bus:%d dev:%d func:%d valid:%d\n", + i, + msix_info.intrl_pnum, msix_info.intrl_rate, + msix_info.bus, msix_info.devid, msix_info.function, msix_info.valid); + } + seq_puts(m, "\n"); + + seq_puts(m, "===== queue_map_table at 0x0015_C000 =====\n"); + for (i = 0; i < NBL_MAX_TXRX_QUEUE * 2; i++) { + rd32_for_each(hw, NBL_PADPT_QUEUE_MAP_REG_ARR(i), + (u32 *)&queue_map, sizeof(struct nbl_queue_map)); + seq_printf(m, "[%03d] bus:%d dev:%d func:%d msix_idx:%d valid:%d\n", i, + queue_map.bus, queue_map.devid, queue_map.function, + queue_map.msix_idx, queue_map.msix_idx_valid); + } + + return 0; +} + +static int debugfs_tables_open(struct inode *inode, struct file *file) +{ + return single_open(file, tables_seq_show, inode->i_private); +} + +SINGLE_FOPS_RO(tables_fops, debugfs_tables_open); + +/* bar */ +static int bar_seq_show(struct seq_file *m, void *v) +{ + struct nbl_hw *hw = m->private; + struct nbl_adapter *adapter = hw->back; + + seq_printf(m, "BAR0 - phy: 0x%llX virt: 0x%llX len: 0x%llX\n", + pci_resource_start(adapter->pdev, NBL_X4_MEMORY_BAR), + (u64)hw->hw_addr, + pci_resource_len(adapter->pdev, NBL_X4_MEMORY_BAR)); + seq_printf(m, "BAR2 - phy: 0x%llX virt: 0x%llX len: 0x%llX\n", + pci_resource_start(adapter->pdev, NBL_X4_MAILBOX_BAR), + (u64)hw->mailbox_bar_hw_addr, + 
pci_resource_len(adapter->pdev, NBL_X4_MAILBOX_BAR)); + + return 0; +} + +static int debugfs_bar_open(struct inode *inode, struct file *file) +{ + return single_open(file, bar_seq_show, inode->i_private); +} + +SINGLE_FOPS_RO(bar_fops, debugfs_bar_open); + +/* register + * echo offset > register - BAR 0 and 4B + * echo offset,length > register - BAR 0 and length + * echo bB,offset > register - BAR B (0 or 2) and 4B + * echo bB,offset,length > register - BAR B (0 or 2) and length + */ +static int register_seq_show(struct seq_file *m, void *v) +{ + int i; + struct nbl_hw *hw = m->private; + + seq_printf(m, "BAR %d off 0x%lX len 0x%lX:\n", + hw->debugfs_reg_bar, hw->debugfs_reg_offset, hw->debugfs_reg_length); + for (i = 0; i < hw->debugfs_reg_length; i += 4) { + seq_printf(m, "[%08X]: ", (unsigned int)hw->debugfs_reg_offset + i); + if (hw->debugfs_reg_bar == 0) + seq_printf(m, "%08X\n", rd32(hw, hw->debugfs_reg_offset + i)); + else if (hw->debugfs_reg_bar == 2) + seq_printf(m, "%08X\n", mb_rd32(hw, hw->debugfs_reg_offset + i)); + } + + return 0; +} + +static ssize_t debugfs_register_write(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + int err; + char *p, *p1, line[16] = { 0, }; + struct nbl_hw *hw = ((struct seq_file *)(file->private_data))->private; + + if (copy_from_user(line, buf, count)) + return -EFAULT; + + p = line; + /* BAR */ + if (line[0] == 'b') { + if (line[2] != ',') + return -EINVAL; + if (line[1] == '0') + hw->debugfs_reg_bar = 0; + else if (line[1] == '2') + hw->debugfs_reg_bar = 2; + else + return -EINVAL; + p = line + 3; + } + /* offset */ + p1 = strchr(p, ','); + if (p1) { + *p1 = 0; + p1++; + } + err = kstrtol(p, 0, &hw->debugfs_reg_offset); + if (err) + return err; + /* length */ + if (p1) { + err = kstrtol(p1, 0, &hw->debugfs_reg_length); + if (err) + return err; + } + + hw->debugfs_reg_offset = ALIGN_DOWN(hw->debugfs_reg_offset, 4); + hw->debugfs_reg_length = ALIGN(hw->debugfs_reg_length, 4); + if (!hw->debugfs_reg_length) + hw->debugfs_reg_length = 4; + + return count; +} + +static int debugfs_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, register_seq_show, inode->i_private); +} + +SINGLE_FOPS_RW(reg_fops, debugfs_register_open, debugfs_register_write); + +/* function init and cleanup */ +void nbl_debugfs_hw_init(struct nbl_hw *hw) +{ + int i; + char buf[16]; + struct nbl_adapter *adapter; + + adapter = (struct nbl_adapter *)hw->back; + + if (!nblx4_debug_root) + return; + + snprintf(buf, sizeof(buf), "%04x:%02x:%02x.%x", + pci_domain_nr(adapter->pdev->bus), hw->bus, hw->devid, hw->function); + hw->nbl_debug_root = debugfs_create_dir(buf, nblx4_debug_root); + + if (is_af(hw)) { + debugfs_create_file("dvn", 0444, + hw->nbl_debug_root, hw, &dvn_fops); + debugfs_create_file("uvn", 0644, + hw->nbl_debug_root, hw, &uvn_fops); + debugfs_create_file("nic-statistics", 0444, + hw->nbl_debug_root, hw, &nic_statistics_fops); + debugfs_create_file("tables", 0644, + hw->nbl_debug_root, hw, &tables_fops); + } + + if (adapter->num_txq) { + for (i = 0; i < adapter->num_txq; i++) { + snprintf(buf, sizeof(buf), "txring-%d", i); + debugfs_create_file(buf, 0444, + hw->nbl_debug_root, + adapter->tx_rings[i], &ring_fops); + } + } + + if (adapter->num_rxq) { + for (i = 0; i < adapter->num_rxq; i++) { + snprintf(buf, sizeof(buf), "rxring-%d", i); + debugfs_create_file(buf, 0444, + hw->nbl_debug_root, + adapter->rx_rings[i], &ring_fops); + } + } + + debugfs_create_file("bar", 0444, hw->nbl_debug_root, hw, &bar_fops); + + 
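+	/*
+	 * The "reg" file defaults (set below) dump 8 bytes from offset 0 of
+	 * BAR 0; the window is moved with the "bN,offset,length" syntax
+	 * documented above, e.g. "echo b2,0x100,16 > reg" followed by
+	 * "cat reg" to dump 16 bytes of the mailbox BAR.
+	 */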
+	hw->debugfs_reg_bar = 0;
+	hw->debugfs_reg_offset = 0;
+	hw->debugfs_reg_length = 8;
+	debugfs_create_file("reg", 0444, hw->nbl_debug_root, hw, &reg_fops);
+}
+
+void nbl_debugfs_hw_exit(struct nbl_hw *hw)
+{
+	debugfs_remove_recursive(hw->nbl_debug_root);
+	hw->nbl_debug_root = NULL;
+}
+
+/* module init and cleanup */
+void nbl_debugfs_init(void)
+{
+	nblx4_debug_root = debugfs_create_dir("nblx4", NULL);
+	if (!nblx4_debug_root)
+		pr_info("init of nbl X4 debugfs failed\n");
+}
+
+void nbl_debugfs_exit(void)
+{
+	debugfs_remove_recursive(nblx4_debug_root);
+	nblx4_debug_root = NULL;
+}
+#endif /* CONFIG_NBL_DEBUGFS */
diff --git a/drivers/net/ethernet/nebula-matrix/m1600/ethtool.c b/drivers/net/ethernet/nebula-matrix/m1600/ethtool.c
new file mode 100644
index 0000000000000000000000000000000000000000..b3efdec12f42c42374a08d6d228c220459eb6578
--- /dev/null
+++ b/drivers/net/ethernet/nebula-matrix/m1600/ethtool.c
@@ -0,0 +1,1298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 nebula-matrix Limited.
+ * Author: Monte Song
+ */
+
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+
+#include "hw.h"
+#include "common.h"
+#include "txrx.h"
+#include "mailbox.h"
+#include "ethtool.h"
+
+static void nbl_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+	struct nbl_adapter *adapter = netdev_priv(netdev);
+	struct nbl_hw *hw = &adapter->hw;
+	u32 firmware_version;
+
+	if (is_af(hw))
+		firmware_version = nbl_af_get_firmware_version(hw);
+	else
+		firmware_version = nbl_mailbox_req_get_firmware_version(hw);
+
+	strscpy(drvinfo->driver, NBL_X4_DRIVER_NAME, sizeof(drvinfo->driver));
+	strscpy(drvinfo->version, NBL_X4_DRIVER_VERSION, sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%x", firmware_version);
+	strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info));
+
+	drvinfo->regdump_len = 0;
+}
+
+static u32 nbl_get_link(struct net_device *netdev)
+{
+	return netif_carrier_ok(netdev) ?
1 : 0; +} + +static int nbl_get_regs_len(struct net_device *netdev) +{ + return ARRAY_SIZE(nbl_regs_dump_list) * sizeof(u32); +} + +void nbl_af_get_ethtool_dump_regs(struct nbl_hw *hw, u32 *regs_buff, u32 count) +{ + int i; + + for (i = 0; i < count; ++i) + regs_buff[i] = rd32(hw, nbl_regs_dump_list[i]); +} + +static void nbl_get_ethtool_dump_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + u32 *regs_buff = (u32 *)p; + u32 count = ARRAY_SIZE(nbl_regs_dump_list); + int err; + + regs->version = (u32)hw->devid; + + if (is_af(hw)) { + nbl_af_get_ethtool_dump_regs(hw, regs_buff, count); + } else { + err = nbl_mailbox_req_get_ethtool_dump_regs(hw, regs_buff, count); + if (err) + pr_err("Ethtool mailbox req get regs error!\n"); + } +} + +static void nbl_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam __always_unused *k_ringparam, + struct netlink_ext_ack __always_unused *extack) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + + ringparam->tx_max_pending = NBL_MAX_TX_DESC_NUM; + ringparam->rx_max_pending = NBL_MAX_RX_DESC_NUM; + ringparam->rx_pending = adapter->rx_rings[0]->desc_num; + ringparam->tx_pending = adapter->tx_rings[0]->desc_num; +} + +static int nbl_check_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam) +{ + /* check if tx_pending is out of range or power of 2 */ + if (ringparam->tx_pending > NBL_MAX_TX_DESC_NUM || + ringparam->tx_pending < NBL_MIN_TX_DESC_NUM) { + pr_err("Tx descriptors requested: %d, out of range[%d-%d]\n", + ringparam->tx_pending, NBL_MIN_TX_DESC_NUM, NBL_MAX_TX_DESC_NUM); + return -EINVAL; + } + if (ringparam->tx_pending & (ringparam->tx_pending - 1)) { + pr_err("Tx descriptors requested: %d is not power of 2\n", + ringparam->tx_pending); + return -EINVAL; + } + + /* check if rx_pending is out of range or power of 2 */ + if (ringparam->rx_pending > NBL_MAX_RX_DESC_NUM || + ringparam->rx_pending < NBL_MIN_RX_DESC_NUM) { + pr_err("Rx descriptors requested: %d, out of range[%d-%d]\n", + ringparam->rx_pending, NBL_MIN_RX_DESC_NUM, NBL_MAX_RX_DESC_NUM); + return -EINVAL; + } + if (ringparam->rx_pending & (ringparam->rx_pending - 1)) { + pr_err("Rx descriptors requested: %d is not power of 2\n", + ringparam->rx_pending); + return -EINVAL; + } + + if (ringparam->rx_jumbo_pending || ringparam->rx_mini_pending) { + pr_err("rx_jumbo_pending or rx_mini_pending is not supported\n"); + return -EINVAL; + } + + return 0; +} + +static int nbl_pre_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + int timeout = 50; + + if (ringparam->rx_pending == adapter->rx_desc_num && + ringparam->tx_pending == adapter->tx_desc_num) { + pr_debug("Nothing to change, descriptor count is same as requested\n"); + return 0; + } + + while (test_and_set_bit(NBL_RESETTING, adapter->state)) { + timeout--; + if (!timeout) { + pr_err("Timeout while resetting in set ringparam\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* configure params later */ + return 1; +} + +static int nbl_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam __always_unused *k_ringparam, + struct netlink_ext_ack __always_unused *extack) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + u16 new_tx_count; + u16 new_rx_count; + int was_running; + int i; + 
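+	/*
+	 * Ring resize flow: validate the requested counts, grab NBL_RESETTING,
+	 * stop the netdev if it was running, update desc_num on every Tx/Rx
+	 * ring, then reopen so the rings are presumably reallocated with the
+	 * new size.
+	 */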
int err; + + err = nbl_check_set_ringparam(netdev, ringparam); + if (err < 0) + return err; + + err = nbl_pre_set_ringparam(netdev, ringparam); + /* if either error occur or nothing to change, return */ + if (err <= 0) + return err; + + new_tx_count = ringparam->tx_pending; + new_rx_count = ringparam->rx_pending; + + was_running = netif_running(netdev); + + if (was_running) { + err = nbl_stop(netdev); + if (err) { + pr_err("Netdev stop failed while setting ringparam\n"); + clear_bit(NBL_RESETTING, adapter->state); + return err; + } + } + + if (adapter->tx_desc_num != new_tx_count) { + adapter->tx_desc_num = new_tx_count; + for (i = 0; i < adapter->num_txq; i++) + adapter->tx_rings[i]->desc_num = new_tx_count; + } + + if (adapter->rx_desc_num != new_rx_count) { + adapter->rx_desc_num = new_rx_count; + for (i = 0; i < adapter->num_rxq; i++) + adapter->rx_rings[i]->desc_num = new_rx_count; + } + + if (was_running) { + err = nbl_open(netdev); + if (err) { + pr_err("Netdev open failed after setting ringparam\n"); + clear_bit(NBL_RESETTING, adapter->state); + return err; + } + } + + clear_bit(NBL_RESETTING, adapter->state); + + return 0; +} + +static int nbl_read_reg_i2c(struct nbl_hw *hw, u32 offset, u8 *data, u8 data_len, + u16 sff_8472_addr, const struct nbl_iic_phy_regs *regs) +{ + u32 status; + u32 timeout = 50; + struct nbl_sfp_iic_data sfp_iic_reg; + u32 *reg_value; + u32 i; + u32 sfp_iic_data; + + if (sff_8472_addr != SFF_8472_A0 && sff_8472_addr != SFF_8472_A2) { + pr_err("I2C read SFP module with wrong slave address\n"); + return -EIO; + } + + if (offset >= ETH_MODULE_SFF_8472_LEN) { + pr_err("I2C read SFP module with offset %u which is larger than module length %u", + offset, ETH_MODULE_SFF_8472_LEN); + return -EIO; + } + + if (data_len > NBL_SFP_READ_MAXLEN_ONE_TIME) { + pr_err("I2C read SFP module with data len %u which is larger than max read len %u\n", + data_len, NBL_SFP_READ_MAXLEN_ONE_TIME); + return -EIO; + } + + sfp_iic_reg.slave_addr = sff_8472_addr; + sfp_iic_reg.rw_mode = SFF_I2C_READ; + sfp_iic_reg.target_addr = offset; + sfp_iic_reg.access_bytes = data_len; + sfp_iic_reg.iic_chn = 0; /* kernel driver uses chn0 to rw sfp eeprom */ + + reg_value = (u32 *)&sfp_iic_reg; + + wr32(hw, regs->request, *reg_value); + + /* check job done: loop and query the done register bit */ + do { + timeout--; + if (timeout == 0) { + pr_err("Failed to read SFP module registers with I2C with slave address %u offset value %u and data len %u\n", + sff_8472_addr, offset, data_len); + return -EIO; + } + usleep_range(50, 100); + status = rd32(hw, regs->done) & NBL_SFP_RW_DONE_CHN0_MASK; + } while (!status); + + sfp_iic_data = rd32(hw, regs->rdata); + + for (i = 0; i < data_len; i++) + /* adjust endianness */ + data[data_len - i - 1] = (sfp_iic_data >> (i * 8)) & 0xff; + + return 0; +} + +static int __maybe_unused nbl_write_reg_i2c(struct nbl_hw *hw, u32 offset, u8 data, + u16 sff_8472_addr, + const struct nbl_iic_phy_regs *regs) +{ + int status = 0; + u32 timeout = 100; + struct nbl_sfp_iic_data sfp_iic_reg; + u32 *reg_value; + + if (sff_8472_addr != SFF_8472_A0 && sff_8472_addr != SFF_8472_A2) { + pr_err("I2C write SFP module with wrong slave address\n"); + return -EIO; + } + + if (offset >= ETH_MODULE_SFF_8472_LEN) { + pr_err("I2C write SFP module with offset %u which is larger than module length %u", + offset, ETH_MODULE_SFF_8472_LEN); + return -EIO; + } + + sfp_iic_reg.slave_addr = sff_8472_addr; + sfp_iic_reg.rw_mode = SFF_I2C_WRITE; + sfp_iic_reg.target_addr = offset; + 
sfp_iic_reg.access_bytes = 1; /* write 1 byte at 1 time */ + sfp_iic_reg.iic_chn = 0; /* kernel driver used chn0 to rw sfp eeprom */ + sfp_iic_reg.wdata = data; + + reg_value = (u32 *)&sfp_iic_reg; + wr32(hw, regs->request, *reg_value); + + /* check job done: loop and query the done register bit */ + do { + timeout--; + if (timeout == 0) { + pr_err("Failed to write SFP module registers with I2C with slave address %u offset value %u\n", + sff_8472_addr, offset); + return -EIO; + } + usleep_range(100, 200); + status = rd32(hw, regs->done) & NBL_SFP_RW_DONE_CHN0_MASK; + } while (!status); + + return 0; +} + +int nbl_af_get_module_info(struct nbl_hw *hw, u8 eth_port_id, struct ethtool_modinfo *info) +{ + struct nbl_iic_phy_regs iic_phy_regs; + int status; + u8 sff8472_rev; + u8 addr_mode; + bool page_swap = false; + + /* low signal means module inplace */ + if (rd32(hw, NBL_LSP_SFP_MOD_REG(eth_port_id)) & BIT(0)) { + pr_debug("Optical module of ETH port %u is not inplace\n", eth_port_id); + return -EIO; + } + + iic_phy_regs.request = NBL_LSP_SFP_I2C_REQUEST_REG(eth_port_id); + iic_phy_regs.rdata = NBL_LSP_SFP_I2C_RDATA_CHN_REG(eth_port_id, 0); + iic_phy_regs.done = NBL_LSP_SFP_I2C_DONE_REG(eth_port_id); + + /* SFF-8472 specification revision edition */ + status = nbl_read_reg_i2c(hw, SFF_8472_COMPLIANCE, &sff8472_rev, 1, + SFF_8472_A0, &iic_phy_regs); + if (status) { + pr_debug("Port %d failed to get SFP module revision information\n", eth_port_id); + return -EIO; + } + + /* check if addressing mode is supported */ + status = nbl_read_reg_i2c(hw, SFF_8472_DIAGNOSTIC, &addr_mode, 1, + SFF_8472_A0, &iic_phy_regs); + if (status) { + pr_debug("Port %d failed to get SFP module addressing mode information\n", + eth_port_id); + return -EIO; + } + + /* check if can access page 0xA2 directly, see sff-8472 */ + if (addr_mode & SFF_8472_ADDRESSING_MODE) { + pr_err("Port %d address change required to access page 0xA2 which is not supported\n", + eth_port_id); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == SFF_8472_UNSUPPORTED || page_swap || + !(addr_mode & SFF_DDM_IMPLEMENTED)) { + /* We have an SFP, but it does not support SFF-8472 */ + info->type = ETH_MODULE_SFF_8079; + info->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + info->type = ETH_MODULE_SFF_8472; + info->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int nbl_get_module_info(struct net_device *netdev, struct ethtool_modinfo *info) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id = hw->eth_port_id; + int err; + + if (is_af(hw)) + err = nbl_af_get_module_info(hw, eth_port_id, info); + else + err = nbl_mailbox_req_get_module_info(hw, eth_port_id, info); + + return err; +} + +int nbl_af_get_module_eeprom(struct nbl_hw *hw, u8 eth_port_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_iic_phy_regs iic_phy_regs; + int status; + u8 databyte[4]; + int i; + u8 len; + + if (eeprom->len == 0) + return -EINVAL; + + /* low signal means module inplace */ + if (rd32(hw, NBL_LSP_SFP_MOD_REG(eth_port_id)) & BIT(0)) { + pr_debug("Optical module of ETH port %u is not inplace\n", eth_port_id); + return -EIO; + } + + iic_phy_regs.request = NBL_LSP_SFP_I2C_REQUEST_REG(eth_port_id); + iic_phy_regs.rdata = NBL_LSP_SFP_I2C_RDATA_CHN_REG(eth_port_id, 0); + iic_phy_regs.done = NBL_LSP_SFP_I2C_DONE_REG(eth_port_id); + + for (i = eeprom->offset; i < eeprom->offset + eeprom->len; + i += 
NBL_SFP_READ_MAXLEN_ONE_TIME) { + if (eeprom->offset + eeprom->len - i >= NBL_SFP_READ_MAXLEN_ONE_TIME) + len = NBL_SFP_READ_MAXLEN_ONE_TIME; + else + len = eeprom->offset + eeprom->len - i; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = nbl_read_reg_i2c(hw, i, databyte, len, + SFF_8472_A0, &iic_phy_regs); + else + status = nbl_read_reg_i2c(hw, i, databyte, len, + SFF_8472_A2, &iic_phy_regs); + + if (status) { + pr_debug("Port %d get SPF module eeprom failed for read module register %d failed\n", + eth_port_id, i); + return -EIO; + } + + memcpy(data + i - eeprom->offset, databyte, len); + } + + return 0; +} + +static int nbl_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id = hw->eth_port_id; + int err; + + if (is_af(hw)) + err = nbl_af_get_module_eeprom(hw, eth_port_id, eeprom, data); + else + err = nbl_mailbox_req_get_module_eeprom(hw, eth_port_id, eeprom, data); + + return err; +} + +int nbl_read_eeprom_byte(struct nbl_hw *hw, u32 addr, u8 *data) +{ + struct nbl_eeprom_status status; + u32 value; + u32 i = 0; + + /* There is no need to write slave addr reg, the default value is ok. */ + wr32(hw, NBL_LSP_EEPROM_ADDR_REG, addr); + + wr32(hw, NBL_LSP_EEPROM_RW_REG, NBL_EEPROM_READ); + + /* Use rising edge to trigger read operation. */ + wr32(hw, NBL_LSP_EEPROM_REQ_REG, 0); + wr32(hw, NBL_LSP_EEPROM_REQ_REG, 1); + do { + *(u32 *)&status = rd32(hw, NBL_LSP_EEPROM_STATUS_REG); + if (status.done) + break; + i++; + if (!(i % 1000)) { + pr_err("Wait too long for EEPROM read done\n"); + return -EIO; + } + usleep_range(50, 100); + } while (true); + + value = rd32(hw, NBL_LSP_EEPROM_RDATA_REG); + *data = (u8)value; + + return 0; +} + +static int __maybe_unused nbl_write_eeprom_byte(struct nbl_hw *hw, u32 addr, u8 data) +{ + struct nbl_eeprom_status status; + u32 i = 0; + u32 write_data = data & 0xff; + + /* There is no need to write slave addr reg, the default value is ok. */ + wr32(hw, NBL_LSP_EEPROM_ADDR_REG, addr); + + wr32(hw, NBL_LSP_EEPROM_RW_REG, NBL_EEPROM_WRITE); + + wr32(hw, NBL_LSP_EEPROM_WDATA_REG, write_data); + + /* Use rising edge to trigger read operation. 
*/ + wr32(hw, NBL_LSP_EEPROM_REQ_REG, 0); + wr32(hw, NBL_LSP_EEPROM_REQ_REG, 1); + + do { + *(u32 *)&status = rd32(hw, NBL_LSP_EEPROM_STATUS_REG); + if (status.done) + break; + i++; + if (!(i % 1000)) { + pr_err("Wait too long for EEPROM write addr %x done\n", addr); + return -EIO; + } + + usleep_range(100, 200); + } while (true); + + return 0; +} + +int nbl_get_eeprom_len(struct net_device *netdev) +{ + return NBL_EEPROM_LENGTH; +} + +int nbl_af_get_eeprom(struct nbl_hw *hw, u32 offset, u32 length, u8 *bytes) +{ + int total_length; + u32 i; + int ret; + + total_length = nbl_get_eeprom_len(NULL); + if ((u32)total_length <= offset || (u32)total_length - offset < length) { + pr_debug("Try to access invalid EEPROM range\n"); + return -EINVAL; + } + + for (i = 0; i < length; i++) { + ret = nbl_read_eeprom_byte(hw, offset + i, bytes + i); + if (ret < 0) { + pr_info("Get EEPROM content failed\n"); + return ret; + } + } + + return 0; +} + +static int nbl_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + int ret; + + if (is_af(hw)) + ret = nbl_af_get_eeprom(hw, eeprom->offset, eeprom->len, bytes); + else + ret = nbl_mailbox_req_get_eeprom(hw, eeprom->offset, eeprom->len, bytes); + + return ret; +} + +static int nbl_get_port_type(struct nbl_hw *hw) +{ + u8 cable_tech; + u8 cable_comp; + struct ethtool_eeprom eeprom = { 0 }; + int ret; + + eeprom.offset = SFF_8472_CABLE_TECHNOLOGY; + eeprom.len = sizeof(cable_tech); + if (is_af(hw)) + ret = nbl_af_get_module_eeprom(hw, hw->eth_port_id, &eeprom, &cable_tech); + else + ret = nbl_mailbox_req_get_module_eeprom(hw, hw->eth_port_id, &eeprom, &cable_tech); + + if (ret) { + pr_err("Read SFF_8472_CABLE_TECHNOLOGY register in passive cable case failed, port: %d\n", + hw->eth_port_id); + return PORT_OTHER; + } + + if (cable_tech & SFF_PASSIVE_CABLE) { + eeprom.offset = SFF_8472_CABLE_SPEC_COMP; + eeprom.len = sizeof(cable_comp); + if (is_af(hw)) + ret = nbl_af_get_module_eeprom(hw, hw->eth_port_id, &eeprom, &cable_comp); + else + ret = nbl_mailbox_req_get_module_eeprom(hw, hw->eth_port_id, + &eeprom, &cable_comp); + + if (ret) { + pr_err("Read SFF_8472_CABLE_SPEC_COMP register in passive cable case failed, port: %d\n", + hw->eth_port_id); + return PORT_OTHER; + } + + /* determine if the port is a cooper cable */ + if (cable_comp == SFF_COPPER_UNSPECIFIED || + cable_comp == SFF_COPPER_8431_APPENDIX_E) + return PORT_DA; + else + return PORT_FIBRE; + } else if (cable_tech & SFF_ACTIVE_CABLE) { + eeprom.offset = SFF_8472_CABLE_SPEC_COMP; + eeprom.len = sizeof(cable_comp); + if (is_af(hw)) + ret = nbl_af_get_module_eeprom(hw, hw->eth_port_id, &eeprom, &cable_comp); + else + ret = nbl_mailbox_req_get_module_eeprom(hw, hw->eth_port_id, + &eeprom, &cable_comp); + + if (ret) { + pr_err("Read SFF_8472_CABLE_SPEC_COMP register in active cable case failed, port: %d\n", + hw->eth_port_id); + return PORT_OTHER; + } + + /* determine if the port is a cooper cable */ + if (cable_comp == SFF_COPPER_UNSPECIFIED || + cable_comp == SFF_COPPER_8431_APPENDIX_E || + cable_comp == SFF_COPPER_8431_LIMITING) + return PORT_DA; + else + return PORT_FIBRE; + } else { + return PORT_FIBRE; + } +} + +static void +nbl_get_ksettings(const struct nbl_hw *hw, struct ethtool_link_ksettings *cmd) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + unsigned int idx = 0; + + cmd->base.autoneg = AUTONEG_DISABLE; + + for (; idx < size; idx++) { + 
cmd->link_modes.supported[idx] = hw->supported[idx]; + cmd->link_modes.advertising[idx] = hw->advertising[idx]; + } +} + +int nbl_af_query_link_speed(struct nbl_hw *hw, u8 eth_port_id, u32 *speed_stat) +{ + struct nbl_loopback_mode loopback_mode = {0}; + + rd32_for_each(hw, NBL_ETH_LOOPBACK_MODE_REG(eth_port_id), + (u32 *)&loopback_mode, sizeof(loopback_mode)); + + *speed_stat = loopback_mode.speed_stat; + + return 0; +} + +u32 nbl_query_link_speed(struct nbl_hw *hw) +{ + u32 speed_stat; + u8 eth_port_id = hw->eth_port_id; + int ret; + + if (is_af(hw)) + ret = nbl_af_query_link_speed(hw, eth_port_id, &speed_stat); + else + ret = nbl_mailbox_req_link_speed(hw, eth_port_id, &speed_stat); + + if (ret) { + pr_err("Failed to get link speed, port id: %d\n", eth_port_id); + return NBL_MODULE_SPEED_NOT_SUPPORT; + } + + if (speed_stat == NBL_ETH_SPEED_MODE_10G) + return NBL_MODULE_SPEED_10G; + else if (speed_stat == NBL_ETH_SPEED_MODE_1G) + return NBL_MODULE_SPEED_1G; + else + return NBL_MODULE_SPEED_NOT_SUPPORT; +} + +static int +nbl_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + u32 link_stat; + u32 link_speed; + + nbl_get_ksettings(hw, cmd); + + link_stat = nbl_get_link(netdev); + if (!link_stat) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + return 0; + } + + link_speed = nbl_query_link_speed(hw); + switch (link_speed) { + case NBL_MODULE_SPEED_10G: + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = nbl_get_port_type(hw); + break; + case NBL_MODULE_SPEED_1G: + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = nbl_get_port_type(hw); + break; + default: + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + break; + } + + return 0; +} + +static int nbl_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + if (cmd->base.duplex == DUPLEX_HALF) { + pr_warn("Half duplex mode is not supported\n"); + return -EINVAL; + } + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + pr_warn("Autoneg is not supported\n"); + return -EINVAL; + } + + if (cmd->base.speed == SPEED_10000) { + if (hw->module_support_speed & NBL_MODULE_SPEED_10G) { + nbl_set_module_speed(hw, NBL_MODULE_SPEED_10G); + } else { + pr_warn("Port %d module doesn't support 10G mode\n", hw->eth_port_id); + return -EINVAL; + } + } else if (cmd->base.speed == SPEED_1000) { + if (hw->module_support_speed & NBL_MODULE_SPEED_1G) { + nbl_set_module_speed(hw, NBL_MODULE_SPEED_1G); + } else { + pr_warn("Port %d module doesn't support 1G mode\n", hw->eth_port_id); + return -EINVAL; + } + } else { + pr_warn("Port %d speed mode is not supported\n", hw->eth_port_id); + return -EINVAL; + } + + return 0; +} + +static u32 nbl_get_msglevel(struct net_device *netdev) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void nbl_set_msglevel(struct net_device *netdev, u32 msglevel) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = msglevel; +} + +static u64 nbl_link_test(struct nbl_hw *hw) +{ + bool link_up; + + link_up = nbl_query_link_status(hw); + if (link_up) + return 0; + else + return 1; +} + +u64 nbl_af_reg_test(struct nbl_hw *hw, u8 eth_port_id) +{ + 
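+	/*
+	 * Register self-test: write four known patterns into a per-port
+	 * stimulus register, read each one back, and restore the original
+	 * value afterwards; any mismatch reports failure (non-zero).
+	 */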
u32 tmp_val; + u32 reserve; + int i; + const u32 patterns[NBL_TEST_PATTERN_NUM] = { + NBL_REG_TEST_PATTERN_0, + NBL_REG_TEST_PATTERN_1, + NBL_REG_TEST_PATTERN_2, + NBL_REG_TEST_PATTERN_3 + }; + + reserve = rd32(hw, NBL_ETH_SELF_STIMU_REG2(eth_port_id)); + + for (i = 0; i < NBL_TEST_PATTERN_NUM; i++) { + wr32(hw, NBL_ETH_SELF_STIMU_REG2(eth_port_id), patterns[i]); + tmp_val = rd32(hw, NBL_ETH_SELF_STIMU_REG2(eth_port_id)); + + if (tmp_val != patterns[i]) { + wr32(hw, NBL_ETH_SELF_STIMU_REG2(eth_port_id), reserve); + return 1; + } + } + /* restore register value */ + wr32(hw, NBL_ETH_SELF_STIMU_REG2(eth_port_id), reserve); + + return 0; +} + +static u64 nbl_reg_test(struct nbl_hw *hw, u8 eth_port_id) +{ + u64 ret; + + if (is_af(hw)) + ret = nbl_af_reg_test(hw, eth_port_id); + else + ret = nbl_mailbox_req_reg_test(hw, eth_port_id); + + return ret; +} + +static void nbl_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id = hw->eth_port_id; + bool if_running = netif_running(netdev); + int status; + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* if online, take if offline */ + if (if_running) + nbl_stop(netdev); + + data[NBL_ETH_TEST_LINK] = nbl_link_test(hw); + data[NBL_ETH_TEST_REG] = nbl_reg_test(hw, eth_port_id); + + if (data[NBL_ETH_TEST_LINK] || + data[NBL_ETH_TEST_REG]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) { + status = nbl_open(netdev); + if (status) { + netdev_err(netdev, "Could not open device %s, err %d\n", + pci_name(adapter->pdev), status); + } + } + } else { + /* Online test */ + data[NBL_ETH_TEST_LINK] = nbl_link_test(hw); + + if (data[NBL_ETH_TEST_LINK]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Only test offlined, pass by default */ + data[NBL_ETH_TEST_REG] = 0; + } +} + +int nbl_af_set_phys_id(struct nbl_hw *hw, u8 eth_port_id, enum ethtool_phys_id_state state) +{ + u32 led_reg_ctrl; + u32 led_reg_addr = NBL_ETH_LED_CTRL_REG(eth_port_id); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + led_reg_ctrl = rd32(hw, led_reg_addr); + led_reg_ctrl |= NBL_FORCE_LED_EN; + wr32(hw, led_reg_addr, led_reg_ctrl); + return NBL_LED_FLICKER_FREQUENCY; + case ETHTOOL_ID_ON: + led_reg_ctrl = rd32(hw, led_reg_addr); + led_reg_ctrl |= NBL_FORCE_ACT_LED_LEVEL; + wr32(hw, led_reg_addr, led_reg_ctrl); + break; + case ETHTOOL_ID_OFF: + led_reg_ctrl = rd32(hw, led_reg_addr); + led_reg_ctrl &= ~((u32)NBL_FORCE_ACT_LED_LEVEL); + wr32(hw, led_reg_addr, led_reg_ctrl); + break; + case ETHTOOL_ID_INACTIVE: + led_reg_ctrl = rd32(hw, led_reg_addr); + led_reg_ctrl &= ~((u32)NBL_FORCE_LED_EN); + wr32(hw, led_reg_addr, led_reg_ctrl); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +nbl_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + int ret; + + if (is_af(hw)) + ret = nbl_af_set_phys_id(hw, hw->eth_port_id, state); + else + ret = nbl_mailbox_req_set_phy_id(hw, hw->eth_port_id, state); + + return ret; +} + +static int +nbl_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + if (param->autoneg == AUTONEG_ENABLE) { + pr_debug("autoneg is not support\n"); + return -EINVAL; + } + + if (param->rx_pause == hw->fc.rx_pause && param->tx_pause == hw->fc.tx_pause) { + pr_debug("autoneg param is not 
changed\n"); + return 0; + } + + hw->fc.rx_pause = param->rx_pause; + hw->fc.tx_pause = param->tx_pause; + if (is_af(hw)) + nbl_af_set_pauseparam(hw, hw->eth_port_id, hw->fc); + else + nbl_mailbox_req_set_pauseparam(hw, hw->eth_port_id, hw->fc); + + if (hw->fc.rx_pause && hw->fc.tx_pause) { + __clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, hw->advertising); + } else if (!hw->fc.rx_pause && !hw->fc.tx_pause) { + __clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, hw->advertising); + __clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, hw->advertising); + } else if (hw->fc.rx_pause && !hw->fc.tx_pause) { + __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, hw->advertising); + } else if (!hw->fc.rx_pause && hw->fc.tx_pause) { + __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, hw->advertising); + __clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, hw->advertising); + } + + return 0; +} + +static void +nbl_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + param->autoneg = AUTONEG_DISABLE; + param->rx_pause = hw->fc.rx_pause; + param->tx_pause = hw->fc.tx_pause; +} + +void nbl_af_get_pause_stats(struct nbl_hw *hw, u8 eth_port_id, struct ethtool_pause_stats *stats) +{ + struct nbl_pause_cnt pause_cnt; + + rd32_for_each(hw, NBL_PA_PAUSE_RX_CNT, + (u32 *)&pause_cnt, sizeof(pause_cnt)); + stats->rx_pause_frames = pause_cnt.eth_pause_cnt[eth_port_id]; + + rd32_for_each(hw, NBL_PED_PAUSE_TX_CNT, + (u32 *)&pause_cnt, sizeof(pause_cnt)); + stats->tx_pause_frames = pause_cnt.eth_pause_cnt[eth_port_id]; +} + +static void nbl_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *stats) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + if (is_af(hw)) + nbl_af_get_pause_stats(hw, hw->eth_port_id, stats); + else + nbl_mailbox_req_get_pause_stats(hw, hw->eth_port_id, stats); +} + +static void nbl_stats_fill_strings(struct nbl_adapter *adapter, u8 *data) +{ + char *p = (char *)data; + u8 i; + + for (i = 0; i < NBL_GLOBAL_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_gstrings_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->num_txq; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->num_rxq; i++) { + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } +} + +static void nbl_priv_flags_fill_strings(struct nbl_adapter *adapter, u8 *data) +{ + char *p = (char *)data; + unsigned int i; + + for (i = 0; i < NBL_PRIV_FLAG_ARRAY_SIZE; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", nbl_priv_flags[i]); + p += ETH_GSTRING_LEN; + } +} + +static void nbl_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, nbl_gstrings_test, NBL_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + nbl_stats_fill_strings(adapter, data); + break; + case ETH_SS_PRIV_FLAGS: + nbl_priv_flags_fill_strings(adapter, data); + break; + } +} + +static int nbl_get_sset_count(struct net_device *netdev, int sset) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + 
u8 total_queues; + + switch (sset) { + case ETH_SS_TEST: + return NBL_TEST_LEN; + case ETH_SS_STATS: + total_queues = adapter->num_txq + adapter->num_rxq; + return NBL_GLOBAL_STATS_LEN + total_queues * NBL_QUEUE_STAT_ENTRIES; + case ETH_SS_PRIV_FLAGS: + return NBL_PRIV_FLAG_ARRAY_SIZE; + default: + return -EOPNOTSUPP; + } +} + +static void +nbl_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) +{ + struct rtnl_link_stats64 temp_stats; + struct nbl_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats; + struct nbl_ring *ring; + unsigned int start; + char *p = NULL; + int i; + int j; + + nbl_update_stats_subtask(adapter); + net_stats = dev_get_stats(netdev, &temp_stats); + for (i = 0; i < NBL_GLOBAL_STATS_LEN; i++) { + switch (nbl_gstrings_stats[i].type) { + case NBL_NETDEV_STATS: + p = (char *)net_stats + + nbl_gstrings_stats[i].stat_offset; + break; + case NBL_ETH_STATS: + case NBL_PRIV_STATS: + p = (char *)adapter + + nbl_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (nbl_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < adapter->num_txq; j++) { + ring = adapter->tx_rings[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + for (j = 0; j < adapter->num_rxq; j++) { + ring = adapter->rx_rings[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } +} + +int nbl_af_get_coalesce(struct nbl_hw *hw, struct ethtool_coalesce *ec, + u16 func_id, u16 local_vector_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + u32 regval; + u16 global_vector_id; + + WARN_ON(!func_res); + WARN_ON(local_vector_id >= func_res->num_interrupts); + global_vector_id = func_res->interrupts[local_vector_id]; + + regval = rd32(hw, NBL_PADPT_MSIX_INFO_REG_ARR(global_vector_id)); + + ec->rx_max_coalesced_frames = (regval & NBL_MSIX_INTR_CTRL_PNUM_MASK) >> + NBL_MSIX_INTR_CTRL_PNUM_SHIFT; + ec->rx_coalesce_usecs = ((regval & NBL_MSIX_INTR_CTRL_RATE_MASK) >> + NBL_MSIX_INTR_CTRL_RATE_SHIFT) * + NBL_MSIX_INTR_CTRL_RATE_GRANUL; + + return 0; +} + +static int nbl_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce __always_unused *kernel_coal, + struct netlink_ext_ack __always_unused *extack) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + int err; + u16 local_vector_id; + + local_vector_id = adapter->rx_rings[0]->q_vector->q_vector_id; + + if (is_af(hw)) + err = nbl_af_get_coalesce(hw, ec, 0, local_vector_id); + else + err = nbl_mailbox_req_get_coalesce(hw, ec, local_vector_id); + + return err; +} + +int nbl_af_set_coalesce(struct nbl_hw *hw, u16 func_id, u16 local_vector_id, + u16 num_q_vectors, u32 regval) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + u16 global_vector_id; + int i; + + WARN_ON(!func_res); + WARN_ON(local_vector_id + num_q_vectors > func_res->num_interrupts); + + for (i = 0; i < num_q_vectors; i++) { + global_vector_id = func_res->interrupts[local_vector_id + i]; + wr32(hw, NBL_PADPT_MSIX_INFO_REG_ARR(global_vector_id), regval); + 
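/* the same packed pnum/rate moderation value is applied to every vector owned by this function */ +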
} + + return 0; +} + +static int nbl_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce __always_unused *kernel_coal, + struct netlink_ext_ack __always_unused *extack) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + u16 pnum; + u16 rate; + u32 regval; + int err; + u16 local_vector_id; + u16 num_q_vectors; + + num_q_vectors = adapter->num_q_vectors; + local_vector_id = adapter->rx_rings[0]->q_vector->q_vector_id; + + if (ec->rx_max_coalesced_frames > U16_MAX) { + pr_err("rx_frames %d out of range: [0 - %d]\n", + ec->rx_max_coalesced_frames, U16_MAX); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs % NBL_MSIX_INTR_CTRL_RATE_GRANUL) { + pr_err("rx_usecs should be integral multiple of %d\n", + NBL_MSIX_INTR_CTRL_RATE_GRANUL); + return -EINVAL; + } else if ((ec->rx_coalesce_usecs / NBL_MSIX_INTR_CTRL_RATE_GRANUL) > U16_MAX) { + pr_err("rx_frames %d out of range: [0 - %d]\n", + ec->rx_coalesce_usecs, U16_MAX * NBL_MSIX_INTR_CTRL_RATE_GRANUL); + return -EINVAL; + } + + pnum = (u16)ec->rx_max_coalesced_frames; + rate = (u16)(ec->rx_coalesce_usecs / NBL_MSIX_INTR_CTRL_RATE_GRANUL); + + regval = ((pnum << NBL_MSIX_INTR_CTRL_PNUM_SHIFT) & NBL_MSIX_INTR_CTRL_PNUM_MASK) | + ((rate << NBL_MSIX_INTR_CTRL_RATE_SHIFT) & NBL_MSIX_INTR_CTRL_RATE_MASK); + + if (is_af(hw)) + err = nbl_af_set_coalesce(hw, 0, local_vector_id, num_q_vectors, regval); + else + err = nbl_mailbox_req_set_coalesce(hw, local_vector_id, num_q_vectors, regval); + + return err; +} + +static u32 nbl_get_max_combined(struct nbl_adapter *adapter) +{ + return min_t(u32, NBL_MAX_RING_NUM, (u16)num_online_cpus()); +} + +static u32 nbl_get_combined_cnt(struct nbl_adapter *adapter) +{ + int i; + u32 combined = 0; + u16 num = adapter->num_q_vectors; + + for (i = 0; i < num; i++) { + struct nbl_q_vector *q_vector = adapter->q_vectors[i]; + + if (q_vector->num_ring_rx && q_vector->num_ring_tx) + combined++; + } + + return combined; +} + +static void nbl_get_channels(struct net_device *dev, struct ethtool_channels *channels) +{ + struct nbl_adapter *adapter = netdev_priv(dev); + + channels->max_combined = nbl_get_max_combined(adapter); + + channels->combined_count = nbl_get_combined_cnt(adapter); +} + +static u32 nbl_get_priv_flags(struct net_device *netdev) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + u32 ret_flags = 0; + + if (adapter->flags & BIT(NBL_ADAPTER_SRIOV_ENA)) + ret_flags |= BIT(NBL_ADAPTER_SRIOV_ENA); + + return ret_flags; +} + +static int nbl_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + u32 flags = adapter->flags; + + flags &= ~(1 << NBL_ADAPTER_SRIOV_ENA); + if (priv_flags & (1 << NBL_ADAPTER_SRIOV_ENA)) + flags |= (1 << NBL_ADAPTER_SRIOV_ENA); + + if (flags != adapter->flags) + adapter->flags = flags; + + return 0; +} + +static const struct ethtool_ops nbl_ethtool_ops = { +#ifdef ETHTOOL_COALESCE_RX_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES, +#endif /* ETHTOOL_COALESCE_USECS */ + .get_drvinfo = nbl_get_drvinfo, + .get_link = nbl_get_link, + .get_regs_len = nbl_get_regs_len, + .get_regs = nbl_get_ethtool_dump_regs, + .get_ringparam = nbl_get_ringparam, + .set_ringparam = nbl_set_ringparam, + .get_link_ksettings = nbl_get_link_ksettings, + .set_link_ksettings = nbl_set_link_ksettings, + .get_msglevel = nbl_get_msglevel, + .set_msglevel = nbl_set_msglevel, + .self_test = nbl_self_test, + 
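/* SFP module EEPROM and on-board EEPROM access */ +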
.get_module_eeprom = nbl_get_module_eeprom, + .get_module_info = nbl_get_module_info, + .get_eeprom_len = nbl_get_eeprom_len, + .get_eeprom = nbl_get_eeprom, + .set_phys_id = nbl_set_phys_id, + .set_pauseparam = nbl_set_pauseparam, + .get_pauseparam = nbl_get_pauseparam, + .get_pause_stats = nbl_get_pause_stats, + .get_strings = nbl_get_strings, + .get_sset_count = nbl_get_sset_count, + .get_ethtool_stats = nbl_get_ethtool_stats, + .get_coalesce = nbl_get_coalesce, + .set_coalesce = nbl_set_coalesce, + .get_channels = nbl_get_channels, + .get_priv_flags = nbl_get_priv_flags, + .set_priv_flags = nbl_set_priv_flags, +}; + +void nbl_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &nbl_ethtool_ops; +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/ethtool.h b/drivers/net/ethernet/nebula-matrix/m1600/ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..a71b617a7cba23cba3577e6f7b7754c22def7da1 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/ethtool.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#ifndef _NBL_ETHTOOL_H_ +#define _NBL_ETHTOOL_H_ + +#include +#include + +static const u32 nbl_regs_dump_list[] = { + NBL_GREG_DYNAMIC_PRJ_ID_REG, + NBL_GREG_DYNAMIC_VERSION_REG, +}; + +enum NBL_STATS_TYPE { + NBL_NETDEV_STATS, + NBL_ETH_STATS, + NBL_PRIV_STATS, + NBL_STATS_TYPE_MAX +}; + +struct nbl_ethtool_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) +#endif + +#define NBL_NETDEV_STAT(_name, stat_m) { \ + .stat_string = _name, \ + .type = NBL_NETDEV_STATS, \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, stat_m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, stat_m) \ +} + +#define NBL_ETH_STAT(_name, stat_m) { \ + .stat_string = _name, \ + .type = NBL_ETH_STATS, \ + .sizeof_stat = sizeof_field(struct nbl_adapter, stat_m), \ + .stat_offset = offsetof(struct nbl_adapter, stat_m) \ +} + +#define NBL_PRIV_STAT(_name, stat_m) { \ + .stat_string = _name, \ + .type = NBL_PRIV_STATS, \ + .sizeof_stat = sizeof_field(struct nbl_adapter, stat_m), \ + .stat_offset = offsetof(struct nbl_adapter, stat_m) \ +} + +static const struct nbl_ethtool_stats nbl_gstrings_stats[] = { + NBL_NETDEV_STAT("rx_packets", rx_packets), + NBL_NETDEV_STAT("tx_packets", tx_packets), + NBL_NETDEV_STAT("rx_bytes", rx_bytes), + NBL_NETDEV_STAT("tx_bytes", tx_bytes), + NBL_NETDEV_STAT("rx_errors", rx_errors), + NBL_NETDEV_STAT("tx_errors", tx_errors), + NBL_NETDEV_STAT("rx_dropped", rx_dropped), + NBL_NETDEV_STAT("tx_dropped", tx_dropped), + NBL_NETDEV_STAT("multicast", multicast), + NBL_NETDEV_STAT("rx_crc_errors", rx_crc_errors), + NBL_NETDEV_STAT("rx_frame_errors", rx_frame_errors), + NBL_NETDEV_STAT("rx_length_errors", rx_length_errors), + + NBL_ETH_STAT("tx_total_packets", stats.tx_total_packets), + NBL_ETH_STAT("tx_total_bytes", stats.tx_total_bytes), + NBL_ETH_STAT("tx_total_good_packets", stats.tx_total_good_packets), + NBL_ETH_STAT("tx_total_good_bytes", stats.tx_total_good_bytes), + NBL_ETH_STAT("tx_frame_error", stats.tx_frame_error), + NBL_ETH_STAT("tx_bad_fcs", stats.tx_bad_fcs), + NBL_ETH_STAT("tx_unicast", stats.tx_unicast), + NBL_ETH_STAT("tx_multicast", stats.tx_multicast), + NBL_ETH_STAT("tx_broadcast", stats.tx_broadcast), + NBL_ETH_STAT("tx_vlan", stats.tx_vlan), + NBL_ETH_STAT("tx_fc_pause",
stats.tx_fc_pause), + + NBL_ETH_STAT("rx_total_packets", stats.rx_total_packets), + NBL_ETH_STAT("rx_total_bytes", stats.rx_total_bytes), + NBL_ETH_STAT("rx_total_good_packets", stats.rx_total_good_packets), + NBL_ETH_STAT("rx_total_good_bytes", stats.rx_total_good_bytes), + NBL_ETH_STAT("rx_oversize", stats.rx_oversize), + NBL_ETH_STAT("rx_undersize", stats.rx_undersize), + NBL_ETH_STAT("rx_frame_err", stats.rx_frame_err), + NBL_ETH_STAT("rx_bad_code", stats.rx_bad_code), + NBL_ETH_STAT("rx_bad_fcs", stats.rx_bad_fcs), + NBL_ETH_STAT("rx_unicast", stats.rx_unicast), + NBL_ETH_STAT("rx_multicast", stats.rx_multicast), + NBL_ETH_STAT("rx_broadcast", stats.rx_broadcast), + NBL_ETH_STAT("rx_vlan", stats.rx_vlan), + NBL_ETH_STAT("rx_fc_pause", stats.rx_fc_pause), + + NBL_PRIV_STAT("tx_csum_pkts", stats.tx_csum_pkts), + NBL_PRIV_STAT("rx_csum_pkts", stats.rx_csum_pkts), + NBL_PRIV_STAT("tx_busy", stats.tx_busy), + NBL_PRIV_STAT("tx_linearize", stats.tx_linearize), + NBL_PRIV_STAT("tx_dma_err", stats.tx_dma_err), + NBL_PRIV_STAT("alloc_page_failed", stats.alloc_page_failed), + NBL_PRIV_STAT("alloc_skb_failed", stats.alloc_skb_failed), + NBL_PRIV_STAT("rx_dma_err", stats.rx_dma_err), + NBL_PRIV_STAT("tx_timeout", stats.tx_timeout), + NBL_PRIV_STAT("err_status_reset", stats.err_status_reset), + NBL_PRIV_STAT("bad_code_reset", stats.bad_code_reset), +}; + +enum nbl_ethtool_test_id { + NBL_ETH_TEST_REG = 0, + NBL_ETH_TEST_LINK, +}; + +static const char nbl_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Link test (on/offline)", +}; + +#define NBL_TEST_LEN (sizeof(nbl_gstrings_test) / ETH_GSTRING_LEN) + +#define NBL_REG_TEST_PATTERN_0 0x5A5A5A5A +#define NBL_REG_TEST_PATTERN_1 0xA5A5A5A5 +#define NBL_REG_TEST_PATTERN_2 0x00000000 +#define NBL_REG_TEST_PATTERN_3 0xFFFFFFFF +#define NBL_TEST_PATTERN_NUM 4 + +#define NBL_GLOBAL_STATS_LEN ARRAY_SIZE(nbl_gstrings_stats) + +static const char nbl_priv_flags[][ETH_GSTRING_LEN] = { + "sriov-ena", +}; + +enum nbl_adapter_flags { + NBL_ADAPTER_SRIOV_ENA, + NBL_ADAPTER_FLAGS_MAX +}; + +#define NBL_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(nbl_priv_flags) + +void nbl_set_ethtool_ops(struct net_device *netdev); +int nbl_af_get_module_eeprom(struct nbl_hw *hw, u8 eth_port_id, + struct ethtool_eeprom *eeprom, u8 *data); + +int nbl_af_get_module_info(struct nbl_hw *hw, u8 eth_port_id, struct ethtool_modinfo *info); + +int nbl_read_eeprom_byte(struct nbl_hw *hw, u32 addr, u8 *data); +int nbl_af_get_eeprom(struct nbl_hw *hw, u32 offset, u32 length, u8 *bytes); + +u64 nbl_af_link_test(struct nbl_hw *hw, u8 eth_port_id); +u64 nbl_af_reg_test(struct nbl_hw *hw, u8 eth_port_id); + +void nbl_af_get_ethtool_dump_regs(struct nbl_hw *hw, u32 *regs_buff, u32 len); + +int nbl_af_set_phys_id(struct nbl_hw *hw, u8 eth_port_id, enum ethtool_phys_id_state state); + +void nbl_af_get_pause_stats(struct nbl_hw *hw, u8 eth_port_id, struct ethtool_pause_stats *stats); + +int nbl_af_get_coalesce(struct nbl_hw *hw, struct ethtool_coalesce *ec, + u16 func_id, u16 local_vector_id); +int nbl_af_set_coalesce(struct nbl_hw *hw, u16 func_id, u16 local_vector_id, + u16 num_q_vectors, u32 regval); + +int nbl_af_query_link_speed(struct nbl_hw *hw, u8 eth_port_id, u32 *speed_stat); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/hw.h b/drivers/net/ethernet/nebula-matrix/m1600/hw.h new file mode 100644 index 0000000000000000000000000000000000000000..2ac510853aea9df01596052cc80929fd647fc161 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/hw.h @@ -0,0 +1,1047 @@ 
+/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#ifndef _NBL_HW_H_ +#define _NBL_HW_H_ + +#include +#include + +#define NBL_VENDOR_ID 0x1F0F + +#define NBL_DEVICE_ID_X4_PF 0x1600 +#define NBL_DEVICE_ID_X4_VF 0x1601 + +#define NBL_MAC_X4_MAGIC "ndx4lid" +#define NBL_MAC_MAGIC_LEN 7 + +#define NBL_X4_MEMORY_BAR (0) +#define NBL_X4_MAILBOX_BAR (2) +#define NBL_X4_MSIX_BAR (4) + +#define NBL_ETH_PORT_NUM (4) + +#define NBL_MAX_PF_FUNC (4) +#define NBL_MAX_VF_PER_PF (16) +#define NBL_MAX_FUNC 68 +#define NBL_MAX_TXRX_QUEUE 128 +#define NBL_MAX_INTERRUPT 512 +#define NBL_MAX_MACVLAN_ENTRY 512 + +#define NBL_VF_BASE_FUNC_ID (NBL_MAX_PF_FUNC) + +#define NBL_DEFAULT_VLAN_ID 0 + +#define NBL_PF_MAX_MACVLAN_ENTRIES 16 +#define NBL_VF_MAX_MACVLAN_ENTRIES 7 + +#define NBL_VF_MACVLAN_START_INDEX (NBL_MAX_PF_FUNC * NBL_PF_MAX_MACVLAN_ENTRIES) + +#define BYTES_PER_DWORD (4) +#define BITS_PER_DWORD (BYTES_PER_DWORD * 8) + +#define NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN 16 + +#define NBL_PAUSE_CNT_REG_WIDTH 8 + +/* grep module related structures and values */ +struct nbl_dynamic_version { + u32 sub_version:8; + u32 date:8; + u32 month:8; + u32 year:8; +}; + +#define NBL_GOLDEN_SUB_VERSION (0xEE) + +#define NBL_DYNAMIC_INIT_DONE (0xFFFFFFFF) + +/* pro module related structures and values */ +struct nbl_pro_ctrl { + u32 mac_mismatch_drop_en:4; + u32 rsv:28; +}; + +enum nbl_txd_port_type { + NBL_PORT_ETH, + NBL_PORT_HOST, +}; + +enum nbl_ingress_eth_port_cos_map_mode { + NBL_COS_MODE_DEFAULT_ETH_PRI, + NBL_COS_MODE_EXTERNAL_VLAN, + NBL_COS_MODE_RESERVE, +}; + +struct nbl_ingress_eth_port { + u32 default_vlanid:12; + u32 vlan_type:2; + u32 vlan_check_en:1; + u32 lag:1; + u32 lag_id:2; + u32 cos_map_mode:2; + u32 default_pri:3; + u32 veb_num:2; + u32 rsv0:4; + u32 default_vlan_en:1; + u32 rsv1:2; +}; + +enum nbl_ingress_eth_port_fwd_type { + NBL_INGRESS_FWD_DROP, + NBL_INGRESS_FWD_NORMAL, + NBL_INGRESS_FWD_CPU, + NBL_INGRESS_FWD_RESERVE, +}; + +struct nbl_ingress_eth_port_fwd { + u32 dport:1; + u32 dport_id:7; + u32 forward_queue_id:7; + u32 forward_queue_id_en:1; + u32 rsv:14; + u32 fwd:2; +}; + +enum nbl_src_vsi_cos_mode_type { + NBL_SRC_VSI_COS_MODE_DEFAULT_PORT_PRI, + NBL_SRC_VSI_COS_MODE_VLAN, + NBL_SRC_VSI_COS_MODE_QUEUE_PRI, +}; + +struct nbl_src_vsi_port { + u32 default_vlanid:12; + u32 vlan_type:2; + u32 vlan_check_en:1; + u32 cos_map_mode:3; + u32 default_pri:3; + u32 default_cfi:1; + u32 lag:1; + u32 dport_id:2; + u32 mac_lut_en:1; + u32 default_vlan_en:1; + u32 vlan_push_en:1; + u32 veb_num:2; + u32 rsv0:2; + u32 catch_vsi_idx:7; + u32 vlanid_match_en:1; + u32 vlanid_match_val:12; + u32 forward_queue_id:7; + u32 forward_queue_id_en:1; + u32 rsv1:3; + u32 smac_match_en:1; + u8 smac[ETH_ALEN]; + u16 rsv2; +}; + +struct nbl_dest_vsi_port { + u32 push_ovlan:16; + u32 vlan_pop_cnt:2; + u32 vlan_push_cnt:2; + u32 rsv0:11; + u32 vsi_en:1; + u32 pkt_len:16; + u32 pkt_len_chk_en:1; + u32 pf_id:2; + u32 rsv1:13; +}; + +#define RSS_ENTRIES_PER_VSI (16) + +struct nbl_rss_entry { + u32 rx_queue_id:7; + u32 rsv:25; +}; + +/* ped module related structures and values */ +struct nbl_ped_port_smac { + u8 smac[ETH_ALEN]; + u16 rsv; +}; + +struct nbl_pause_cnt { + u8 eth_pause_cnt[NBL_ETH_PORT_NUM]; +}; + +/* pa module related structures and values */ +enum nbl_pcmrt_slot { + NBL_PCMRT_BROADCAST_SLOT, + NBL_PCMRT_MULTICAST_SLOT, + NBL_PCMRT_LACP_SLOT, + NBL_PCMRT_LLDP_SLOT, + NBL_PCMRT_MAX_SLOT = 32, +}; + +#define NBL_ETYPE_EXT_BIT_LEN (16) +#define 
NBL_ETYPE_EXT_MASK ((1U << NBL_ETYPE_EXT_BIT_LEN) - 1) +#define NBL_ETYPE_EXTS_PER_REG (2) + +enum nbl_etype_ext_slot { + NBL_ETYPE_EXT_LACP_SLOT, + NBL_ETYPE_EXT_LLDP_SLOT, + NBL_ETYPE_EXT_MAX_SLOT = 8, +}; + +enum nbl_pcmrt_action_type { + NBL_PCMRT_ACTION_DROP, + NBL_PCMRT_ACTION_NORMAL, + NBL_PCMRT_ACTION_CAPTURE, + NBL_PCMRT_ACTION_RESERVE, +}; + +#define NBL_PCMRT_ACTION_BIT_LEN (2) +#define NBL_PCMRT_ACTION_MASK ((u64)((1 << NBL_PCMRT_ACTION_BIT_LEN) - 1)) + +struct nbl_pcmrt_action { + u64 action_bitmap; +}; + +enum nbl_pcmrt_key_dmac_type { + NBL_PCMRT_DMAC_UNICAST, + NBL_PCMRT_DMAC_MULTICAST, + NBL_PCMRT_DMAC_BROADCAST, + NBL_PCMRT_DMAC_THIRD_LAYER_MULTICAST, + NBL_PCMRT_DMAC_SPECIAL_MAC, + NBL_PCMRT_DMAC_RESERVE, +}; + +enum nbl_pcmrt_key_etype_type { + NBL_PCMRT_ETYPE_IP, + NBL_PCMRT_ETYPE_ARP, + NBL_PCMRT_ETYPE_RARP, + NBL_PCMRT_ETYPE_IPV6, + NBL_PCMRT_ETYPE_EXT_BASE, +}; + +struct nbl_pcmrt_key { + u32 dmac_type:4; + u32 etype_type:4; + u32 ip_protocol_type:4; + u32 dport_type:4; + u32 tcp_ctrl_bits_type:2; + u32 up_down_type:1; + u32 valid:1; + u32 rsv:12; +}; + +struct nbl_pcmrt_mask { + u32 dmac_mask:1; + u32 etype_mask:1; + u32 ip_protocol_mask:1; + u32 dport_mask:1; + u32 tcp_ctrl_bits_mask:1; + u32 up_down_mask:1; + u32 rsv:26; +}; + +/* memt module related structures and values */ +enum nbl_macvlan_direction { + NBL_MACVLAN_UP_DIRECTION, + NBL_MACVLAN_DOWN_DIRECTION, +}; + +enum nbl_macvlan_dport_type { + NBL_MACVLAN_DPORT_ETH, + NBL_MACVLAN_DPORT_HOST, +}; + +enum nbl_macvlan_operation_type { + NBL_MACVLAN_OP_LOOKUP, + NBL_MACVLAN_OP_ADD, + NBL_MACVLAN_OP_CHANGE, + NBL_MACVLAN_OP_DELETE, +}; + +struct nbl_macvlan_key { + u32 vlan_id:12; + u32 mac5:8; + u32 mac4:8; + u32 mac3_l:4; + u32 mac3_h:4; + u32 mac2:8; + u32 mac1:8; + u32 mac0:8; + u32 eth_port_id:2; + u32 direction:1; + u32 rsv:1; +}; + +struct nbl_macvlan_result { + u32 dport:1; + u32 dport_id:7; + u32 lag_id:2; + u32 lag_enable:1; + u32 rsv0:21; + u32 rsv1; +}; + +struct nbl_macvlan_table_index { + u32 index:9; + u32 rsv:23; +}; + +struct nbl_macvlan_control { + u32 op_type:2; + u32 rsv0:1; + u32 start:1; + u32 flush_enable:1; + u32 rsv1:27; +}; + +struct nbl_macvlan_status { + u32 up_mac_op_type:2; + u32 up_mac_op_success:1; + u32 up_mac_op_done:1; + u32 dn_mac_op_type:2; + u32 dn_mac_op_success:1; + u32 dn_mac_op_done:1; + u32 rsv:24; +}; + +/* dvn module related structures and values */ +struct nbl_queue_reset { + u32 queue_rst_id:7; + u32 rsv:25; +}; + +struct tx_queue_info { + u32 base_addr_l; + u32 base_addr_h; + u32 log2_size:4; + u32 rsv0:12; + u32 src_vsi_idx:7; + u32 rsv1:1; + u32 priority:3; + u32 rsv2:1; + u32 enable:1; + u32 rsv3:3; + u32 tail_ptr:16; + u32 head_ptr:16; +}; + +struct nbl_tx_queue_stat { + u32 pkt_get; + u32 pkt_out; + u32 pkt_drop; + u32 sw_notify; + u32 pkt_dsch; + u32 hd_notify; + u32 hd_notify_empty; + u32 rsv; +}; + +/* uvn module related structures and values */ +struct nbl_rx_queue_reset { + u32 queue_rst_id:7; + u32 rsv0:1; + u32 valid:1; + u32 rsv:23; +}; + +struct rx_queue_info { + u32 base_addr_l; + u32 base_addr_h; + u32 log2_size:4; + u32 buf_length_pow:4; + u32 rsv0:8; + u32 enable:1; + u32 rsv1:15; + u32 tail_ptr:16; + u32 head_ptr:16; +}; + +/* eth module related structures and values */ +#define NBL_SUB_ETH_LEN (0x00010000) + +enum nbl_eth_speed_mode { + NBL_ETH_SPEED_MODE_25G, + NBL_ETH_SPEED_MODE_1G, + NBL_ETH_SPEED_MODE_10G, +}; + +struct nbl_eth_reset_ctl_and_status { + u32 rsv0:1; + u32 rx_reset:1; + u32 tx_reset:1; + u32 gtwiz_reset_rx_datapath:1; + u32 
gtwiz_reset_tx_datapath:1; + u32 rsv1:3; + u32 eth_recovery_flash_mask:1; + u32 rsv2:3; + u32 gt_rxpcsreset:1; + u32 gt_txpcsreset:1; + u32 gt_rxbufreset:1; + u32 gt_txpmareset:1; + u32 gt_rxresetdone:1; + u32 gr_txresetdone:1; + u32 eth_statistics_vld:1; + u32 rsv3:13; +}; + +struct nbl_loopback_mode { + u32 loopback_ctrl:3; + u32 rsv0:1; + u32 speed_sel:2; + u32 rsv1:2; + u32 speed_stat:2; + u32 rsv2:6; + u32 txpolarity:1; + u32 rxpolarity:1; + u32 rsv3:14; +}; + +struct nbl_tx_ctrl { + u32 tx_enable:1; + u32 tx_fcs_ins_enable:1; + u32 tx_ignore_fcs:1; + u32 tx_custom_preamble_enable:1; + u32 tx_send_lfi:1; + u32 tx_send_rfi:1; + u32 tx_send_idle:1; + u32 rsv0:9; + u32 tx_ipg_value:4; + u32 rsv1:12; +}; + +struct nbl_rx_ctrl { + u32 rx_enable:1; + u32 rx_delete_fcs:1; + u32 rx_ignore_fcs:1; + u32 rx_custom_preamble_enable:1; + u32 rx_check_sfd:1; + u32 rx_check_preamble:1; + u32 rx_process_lfi:1; + u32 rx_force_resync:1; + u32 rsv:24; +}; + +struct nbl_pkt_len_limit { + u32 min_pkt_len:8; + u32 rsv1:8; + u32 max_pkt_len:15; + u32 rsv2:1; +}; + +#define NBL_GE_PCS_PMA_LINK_STATUS_SHIFT (0) +struct nbl_eth_rx_stat { + u32 rx_status:1; + u32 rx_block_lock:1; + u32 rx_high_ber:1; + u32 rx_valid_ctrl_code:1; + u32 rx_remote_fault:1; + u32 rx_local_fault:1; + u32 rx_internal_local_fault:1; + u32 rx_received_local_fault:1; + u32 power_good:1; + u32 tx_unfout:1; + u32 gpcs_reset_done:1; + u32 switching:1; + u32 init_done_eth:1; + u32 rsv0:3; + u32 ge_pcs_pma_status:16; +}; + +/* dsch module related structures and values */ +struct nbl_port_map { + u32 port_id:2; + u32 rsv:30; +}; + +/* mailbox module related structures and values */ +struct nbl_mailbox_qinfo_map { + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 msix_idx:11; + u32 valid:1; + u32 rsv:4; +}; + +/* pcompleter module related structures and values */ +struct nbl_queue_table_ready { + u32 ready:1; + u32 rsv:31; +}; + +enum nbl_qid_map_table_type { + NBL_MASTER_QID_MAP_TABLE, + NBL_SLAVE_QID_MAP_TABLE, +}; + +struct nbl_queue_table_select { + u32 select:1; + u32 rsv:31; +}; + +#define NBL_MSIX_MAP_TABLE_MAX_ENTRIES (64) + +struct nbl_function_msix_map { + u64 msix_map_base_addr; + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 valid:1; + u32 rsv0:15; + u32 rsv1; +}; + +struct nbl_msix_map { + u16 valid:1; + u16 global_msix_index:9; + u16 rsv:6; +}; + +#define NBL_QID_MAP_TABLE_ENTRIES (NBL_MAX_TXRX_QUEUE) + +#define NBL_QID_MAP_NOTIFY_ADDR_SHIFT (5) +#define NBL_QID_MAP_NOTIFY_ADDR_LOW_PART_LEN (27) + +struct nbl_qid_map { + u32 local_qid:5; + u32 notify_addr_l:27; + u32 notify_addr_h:16; + u32 global_qid:7; + u32 rsv:9; +}; + +/* padpt module related structures and values */ +struct nbl_msix_entry { + u32 lower_address; + u32 upper_address; + u32 message_data; + u32 vector_mask; +}; + +struct nbl_msix_info { + u32 intrl_pnum:16; + u32 intrl_rate:16; + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 msix_mask_en:1; + u32 rsv:14; + u32 valid:1; +}; + +#define NBL_MSIX_INTR_CTRL_PNUM_SHIFT 0 +#define NBL_MSIX_INTR_CTRL_PNUM_MASK (0xFFFF << 0) +#define NBL_MSIX_INTR_CTRL_RATE_SHIFT 16 +#define NBL_MSIX_INTR_CTRL_RATE_MASK (0xFFFF << 16) +#define NBL_MSIX_INTR_CTRL_RATE_GRANUL 8 + +struct nbl_queue_map { + u32 function:3; + u32 devid:5; + u32 bus:8; + u32 msix_idx:11; + u32 msix_idx_valid:1; + u32 rsv:3; + u32 valid:1; +}; + +/* lsp module related structures and values */ +#define NBL_SFP_CONFIGURE_TAB_LEN (0x40) + +#define NBL_SFP_READ_MAXLEN_ONE_TIME 4 +#define NBL_SFP_RW_DONE_CHN0_MASK 0x1 + +enum NBL_MODULE_INPLACE_STATUS { + 
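/* optical module presence status: 0 = module in place, 1 = not in place */ +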
NBL_MODULE_INPLACE = 0, + NBL_MODULE_NOT_INPLACE = 1, +}; + +enum SFF_RW_MODE { + SFF_I2C_WRITE = 0, + SFF_I2C_READ, +}; + +struct nbl_iic_phy_regs { + u32 request; + u32 rdata; + u32 done; +}; + +struct nbl_sfp_iic_data { + u32 wdata : 8; /* iic write data */ + u32 target_addr : 8; + u32 rw_mode : 1; + u32 slave_addr : 7; + u32 access_bytes : 4; /* the bytes to access for one times ,up to 4 */ + u32 iic_chn : 4; /* kernel driver use chn0 to access iic */ +}; + +#define NBL_ETH_RMON_LEN (0x100) + +/* slave address: 7 bit valid */ +#define SFF_8472_A0 0x50 +#define SFF_8472_A2 0x51 + +/* SFF moudle register addresses: 8 bit valid */ +#define SFF_8472_IDENTIFIER 0x0 +#define SFF_8472_10GB_CAPABILITY 0x3 /* check sff-8472 table 5-3 */ +#define SFF_8472_1GB_CAPABILITY 0x6 /* check sff-8472 table 5-3 */ +#define SFF_8472_CABLE_TECHNOLOGY 0x8 /* check sff-8472 table 5-3 */ +#define SFF_8472_EXTENDED_CAPA 0x24 /* check sff-8024 table 4-4 */ +#define SFF_8472_CABLE_SPEC_COMP 0x3C +#define SFF_8472_DIAGNOSTIC 0x5C /* digital diagnostic monitoring, relates to A2 */ +#define SFF_8472_COMPLIANCE 0x5E /* the specification revision version */ +#define SFF_8472_VENDOR_NAME 0x14 +#define SFF_8472_VENDOR_NAME_LEN 16 /* 16 bytes, from offset 0x14 to offset 0x23 */ +#define SFF_8472_VENDOR_PN 0x28 +#define SFF_8472_VENDOR_PN_LEN 16 +#define SFF_8472_VENDOR_OUI 0x25 /* name and oui cannot all be empty */ +#define SFF_8472_VENDOR_OUI_LEN 3 +#define SFF_8472_SIGNALING_RATE 0xC +#define SFF_8472_SIGNALING_RATE_MAX 0x42 +#define SFF_8472_SIGNALING_RATE_MIN 0x43 +/* optional status/control bits: soft rate select and tx disable */ +#define SFF_8472_OSCB 0x6E +/* extended status/control bits */ +#define SFF_8472_ESCB 0x76 + +/* SFF status code */ +#define SFF_IDENTIFIER_SFP 0x3 +#define SFF_PASSIVE_CABLE 0x4 +#define SFF_ACTIVE_CABLE 0x8 +#define SFF_8472_ADDRESSING_MODE 0x4 +#define SFF_8472_UNSUPPORTED 0x00 +#define SFF_8472_10G_SR_BIT 4 /* 850nm, short reach */ +#define SFF_8472_10G_LR_BIT 5 /* 1310nm, long reach */ +#define SFF_8472_10G_LRM_BIT 6 /* 1310nm, long reach multimode */ +#define SFF_8472_10G_ER_BIT 7 /* 1550nm, extended reach */ +#define SFF_8472_1G_SX_BIT 0 +#define SFF_8472_1G_LX_BIT 1 +#define SFF_8472_1G_CX_BIT 2 +#define SFF_8472_1G_T_BIT 3 +#define SFF_8472_SOFT_TX_DISABLE 6 +#define SFF_8472_SOFT_RATE_SELECT 4 +#define SFF_8472_EMPTY_ASCII 20 +#define SFF_DDM_IMPLEMENTED 0x40 +#define SFF_COPPER_UNSPECIFIED 0 +#define SFF_COPPER_8431_APPENDIX_E 1 +#define SFF_COPPER_8431_LIMITING 4 + +#define NBL_FORCE_LED_EN BIT(8) /* set means control LED by software */ +#define NBL_FORCE_ACT_LED_LEVEL BIT(4) /* 1: led on; 0: led off */ + +/* grep module related macros */ +#define NBL_GREG_MODULE (0x00000000) + +#define NBL_GREG_DYNAMIC_PRJ_ID_REG (NBL_GREG_MODULE + 0x00000000) +#define NBL_GREG_DYNAMIC_VERSION_REG (NBL_GREG_MODULE + 0x00000004) +#define NBL_GREG_DYNAMIC_INIT_REG (NBL_GREG_MODULE + 0x00000010) +#define NBL_GREG_DYNAMIC_CLR_CNT_REG (NBL_GREG_MODULE + 0x0000001C) + +/* pro module related macros */ +#define NBL_PRO_MODULE (0x00020000) + +#define NBL_PRO_CTRL_REG (NBL_PRO_MODULE + 0x00002000) +#define NBL_PRO_MAX_PKT_LEN_REG (NBL_PRO_MODULE + 0x0000200C) + +#define NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(i) \ + (NBL_PRO_MODULE + 0x00003000 + (i) * sizeof(struct nbl_ingress_eth_port_fwd)) +#define NBL_PRO_INGRESS_ETH_PORT_REG_ARR(i) \ + (NBL_PRO_MODULE + 0x00003010 + (i) * sizeof(struct nbl_ingress_eth_port)) + +#define NBL_PRO_SRC_VSI_PORT_REG_ARR(i) \ + (NBL_PRO_MODULE + 0x00004000 + (i) * 
sizeof(struct nbl_src_vsi_port)) +#define NBL_PRO_DEST_VSI_PORT_REG_ARR(i) \ + (NBL_PRO_MODULE + 0x00005000 + (i) * sizeof(struct nbl_dest_vsi_port)) + +#define NBL_PRO_RSS_GROUP_REG_ARR(vsi, i) \ + (NBL_PRO_MODULE + 0x00006000 + \ + ((vsi) * RSS_ENTRIES_PER_VSI + (i)) * sizeof(struct nbl_rss_entry)) + +/* qm module related macros */ +#define NBL_QM_MODULE (0x00030000) + +#define NBL_QM_PORT_TX_PAUSE_EN (NBL_QM_MODULE + 0x000000C0) + +/* ped module related macros */ +#define NBL_PED_MOUDULE (0x00050000) + +#define NBL_PED_PAUSE_TX_CNT (NBL_PED_MOUDULE + 0x000000BC) +#define NBL_PED_PORT_SMAC_REG_H(eth_port_id) \ + (NBL_PED_MOUDULE + 0x000000E0 + (eth_port_id) * sizeof(struct nbl_ped_port_smac)) +#define NBL_PED_PORT_SMAC_REG_L(eth_port_id) \ + (NBL_PED_MOUDULE + 0x000000E4 + (eth_port_id) * sizeof(struct nbl_ped_port_smac)) +#define NBL_PED_ETH_PAUSE_TX_L_REG(eth_port_id) \ + (NBL_PED_MOUDULE + (eth_port_id) * NBL_PAUSE_CNT_REG_WIDTH + 0x00000140) +#define NBL_PED_ETH_PAUSE_TX_H_REG(eth_port_id) \ + (NBL_PED_MOUDULE + (eth_port_id) * NBL_PAUSE_CNT_REG_WIDTH + 0x00000144) + +/* pa module related macros */ +#define NBL_PA_MODULE (0x00060000) + +#define NBL_PA_PAUSE_RX_CNT (NBL_PA_MODULE + 0x00000130) +#define NBL_PA_PAUSE_RX_EN (NBL_PA_MODULE + 0x00000524) + +#define NBL_PA_ETYPE_EXT_REG_ARR(i) (NBL_PA_MODULE + 0x0000050C + (i) * 4) + +#define NBL_PA_PCMRT_ACTION_REG (NBL_PA_MODULE + 0x0000052C) +#define NBL_PA_PCMRT_KEY_REG_ARR(i) \ + (NBL_PA_MODULE + 0x00001000 + (i) * sizeof(struct nbl_pcmrt_key)) +#define NBL_PA_PCMRT_MASK_REG_ARR(i) \ + (NBL_PA_MODULE + 0x00002000 + (i) * sizeof(struct nbl_pcmrt_mask)) +#define NBL_PA_ETH_PAUSE_RX_L_REG(eth_port_id) \ + (NBL_PA_MODULE + (eth_port_id) * NBL_PAUSE_CNT_REG_WIDTH + 0x00000200) +#define NBL_PA_ETH_PAUSE_RX_H_REG(eth_port_id) \ + (NBL_PA_MODULE + (eth_port_id) * NBL_PAUSE_CNT_REG_WIDTH + 0x00000204) + +/* memt module related macros */ +#define NBL_MEMT_MODULE (0x00080000) + +#define NBL_MEMT_OPERATION_REG (NBL_MEMT_MODULE + 0x00003010) +#define NBL_MEMT_KEY_REG (NBL_MEMT_MODULE + 0x00003100) +#define NBL_MEMT_TABLE_INDEX_REG (NBL_MEMT_MODULE + 0x00003200) +#define NBL_MEMT_RESULT_REG (NBL_MEMT_MODULE + 0x00003300) +#define NBL_MEMT_STATUS_REG (NBL_MEMT_MODULE + 0x00003400) + +/* urmux module related macros */ +#define NBL_URMUX_MODULE (0x00090000) + +#define NBL_URMUX_PRO_MAX_PKT_KEN_REG (NBL_URMUX_MODULE + 0x00000050) +#define NBL_URMUX_CFG_SYNC_REG (NBL_URMUX_MODULE + 0x00000060) +#define NBL_URMUX_ETHX_RX_BYTE_L_REG(eth) \ + (NBL_URMUX_MODULE + 0x00000100 + (eth) * NBL_ETH_RMON_LEN) +#define NBL_URMUX_ETHX_RX_BYTE_H_REG(eth) \ + (NBL_URMUX_MODULE + 0x00000104 + (eth) * NBL_ETH_RMON_LEN) +#define NBL_URMUX_ETHX_RX_PKT_REG(eth) \ + (NBL_URMUX_MODULE + 0x00000114 + (eth) * NBL_ETH_RMON_LEN) +#define NBL_URMUX_ETHX_RX_UNDERSIZE_REG(eth) \ + (NBL_URMUX_MODULE + 0x00000118 + (eth) * NBL_ETH_RMON_LEN) +#define NBL_URMUX_ETHX_RX_OVERSIZE_REG(eth) \ + (NBL_URMUX_MODULE + 0x00000150 + (eth) * NBL_ETH_RMON_LEN) +#define NBL_URMUX_ETHX_RX_CRC_ERR_REG(eth) \ + (NBL_URMUX_MODULE + 0x00000154 + (eth) * NBL_ETH_RMON_LEN) + +/* dmux module related macros */ +#define NBL_DMUX_MODULE (0x000A0000) + +#define NBL_DMUX_ETHX_TX_BYTE_L_REG(eth) \ + (NBL_DMUX_MODULE + 0x00000100 + (eth) * NBL_ETH_RMON_LEN) +#define NBL_DMUX_ETHX_TX_BYTE_H_REG(eth) \ + (NBL_DMUX_MODULE + 0x00000104 + (eth) * NBL_ETH_RMON_LEN) +#define NBL_DMUX_ETHX_TX_PKT_REG(eth) \ + (NBL_DMUX_MODULE + 0x00000114 + (eth) * NBL_ETH_RMON_LEN) + +/* dvn module related macros */ +#define 
NBL_DVN_MODULE (0x000B0000) + +#define NBL_DVN_QUEUE_RESET_REG (NBL_DVN_MODULE + 0x00000104) +#define NBL_DVN_QUEUE_INFO_ARR(i) \ + (NBL_DVN_MODULE + 0x00001000 + (i) * sizeof(struct tx_queue_info)) +#define NBL_DVN_QUEUE_STAT_REG_ARR(i) \ + (NBL_DVN_MODULE + 0x00003000 + (i) * sizeof(struct nbl_tx_queue_stat)) + +/* uvn module related macros */ +#define NBL_UVN_MODULE (0x000C0000) + +#define NBL_UVN_QUEUE_RESET_REG (NBL_UVN_MODULE + 0x00000104) +#define NBL_UVN_QUEUE_INFO_ARR(i) \ + (NBL_UVN_MODULE + 0x00001000 + (i) * sizeof(struct rx_queue_info)) +#define NBL_UVN_QUEUE_STATE_REG_ARR(i) \ + (NBL_UVN_MODULE + 0x00000110 + (i) * BYTES_PER_DWORD) +#define NBL_UVN_DROP_CNT_REG_ARR(i) \ + (NBL_UVN_MODULE + 0x00003000 + (i) * BYTES_PER_DWORD) + +/* eth module related macros */ +#define NBL_ETH_MODULE (0x000D0000) + +#define NBL_ETH_RESET_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00000000) +#define NBL_ETH_LOOPBACK_MODE_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00000004) +#define NBL_ETH_RX_CTRL_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00000008) +#define NBL_ETH_PKT_LEN_LIMIT(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000000C) +#define NBL_ETH_RX_STAT_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00000010) +#define NBL_ETH_TX_CTRL_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00000014) +#define NBL_ETH_SELF_STIMU_REG2(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00000038) +#define NBL_ETH_LED_CTRL_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00000058) + +/* rx stat reg */ +#define NBL_ETH_RX_TOTAL_PKT_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001000) +#define NBL_ETH_RX_TOTAL_PKT_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001004) +#define NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001008) +#define NBL_ETH_RX_TOTAL_GOOD_PKT_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000100C) +#define NBL_ETH_RX_TOTAL_BYTES_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001010) +#define NBL_ETH_RX_TOTAL_BYTES_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001014) +#define NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001018) +#define NBL_ETH_RX_TOTAL_GOOD_BYTES_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000101C) +#define NBL_ETH_RX_BAD_FCS_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001020) +#define NBL_ETH_RX_BAD_FCS_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001024) +#define NBL_ETH_RX_FRAMING_ERR_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001028) +#define NBL_ETH_RX_FRAMING_ERR_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000102C) +#define NBL_ETH_RX_BADCODE_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001030) +#define NBL_ETH_RX_BADCODE_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001034) +#define NBL_ETH_RX_OVERSIZE_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * 
NBL_SUB_ETH_LEN + 0x00001050) +#define NBL_ETH_RX_OVERSIZE_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001054) +#define NBL_ETH_RX_UNDERSIZE_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00001058) +#define NBL_ETH_RX_UNDERSIZE_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000105C) +#define NBL_ETH_RX_UNICAST_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010D0) +#define NBL_ETH_RX_UNICAST_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010D4) +#define NBL_ETH_RX_MULTICAST_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010D8) +#define NBL_ETH_RX_MULTICAST_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010DC) +#define NBL_ETH_RX_BROADCAST_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010E0) +#define NBL_ETH_RX_BROADCAST_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010E4) +#define NBL_ETH_RX_VLAN_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010E8) +#define NBL_ETH_RX_VLAN_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000010EC) + +/* tx stat reg */ +#define NBL_ETH_TX_TOTAL_PKT_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002000) +#define NBL_ETH_TX_TOTAL_PKT_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002004) +#define NBL_ETH_TX_TOTAL_BYTES_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002008) +#define NBL_ETH_TX_TOTAL_BYTES_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000200C) +#define NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002010) +#define NBL_ETH_TX_TOTAL_GOOD_PKT_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002014) +#define NBL_ETH_TX_TOTAL_GOOD_BYTES_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002018) +#define NBL_ETH_TX_TOTAL_GOOD_BYTES_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000201C) +#define NBL_ETH_TX_UNICAST_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002090) +#define NBL_ETH_TX_UNICAST_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002094) +#define NBL_ETH_TX_MULTICAST_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x00002098) +#define NBL_ETH_TX_MULTICAST_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x0000209C) +#define NBL_ETH_TX_BROADCAST_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020A0) +#define NBL_ETH_TX_BROADCAST_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020A4) +#define NBL_ETH_TX_VLAN_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020A8) +#define NBL_ETH_TX_VLAN_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020AC) +#define NBL_ETH_TX_BAD_FCS_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020B0) +#define NBL_ETH_TX_BAD_FCS_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020B4) 
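+/* each statistics counter is 64 bits wide and is exposed as a pair of 32-bit _L_/_H_ registers */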
+#define NBL_ETH_TX_FRAME_ERROR_CNT_L_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020B8) +#define NBL_ETH_TX_FRAME_ERROR_CNT_H_REG(eth_port_id) \ + (NBL_ETH_MODULE + (eth_port_id) * NBL_SUB_ETH_LEN + 0x000020BC) + +/* dsch module related macros */ +#define NBL_DSCH_MODULE (0x00110000) + +#define NBL_DSCH_NOTIFY_BITMAP_ARR(i) \ + (NBL_DSCH_MODULE + 0x00003000 + (i) * BYTES_PER_DWORD) +#define NBL_DSCH_FLY_BITMAP_ARR(i) \ + (NBL_DSCH_MODULE + 0x00004000 + (i) * BYTES_PER_DWORD) +#define NBL_DSCH_PORT_MAP_REG_ARR(i) \ + (NBL_DSCH_MODULE + 0x00005000 + (i) * sizeof(struct nbl_port_map)) + +/* mailbox module related macros */ +#define NBL_MAILBOX_MODULE (0x00120000) + +#define NBL_MAILBOX_M_QINFO_MAP_REG_ARR(func_id) \ + (NBL_MAILBOX_MODULE + 0x00001000 + (func_id) * sizeof(struct nbl_mailbox_qinfo_map)) + +/* pcompleter module related macros */ +#define NBL_PCOMPLETER_MODULE (0x00130000) + +#define NBL_PCOMPLETER_AF_NOTIFY_REG (NBL_PCOMPLETER_MODULE + 0x00001000) +#define NBL_PCOMPLETER_QUEUE_TABLE_READY_REG \ + (NBL_PCOMPLETER_MODULE + 0x00000800) +#define NBL_PCOMPLETER_QUEUE_TABLE_SELECT_REG \ + (NBL_PCOMPLETER_MODULE + 0x00000804) +#define NBL_PCOMPLETER_FUNCTION_MSIX_MAP_REG_ARR(i) \ + (NBL_PCOMPLETER_MODULE + 0x00004000 + (i) * sizeof(struct nbl_function_msix_map)) +#define NBL_PCOMPLETER_QID_MAP_REG_ARR(select, i) \ + (NBL_PCOMPLETER_MODULE + 0x00008000 + \ + (select) * NBL_QID_MAP_TABLE_ENTRIES * sizeof(struct nbl_qid_map) + \ + (i) * sizeof(struct nbl_qid_map)) + +/* padpt module related macros */ +#define NBL_PADPT_MODULE (0x00150000) + +#define NBL_FC_CPLH_UP_TH_REG_OFS 0x15c +#define NBL_FC_CPLH_UP_TH_REG_ADDR (NBL_PADPT_MODULE + NBL_FC_CPLH_UP_TH_REG_OFS) + +enum nbl_fc_cplh_up_eth_value { + NBL_FC_CPLH_UP_TH_B8 = 0x100b8, + NBL_FC_CPLH_UP_TH_C0 = 0x100c0, + NBL_FC_CPLH_UP_TH_D0 = 0x100d0, +}; + +#define NBL_PADPT_MSIX_TABLE_REG_ADDR(vector_id) \ + (NBL_PADPT_MODULE + 0x00004000 + (vector_id) * sizeof(struct nbl_msix_entry)) +#define NBL_PADPT_MSIX_TABLE_MASK_FIELD_ARR(vector_id) \ + (NBL_PADPT_MODULE + 0x00004000 + 12 + (vector_id) * sizeof(struct nbl_msix_entry)) +#define NBL_PADPT_MSIX_INFO_REG_ARR(vector_id) \ + (NBL_PADPT_MODULE + 0x00008000 + (vector_id) * sizeof(struct nbl_msix_info)) +#define NBL_PADPT_QUEUE_MAP_REG_ARR(queue_id) \ + (NBL_PADPT_MODULE + 0x0000C000 + (queue_id) * sizeof(struct nbl_queue_map)) + +/* lsp module related macros */ +#define NBL_LSP_MODULE (0x00160000) + +#define NBL_LSP_SFP_I2C_REQUEST_REG(sfp_id) \ + (NBL_LSP_MODULE + (sfp_id) * NBL_SFP_CONFIGURE_TAB_LEN + 0x140) +#define NBL_LSP_SFP_I2C_RDATA_CHN_REG(sfp_id, chn_id) \ + (NBL_LSP_MODULE + (sfp_id) * NBL_SFP_CONFIGURE_TAB_LEN + 0x144 + (chn_id) * 4) +#define NBL_LSP_SFP_I2C_DONE_REG(sfp_id) \ + (NBL_LSP_MODULE + (sfp_id) * NBL_SFP_CONFIGURE_TAB_LEN + 0x158) +#define NBL_LSP_SFP_MOD_REG(sfp_id) \ + (NBL_LSP_MODULE + (sfp_id) * NBL_SFP_CONFIGURE_TAB_LEN + 0x16c) /* module inplace */ +#define NBL_LSP_SFP_RXLOS_REG(sfp_id) \ + (NBL_LSP_MODULE + (sfp_id) * NBL_SFP_CONFIGURE_TAB_LEN + 0x170) + +#define NBL_LSP_EEPROM_REQ_REG (NBL_LSP_MODULE + 0x00000250) +#define NBL_LSP_EEPROM_RW_REG (NBL_LSP_MODULE + 0x00000254) +#define NBL_LSP_EEPROM_SLAVE_ADDR_REG (NBL_LSP_MODULE + 0x00000258) +#define NBL_LSP_EEPROM_ADDR_REG (NBL_LSP_MODULE + 0x0000025C) +#define NBL_LSP_EEPROM_WDATA_REG (NBL_LSP_MODULE + 0x00000260) +#define NBL_LSP_EEPROM_RDATA_REG (NBL_LSP_MODULE + 0x00000264) +#define NBL_LSP_EEPROM_STATUS_REG (NBL_LSP_MODULE + 0x00000268) + +/* prcfg module related macros 
*/ +#define NBL_PRCFG_MODULE (0x00180000) + +#define NBL_PRCFG_TEMPERATURE_REG (NBL_PRCFG_MODULE + 0x00004400) +#define NBL_PRCFG_VCCINT_REG (NBL_PRCFG_MODULE + 0x00004404) +#define NBL_PRCFG_VCCAUX_REG (NBL_PRCFG_MODULE + 0x00004408) +#define NBL_PRCFG_VCCBRAM_REG (NBL_PRCFG_MODULE + 0x00004418) +#define NBL_PRCFG_VUSER0_REG (NBL_PRCFG_MODULE + 0x00004600) +#define NBL_PRCFG_VUSER1_REG (NBL_PRCFG_MODULE + 0x00004604) +#define NBL_PRCFG_VUSER2_REG (NBL_PRCFG_MODULE + 0x00004608) +#define NBL_PRCFG_VUSER3_REG (NBL_PRCFG_MODULE + 0x0000460C) + +struct nbl_eeprom_status { + u32 done:1; + u32 rsv:31; +}; + +#define NBL_EEPROM_LENGTH (0x100) +enum nbl_eeprom_access_type { + NBL_EEPROM_WRITE, + NBL_EEPROM_READ, +}; + +enum nbl_board_version { + NBL_X4_BOARD = 0x01, +}; + +union nbl_board_info { + struct { + u8 version; + u8 magic[7]; + u8 pn[16]; + u8 sn[16]; + u8 mac1[8]; /* pf1 */ + u8 mac2[8]; /* pf2 */ + u8 mac3[8]; /* pf3 */ + u8 mac4[8]; /* pf4 */ + u8 mac5[8]; /* pf5 */ + u8 mac6[8]; /* pf6 */ + u8 mac7[8]; /* pf7 */ + u8 mac8[8]; /* pf8 */ + }; + struct { + u8 data[252]; + u32 crc; + }; +}; + +/* mailbox BAR related macros and structures */ +struct nbl_mailbox_qinfo { + u16 qid; + u16 tail_ptr; + u32 rx_base_addr_l; + u32 rx_base_addr_h; + u32 rx_size_bwid; + u32 rx_cmd; + u32 rsv0; + u32 tx_base_addr_l; + u32 tx_base_addr_h; + u32 tx_size_bwid; + u32 tx_cmd; + u32 rsv1; + u32 rx_head_ptr; + u32 tx_head_ptr; + u32 rx_tail_ptr; + u32 tx_tail_ptr; + u32 rsv2; +} __packed; + +#define NBL_MAILBOX_TX_RESET BIT(0) +#define NBL_MAILBOX_RX_RESET BIT(0) +#define NBL_MAILBOX_TX_ENABLE BIT(1) +#define NBL_MAILBOX_RX_ENABLE BIT(1) + +#define NBL_MAILBOX_TX_DESC_AVAIL BIT(0) +#define NBL_MAILBOX_TX_DESC_USED BIT(1) +#define NBL_MAILBOX_RX_DESC_AVAIL BIT(3) +#define NBL_MAILBOX_RX_DESC_USED BIT(4) + +#define NBL_MAILBOX_NOTIFY_ADDR (0x00000000) + +#define NBL_MAILBOX_QINFO_CFG_REG (0x00000000) +#define NBL_MAILBOX_QINFO_CFG_RX_BASE_ADDR_L_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, rx_base_addr_l)) +#define NBL_MAILBOX_QINFO_CFG_RX_BASE_ADDR_H_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, rx_base_addr_h)) +#define NBL_MAILBOX_QINFO_CFG_RX_SIZE_BWID_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, rx_size_bwid)) +#define NBL_MAILBOX_QINFO_CFG_RX_CMD_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, rx_cmd)) +#define NBL_MAILBOX_QINFO_CFG_TX_BASE_ADDR_L_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, tx_base_addr_l)) +#define NBL_MAILBOX_QINFO_CFG_TX_BASE_ADDR_H_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, tx_base_addr_h)) +#define NBL_MAILBOX_QINFO_CFG_TX_SIZE_BWID_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, tx_size_bwid)) +#define NBL_MAILBOX_QINFO_CFG_TX_CMD_FIELD \ + (NBL_MAILBOX_QINFO_CFG_REG + offsetof(struct nbl_mailbox_qinfo, tx_cmd)) + +/* msix BAR related macros and structures */ +#define NBL_MSIX_VECTOR_TABLE_OFFSET (0x00000000) + +#define NBL_MSIX_VECTOR_TABLE_MASK_FIELD_ARR(vector_id) \ + (NBL_MSIX_VECTOR_TABLE_OFFSET + (vector_id) * sizeof(struct nbl_msix_entry) + \ + offsetof(struct nbl_msix_entry, vector_mask)) + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/hwmon.c b/drivers/net/ethernet/nebula-matrix/m1600/hwmon.c new file mode 100644 index 0000000000000000000000000000000000000000..5386321ae4ca0ede594eb5b6a4d2b7776f21466d --- /dev/null +++ 
b/drivers/net/ethernet/nebula-matrix/m1600/hwmon.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#include +#include + +#include "hw.h" +#include "common.h" +#include "hwmon.h" + +enum NBL_HWMON_TEMP { + NBL_TEMP, + NBL_TEMP_MAX, +}; + +enum NBL_HWMON_VOLTAGE { + NBL_VOLT_VCCINT, + NBL_VOLT_VCCAUX, + NBL_VOLT_VCCBRAM, + NBL_VOLT_VUSER0, + NBL_VOLT_VUSER1, + NBL_VOLT_VUSER2, + NBL_VOLT_VUSER3, + NBL_VOLT_MAX, +}; + +#define NBL_HWMON_TEMP_MUL (5093140064ULL) +#define NBL_HWMON_TEMP_SHIFT (16) +#define NBL_HWMON_TEMP_SUB (2802308787LL) +#define NBL_HWMON_TEMP_FAC (10000) + +#define NBL_HWMON_VOLT_MUL (3000) +#define NBL_HWMON_VOLT_SHIFT (16) + +static ssize_t nbl_hwmon_temp_input_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nbl_adapter *adapter = dev_get_drvdata(dev); + struct nbl_hw *hw = &adapter->hw; + int channel = to_sensor_dev_attr(attr)->index; + u32 val; + int temperature; + int len; + + switch (channel) { + case NBL_TEMP: + val = rd32(hw, NBL_PRCFG_TEMPERATURE_REG); + temperature = (int)((((s64)(((u64)val * NBL_HWMON_TEMP_MUL) >> + NBL_HWMON_TEMP_SHIFT)) - NBL_HWMON_TEMP_SUB) / + NBL_HWMON_TEMP_FAC); + break; + default: + return -EINVAL; + } + + len = snprintf(buf, PAGE_SIZE, "%d\n", temperature); + return len; +} + +static ssize_t nbl_hwmon_in_input_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct nbl_adapter *adapter = dev_get_drvdata(dev); + struct nbl_hw *hw = &adapter->hw; + int channel = to_sensor_dev_attr(attr)->index; + u32 val; + u32 voltage; + int len; + + switch (channel) { + case NBL_VOLT_VCCINT: + val = rd32(hw, NBL_PRCFG_VCCINT_REG); + break; + case NBL_VOLT_VCCAUX: + val = rd32(hw, NBL_PRCFG_VCCAUX_REG); + break; + case NBL_VOLT_VCCBRAM: + val = rd32(hw, NBL_PRCFG_VCCBRAM_REG); + break; + case NBL_VOLT_VUSER0: + val = rd32(hw, NBL_PRCFG_VUSER0_REG); + break; + case NBL_VOLT_VUSER1: + val = rd32(hw, NBL_PRCFG_VUSER1_REG); + break; + case NBL_VOLT_VUSER2: + val = rd32(hw, NBL_PRCFG_VUSER2_REG); + break; + case NBL_VOLT_VUSER3: + val = rd32(hw, NBL_PRCFG_VUSER3_REG); + break; + default: + return -EINVAL; + } + + voltage = (val * NBL_HWMON_VOLT_MUL) >> NBL_HWMON_VOLT_SHIFT; + + len = snprintf(buf, PAGE_SIZE, "%u\n", voltage); + return len; +} + +static SENSOR_DEVICE_ATTR(temp1_input, 0444, nbl_hwmon_temp_input_show, NULL, NBL_TEMP); + +static SENSOR_DEVICE_ATTR(in0_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VCCINT); +static SENSOR_DEVICE_ATTR(in1_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VCCAUX); +static SENSOR_DEVICE_ATTR(in2_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VCCBRAM); +static SENSOR_DEVICE_ATTR(in3_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER0); +static SENSOR_DEVICE_ATTR(in4_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER1); +static SENSOR_DEVICE_ATTR(in5_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER2); +static SENSOR_DEVICE_ATTR(in6_input, 0444, nbl_hwmon_in_input_show, NULL, NBL_VOLT_VUSER3); + +static struct attribute *hwmon_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + &sensor_dev_attr_in5_input.dev_attr.attr, + &sensor_dev_attr_in6_input.dev_attr.attr, + NULL, +}; + +static umode_t 
hwmon_attributes_visible(struct kobject __always_unused *kobj, + struct attribute *attr, int __always_unused index) +{ + return attr->mode; +} + +static const struct attribute_group hwmon_attrgroup = { + .attrs = hwmon_attributes, + .is_visible = hwmon_attributes_visible, +}; + +static const struct attribute_group *hwmon_groups[] = { + &hwmon_attrgroup, + NULL, +}; + +int nbl_hwmon_init(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct device *dev = nbl_adapter_to_dev(adapter); + + if (!is_af(hw)) + return 0; + + adapter->hwmon_dev = hwmon_device_register_with_groups(dev, "nbl_x4", adapter, + hwmon_groups); + + return PTR_ERR_OR_ZERO(adapter->hwmon_dev); +} + +void nbl_hwmon_fini(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + + if (!is_af(hw)) + return; + + if (adapter->hwmon_dev) + hwmon_device_unregister(adapter->hwmon_dev); +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/hwmon.h b/drivers/net/ethernet/nebula-matrix/m1600/hwmon.h new file mode 100644 index 0000000000000000000000000000000000000000..290da970f4011c22528785282ac8ed9a6e96dab0 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/hwmon.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#ifndef _NBL_HWMON_H_ +#define _NBL_HWMON_H_ + +int nbl_hwmon_init(struct nbl_adapter *adapter); +void nbl_hwmon_fini(struct nbl_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/interrupt.c b/drivers/net/ethernet/nebula-matrix/m1600/interrupt.c new file mode 100644 index 0000000000000000000000000000000000000000..7d9ee38fa388f8a4ba0a68c07e6fe6a4eaf64119 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/interrupt.c @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#include +#include +#include + +#include "hw.h" +#include "common.h" +#include "interrupt.h" +#include "txrx.h" +#include "mailbox.h" + +static int nbl_alloc_msix_entries(struct nbl_adapter *adapter, u16 num_entries) +{ + u16 i; + + adapter->msix_entries = devm_kcalloc(nbl_adapter_to_dev(adapter), num_entries, + sizeof(*adapter->msix_entries), GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) + adapter->msix_entries[i].entry = i; + + return 0; +} + +static void nbl_free_msix_entries(struct nbl_adapter *adapter) +{ + devm_kfree(nbl_adapter_to_dev(adapter), adapter->msix_entries); + adapter->msix_entries = NULL; +} + +static int nbl_alloc_msix_intr(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + int needed; + int err; + + needed = adapter->num_lan_msix + adapter->num_mailbox_msix; + /* An additional interrupt is needed by AF to process + * protocol packets such as ARP broadcast packets. + */ + needed += is_af(hw) ? 
1 : 0; + err = nbl_alloc_msix_entries(adapter, (u16)needed); + if (err) { + pr_err("Allocate msix entries failed\n"); + return err; + } + + err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, needed, needed); + if (err < 0) + goto enable_msix_failed; + + return needed; + +enable_msix_failed: + nbl_free_msix_entries(adapter); + return err; +} + +static void nbl_free_msix_intr(struct nbl_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + nbl_free_msix_entries(adapter); +} + +int nbl_init_interrupt_scheme(struct nbl_adapter *adapter) +{ + int err; + struct device *dev = nbl_adapter_to_dev(adapter); + + err = nbl_alloc_msix_intr(adapter); + if (err < 0) { + dev_err(dev, "Failed to enable MSI-X vectors\n"); + return err; + } + + return 0; +} + +void nbl_fini_interrupt_scheme(struct nbl_adapter *adapter) +{ + nbl_free_msix_intr(adapter); +} + +static void nbl_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct nbl_q_vector *q_vector = container_of(notify, struct nbl_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +static void nbl_irq_affinity_release(struct kref __always_unused *ref) +{ +} + +int nbl_napi_poll(struct napi_struct *napi, int budget) +{ + struct nbl_q_vector *q_vector = container_of(napi, struct nbl_q_vector, napi); + struct nbl_adapter *adapter = q_vector->adapter; + struct nbl_hw *hw = &adapter->hw; + bool clean_complete = true; + struct nbl_ring *ring; + int budget_per_ring; + int work_done; + int cleaned; + bool wd; + + for (ring = q_vector->tx_ring; ring; ring = ring->next) { + wd = nbl_clean_tx_irq(ring, budget); + if (!wd) + clean_complete = false; + } + + if (unlikely(q_vector->num_ring_rx > 1)) + budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); + else + budget_per_ring = budget; + + work_done = 0; + for (ring = q_vector->rx_ring; ring; ring = ring->next) { + cleaned = nbl_clean_rx_irq(ring, budget_per_ring); + + if (cleaned >= budget_per_ring) + clean_complete = false; + work_done += cleaned; + } + + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + napi_complete_done(napi, work_done); + + nbl_enable_msix_irq(hw, q_vector); + + return budget - 1; + } + + return budget; + } + + if (likely(napi_complete_done(napi, work_done))) + nbl_enable_msix_irq(hw, q_vector); + + return min_t(int, work_done, budget - 1); +} + +static irqreturn_t nbl_msix_clean_rings(int __always_unused irq, void *data) +{ + struct nbl_q_vector *q_vector = (struct nbl_q_vector *)data; + + if (!q_vector->tx_ring && !q_vector->rx_ring) + return IRQ_HANDLED; + + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +int nbl_request_irq(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_q_vector *q_vector; + u16 q_vector_id; + u16 rx_intr_index; + u16 tx_intr_index; + u32 irq_num; + int cpu; + int err; + + rx_intr_index = 0; + tx_intr_index = 0; + for (q_vector_id = 0; q_vector_id < adapter->num_q_vectors; q_vector_id++) { + q_vector = adapter->q_vectors[q_vector_id]; + irq_num = adapter->msix_entries[q_vector_id].vector; + + if (q_vector->tx_ring && q_vector->rx_ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%02u", adapter->netdev->name, "TxRx", rx_intr_index); + rx_intr_index++; + tx_intr_index++; + } else if (q_vector->rx_ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%02u", adapter->netdev->name, "Rx", 
rx_intr_index); + rx_intr_index++; + } else if (q_vector->tx_ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%02u", adapter->netdev->name, "Tx", tx_intr_index); + tx_intr_index++; + } else { + pr_notice("Queue vector %u is not used now\n", q_vector_id); + WARN_ON(1); + } + + err = devm_request_irq(dev, irq_num, nbl_msix_clean_rings, + 0, q_vector->name, q_vector); + if (err) { + netdev_err(adapter->netdev, "Queue vector %u requests MSIX irq failed with error %d\n", + q_vector_id, err); + goto request_irq_err; + } + + q_vector->affinity_notify.notify = nbl_irq_affinity_notify; + q_vector->affinity_notify.release = nbl_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + + cpu = cpumask_local_spread(q_vector->global_vector_id, + dev_to_node(dev)); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + } + + return 0; + +request_irq_err: + while (q_vector_id--) { + irq_num = adapter->msix_entries[q_vector_id].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, adapter->q_vectors[q_vector_id]); + } + return err; +} + +void nbl_free_irq(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_q_vector *q_vector; + u16 q_vector_id; + u32 irq_num; + + for (q_vector_id = 0; q_vector_id < adapter->num_q_vectors; q_vector_id++) { + q_vector = adapter->q_vectors[q_vector_id]; + + WARN_ON(!q_vector || !(q_vector->tx_ring || q_vector->rx_ring)); + irq_num = adapter->msix_entries[q_vector_id].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, adapter->q_vectors[q_vector_id]); + } +} + +void nbl_enable_all_napis(struct nbl_adapter *adapter) +{ + int q_vector_id; + struct nbl_q_vector *q_vector; + + for (q_vector_id = 0; q_vector_id < adapter->num_q_vectors; q_vector_id++) { + q_vector = adapter->q_vectors[q_vector_id]; + + if (q_vector->tx_ring || q_vector->rx_ring) + napi_enable(&q_vector->napi); + } +} + +void nbl_disable_all_napis(struct nbl_adapter *adapter) +{ + int q_vector_id; + struct nbl_q_vector *q_vector; + + for (q_vector_id = 0; q_vector_id < adapter->num_q_vectors; q_vector_id++) { + q_vector = adapter->q_vectors[q_vector_id]; + + if (q_vector->tx_ring || q_vector->rx_ring) + napi_disable(&q_vector->napi); + } +} + +void nbl_af_configure_msix_irq(struct nbl_hw *hw, u16 func_id, u16 local_vector_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_msix_info msix_info; + u16 global_vector_id; + u8 bus; + u8 devid; + u8 function; + + WARN_ON(!func_res); + WARN_ON(local_vector_id >= func_res->num_interrupts); + global_vector_id = func_res->interrupts[local_vector_id]; + nbl_af_compute_bdf(hw, func_id, &bus, &devid, &function); + + memset(&msix_info, 0, sizeof(msix_info)); + msix_info.intrl_pnum = 0; + msix_info.intrl_rate = 0; + msix_info.function = function; + msix_info.devid = devid; + msix_info.bus = bus; + msix_info.valid = 1; + if (func_id < NBL_MAX_PF_FUNC) + msix_info.msix_mask_en = 1; + else + msix_info.msix_mask_en = 0; + + wr32_for_each(hw, NBL_PADPT_MSIX_INFO_REG_ARR(global_vector_id), + (u32 *)&msix_info, sizeof(msix_info)); +} + +static void nbl_configure_msix_irq(struct nbl_hw *hw, struct nbl_q_vector *q_vector) +{ + u16 local_vector_id; + + local_vector_id = q_vector->q_vector_id; + if (is_af(hw)) + nbl_af_configure_msix_irq(hw, 0, local_vector_id); + else + 
nbl_mailbox_req_cfg_msix_irq(hw, local_vector_id); +} + +void nbl_configure_msix_irqs(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_q_vector *q_vector; + u16 i; + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vectors[i]; + nbl_configure_msix_irq(hw, q_vector); + } +} + +void nbl_af_clear_msix_irq_conf(struct nbl_hw *hw, u16 func_id, u16 local_vector_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_msix_info msix_info; + u16 global_vector_id; + + if (!func_res || local_vector_id >= func_res->num_interrupts) { + pr_err("Severe error occurred when clear MSIX irq configuration\n"); + return; + } + global_vector_id = func_res->interrupts[local_vector_id]; + + memset(&msix_info, 0, sizeof(msix_info)); + wr32_for_each(hw, NBL_PADPT_MSIX_INFO_REG_ARR(global_vector_id), + (u32 *)&msix_info, sizeof(msix_info)); +} + +static void nbl_clear_msix_irq_conf(struct nbl_hw *hw, struct nbl_q_vector *q_vector) +{ + u16 local_vector_id; + + local_vector_id = q_vector->q_vector_id; + if (is_af(hw)) + nbl_af_clear_msix_irq_conf(hw, 0, local_vector_id); + else + nbl_mailbox_req_clear_msix_irq_conf(hw, local_vector_id); +} + +void nbl_clear_msix_irqs_conf(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_q_vector *q_vector; + u16 i; + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vectors[i]; + nbl_clear_msix_irq_conf(hw, q_vector); + } +} + +/* NOTICE: maybe we can write to MSIX bar directly to unmask irq */ +void nbl_enable_msix_irq(struct nbl_hw *hw, struct nbl_q_vector *q_vector) +{ + u16 local_vector_id; + + local_vector_id = q_vector->q_vector_id; + msix_wr32(hw, NBL_MSIX_VECTOR_TABLE_MASK_FIELD_ARR(local_vector_id), 0); +} + +int nbl_af_forward_ring_napi_poll(struct napi_struct *napi, int budget) +{ + struct nbl_q_vector *q_vector = container_of(napi, struct nbl_q_vector, napi); + struct nbl_adapter *adapter = q_vector->adapter; + struct nbl_hw *hw = &adapter->hw; + bool clean_complete = true; + struct nbl_ring *ring; + int budget_per_ring; + int work_done; + int cleaned; + bool wd; + + for (ring = q_vector->tx_ring; ring; ring = ring->next) { + wd = nbl_af_clean_forward_ring_tx_irq(ring, budget); + if (!wd) + clean_complete = false; + } + + if (unlikely(q_vector->num_ring_rx > 1)) + budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); + else + budget_per_ring = budget; + + work_done = 0; + for (ring = q_vector->rx_ring; ring; ring = ring->next) { + cleaned = nbl_af_clean_forward_ring_rx_irq(ring, budget_per_ring); + + if (cleaned >= budget_per_ring) + clean_complete = false; + work_done += cleaned; + } + + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + napi_complete_done(napi, work_done); + + nbl_enable_msix_irq(hw, q_vector); + + return budget - 1; + } + + return budget; + } + + if (likely(napi_complete_done(napi, work_done))) + nbl_enable_msix_irq(hw, q_vector); + + return min_t(int, work_done, budget - 1); +} + +int nbl_af_forward_ring_request_irq(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_q_vector *q_vector; + u16 q_vector_id; + u32 irq_num; + int cpu; + int err; + + q_vector_id = adapter->num_q_vectors; + q_vector = adapter->q_vectors[q_vector_id]; + + irq_num = adapter->msix_entries[q_vector_id].vector; + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + 
"%s-%s", adapter->netdev->name, "forward_ring"); + + err = devm_request_irq(dev, irq_num, nbl_msix_clean_rings, + 0, q_vector->name, q_vector); + if (err) { + pr_err("AF request irq for forward ring failed with error %d\n", err); + return err; + } + + q_vector->affinity_notify.notify = nbl_irq_affinity_notify; + q_vector->affinity_notify.release = nbl_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + + cpu = cpumask_local_spread(q_vector->global_vector_id, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + + return 0; +} + +void nbl_af_forward_ring_free_irq(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_q_vector *q_vector; + u16 q_vector_id; + u32 irq_num; + + q_vector_id = adapter->num_q_vectors; + q_vector = adapter->q_vectors[q_vector_id]; + + irq_num = adapter->msix_entries[q_vector_id].vector; + + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, q_vector); +} + +void nbl_af_enable_forward_ring_napi(struct nbl_adapter *adapter) +{ + int q_vector_id; + struct nbl_q_vector *q_vector; + + q_vector_id = adapter->num_q_vectors; + q_vector = adapter->q_vectors[q_vector_id]; + napi_enable(&q_vector->napi); +} + +void nbl_af_disable_forward_ring_napi(struct nbl_adapter *adapter) +{ + int q_vector_id; + struct nbl_q_vector *q_vector; + + q_vector_id = adapter->num_q_vectors; + q_vector = adapter->q_vectors[q_vector_id]; + napi_disable(&q_vector->napi); +} + +void nbl_af_configure_forward_ring_irq(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_q_vector *q_vector; + int q_vector_id; + + q_vector_id = adapter->num_q_vectors; + q_vector = adapter->q_vectors[q_vector_id]; + nbl_configure_msix_irq(hw, q_vector); +} + +void nbl_af_clear_forward_ring_irq_conf(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_q_vector *q_vector; + int q_vector_id; + + q_vector_id = adapter->num_q_vectors; + q_vector = adapter->q_vectors[q_vector_id]; + nbl_clear_msix_irq_conf(hw, q_vector); +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/interrupt.h b/drivers/net/ethernet/nebula-matrix/m1600/interrupt.h new file mode 100644 index 0000000000000000000000000000000000000000..903487e6943f3bd0865a8b823c7f7e0fdda040d7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/interrupt.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Monte Song + */ + +#ifndef _NBL_INTERRUPT_H_ +#define _NBL_INTERRUPT_H_ + +int nbl_init_interrupt_scheme(struct nbl_adapter *adapter); +void nbl_fini_interrupt_scheme(struct nbl_adapter *adapter); + +int nbl_napi_poll(struct napi_struct *napi, int budget); + +int nbl_request_irq(struct nbl_adapter *adapter); +void nbl_free_irq(struct nbl_adapter *adapter); + +void nbl_enable_all_napis(struct nbl_adapter *adapter); +void nbl_disable_all_napis(struct nbl_adapter *adapter); + +void nbl_configure_msix_irqs(struct nbl_adapter *adapter); + +void nbl_af_configure_msix_irq(struct nbl_hw *hw, u16 func_id, u16 local_vector_id); + +void nbl_af_clear_msix_irq_conf(struct nbl_hw *hw, u16 func_id, u16 local_vector_id); +void nbl_clear_msix_irqs_conf(struct nbl_adapter *adapter); + +void nbl_enable_msix_irq(struct nbl_hw *hw, struct nbl_q_vector *q_vector); + +int nbl_af_forward_ring_napi_poll(struct napi_struct *napi, int budget); + +int nbl_af_forward_ring_request_irq(struct nbl_adapter *adapter); +void nbl_af_forward_ring_free_irq(struct nbl_adapter *adapter); + +void nbl_af_enable_forward_ring_napi(struct nbl_adapter *adapter); +void nbl_af_disable_forward_ring_napi(struct nbl_adapter *adapter); + +void nbl_af_configure_forward_ring_irq(struct nbl_adapter *adapter); +void nbl_af_clear_forward_ring_irq_conf(struct nbl_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/macvlan.c b/drivers/net/ethernet/nebula-matrix/m1600/macvlan.c new file mode 100644 index 0000000000000000000000000000000000000000..5aa800e1ed3981a21ae368c25584bd843fb76c44 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/macvlan.c @@ -0,0 +1,563 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#include "hw.h" +#include "common.h" +#include "mailbox.h" +#include "macvlan.h" + +static void nbl_macvlan_set_key(struct nbl_macvlan_key *key, u8 eth_port_id, + u8 *mac_addr, u16 vlan_id, + enum nbl_macvlan_direction direction) +{ + memset(key, 0, sizeof(*key)); + key->vlan_id = vlan_id; + key->mac5 = mac_addr[5]; + key->mac4 = mac_addr[4]; + key->mac3_l = mac_addr[3]; + key->mac3_h = mac_addr[3] >> 4; + key->mac2 = mac_addr[2]; + key->mac1 = mac_addr[1]; + key->mac0 = mac_addr[0]; + key->eth_port_id = eth_port_id; + key->direction = direction; +} + +static void nbl_macvlan_set_result(struct nbl_macvlan_result *result, + enum nbl_macvlan_dport_type dport_type, + u8 dport_id) +{ + memset(result, 0, sizeof(*result)); + result->dport = dport_type; + result->dport_id = dport_id; +} + +static void nbl_macvlan_set_table_index(struct nbl_macvlan_table_index *table_index, + u16 index) +{ + memset(table_index, 0, sizeof(*table_index)); + table_index->index = index; +} + +static void nbl_macvlan_set_control(struct nbl_macvlan_control *control, + enum nbl_macvlan_operation_type type) +{ + memset(control, 0, sizeof(*control)); + control->op_type = type; + control->start = 1; +} + +static int nbl_macvlan_table_add(struct nbl_hw *hw, struct nbl_macvlan_key *key, + struct nbl_macvlan_result *result, + struct nbl_macvlan_table_index *table_index, + struct nbl_macvlan_control *control) +{ + struct nbl_macvlan_status status; + int i = NBL_MACVLAN_TRY_GET_STATUS_TIMES; + enum nbl_macvlan_direction direction; + enum nbl_macvlan_operation_type type; + + direction = key->direction; + type = control->op_type; + + wr32_for_each(hw, NBL_MEMT_KEY_REG, (u32 *)key, sizeof(*key)); + wr32_for_each(hw, NBL_MEMT_TABLE_INDEX_REG, (u32 *)table_index, 
sizeof(*table_index)); + wr32_for_each(hw, NBL_MEMT_RESULT_REG, (u32 *)result, sizeof(*result)); + wr32_for_each(hw, NBL_MEMT_OPERATION_REG, (u32 *)control, sizeof(*control)); + + while (i--) { + rd32_for_each(hw, NBL_MEMT_STATUS_REG, (u32 *)&status, sizeof(status)); + if (direction == NBL_MACVLAN_UP_DIRECTION) { + if (status.up_mac_op_done) { + if (status.up_mac_op_type != type) { + pr_err("Add to up macvlan table, but invalid op type is returned\n"); + return -EINVAL; + } + if (status.up_mac_op_success) + return 0; + pr_info("Add to up macvlan table, but failed\n"); + return -EEXIST; + } + } else { + if (status.dn_mac_op_done) { + if (status.dn_mac_op_type != type) { + pr_err("Add to down macvlan table, but invalid op type is returned\n"); + return -EINVAL; + } + if (status.dn_mac_op_success) + return 0; + pr_info("Add to down macvlan table, but failed\n"); + return -EEXIST; + } + } + + udelay(2); + } + + return -EAGAIN; +} + +static int nbl_macvlan_table_delete(struct nbl_hw *hw, struct nbl_macvlan_key *key, + struct nbl_macvlan_control *control) +{ + struct nbl_macvlan_status status; + int i = NBL_MACVLAN_TRY_GET_STATUS_TIMES; + enum nbl_macvlan_direction direction; + enum nbl_macvlan_operation_type type; + + direction = key->direction; + type = control->op_type; + + wr32_for_each(hw, NBL_MEMT_KEY_REG, (u32 *)key, sizeof(*key)); + wr32_for_each(hw, NBL_MEMT_OPERATION_REG, (u32 *)control, sizeof(*control)); + + while (i--) { + rd32_for_each(hw, NBL_MEMT_STATUS_REG, (u32 *)&status, sizeof(status)); + if (direction == NBL_MACVLAN_UP_DIRECTION) { + if (status.up_mac_op_done) { + if (status.up_mac_op_type != type) { + pr_err("Delete up macvlan table entry, but invalid op type is returned\n"); + return -EINVAL; + } + if (status.up_mac_op_success) + return 0; + pr_info("Delete up macvlan table entry, but failed\n"); + return -ENOENT; + } + } else { + if (status.dn_mac_op_done) { + if (status.dn_mac_op_type != type) { + pr_err("Delete down macvlan table entry, but invalid op type is returned\n"); + return -EINVAL; + } + if (status.dn_mac_op_success) + return 0; + pr_info("Delete down macvlan table entry, but failed\n"); + return -ENOENT; + } + } + + udelay(2); + } + + return -EAGAIN; +} + +static int nbl_macvlan_up_add(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id, + u8 vsi_id, int index) +{ + struct nbl_macvlan_key key; + struct nbl_macvlan_result result; + struct nbl_macvlan_table_index table_index; + struct nbl_macvlan_control control; + + nbl_macvlan_set_key(&key, eth_port_id, mac_addr, vlan_id, NBL_MACVLAN_UP_DIRECTION); + nbl_macvlan_set_result(&result, NBL_MACVLAN_DPORT_HOST, vsi_id); + nbl_macvlan_set_table_index(&table_index, index); + nbl_macvlan_set_control(&control, NBL_MACVLAN_OP_ADD); + + return nbl_macvlan_table_add(hw, &key, &result, &table_index, &control); +} + +static int nbl_macvlan_up_delete(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id) +{ + struct nbl_macvlan_key key; + struct nbl_macvlan_control control; + + nbl_macvlan_set_key(&key, eth_port_id, mac_addr, vlan_id, NBL_MACVLAN_UP_DIRECTION); + nbl_macvlan_set_control(&control, NBL_MACVLAN_OP_DELETE); + + return nbl_macvlan_table_delete(hw, &key, &control); +} + +static int nbl_macvlan_down_add(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id, + u8 vsi_id, int index) +{ + struct nbl_macvlan_key key; + struct nbl_macvlan_result result; + struct nbl_macvlan_table_index table_index; + struct nbl_macvlan_control control; + + nbl_macvlan_set_key(&key, eth_port_id, mac_addr, vlan_id, 
NBL_MACVLAN_DOWN_DIRECTION); + nbl_macvlan_set_result(&result, NBL_MACVLAN_DPORT_HOST, vsi_id); + nbl_macvlan_set_table_index(&table_index, index); + nbl_macvlan_set_control(&control, NBL_MACVLAN_OP_ADD); + + return nbl_macvlan_table_add(hw, &key, &result, &table_index, &control); +} + +static int nbl_macvlan_down_delete(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id) +{ + struct nbl_macvlan_key key; + struct nbl_macvlan_control control; + + nbl_macvlan_set_key(&key, eth_port_id, mac_addr, vlan_id, NBL_MACVLAN_DOWN_DIRECTION); + nbl_macvlan_set_control(&control, NBL_MACVLAN_OP_DELETE); + + return nbl_macvlan_table_delete(hw, &key, &control); +} + +int nbl_macvlan_add(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id, + u8 vsi_id, int index) +{ + int err; + int ret; + + err = nbl_macvlan_up_add(hw, eth_port_id, mac_addr, vlan_id, vsi_id, index); + if (err) + return err; + + err = nbl_macvlan_down_add(hw, eth_port_id, mac_addr, vlan_id, vsi_id, index); + if (err) { + ret = nbl_macvlan_up_delete(hw, eth_port_id, mac_addr, vlan_id); + if (ret) + pr_err("Failed to roll back macvlan table add operation with error %d\n", + ret); + return err; + } + + return 0; +} + +int nbl_macvlan_delete(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id) +{ + int err; + + err = nbl_macvlan_up_delete(hw, eth_port_id, mac_addr, vlan_id); + if (err) + return err; + + err = nbl_macvlan_down_delete(hw, eth_port_id, mac_addr, vlan_id); + if (err) { + pr_err("Failed to delete entry in macvlan down table though delete entry in macvlan up table success\n"); + return err; + } + + return 0; +} + +int nbl_af_configure_mac_addr(struct nbl_hw *hw, u16 func_id, u8 eth_port_id, + u8 *mac_addr, u8 vsi_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + int macvlan_entry_index; + int i; + int err; + + if (!func_res->num_macvlan_entries) { + if (func_id >= NBL_MAX_PF_FUNC) { + func_res->num_macvlan_entries = NBL_VF_MAX_MACVLAN_ENTRIES; + func_res->macvlan_start_index = NBL_VF_MACVLAN_START_INDEX + + (func_id - NBL_VF_BASE_FUNC_ID) * NBL_VF_MAX_MACVLAN_ENTRIES; + } else { + func_res->num_macvlan_entries = NBL_PF_MAX_MACVLAN_ENTRIES; + func_res->macvlan_start_index = func_id * NBL_PF_MAX_MACVLAN_ENTRIES; + } + + for (i = 0; i < NBL_PF_MAX_MACVLAN_ENTRIES; i++) + func_res->vlan_ids[i] = -1; + } + + for (i = 0; i < func_res->num_macvlan_entries; i++) + if (func_res->vlan_ids[i] == -1) + break; + if (i == func_res->num_macvlan_entries) { + pr_err("There is no available macvlan entry left for mailbox function id %u device\n", + func_id); + return -EAGAIN; + } + macvlan_entry_index = func_res->macvlan_start_index + i; + + err = nbl_macvlan_add(hw, eth_port_id, mac_addr, NBL_DEFAULT_VLAN_ID, vsi_id, + macvlan_entry_index); + if (err) { + pr_err("Mailbox function id %u device failed to add macvlan entry at index %d with error %d\n", + func_id, macvlan_entry_index, err); + return err; + } + + memcpy(func_res->mac_addr, mac_addr, ETH_ALEN); + func_res->eth_port_id = eth_port_id; + func_res->vlan_ids[i] = 0; + return 0; +} + +int nbl_af_clear_mac_addr(struct nbl_hw *hw, u16 func_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + u8 eth_port_id; + u8 *mac_addr; + int offset; + int i; + int err; + + for (i = 0; i < func_res->num_macvlan_entries; i++) + if (func_res->vlan_ids[i] == 0) + break; + if (i == func_res->num_macvlan_entries) { + pr_err("MAC address may be cleared 
already\n"); + return -EINVAL; + } + offset = i; + + for (i = 0; i < func_res->num_macvlan_entries; i++) + if (func_res->vlan_ids[i] != -1 && i != offset) + pr_err("Macvlan entry with vlan id %hd has not been cleared\n", + func_res->vlan_ids[i]); + + eth_port_id = func_res->eth_port_id; + mac_addr = func_res->mac_addr; + err = nbl_macvlan_delete(hw, eth_port_id, mac_addr, NBL_DEFAULT_VLAN_ID); + if (err) { + pr_err("Clear mac address from hardware failed with error %d\n", err); + return err; + } + + func_res->vlan_ids[offset] = -1; + return 0; +} + +int nbl_af_change_mac_addr(struct nbl_hw *hw, u16 func_id, u8 *mac_addr, u8 vsi_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + int macvlan_entry_start; + int macvlan_entry_index; + u8 *old_mac_addr; + u8 eth_port_id; + s16 vlan_id; + int i; + int err; + int ret; + + old_mac_addr = func_res->mac_addr; + if (ether_addr_equal(old_mac_addr, mac_addr)) { + pr_info("There is no need for AF to change mac address\n"); + return 0; + } + + macvlan_entry_start = func_res->macvlan_start_index; + eth_port_id = func_res->eth_port_id; + for (i = 0; i < func_res->num_macvlan_entries; i++) { + vlan_id = func_res->vlan_ids[i]; + if (vlan_id == -1) + continue; + + macvlan_entry_index = macvlan_entry_start + i; + + err = nbl_macvlan_delete(hw, eth_port_id, old_mac_addr, vlan_id); + if (err) { + pr_err("Failed to delete macvlan entry with error %d when change mac address\n", + err); + pr_alert("Please reset hardware\n"); + goto err_out; + } + + err = nbl_macvlan_add(hw, eth_port_id, mac_addr, vlan_id, + vsi_id, macvlan_entry_index); + if (err) { + pr_err("Failed to add macvlan entry with error %d when change mac address\n", + err); + goto add_macvlan_err; + } + } + + memcpy(func_res->mac_addr, mac_addr, ETH_ALEN); + + return 0; + +add_macvlan_err: + ret = nbl_macvlan_add(hw, eth_port_id, old_mac_addr, vlan_id, vsi_id, macvlan_entry_index); + if (ret) { + pr_err("Failed to add macvlan entry with error %d when change mac address roll back\n", + ret); + pr_alert("Please reset hardware\n"); + goto err_out; + } + + while (--i >= 0) { + vlan_id = func_res->vlan_ids[i]; + if (vlan_id == -1) + continue; + + macvlan_entry_index = macvlan_entry_start + i; + + ret = nbl_macvlan_delete(hw, eth_port_id, mac_addr, vlan_id); + if (ret) { + pr_err("Failed to delete macvlan entry with error %d when change mac address roll back\n", + ret); + pr_alert("Please reset hardware\n"); + goto err_out; + } + + ret = nbl_macvlan_add(hw, eth_port_id, old_mac_addr, vlan_id, + vsi_id, macvlan_entry_index); + if (ret) { + pr_err("Failed to add macvlan entry with error %d when change mac address roll back\n", + ret); + pr_alert("Please reset hardware\n"); + goto err_out; + } + } + +err_out: + return err; +} + +int nbl_configure_mac_addr(struct nbl_hw *hw, u8 *mac_addr) +{ + int err; + u8 eth_port_id = hw->eth_port_id; + u8 vsi_id = hw->vsi_id; + + if (is_af(hw)) + err = nbl_af_configure_mac_addr(hw, 0, eth_port_id, mac_addr, vsi_id); + else + err = nbl_mailbox_req_configure_mac_addr(hw, eth_port_id, mac_addr, vsi_id); + + return err; +} + +int nbl_clear_mac_addr(struct nbl_hw *hw) +{ + int err; + + if (is_af(hw)) + err = nbl_af_clear_mac_addr(hw, 0); + else + err = nbl_mailbox_req_clear_mac_addr(hw); + + return err; +} + +int nbl_change_mac_addr(struct nbl_hw *hw, u8 *mac_addr) +{ + u8 vsi_id; + int err; + + vsi_id = hw->vsi_id; + if (is_af(hw)) + err = nbl_af_change_mac_addr(hw, 0, mac_addr, vsi_id); + else + err = 
nbl_mailbox_req_change_mac_addr(hw, mac_addr, vsi_id); + + return err; +} + +static int nbl_af_add_vlan_id(struct nbl_hw *hw, u16 func_id, u16 vlan_id, u8 vsi_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + int macvlan_entry_start; + int macvlan_entry_index; + u8 *mac_addr; + u8 eth_port_id; + int i; + int err; + + for (i = 0; i < func_res->num_macvlan_entries; i++) { + if ((s16)vlan_id == func_res->vlan_ids[i]) { + pr_info("Vlan id %u is added already\n", vlan_id); + return -EEXIST; + } + } + + for (i = 0; i < func_res->num_macvlan_entries; i++) { + if (func_res->vlan_ids[i] == -1) + break; + } + + if (i == func_res->num_macvlan_entries) { + pr_info("There is no macvlan entry left to add vlan id\n"); + return -ENOMEM; + } + + macvlan_entry_start = func_res->macvlan_start_index; + macvlan_entry_index = macvlan_entry_start + i; + eth_port_id = func_res->eth_port_id; + mac_addr = func_res->mac_addr; + err = nbl_macvlan_add(hw, eth_port_id, mac_addr, vlan_id, vsi_id, + macvlan_entry_index); + if (err) { + pr_err("Failed to add vlan id %u into macvlan table\n", vlan_id); + return err; + } + + func_res->vlan_ids[i] = vlan_id; + + return 0; +} + +static int nbl_af_delete_vlan_id(struct nbl_hw *hw, u16 func_id, u16 vlan_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + u8 *mac_addr; + u8 eth_port_id; + int i; + int err; + + for (i = 0; i < func_res->num_macvlan_entries; i++) { + if ((s16)vlan_id == func_res->vlan_ids[i]) + break; + } + if (i == func_res->num_macvlan_entries) { + pr_info("There is no vlan id %u in macvlan table\n", vlan_id); + return -ENOENT; + } + + eth_port_id = func_res->eth_port_id; + mac_addr = func_res->mac_addr; + err = nbl_macvlan_delete(hw, eth_port_id, mac_addr, vlan_id); + if (err) { + pr_err("Failed to delete vlan id %u from macvlan table\n", vlan_id); + pr_alert("Please reset hardware\n"); + return err; + } + + func_res->vlan_ids[i] = -1; + + return 0; +} + +int nbl_af_operate_vlan_id(struct nbl_hw *hw, u16 func_id, u16 vlan_id, + u8 vsi_id, bool add) +{ + if (add) + return nbl_af_add_vlan_id(hw, func_id, vlan_id, vsi_id); + + return nbl_af_delete_vlan_id(hw, func_id, vlan_id); +} + +int nbl_add_vlan_id(struct nbl_hw *hw, u16 vlan_id) +{ + u8 vsi_id; + int err; + + vsi_id = hw->vsi_id; + if (is_af(hw)) + err = nbl_af_operate_vlan_id(hw, 0, vlan_id, vsi_id, true); + else + err = nbl_mailbox_req_operate_vlan_id(hw, vlan_id, vsi_id, true); + + return err; +} + +int nbl_delete_vlan_id(struct nbl_hw *hw, u16 vlan_id) +{ + u8 vsi_id; + int err; + + vsi_id = hw->vsi_id; + if (is_af(hw)) + err = nbl_af_operate_vlan_id(hw, 0, vlan_id, vsi_id, false); + else + err = nbl_mailbox_req_operate_vlan_id(hw, vlan_id, vsi_id, false); + + return err; +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/macvlan.h b/drivers/net/ethernet/nebula-matrix/m1600/macvlan.h new file mode 100644 index 0000000000000000000000000000000000000000..b8e9b95bee1ab6171b0ca38101cff2337792ceb7 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/macvlan.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Monte Song + */ + +#ifndef _NBL_MACVLAN_H_ +#define _NBL_MACVLAN_H_ + +#define NBL_MACVLAN_TRY_GET_STATUS_TIMES 10 + +int nbl_macvlan_add(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id, + u8 vsi_id, int index); + +int nbl_macvlan_delete(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u16 vlan_id); + +int nbl_af_configure_mac_addr(struct nbl_hw *hw, u16 func_id, u8 eth_port_id, + u8 *mac_addr, u8 vsi_id); +int nbl_configure_mac_addr(struct nbl_hw *hw, u8 *mac_addr); + +int nbl_af_clear_mac_addr(struct nbl_hw *hw, u16 func_id); +int nbl_clear_mac_addr(struct nbl_hw *hw); + +int nbl_af_change_mac_addr(struct nbl_hw *hw, u16 func_id, u8 *mac_addr, u8 vsi_id); +int nbl_change_mac_addr(struct nbl_hw *hw, u8 *mac_addr); + +int nbl_af_operate_vlan_id(struct nbl_hw *hw, u16 func_id, u16 vlan_id, + u8 vsi_id, bool add); +int nbl_add_vlan_id(struct nbl_hw *hw, u16 vlan_id); +int nbl_delete_vlan_id(struct nbl_hw *hw, u16 vlan_id); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/mailbox.c b/drivers/net/ethernet/nebula-matrix/m1600/mailbox.c new file mode 100644 index 0000000000000000000000000000000000000000..4ee7d3e63e8aac1bd6e65a19a891f13b9782e6c1 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/mailbox.c @@ -0,0 +1,5169 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#include +#include +#include + +#include "hw.h" +#include "common.h" +#include "interrupt.h" +#include "txrx.h" +#include "ethtool.h" +#include "macvlan.h" +#include "sriov.h" +#include "mailbox.h" + +void nbl_af_set_mailbox_bdf_for_all_func(struct nbl_hw *hw) +{ + struct nbl_mailbox_qinfo_map mb_qinfo_map; + u16 bdf; + unsigned int i; + + bdf = (((u16)hw->bus) << 8) | PCI_DEVFN((u16)hw->devid, (u16)hw->function); + memset(&mb_qinfo_map, 0, sizeof(mb_qinfo_map)); + for (i = 0; i < NBL_MAX_FUNC; i++) { + mb_qinfo_map.function = PCI_FUNC(bdf); + mb_qinfo_map.devid = PCI_SLOT(bdf); + mb_qinfo_map.bus = bdf >> 8; + mb_qinfo_map.valid = 0; + wr32_for_each(hw, NBL_MAILBOX_M_QINFO_MAP_REG_ARR(i), (u32 *)&mb_qinfo_map, + sizeof(mb_qinfo_map)); + bdf++; + } +} + +static void nbl_mailbox_init(struct nbl_mailbox_info *mailbox) +{ + spin_lock_init(&mailbox->txq_lock); + + mutex_init(&mailbox->send_normal_msg_lock); + mailbox->acked = 0; + + mailbox->num_txq_entries = NBL_MAILBOX_QUEUE_LEN; + mailbox->num_rxq_entries = NBL_MAILBOX_QUEUE_LEN; + mailbox->txq_buf_size = NBL_MAILBOX_BUF_LEN; + mailbox->rxq_buf_size = NBL_MAILBOX_BUF_LEN; +} + +static int nbl_mailbox_setup_tx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_mailbox_ring *txq = &mailbox->txq; + size_t size = mailbox->num_txq_entries * sizeof(struct nbl_mailbox_tx_desc); + + txq->desc = dmam_alloc_coherent(dev, size, &txq->dma, GFP_KERNEL | __GFP_ZERO); + if (!txq->desc) + return -ENOMEM; + + txq->buf = devm_kcalloc(dev, mailbox->num_txq_entries, + sizeof(struct nbl_mailbox_buf), GFP_KERNEL); + if (!txq->buf) { + dmam_free_coherent(dev, size, txq->desc, txq->dma); + txq->desc = NULL; + txq->dma = 0; + return -ENOMEM; + } + + return 0; +} + +static void nbl_mailbox_teardown_tx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_mailbox_ring *txq = &mailbox->txq; + size_t size = mailbox->num_txq_entries * sizeof(struct nbl_mailbox_tx_desc); + + devm_kfree(dev, txq->buf); + txq->buf = NULL; + + dmam_free_coherent(dev, size, txq->desc, 
txq->dma); + txq->desc = NULL; + txq->dma = 0; +} + +static int nbl_mailbox_setup_rx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_mailbox_ring *rxq = &mailbox->rxq; + size_t size = mailbox->num_rxq_entries * sizeof(struct nbl_mailbox_rx_desc); + + rxq->desc = dmam_alloc_coherent(dev, size, &rxq->dma, GFP_KERNEL | __GFP_ZERO); + if (!rxq->desc) + return -ENOMEM; + + rxq->buf = devm_kcalloc(dev, mailbox->num_rxq_entries, + sizeof(struct nbl_mailbox_buf), GFP_KERNEL); + if (!rxq->buf) { + dmam_free_coherent(dev, size, rxq->desc, rxq->dma); + rxq->desc = NULL; + rxq->dma = 0; + return -ENOMEM; + } + + return 0; +} + +static void nbl_mailbox_teardown_rx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_mailbox_ring *rxq = &mailbox->rxq; + size_t size = mailbox->num_rxq_entries * sizeof(struct nbl_mailbox_rx_desc); + + devm_kfree(dev, rxq->buf); + rxq->buf = NULL; + + dmam_free_coherent(dev, size, rxq->desc, rxq->dma); + rxq->desc = NULL; + rxq->dma = 0; +} + +static int nbl_mailbox_setup_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + int err; + + err = nbl_mailbox_setup_tx_queue(hw, mailbox); + if (err) + return err; + + err = nbl_mailbox_setup_rx_queue(hw, mailbox); + if (err) + goto setup_rx_queue_err; + + return 0; + +setup_rx_queue_err: + nbl_mailbox_teardown_tx_queue(hw, mailbox); + return err; +} + +static void nbl_mailbox_teardown_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + nbl_mailbox_teardown_tx_queue(hw, mailbox); + nbl_mailbox_teardown_rx_queue(hw, mailbox); +} + +static void nbl_mailbox_reset_tx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + u32 value = NBL_MAILBOX_TX_RESET; + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_TX_CMD_FIELD, value); +} + +static void nbl_mailbox_reset_rx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + u32 value = NBL_MAILBOX_RX_RESET; + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_RX_CMD_FIELD, value); +} + +static void nbl_mailbox_reset_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + nbl_mailbox_reset_tx_queue(hw, mailbox); + nbl_mailbox_reset_rx_queue(hw, mailbox); +} + +static void nbl_mailbox_config_tx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct nbl_mailbox_ring *txq = &mailbox->txq; + dma_addr_t dma_addr = txq->dma; + int size_bwid = ilog2(mailbox->num_txq_entries); + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_TX_BASE_ADDR_L_FIELD, (u32)(dma_addr & 0xFFFFFFFF)); + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_TX_BASE_ADDR_H_FIELD, (u32)(dma_addr >> 32)); + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_TX_SIZE_BWID_FIELD, (u32)size_bwid); +} + +static void nbl_mailbox_config_rx_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct nbl_mailbox_ring *rxq = &mailbox->rxq; + dma_addr_t dma_addr = rxq->dma; + int size_bwid = ilog2(mailbox->num_rxq_entries); + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_RX_BASE_ADDR_L_FIELD, (u32)(dma_addr & 0xFFFFFFFF)); + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_RX_BASE_ADDR_H_FIELD, (u32)(dma_addr >> 32)); + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_RX_SIZE_BWID_FIELD, (u32)size_bwid); +} + +static void nbl_mailbox_config_queue(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + nbl_mailbox_config_tx_queue(hw, mailbox); + nbl_mailbox_config_rx_queue(hw, mailbox); +} + +static int nbl_mailbox_alloc_all_tx_bufs(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct nbl_mailbox_ring *txq = &mailbox->txq; + struct 
nbl_mailbox_buf *buf; + u16 i; + + for (i = 0; i < mailbox->num_txq_entries; i++) { + buf = &txq->buf[i]; + buf->va = dmam_alloc_coherent(nbl_hw_to_dev(hw), mailbox->txq_buf_size, + &buf->pa, GFP_KERNEL | __GFP_ZERO); + if (!buf->va) { + pr_err("Allocate buffer for mailbox tx queue failed\n"); + goto err; + } + } + + txq->next_to_clean = 0; + txq->next_to_use = 0; + txq->tail_ptr = 0; + + return 0; +err: + while (i--) { + buf = &txq->buf[i]; + dmam_free_coherent(nbl_hw_to_dev(hw), mailbox->txq_buf_size, buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } + + return -ENOMEM; +} + +static void nbl_mailbox_free_all_tx_bufs(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct nbl_mailbox_ring *txq = &mailbox->txq; + struct nbl_mailbox_buf *buf; + u16 i; + + for (i = 0; i < mailbox->num_txq_entries; i++) { + buf = &txq->buf[i]; + dmam_free_coherent(nbl_hw_to_dev(hw), mailbox->txq_buf_size, buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } +} + +static int nbl_mailbox_alloc_all_rx_bufs(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct nbl_mailbox_ring *rxq = &mailbox->rxq; + struct nbl_mailbox_buf *buf; + struct nbl_mailbox_rx_desc *desc; + u16 i; + + for (i = 0; i < mailbox->num_rxq_entries; i++) { + buf = &rxq->buf[i]; + buf->va = dmam_alloc_coherent(nbl_hw_to_dev(hw), mailbox->rxq_buf_size, + &buf->pa, GFP_KERNEL | __GFP_ZERO); + if (!buf->va) { + pr_err("Allocate buffer for mailbox rx queue failed\n"); + goto err; + } + } + + desc = rxq->desc; + for (i = 0; i < mailbox->num_rxq_entries - 1; i++) { + buf = &rxq->buf[i]; + desc[i].flags = NBL_MAILBOX_RX_DESC_AVAIL; + desc[i].buf_addr = buf->pa; + desc[i].buf_len = mailbox->rxq_buf_size; + } + + /* Make sure the descriptor has been written */ + wmb(); + rxq->next_to_clean = 0; + rxq->next_to_use = mailbox->num_rxq_entries - 1; + rxq->tail_ptr = mailbox->num_rxq_entries - 1; + nbl_mailbox_update_rxq_tail_ptr(hw, rxq->tail_ptr); + + return 0; +err: + while (i--) { + buf = &rxq->buf[i]; + dmam_free_coherent(nbl_hw_to_dev(hw), mailbox->rxq_buf_size, buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } + + return -ENOMEM; +} + +static void nbl_mailbox_free_all_rx_bufs(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + struct nbl_mailbox_ring *rxq = &mailbox->rxq; + struct nbl_mailbox_buf *buf; + u16 i; + + for (i = 0; i < mailbox->num_rxq_entries; i++) { + buf = &rxq->buf[i]; + dmam_free_coherent(nbl_hw_to_dev(hw), mailbox->rxq_buf_size, buf->va, buf->pa); + buf->va = NULL; + buf->pa = 0; + } +} + +static int nbl_mailbox_alloc_all_bufs(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + int err; + + err = nbl_mailbox_alloc_all_tx_bufs(hw, mailbox); + if (err) + return err; + + err = nbl_mailbox_alloc_all_rx_bufs(hw, mailbox); + if (err) + goto alloc_rx_bufs_err; + + return 0; + +alloc_rx_bufs_err: + nbl_mailbox_free_all_tx_bufs(hw, mailbox); + return err; +} + +static void nbl_mailbox_free_all_bufs(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox) +{ + nbl_mailbox_free_all_tx_bufs(hw, mailbox); + nbl_mailbox_free_all_rx_bufs(hw, mailbox); +} + +static void nbl_mailbox_start_tx_queue(struct nbl_hw *hw) +{ + u32 value = NBL_MAILBOX_TX_ENABLE; + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_TX_CMD_FIELD, value); +} + +static void nbl_mailbox_stop_tx_queue(struct nbl_hw *hw) +{ + u32 value = 0; + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_TX_CMD_FIELD, value); +} + +static void nbl_mailbox_start_rx_queue(struct nbl_hw *hw) +{ + u32 value = NBL_MAILBOX_RX_ENABLE; + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_RX_CMD_FIELD, 
value); +} + +static void nbl_mailbox_stop_rx_queue(struct nbl_hw *hw) +{ + u32 value = 0; + + mb_wr32(hw, NBL_MAILBOX_QINFO_CFG_RX_CMD_FIELD, value); +} + +static void nbl_mailbox_start_queue(struct nbl_hw *hw) +{ + nbl_mailbox_start_tx_queue(hw); + nbl_mailbox_start_rx_queue(hw); +} + +static void nbl_mailbox_stop_queue(struct nbl_hw *hw) +{ + nbl_mailbox_stop_tx_queue(hw); + nbl_mailbox_stop_rx_queue(hw); +} + +int nbl_setup_mailbox(struct nbl_hw *hw) +{ + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + + nbl_mailbox_init(mailbox); + + err = nbl_mailbox_setup_queue(hw, mailbox); + if (err) + return err; + + nbl_mailbox_reset_queue(hw, mailbox); + + nbl_mailbox_config_queue(hw, mailbox); + + err = nbl_mailbox_alloc_all_bufs(hw, mailbox); + if (err) + goto alloc_buf_err; + + nbl_mailbox_start_queue(hw); + + return 0; + +alloc_buf_err: + nbl_mailbox_teardown_queue(hw, mailbox); + + return err; +} + +void nbl_teardown_mailbox(struct nbl_hw *hw) +{ + struct nbl_mailbox_info *mailbox = &hw->mailbox; + + nbl_mailbox_stop_queue(hw); + + nbl_mailbox_free_all_bufs(hw, mailbox); + + nbl_mailbox_teardown_queue(hw, mailbox); +} + +static void nbl_mailbox_send_msg(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox, + u16 dstid, enum nbl_mailbox_msg_type msg_type, void *arg, + size_t arg_len) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_mailbox_ring *txq; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_buf *tx_buf; + unsigned long flags; + u16 next_to_use; + int i; + + spin_lock_irqsave(&mailbox->txq_lock, flags); + + txq = &mailbox->txq; + next_to_use = txq->next_to_use; + tx_buf = NBL_MAILBOX_TX_BUF(txq, next_to_use); + tx_desc = NBL_MAILBOX_TX_DESC(txq, next_to_use); + + tx_desc->dstid = dstid; + tx_desc->msg_type = msg_type; + WARN_ON(arg_len > NBL_MAILBOX_BUF_LEN - sizeof(*tx_desc)); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + memcpy(tx_buf->va, arg, arg_len); + tx_desc->buf_addr = tx_buf->pa; + tx_desc->buf_len = arg_len; + tx_desc->data_len = 0; + } else { + memcpy(tx_desc->data, arg, arg_len); + tx_desc->buf_len = 0; + tx_desc->data_len = arg_len; + } + tx_desc->flags = NBL_MAILBOX_TX_DESC_AVAIL; + + /* Make sure the descriptor has been written */ + wmb(); + txq->next_to_use++; + if (txq->next_to_use == mailbox->num_txq_entries) + txq->next_to_use = 0; + txq->tail_ptr++; + nbl_mailbox_update_txq_tail_ptr(hw, txq->tail_ptr); + + i = 0; + while (!(tx_desc->flags & NBL_MAILBOX_TX_DESC_USED)) { + udelay(NBL_MAILBOX_TX_WAIT_US); + i++; + if (i == NBL_MAILBOX_TX_WAIT_TIMES) { + dev_err(dev, "Mailbox send message type: %d with descriptor %u timeout\n", + msg_type, txq->next_to_use); + break; + } + + if (!(i % NBL_MAILBOX_TX_UPDATE_NOTIFY_LIMITS)) + nbl_mailbox_update_txq_tail_ptr(hw, txq->tail_ptr); + } + + txq->next_to_clean = txq->next_to_use; + + spin_unlock_irqrestore(&mailbox->txq_lock, flags); +} + +static void nbl_mailbox_poll_once_rxq(struct nbl_hw *hw); + +int nbl_mailbox_req_cfg_msix_map_table(struct nbl_hw *hw, u16 requested) +{ + struct nbl_mailbox_cfg_msix_map_table_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_MSIX_MAP_TABLE; + /* ensure request message related variables are completely written */ + wmb(); + arg.requested = requested; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_MSIX_MAP_TABLE, &arg, sizeof(arg)); + + i = 0; + nbl_mailbox_poll_once_rxq(hw); + while (!mailbox->acked) { + 
usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure msix map table ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + nbl_mailbox_poll_once_rxq(hw); + cpu_relax(); + } + /* Make sure the mailbox varaiable ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +void nbl_mailbox_req_destroy_msix_map_table(struct nbl_hw *hw) +{ + struct nbl_mailbox_dummy_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_DESTROY_MSIX_MAP_TABLE; + /* ensure request message related variables are completely written */ + wmb(); + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_DESTROY_MSIX_MAP_TABLE, &arg, sizeof(arg)); + + i = 0; + nbl_mailbox_poll_once_rxq(hw); + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait destroy msix map table ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + nbl_mailbox_poll_once_rxq(hw); + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_send_ack_msg(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox, + u16 dstid, int err, unsigned int req_msg_type) +{ + struct nbl_mailbox_ack_msg_ret ack_msg_ret; + + ack_msg_ret.req_msg_type = req_msg_type; + ack_msg_ret.err = err; + nbl_mailbox_send_msg(hw, mailbox, dstid, NBL_MAILBOX_ACK, + &ack_msg_ret, sizeof(ack_msg_ret)); +} + +static void nbl_mailbox_send_ack_msg_with_data(struct nbl_hw *hw, struct nbl_mailbox_info *mailbox, + u16 dstid, int err, unsigned int req_msg_type, + void *data, u32 data_len) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_mailbox_ring *txq; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_buf *tx_buf; + struct nbl_mailbox_ack_msg_ret ack_msg_ret; + unsigned long flags; + u16 next_to_use; + size_t arg_len; + int i; + + spin_lock_irqsave(&mailbox->txq_lock, flags); + + txq = &mailbox->txq; + next_to_use = txq->next_to_use; + tx_buf = NBL_MAILBOX_TX_BUF(txq, next_to_use); + tx_desc = NBL_MAILBOX_TX_DESC(txq, next_to_use); + + tx_desc->dstid = dstid; + tx_desc->msg_type = NBL_MAILBOX_ACK; + + ack_msg_ret.req_msg_type = req_msg_type; + ack_msg_ret.err = err; + arg_len = data_len + sizeof(ack_msg_ret); + WARN_ON(arg_len > NBL_MAILBOX_BUF_LEN - sizeof(*tx_desc)); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + memcpy(tx_buf->va, &ack_msg_ret, sizeof(ack_msg_ret)); + memcpy((char *)(tx_buf->va) + sizeof(ack_msg_ret), data, data_len); + tx_desc->buf_addr = tx_buf->pa; + tx_desc->buf_len = arg_len; + tx_desc->data_len = 0; + } else { + memcpy(tx_desc->data, &ack_msg_ret, sizeof(ack_msg_ret)); + memcpy((char *)(tx_desc->data) + sizeof(ack_msg_ret), data, data_len); + tx_desc->buf_len = 0; + tx_desc->data_len = arg_len; + } + tx_desc->flags = NBL_MAILBOX_TX_DESC_AVAIL; + + /* Make sure the descriptor has been written */ + wmb(); + txq->next_to_use++; + if (txq->next_to_use == mailbox->num_txq_entries) + txq->next_to_use = 0; + txq->tail_ptr++; + nbl_mailbox_update_txq_tail_ptr(hw, txq->tail_ptr); + + i = 0; + while (!(tx_desc->flags & NBL_MAILBOX_TX_DESC_USED)) { + udelay(NBL_MAILBOX_TX_WAIT_US); + i++; + if (i == NBL_MAILBOX_TX_WAIT_TIMES) { + dev_err(dev, "Mailbox send message type: %d with descriptor %u timeout\n", + NBL_MAILBOX_ACK, txq->next_to_use); + break; + } + 
+ if (!(i % NBL_MAILBOX_TX_UPDATE_NOTIFY_LIMITS)) + nbl_mailbox_update_txq_tail_ptr(hw, txq->tail_ptr); + } + + txq->next_to_clean = txq->next_to_use; + + spin_unlock_irqrestore(&mailbox->txq_lock, flags); +} + +static void nbl_mailbox_recv_ack_msg(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct device *dev = nbl_hw_to_dev(hw); + struct nbl_mailbox_info *mailbox = &hw->mailbox; + struct nbl_mailbox_tx_desc *tx_desc = data; + struct nbl_mailbox_ack_msg_ret *payload; + u16 payload_len; + + if (tx_desc->data_len) { + payload = (struct nbl_mailbox_ack_msg_ret *)tx_desc->data; + payload_len = tx_desc->data_len; + } else { + payload = (struct nbl_mailbox_ack_msg_ret *)(tx_desc + 1); + payload_len = tx_desc->buf_len; + } + + if (mailbox->ack_req_msg_type != payload->req_msg_type) { + dev_warn(dev, "Unexpected ack message for type %u\n", payload->req_msg_type); + return; + } + + mailbox->ack_err = payload->err; + if (mailbox->ack_err >= 0 && (payload_len - sizeof(*payload))) { + WARN_ON(payload_len - sizeof(*payload) != mailbox->ack_data_len); + memcpy((char *)mailbox->ack_data, payload + 1, payload_len - sizeof(*payload)); + } + /* Make sure the mailbox info has been written */ + wmb(); + mailbox->acked = 1; +} + +static void nbl_mailbox_resp_cfg_msix_map_table(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_msix_map_table_arg *arg; + u16 arg_len; + u16 srcid; + u16 requested; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure msix map table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_msix_map_table_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure msix map table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_msix_map_table_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + requested = arg->requested; + err = nbl_af_configure_func_msix_map(hw, srcid, requested); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +static void nbl_mailbox_resp_destroy_msix_map_table(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + u16 arg_len; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(struct nbl_mailbox_dummy_arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Clean msix map table mailbox message has wrong argument size\n"); + return; + } + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Clean msix map table mailbox message has wrong argument size\n"); + return; + } + } + + srcid = tx_desc->srcid; + nbl_af_destroy_func_msix_map(hw, srcid); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +static void nbl_af_enable_mailbox_irq(struct nbl_hw *hw, u16 func_id, u16 vector_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_mailbox_qinfo_map mb_qinfo_map; + struct nbl_msix_info msix_info; + u16 global_vector_id; + u8 bus; + u8 devid; + u8 function; + + if (!func_res) + return; + + if (vector_id >= 
func_res->num_interrupts) { + pr_err("Mailbox %u request to enable mailbox MSIX irq with vector id %u, but it has %u irq vectors in total\n", + func_id, vector_id, func_res->num_interrupts); + return; + } + global_vector_id = func_res->interrupts[vector_id]; + + nbl_af_compute_bdf(hw, func_id, &bus, &devid, &function); + + memset(&msix_info, 0, sizeof(msix_info)); + msix_info.intrl_pnum = 0; + msix_info.intrl_rate = 0; + msix_info.function = function; + msix_info.devid = devid; + msix_info.bus = bus; + msix_info.valid = 1; + wr32_for_each(hw, NBL_PADPT_MSIX_INFO_REG_ARR(global_vector_id), + (u32 *)&msix_info, sizeof(msix_info)); + + rd32_for_each(hw, NBL_MAILBOX_M_QINFO_MAP_REG_ARR(func_id), (u32 *)&mb_qinfo_map, + sizeof(mb_qinfo_map)); + mb_qinfo_map.msix_idx = global_vector_id; + mb_qinfo_map.valid = 1; + wr32_for_each(hw, NBL_MAILBOX_M_QINFO_MAP_REG_ARR(func_id), (u32 *)&mb_qinfo_map, + sizeof(mb_qinfo_map)); +} + +static void nbl_mailbox_req_enable_mailbox_irq(struct nbl_hw *hw, u16 vector_id) +{ + struct nbl_mailbox_enable_mailbox_irq_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_ENABLE_MAILBOX_IRQ; + /* ensure request message related variables are completely written */ + wmb(); + + arg.vector_id = vector_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_ENABLE_MAILBOX_IRQ, &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait enable mailbox irq ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_enable_mailbox_irq(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_enable_mailbox_irq_arg *arg; + u16 arg_len; + u16 srcid; + u16 vector_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Enable mailbox irq mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_enable_mailbox_irq_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Enable mailbox irq mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_enable_mailbox_irq_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + vector_id = arg->vector_id; + nbl_af_enable_mailbox_irq(hw, srcid, vector_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_enable_irq(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u16 local_vector_id; + + local_vector_id = adapter->num_lan_msix; + /* AF has an hidden forward queue */ + local_vector_id += is_af(hw) ? 
1 : 0; + if (is_af(hw)) + nbl_af_enable_mailbox_irq(hw, 0, local_vector_id); + else + nbl_mailbox_req_enable_mailbox_irq(hw, local_vector_id); +} + +static void nbl_af_disable_mailbox_irq(struct nbl_hw *hw, u16 func_id, u16 vector_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_mailbox_qinfo_map mb_qinfo_map; + struct nbl_msix_info msix_info; + u16 global_vector_id; + + if (!func_res) + return; + + if (vector_id >= func_res->num_interrupts) { + pr_err("Mailbox %u request to disable mailbox MSIX irq with vector id %u, but it has %u irq vectors in total\n", + func_id, vector_id, func_res->num_interrupts); + return; + } + global_vector_id = func_res->interrupts[vector_id]; + + rd32_for_each(hw, NBL_MAILBOX_M_QINFO_MAP_REG_ARR(func_id), (u32 *)&mb_qinfo_map, + sizeof(mb_qinfo_map)); + mb_qinfo_map.valid = 0; + wr32_for_each(hw, NBL_MAILBOX_M_QINFO_MAP_REG_ARR(func_id), (u32 *)&mb_qinfo_map, + sizeof(mb_qinfo_map)); + + memset(&msix_info, 0, sizeof(msix_info)); + wr32_for_each(hw, NBL_PADPT_MSIX_INFO_REG_ARR(global_vector_id), + (u32 *)&msix_info, sizeof(msix_info)); +} + +static void nbl_mailbox_req_disable_mailbox_irq(struct nbl_hw *hw, u16 vector_id) +{ + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_DISABLE_MAILBOX_IRQ; + /* ensure request message related variables are completely written */ + wmb(); + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_DISABLE_MAILBOX_IRQ, + &vector_id, sizeof(vector_id)); + + i = 0; + nbl_mailbox_poll_once_rxq(hw); + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait disable mailbox irq ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + nbl_mailbox_poll_once_rxq(hw); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_disable_mailbox_irq(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_disable_mailbox_irq_arg *arg; + u16 arg_len; + u16 srcid; + u16 local_vector_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Disable mailbox irq mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_disable_mailbox_irq_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Disable mailbox irq mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_disable_mailbox_irq_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_vector_id = arg->local_vector_id; + nbl_af_disable_mailbox_irq(hw, srcid, local_vector_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_disable_irq(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u16 local_vector_id; + + local_vector_id = adapter->num_lan_msix; + /* AF has an hidden forward queue */ + local_vector_id += is_af(hw) ? 
1 : 0; + if (is_af(hw)) + nbl_af_disable_mailbox_irq(hw, 0, local_vector_id); + else + nbl_mailbox_req_disable_mailbox_irq(hw, local_vector_id); +} + +int nbl_mailbox_req_get_vsi_id(struct nbl_hw *hw) +{ + struct nbl_mailbox_dummy_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_VSI_ID; + /* ensure request message related variables are completely written */ + wmb(); + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_VSI_ID, &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get vsi id ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_vsi_id(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_dummy_arg *arg; + u16 arg_len; + u16 srcid; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get vsi id mailbox message has wrong argument size\n"); + return; + } + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get vsi id mailbox message has wrong argument size\n"); + return; + } + } + + srcid = tx_desc->srcid; + err = (int)(unsigned int)srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +int nbl_mailbox_req_register_vf_bar_info(struct nbl_hw *hw, u64 vf_bar_start, u64 vf_bar_len) +{ + struct nbl_mailbox_register_vf_bar_info_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_REGISTER_VF_BAR_INFO; + /* ensure request message related variables are completely written */ + wmb(); + + arg.vf_bar_start = vf_bar_start; + arg.vf_bar_len = vf_bar_len; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_REGISTER_VF_BAR_INFO, &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait register vf bar info ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return 0; +} + +static void nbl_mailbox_resp_register_vf_bar_info(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_register_vf_bar_info_arg *arg; + u16 arg_len; + u16 srcid; + u64 vf_bar_start; + u64 vf_bar_len; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Register vf bar info mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_register_vf_bar_info_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Register vf bar info mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_register_vf_bar_info_arg *)tx_desc->data; + } + + 
srcid = tx_desc->srcid; + vf_bar_start = arg->vf_bar_start; + vf_bar_len = arg->vf_bar_len; + nbl_af_register_vf_bar_info(hw, srcid, vf_bar_start, vf_bar_len); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +int nbl_mailbox_req_get_vf_bar_base_addr(struct nbl_hw *hw, u64 *base_addr) +{ + struct nbl_mailbox_dummy_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_VF_BAR_BASE_ADDR; + mailbox->ack_data = (char *)base_addr; + mailbox->ack_data_len = sizeof(*base_addr); + + /* Make sure the mailbox info has been written */ + wmb(); + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_VF_BAR_BASE_ADDR, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get VF BAR base address ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return 0; +} + +static void nbl_mailbox_resp_get_vf_bar_base_addr(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + u16 arg_len; + u16 srcid; + u64 base_addr; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(struct nbl_mailbox_dummy_arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get VF BAR base address mailbox message has wrong argument size\n"); + return; + } + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get VF BAR base address mailbox message has wrong argument size\n"); + return; + } + } + + srcid = tx_desc->srcid; + base_addr = nbl_af_compute_vf_bar_base_addr(hw, srcid); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &base_addr, sizeof(base_addr)); +} + +int nbl_mailbox_req_cfg_qid_map(struct nbl_hw *hw, u8 num_queues, u64 notify_addr) +{ + struct nbl_mailbox_cfg_qid_map_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_QID_MAP; + /* ensure request message related variables are completely written */ + wmb(); + + arg.num_queues = num_queues; + arg.notify_addr = notify_addr; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_QID_MAP, &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure qid map ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_cfg_qid_map(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_qid_map_arg *arg; + u16 arg_len; + u16 srcid; + u8 num_queues; + u64 notify_addr; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure qid map mailbox message has wrong 
argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_qid_map_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure qid map mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_qid_map_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + num_queues = arg->num_queues; + notify_addr = arg->notify_addr; + err = nbl_af_configure_qid_map(hw, srcid, num_queues, notify_addr); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +void nbl_mailbox_req_clear_qid_map(struct nbl_hw *hw, u64 notify_addr) +{ + struct nbl_mailbox_clear_qid_map_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CLEAR_QID_MAP; + /* ensure request message related variables are completely written */ + wmb(); + + arg.notify_addr = notify_addr; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CLEAR_QID_MAP, &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait clear qid map ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_clear_qid_map(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_clear_qid_map_arg *arg; + u16 arg_len; + u16 srcid; + u64 notify_addr; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(struct nbl_mailbox_clear_qid_map_arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Clear qid map mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_clear_qid_map_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Clear qid map mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_clear_qid_map_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + notify_addr = arg->notify_addr; + nbl_af_clear_qid_map(hw, srcid, notify_addr); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_enable_promisc(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_mailbox_cfg_promisc_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + if (!mutex_trylock(&mailbox->send_normal_msg_lock)) { + pr_info("Can not enable promiscuous mode for accessing lock failed\n"); + return; + } + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_PROMISC; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.enable = true; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_PROMISC, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + udelay(5); + i++; + if (i == 200000) { + pr_warn("Wait enable eth port promiscuous ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +void nbl_mailbox_req_disable_promisc(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_mailbox_cfg_promisc_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + if 
(!mutex_trylock(&mailbox->send_normal_msg_lock)) { + pr_info("Can not disable promiscuous mode for accessing lock failed\n"); + return; + } + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_PROMISC; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.enable = false; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_PROMISC, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + udelay(5); + i++; + if (i == 200000) { + pr_warn("Wait disable eth port promiscuous ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_promisc(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_promisc_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + bool enable; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure eth port promiscuous mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_promisc_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure eth port promiscuous mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_promisc_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + enable = arg->enable; + if (!enable) + nbl_af_disable_promisc(hw, eth_port_id); + else + nbl_af_enable_promisc(hw, eth_port_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_ingress_eth_port_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_mailbox_cfg_ingress_eth_port_table_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_INGRESS_ETH_PORT_TABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.vsi_id = vsi_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_INGRESS_ETH_PORT_TABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure ingress eth port table ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_ingress_eth_port_table(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_ingress_eth_port_table_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + u8 vsi_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure ingress ETH port table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_ingress_eth_port_table_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure ingress ETH port table mailbox message has wrong argument 
size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_ingress_eth_port_table_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + vsi_id = arg->vsi_id; + nbl_af_configure_ingress_eth_port_table(hw, eth_port_id, vsi_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_src_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_mailbox_cfg_src_vsi_table_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_SRC_VSI_TABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.vsi_id = vsi_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_SRC_VSI_TABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure source vsi table ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_src_vsi_table(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_src_vsi_table_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + u8 vsi_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure source vsi table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_src_vsi_table_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure source vsi table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_src_vsi_table_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + vsi_id = arg->vsi_id; + nbl_af_configure_src_vsi_table(hw, eth_port_id, vsi_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_dest_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_mailbox_cfg_dest_vsi_table_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_DEST_VSI_TABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.vsi_id = vsi_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_DEST_VSI_TABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure destination vsi table ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_dest_vsi_table(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_dest_vsi_table_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + u8 vsi_id; + unsigned int req_msg_type; + + tx_desc = 
data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure destination vsi table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_dest_vsi_table_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure destination vsi table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_dest_vsi_table_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + vsi_id = arg->vsi_id; + nbl_af_configure_dest_vsi_table(hw, eth_port_id, vsi_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_tx_ring(struct nbl_hw *hw, dma_addr_t dma, u16 desc_num, + u8 vsi_id, u8 local_queue_id) +{ + struct nbl_mailbox_cfg_tx_ring_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_TX_RING; + /* ensure request message related variables are completely written */ + wmb(); + + arg.vsi_id = vsi_id; + arg.local_queue_id = local_queue_id; + arg.desc_num = desc_num; + arg.dma = dma; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_TX_RING, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure tx ring ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_tx_ring(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_tx_ring_arg *arg; + u16 arg_len; + u16 srcid; + dma_addr_t dma; + u16 desc_num; + u8 vsi_id; + u8 local_queue_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure tx ring mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_tx_ring_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure tx ring mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_tx_ring_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + vsi_id = arg->vsi_id; + local_queue_id = arg->local_queue_id; + desc_num = arg->desc_num; + dma = arg->dma; + nbl_af_hw_config_tx_ring(hw, srcid, dma, desc_num, vsi_id, local_queue_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_rx_ring(struct nbl_hw *hw, dma_addr_t dma, u16 desc_num, + u32 buf_len, u8 local_queue_id) +{ + struct nbl_mailbox_cfg_rx_ring_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_RX_RING; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_queue_id = local_queue_id; + arg.desc_num = desc_num; + arg.buf_len = buf_len; + arg.dma = dma; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_RX_RING, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + 
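+ /* Poll for the AF ack; with usleep_range(100, 200) and the 10000-iteration
+  * limit this gives up after roughly 1-2 seconds.
+  */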
usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure rx ring ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_rx_ring(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_rx_ring_arg *arg; + u16 arg_len; + u16 srcid; + dma_addr_t dma; + u16 desc_num; + u32 buf_len; + u8 local_queue_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure rx ring mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_rx_ring_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure rx ring mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_rx_ring_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + buf_len = arg->buf_len; + local_queue_id = arg->local_queue_id; + desc_num = arg->desc_num; + dma = arg->dma; + nbl_af_hw_config_rx_ring(hw, srcid, dma, desc_num, buf_len, local_queue_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_queue_map(struct nbl_hw *hw, u8 local_queue_id, bool rx, + u16 local_vector_id, bool enable, bool msix_enable) +{ + struct nbl_mailbox_cfg_queue_map_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_QUEUE_MAP; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_queue_id = local_queue_id; + arg.rx = rx; + arg.local_vector_id = local_vector_id; + arg.enable = enable; + arg.msix_enable = msix_enable; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_QUEUE_MAP, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure queue map ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_queue_map(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_queue_map_arg *arg; + u16 arg_len; + u16 srcid; + u8 local_queue_id; + bool rx; + u16 local_vector_id; + bool enable; + bool msix_enable; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure queue map mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_queue_map_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure queue map mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_queue_map_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_queue_id = arg->local_queue_id; + rx = arg->rx; + local_vector_id = arg->local_vector_id; + enable = arg->enable; + msix_enable = arg->msix_enable; + nbl_af_configure_queue_map(hw, srcid, local_queue_id, rx, 
local_vector_id, + enable, msix_enable); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_control_queue(struct nbl_hw *hw, u8 local_queue_id, bool rx, bool enable) +{ + struct nbl_mailbox_control_queue_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CONTROL_QUEUE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_queue_id = local_queue_id; + arg.rx = rx; + arg.enable = enable; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CONTROL_QUEUE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait control queue enable/disable ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_control_queue(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_control_queue_arg *arg; + u16 arg_len; + u16 srcid; + u8 local_queue_id; + bool rx; + bool enable; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Control queue mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_control_queue_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Control queue mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_control_queue_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_queue_id = arg->local_queue_id; + rx = arg->rx; + enable = arg->enable; + nbl_af_control_queue(hw, srcid, local_queue_id, rx, enable); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +int nbl_mailbox_req_reset_tx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + struct nbl_mailbox_reset_tx_queue_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_RESET_TX_QUEUE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_queue_id = local_queue_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_RESET_TX_QUEUE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait reset tx queue ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_reset_tx_queue(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_reset_tx_queue_arg *arg; + u16 arg_len; + u16 srcid; + u8 local_queue_id; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Reset tx 
queue mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reset_tx_queue_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Reset tx queue mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reset_tx_queue_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_queue_id = arg->local_queue_id; + err = nbl_af_reset_tx_queue(hw, srcid, local_queue_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +int nbl_mailbox_req_wait_rx_queue_reset_done(struct nbl_hw *hw, u8 local_queue_id) +{ + struct nbl_mailbox_wait_rx_queue_reset_done_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_WAIT_RX_QUEUE_RESET_DONE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_queue_id = local_queue_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_WAIT_RX_QUEUE_RESET_DONE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait reset rx queue done ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_wait_rx_queue_reset_done(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_wait_rx_queue_reset_done_arg *arg; + u16 arg_len; + u16 srcid; + u8 local_queue_id; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Wait rx queue reset done mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_wait_rx_queue_reset_done_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Wait rx queue reset done mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_wait_rx_queue_reset_done_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_queue_id = arg->local_queue_id; + err = nbl_af_wait_rx_queue_reset_done(hw, srcid, local_queue_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +int nbl_mailbox_req_reset_rx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + struct nbl_mailbox_reset_rx_queue_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_RESET_RX_QUEUE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_queue_id = local_queue_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_RESET_RX_QUEUE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait reset rx queue ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + 
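+ /* Ack consumed and flag cleared; the next request issued under this lock starts clean. */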
mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_reset_rx_queue(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_reset_rx_queue_arg *arg; + u16 arg_len; + u16 srcid; + u8 local_queue_id; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Reset rx queue mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reset_rx_queue_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Reset rx queue mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reset_rx_queue_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_queue_id = arg->local_queue_id; + err = nbl_af_reset_rx_queue(hw, srcid, local_queue_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +void nbl_mailbox_req_cfg_port_map(struct nbl_hw *hw, u8 eth_port_id, u8 local_queue_id) +{ + struct nbl_mailbox_cfg_port_map_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_PORT_MAP; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.local_queue_id = local_queue_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_PORT_MAP, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure port map ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_port_map(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_port_map_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + u8 local_queue_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure port map mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_port_map_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure port map mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_port_map_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + local_queue_id = arg->local_queue_id; + nbl_af_configure_port_map(hw, srcid, eth_port_id, local_queue_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_rss_group_table(struct nbl_hw *hw, u8 vsi_id, u8 rx_queue_num) +{ + struct nbl_mailbox_cfg_rss_group_table_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_RSS_GROUP_TABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.vsi_id = vsi_id; + arg.rx_queue_num = rx_queue_num; + + 
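+ /* Ask the AF to program the RSS group table so this VSI's receive traffic is
+  * spread across rx_queue_num queues.
+  */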
nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_RSS_GROUP_TABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure rss group table ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_rss_group_table(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_rss_group_table_arg *arg; + u16 arg_len; + u16 srcid; + u8 vsi_id; + u8 rx_queue_num; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure rss group table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_rss_group_table_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure rss group table mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_rss_group_table_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + vsi_id = arg->vsi_id; + rx_queue_num = arg->rx_queue_num; + nbl_af_configure_rss_group_table(hw, srcid, vsi_id, rx_queue_num); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_cfg_msix_irq(struct nbl_hw *hw, u16 local_vector_id) +{ + struct nbl_mailbox_cfg_msix_irq_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CFG_MSIX_IRQ; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_vector_id = local_vector_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CFG_MSIX_IRQ, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure MSIX irq ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_cfg_msix_irq(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_cfg_msix_irq_arg *arg; + u16 arg_len; + u16 srcid; + u16 local_vector_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure msix irq mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_msix_irq_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure msix irq mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_cfg_msix_irq_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_vector_id = arg->local_vector_id; + nbl_af_configure_msix_irq(hw, srcid, local_vector_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_clear_msix_irq_conf(struct nbl_hw *hw, u16 local_vector_id) +{ + struct nbl_mailbox_clear_msix_irq_conf_arg arg; + struct 
nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CLEAR_MSIX_IRQ_CONF; + /* ensure request message related variables are completely written */ + wmb(); + + arg.local_vector_id = local_vector_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CLEAR_MSIX_IRQ_CONF, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait clear MSIX irq configuration ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_clear_msix_irq_conf(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_clear_msix_irq_conf_arg *arg; + u16 arg_len; + u16 srcid; + u16 local_vector_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Clear msix irq configuration mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_clear_msix_irq_conf_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Clear msix irq configuration mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_clear_msix_irq_conf_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + local_vector_id = arg->local_vector_id; + nbl_af_clear_msix_irq_conf(hw, srcid, local_vector_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_eth_tx_enable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_mailbox_eth_tx_enable_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_ETH_TX_ENABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_ETH_TX_ENABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait enable eth tx ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_eth_tx_enable(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_adapter *adapter = hw->back; + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_eth_tx_enable_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("ETH tx enable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_tx_enable_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("ETH tx enable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_tx_enable_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_eth_tx_enable(adapter, 
eth_port_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_eth_tx_disable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_mailbox_eth_tx_disable_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_ETH_TX_DISABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_ETH_TX_DISABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait disable eth tx ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_eth_tx_disable(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_adapter *adapter = hw->back; + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_eth_tx_disable_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("ETH tx disable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_tx_disable_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("ETH tx disable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_tx_disable_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_eth_tx_disable(adapter, eth_port_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_eth_rx_enable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_mailbox_eth_rx_enable_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_ETH_RX_ENABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_ETH_RX_ENABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait enable eth rx ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_eth_rx_enable(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_adapter *adapter = hw->back; + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_eth_rx_enable_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("ETH rx enable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_rx_enable_arg *)(tx_desc + 1); + } else { + if 
(tx_desc->data_len != arg_len) { + pr_err("ETH rx enable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_rx_enable_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_eth_rx_enable(adapter, eth_port_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_eth_rx_disable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_mailbox_eth_rx_disable_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_ETH_RX_DISABLE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_ETH_RX_DISABLE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait disable eth rx ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_eth_rx_disable(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_adapter *adapter = hw->back; + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_eth_rx_disable_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("ETH rx disable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_rx_disable_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("ETH rx disable mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_eth_rx_disable_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_eth_rx_disable(adapter, eth_port_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +#ifdef CONFIG_PCI_IOV +void nbl_mailbox_req_enter_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_mailbox_enter_forward_ring_mode_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_ENTER_FORWARD_RING_MODE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.vsi_id = vsi_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_ENTER_FORWARD_RING_MODE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait enter forward ring mode ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_enter_forward_ring_mode(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_enter_forward_ring_mode_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + u8 vsi_id; + unsigned int 
req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Enter forward ring mode mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_enter_forward_ring_mode_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Enter forward ring mode mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_enter_forward_ring_mode_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + vsi_id = arg->vsi_id; + nbl_af_enter_forward_ring_mode(hw, eth_port_id, vsi_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_leave_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_mailbox_leave_forward_ring_mode_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_LEAVE_FORWARD_RING_MODE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.vsi_id = vsi_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_LEAVE_FORWARD_RING_MODE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait leave forward ring mode ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_leave_forward_ring_mode(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_leave_forward_ring_mode_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + u8 vsi_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Leave forward ring mode mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_leave_forward_ring_mode_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Leave forward ring mode mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_leave_forward_ring_mode_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + vsi_id = arg->vsi_id; + nbl_af_leave_forward_ring_mode(hw, eth_port_id, vsi_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} +#endif + +u32 nbl_mailbox_req_get_firmware_version(struct nbl_hw *hw) +{ + struct nbl_mailbox_dummy_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + u32 firmware_version; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_FIRMWARE_VERSION; + mailbox->ack_data = (char *)&firmware_version; + mailbox->ack_data_len = sizeof(firmware_version); + /* Make sure mailbox info hae been written */ + wmb(); + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_FIRMWARE_VERSION, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get firmware version ack message timeout\n"); + 
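+ /* Note: this function returns u32, so the -ETIMEDOUT below reaches callers as
+  * a large bogus version value rather than a negative errno.
+  */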
mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return firmware_version; +} + +static void nbl_mailbox_resp_get_firmware_version(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + u16 arg_len; + u32 firmware_version; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(struct nbl_mailbox_dummy_arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get firmware version mailbox message has wrong argument size\n"); + return; + } + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get firmware version mailbox message has wrong argument size\n"); + return; + } + } + + firmware_version = nbl_af_get_firmware_version(hw); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &firmware_version, sizeof(firmware_version)); +} + +int nbl_mailbox_req_get_module_eeprom(struct nbl_hw *hw, u8 eth_port_id, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nbl_mailbox_get_module_eeprom_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_MODULE_EEPROM; + mailbox->ack_data = (char *)data; + mailbox->ack_data_len = eeprom->len; + /* Make sure mailbox info hae been written */ + wmb(); + arg.eth_port_id = eth_port_id; + arg.eeprom = *eeprom; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_MODULE_EEPROM, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get module eeprom ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_module_eeprom(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_module_eeprom_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + struct ethtool_eeprom *eeprom; + u8 *recv_data; + int err = 0; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get module eeprom mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_module_eeprom_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get module eeprom mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_module_eeprom_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + eeprom = &arg->eeprom; + recv_data = kmalloc(eeprom->len, GFP_ATOMIC); + if (!recv_data) { + pr_err("Allocate memory to store module eeprom failed\n"); + err = -ENOMEM; + } + if (!err) + err = nbl_af_get_module_eeprom(hw, eth_port_id, eeprom, recv_data); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + if (err < 0) + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); + else + nbl_mailbox_send_ack_msg_with_data(hw, 
mailbox, srcid, err, req_msg_type, + recv_data, eeprom->len); + + kfree(recv_data); +} + +int nbl_mailbox_req_get_module_info(struct nbl_hw *hw, u8 eth_port_id, struct ethtool_modinfo *info) +{ + struct nbl_mailbox_get_module_info_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_MODULE_INFO; + mailbox->ack_data = (char *)info; + mailbox->ack_data_len = sizeof(*info); + /* Make sure mailbox info hae been written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_MODULE_INFO, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get module information ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_module_info(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_module_info_arg *arg; + struct ethtool_modinfo info; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + int err = 0; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get module information mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_module_info_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get module information mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_module_info_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + err = nbl_af_get_module_info(hw, eth_port_id, &info); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + if (err < 0) + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); + else + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, err, req_msg_type, + &info, sizeof(info)); +} + +int nbl_mailbox_req_get_eeprom(struct nbl_hw *hw, u32 offset, u32 length, u8 *bytes) +{ + struct nbl_mailbox_get_eeprom_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_EEPROM; + mailbox->ack_data = (char *)bytes; + mailbox->ack_data_len = length; + /* Make sure mailbox info hae been written */ + wmb(); + arg.offset = offset; + arg.length = length; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_EEPROM, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get eeprom ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_eeprom(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_eeprom_arg *arg; + u16 arg_len; + u16 srcid; + u32 offset; + u32 length; + u8 
*recv_data; + unsigned int req_msg_type; + int err = 0; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get eeprom mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_eeprom_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get eeprom mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_eeprom_arg *)tx_desc->data; + } + + offset = arg->offset; + length = arg->length; + recv_data = kmalloc(length, GFP_ATOMIC); + if (!recv_data) { + pr_err("Allocate memory to store eeprom content failed\n"); + err = -ENOMEM; + } + if (!err) + err = nbl_af_get_eeprom(hw, offset, length, recv_data); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + if (err < 0) + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); + else + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, err, req_msg_type, + recv_data, length); + + kfree(recv_data); +} + +enum NBL_MODULE_INPLACE_STATUS +nbl_mailbox_req_check_module_inplace(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_mailbox_check_module_inplace_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + enum NBL_MODULE_INPLACE_STATUS inplace; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CHECK_MODULE_INPLACE; + mailbox->ack_data = (char *)&inplace; + mailbox->ack_data_len = sizeof(inplace); + /* ensure args are completely written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CHECK_MODULE_INPLACE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait check module inplace information ack message timeout\n"); + goto err_out; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return inplace; + +err_out: + mutex_unlock(&mailbox->send_normal_msg_lock); + return NBL_MODULE_NOT_INPLACE; +} + +static void nbl_mailbox_resp_check_module_inplace(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_check_module_inplace_arg *arg; + u16 arg_len; + u8 eth_port_id; + u16 srcid; + enum NBL_MODULE_INPLACE_STATUS inplace; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Check module inplace mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_check_module_inplace_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Check module inplace mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_check_module_inplace_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + inplace = nbl_af_check_module_inplace(hw, eth_port_id); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &inplace, sizeof(inplace)); +} + +u32 nbl_mailbox_req_get_rxlos(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_mailbox_get_rxlos_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + u32 rxlos; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = 
NBL_MAILBOX_GET_RXLOS; + mailbox->ack_data = (char *)&rxlos; + mailbox->ack_data_len = sizeof(rxlos); + /* ensure args are completely written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_RXLOS, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get rxlos information ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return rxlos; +} + +static void nbl_mailbox_resp_get_rxlos(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_rxlos_arg *arg; + u16 arg_len; + u8 eth_port_id; + u16 srcid; + u32 rxlos; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get rxlos mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_rxlos_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get rxlos mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_rxlos_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + rxlos = nbl_af_get_rxlos(hw, eth_port_id); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &rxlos, sizeof(rxlos)); +} + +void nbl_mailbox_req_reset_eth(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_mailbox_reset_eth_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_RESET_ETH; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_RESET_ETH, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait reset eth ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_reset_eth(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_reset_eth_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Reset eth mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reset_eth_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("reset eth mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reset_eth_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_reset_eth(hw, eth_port_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +int nbl_mailbox_req_config_module_speed(struct nbl_hw *hw, u8 target_speed, u8 eth_port_id) +{ + struct 
nbl_mailbox_config_module_speed_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int speed_stat; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CONFIG_MODULE_SPEED; + mailbox->ack_data = (char *)&speed_stat; + mailbox->ack_data_len = sizeof(speed_stat); + /* ensure args are completely written */ + wmb(); + arg.eth_port_id = eth_port_id; + arg.target_speed = target_speed; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CONFIG_MODULE_SPEED, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait set eth speed information ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return speed_stat; +} + +static void nbl_mailbox_resp_config_module_speed(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_config_module_speed_arg *arg; + u16 arg_len; + u8 eth_port_id; + u8 target_speed; + u16 srcid; + int speed_stat; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Set eth speed mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_config_module_speed_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Set eth speed mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_config_module_speed_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + target_speed = arg->target_speed; + speed_stat = nbl_af_config_module_speed(hw, target_speed, eth_port_id); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &speed_stat, sizeof(speed_stat)); +} + +int nbl_mailbox_req_link_speed(struct nbl_hw *hw, u8 eth_port_id, u32 *speed_stat) +{ + struct nbl_mailbox_get_link_speed_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_LINK_SPEED; + mailbox->ack_data = (char *)speed_stat; + mailbox->ack_data_len = sizeof(*speed_stat); + /* ensure args are completely written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_LINK_SPEED, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get link speed information ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* make sure receive mailbox->acked before read ack_err */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_link_speed(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_link_speed_arg *arg; + u16 arg_len; + u8 eth_port_id; + int ret; + u32 speed_stat; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + 
pr_err("Get link speed mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_link_speed_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get link speed mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_link_speed_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + ret = nbl_af_query_link_speed(hw, eth_port_id, &speed_stat); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, ret, req_msg_type, + &speed_stat, sizeof(speed_stat)); +} + +u64 nbl_mailbox_req_reg_test(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_mailbox_reg_test_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + u64 test_val; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_REG_TEST; + mailbox->ack_data = (char *)&test_val; + mailbox->ack_data_len = sizeof(test_val); + /* ensure args are completely written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_REG_TEST, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait reg test information ack message timeout\n"); + goto err_out; + } + cpu_relax(); + } + /* make sure receive mailbox->acked before read ack_err */ + rmb(); + err = mailbox->ack_err; + if (err) { + pr_err("Reg test mailbox ack error: %d\n", err); + goto err_out; + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return test_val; + +err_out: + mutex_unlock(&mailbox->send_normal_msg_lock); + return 1; +} + +static void nbl_mailbox_resp_reg_test(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_reg_test_arg *arg; + u16 arg_len; + u8 eth_port_id; + u16 srcid; + u64 test_val; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Ethtool reg test mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reg_test_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Ethtool reg test mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_reg_test_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + test_val = nbl_af_reg_test(hw, eth_port_id); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &test_val, sizeof(test_val)); +} + +int nbl_mailbox_req_get_ethtool_dump_regs(struct nbl_hw *hw, u32 *regs_buff, u32 count) +{ + struct nbl_mailbox_get_ethtool_dump_regs_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_ETHTOOL_DUMP_REGS; + mailbox->ack_data = (char *)regs_buff; + mailbox->ack_data_len = count * sizeof(u32); + /* make sure mailbox is setup before send */ + wmb(); + arg.count = count; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_ETHTOOL_DUMP_REGS, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get ethtool dump regs information ack message 
timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* make sure receive mailbox->acked before read ack_err */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_ethtool_dump_regs(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_ethtool_dump_regs_arg *arg; + u16 arg_len; + u16 srcid; + u32 count; + u32 size; + u32 *regs_buff; + unsigned int req_msg_type; + int err = 0; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Ethtool get regs mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_ethtool_dump_regs_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Ethtool get regs mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_ethtool_dump_regs_arg *)tx_desc->data; + } + + count = arg->count; + size = count * sizeof(u32); + regs_buff = kmalloc(size, GFP_ATOMIC); + if (!regs_buff) { + pr_err("Allocate memory to ethtool get regs content failed\n"); + err = -ENOMEM; + } + if (!err) + nbl_af_get_ethtool_dump_regs(hw, regs_buff, count); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + if (err < 0) + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); + else + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, err, + req_msg_type, regs_buff, size); + + kfree(regs_buff); +} + +int nbl_mailbox_req_get_board_info(struct nbl_hw *hw, u8 eth_port_id, + union nbl_board_info *board_info) +{ + struct nbl_mailbox_get_board_info_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_BOARD_INFO; + mailbox->ack_data = (char *)board_info; + mailbox->ack_data_len = sizeof(*board_info); + /* Make sure mailbox info has been written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_BOARD_INFO, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get board information ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* Make sure ack_err is read in order */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_board_info(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_board_info_arg *arg; + union nbl_board_info board_info; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get board info mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_board_info_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get board info mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_board_info_arg *)tx_desc->data; +
} + + eth_port_id = arg->eth_port_id; + err = nbl_af_get_board_info(hw, eth_port_id, &board_info); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + if (err < 0) + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); + else + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, err, req_msg_type, + &board_info, sizeof(board_info)); +} + +bool nbl_mailbox_req_query_link_status(struct nbl_hw *hw, u8 eth_port_id) +{ + struct nbl_mailbox_query_link_status_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + bool link_up; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_QUERY_LINK_STATUS; + mailbox->ack_data = (char *)&link_up; + mailbox->ack_data_len = sizeof(link_up); + /* Make sure mailbox info has been written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_QUERY_LINK_STATUS, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait query link status ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + /* assume link is down when timeout */ + return false; + } + cpu_relax(); + } + /* Make sure ack_err is read in order */ + rmb(); + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return link_up; +} + +static void nbl_mailbox_resp_query_link_status(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_query_link_status_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + bool link_up; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Query link status mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_query_link_status_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Query link status mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_query_link_status_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + link_up = nbl_af_query_link_status(hw, eth_port_id); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &link_up, sizeof(link_up)); +} + +int nbl_mailbox_req_set_phy_id(struct nbl_hw *hw, u8 eth_port_id, enum ethtool_phys_id_state state) +{ + struct nbl_mailbox_info *mailbox = &hw->mailbox; + struct nbl_mailbox_set_phy_id_arg arg; + int i; + int ret; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_SET_PHY_ID; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.state = state; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_SET_PHY_ID, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait set phy id status ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + /* return -EINVAL when timeout */ + return -EINVAL; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + mailbox->acked = 0; + ret = mailbox->ack_err; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return ret; +} + +static void nbl_mailbox_resp_set_phy_id(struct nbl_hw *hw, void *data,
u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_set_phy_id_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + enum ethtool_phys_id_state state; + unsigned int req_msg_type; + int ret; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Set phy id mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_set_phy_id_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Set phy id mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_set_phy_id_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + state = arg->state; + + ret = nbl_af_set_phys_id(hw, eth_port_id, state); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, ret, req_msg_type); +} + +void nbl_mailbox_req_set_pauseparam(struct nbl_hw *hw, u8 eth_port_id, struct nbl_fc_info fc) +{ + struct nbl_mailbox_set_pause_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_SET_PAUSEPARAM; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.fc = fc; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_SET_PAUSEPARAM, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait set pauseparam ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_set_pauseparam(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_set_pause_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("set_pauseparam mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_set_pause_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("set_pauseparam mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_set_pause_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_set_pauseparam(hw, eth_port_id, arg->fc); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_write_mac_to_logic(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr) +{ + struct nbl_mailbox_write_mac_to_logic_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_WRITE_MAC_TO_LOGIC; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + memcpy(arg.smac, mac_addr, ETH_ALEN); + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_WRITE_MAC_TO_LOGIC, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait write mac to logic ack 
message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_write_mac_to_logic(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_write_mac_to_logic_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("write_mac_to_logic mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_write_mac_to_logic_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("write_mac_to_logic mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_write_mac_to_logic_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_write_mac_to_logic(hw, eth_port_id, arg->smac); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +void nbl_mailbox_req_get_pause_stats(struct nbl_hw *hw, u8 eth_port_id, + struct ethtool_pause_stats *stats) +{ + struct nbl_mailbox_get_pause_stats_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_PAUSE_STATS; + mailbox->ack_data = (char *)stats; + mailbox->ack_data_len = sizeof(*stats); + /* ensure args are completely written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_PAUSE_STATS, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get pause stats ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_get_pause_stats(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_pause_stats_arg *arg; + struct ethtool_pause_stats stats; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get pause stats mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_pause_stats_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get pause stats mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_pause_stats_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + nbl_af_get_pause_stats(hw, eth_port_id, &stats); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &stats, sizeof(stats)); +} + +void nbl_mailbox_req_init_pkt_len_limit(struct nbl_hw *hw, u8 eth_port_id, + struct nbl_pkt_len_limit pkt_len_limit) +{ + struct nbl_mailbox_init_pkt_len_limit_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + 
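+	/* Record the request type we expect an ack for before issuing the request. */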
mailbox->ack_req_msg_type = NBL_MAILBOX_INIT_PKT_LEN_LIMIT; + /* ensure request message related variables are completely written */ + wmb(); + + arg.eth_port_id = eth_port_id; + arg.pkt_len_limit = pkt_len_limit; + + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_INIT_PKT_LEN_LIMIT, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait init pkt len limit ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return; + } + cpu_relax(); + } + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); +} + +static void nbl_mailbox_resp_init_pkt_len_limit(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_init_pkt_len_limit_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("init_pkt_len_limit mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_init_pkt_len_limit_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("init_pkt_len_limit mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_init_pkt_len_limit_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + eth_port_id = arg->eth_port_id; + nbl_af_init_pkt_len_limit(hw, eth_port_id, arg->pkt_len_limit); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +int nbl_mailbox_req_get_coalesce(struct nbl_hw *hw, struct ethtool_coalesce *ec, + u16 local_vector_id) +{ + struct nbl_mailbox_get_coalesce_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + int err; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_COALESCE; + mailbox->ack_data = (char *)ec; + mailbox->ack_data_len = sizeof(*ec); + /* ensure args are completely written */ + wmb(); + arg.local_vector_id = local_vector_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_COALESCE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get coalesce ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_coalesce(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_get_coalesce_arg *arg; + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct ethtool_coalesce ec; + u16 local_vector_id; + u16 arg_len; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(struct nbl_mailbox_get_coalesce_arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get coalesce mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_coalesce_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get coalesce mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_coalesce_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + memset(&ec, 0, 
sizeof(ec)); + local_vector_id = arg->local_vector_id; + nbl_af_get_coalesce(hw, &ec, srcid, local_vector_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + &ec, sizeof(ec)); +} + +int nbl_mailbox_req_set_coalesce(struct nbl_hw *hw, u16 local_vector_id, + u16 num_q_vectors, u32 regval) +{ + struct nbl_mailbox_set_coalesce_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int i; + int err; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_SET_COALESCE; + /* ensure request message related variables are completely written */ + wmb(); + + arg.num_q_vectors = num_q_vectors; + arg.regval = regval; + arg.local_vector_id = local_vector_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_SET_COALESCE, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait set coalesce ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_set_coalesce(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_set_coalesce_arg *arg; + u32 regval; + u16 local_vector_id; + u16 num_q_vectors; + u16 arg_len; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(struct nbl_mailbox_set_coalesce_arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Set coalesce mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_set_coalesce_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Set coalesce mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_set_coalesce_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + regval = arg->regval; + local_vector_id = arg->local_vector_id; + num_q_vectors = arg->num_q_vectors; + nbl_af_set_coalesce(hw, srcid, local_vector_id, num_q_vectors, regval); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +int nbl_mailbox_req_get_eth_stats(struct nbl_hw *hw, u8 eth_port_id, struct nbl_hw_stats *hw_stats) +{ + struct nbl_mailbox_get_eth_stats_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_GET_ETH_STATS; + mailbox->ack_data = (char *)hw_stats; + mailbox->ack_data_len = sizeof(*hw_stats); + /* ensure args are completely written */ + wmb(); + arg.eth_port_id = eth_port_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_GET_ETH_STATS, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait get eth stats ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_get_eth_stats(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info 
*mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_eth_stats_arg *arg; + struct nbl_hw_stats hw_stats; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + unsigned int req_msg_type; + int ret; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get eth stats mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_eth_stats_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get eth stats mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_eth_stats_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + ret = nbl_af_get_eth_stats(hw, eth_port_id, &hw_stats); + + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, ret, req_msg_type, + &hw_stats, sizeof(hw_stats)); +} + +int nbl_mailbox_req_configure_mac_addr(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u8 vsi_id) +{ + struct nbl_mailbox_configure_mac_addr_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CONFIGURE_MAC_ADDR; + /* ensure request message related variables are completely written */ + wmb(); + + memcpy(arg.mac_addr, mac_addr, ETH_ALEN); + arg.eth_port_id = eth_port_id; + arg.vsi_id = vsi_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CONFIGURE_MAC_ADDR, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait configure mac addr ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_configure_mac_addr(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_configure_mac_addr_arg *arg; + u16 arg_len; + u8 *mac_addr; + u8 eth_port_id; + u8 vsi_id; + u16 srcid; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Configure mac addr mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_configure_mac_addr_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Configure mac addr mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_configure_mac_addr_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + mac_addr = arg->mac_addr; + vsi_id = arg->vsi_id; + srcid = tx_desc->srcid; + err = nbl_af_configure_mac_addr(hw, srcid, eth_port_id, mac_addr, vsi_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +int nbl_mailbox_req_clear_mac_addr(struct nbl_hw *hw) +{ + struct nbl_mailbox_dummy_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CLEAR_MAC_ADDR; + /* ensure request message related variables are completely written */ + wmb(); + + 
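+	/* The dummy argument carries no payload; the AF identifies the requester by the descriptor's srcid. */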
nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CLEAR_MAC_ADDR, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait clear mac addr ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_clear_mac_addr(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_dummy_arg *arg; + u16 arg_len; + u16 srcid; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Clear mac addr mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_dummy_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Clear mac addr mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_dummy_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + err = nbl_af_clear_mac_addr(hw, srcid); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +int nbl_mailbox_req_change_mac_addr(struct nbl_hw *hw, u8 *mac_addr, u8 vsi_id) +{ + struct nbl_mailbox_change_mac_addr_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_CHANGE_MAC_ADDR; + /* ensure request message related variables are completely written */ + wmb(); + + memcpy(arg.mac_addr, mac_addr, ETH_ALEN); + arg.vsi_id = vsi_id; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_CHANGE_MAC_ADDR, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait change mac addr ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_change_mac_addr(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_change_mac_addr_arg *arg; + u16 arg_len; + u16 srcid; + u8 *mac_addr; + u8 vsi_id; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Change mac addr mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_change_mac_addr_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Change mac addr mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_change_mac_addr_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + mac_addr = arg->mac_addr; + vsi_id = arg->vsi_id; + err = nbl_af_change_mac_addr(hw, srcid, mac_addr, vsi_id); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +int nbl_mailbox_req_operate_vlan_id(struct nbl_hw *hw, u16 vlan_id, u8 vsi_id, bool add) 
+{ + struct nbl_mailbox_operate_vlan_id_arg arg; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + int err; + int i; + + mutex_lock(&mailbox->send_normal_msg_lock); + + mailbox->ack_req_msg_type = NBL_MAILBOX_OPERATE_VLAN_ID; + /* ensure request message related variables are completely written */ + wmb(); + + arg.vsi_id = vsi_id; + arg.vlan_id = vlan_id; + arg.add = add; + nbl_mailbox_send_msg(hw, mailbox, 0, NBL_MAILBOX_OPERATE_VLAN_ID, + &arg, sizeof(arg)); + + i = 0; + while (!mailbox->acked) { + usleep_range(100, 200); + i++; + if (i == 10000) { + pr_warn("Wait operate vlan id ack message timeout\n"); + mutex_unlock(&mailbox->send_normal_msg_lock); + return -ETIMEDOUT; + } + cpu_relax(); + } + /* ensure ack is received */ + rmb(); + err = mailbox->ack_err; + mailbox->acked = 0; + mutex_unlock(&mailbox->send_normal_msg_lock); + + return err; +} + +static void nbl_mailbox_resp_operate_vlan_id(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_operate_vlan_id_arg *arg; + u16 arg_len; + u16 srcid; + u8 vsi_id; + u16 vlan_id; + bool add; + unsigned int req_msg_type; + int err; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Operate vlan id mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_operate_vlan_id_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Operate vlan id mailbox msg has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_operate_vlan_id_arg *)tx_desc->data; + } + + srcid = tx_desc->srcid; + vsi_id = arg->vsi_id; + vlan_id = arg->vlan_id; + add = arg->add; + err = nbl_af_operate_vlan_id(hw, srcid, vlan_id, vsi_id, add); + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, err, req_msg_type); +} + +/* when receiving hello, goodbye and release done msgs, do nothing but ack */ +static void nbl_mailbox_resp_hello_msg(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + srcid = tx_desc->srcid; + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +static void nbl_mailbox_resp_goodbye_msg(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + srcid = tx_desc->srcid; + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +static void nbl_mailbox_resp_release_done_msg(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + u16 srcid; + unsigned int req_msg_type; + + tx_desc = data; + srcid = tx_desc->srcid; + + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, 0, req_msg_type); +} + +static void nbl_af_get_pmd_vsi_stats(struct nbl_hw *hw, u8 vsi_id, u8 eth_port_id, + struct nbl_pmd_stats *stats) +{ + u8 i; + u32 rxq_pkt_drop_cnt; + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[vsi_id]; + u8 nb_rx_queues = func_res->num_txrx_queues; + u8 global_rx_qid; + u64
value_high; + u64 value_low; + + stats->nb_rx_queues = nb_rx_queues; + + for (i = 0; i < nb_rx_queues; i++) { + global_rx_qid = func_res->txrx_queues[i]; + rd32_for_each(hw, NBL_UVN_DROP_CNT_REG_ARR(global_rx_qid), + (u32 *)&rxq_pkt_drop_cnt, sizeof(u32)); + stats->pkt_drop_cnt[i] = rxq_pkt_drop_cnt; + } + + value_low = rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_L_REG(eth_port_id)) + + rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_L_REG(eth_port_id)) + + rd32(hw, NBL_ETH_RX_BADCODE_CNT_L_REG(eth_port_id)); + value_high = (rd32(hw, NBL_ETH_RX_BAD_FCS_CNT_H_REG(eth_port_id)) & 0xFFFF) + + (rd32(hw, NBL_ETH_RX_FRAMING_ERR_CNT_H_REG(eth_port_id)) & 0xFFFF) + + (rd32(hw, NBL_ETH_RX_BADCODE_CNT_H_REG(eth_port_id)) & 0xFFFF); + stats->ierrors = (value_high << 32) + value_low; + + value_low = rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_L_REG(eth_port_id)) + + rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_L_REG(eth_port_id)); + value_high = (rd32(hw, NBL_ETH_TX_BAD_FCS_CNT_H_REG(eth_port_id)) & 0xFFFF) + + (rd32(hw, NBL_ETH_TX_FRAME_ERROR_CNT_H_REG(eth_port_id)) & 0xFFFF); + stats->oerrors = (value_high << 32) + value_low; + + stats->eth_ipackets = rd32(hw, NBL_URMUX_ETHX_RX_PKT_REG(eth_port_id)); + + value_low = rd32(hw, NBL_URMUX_ETHX_RX_BYTE_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_URMUX_ETHX_RX_BYTE_H_REG(eth_port_id)); + stats->eth_ibytes = (value_high << 32) + value_low; + + stats->eth_opackets = rd32(hw, NBL_DMUX_ETHX_TX_PKT_REG(eth_port_id)); + + value_low = rd32(hw, NBL_DMUX_ETHX_TX_BYTE_L_REG(eth_port_id)); + value_high = rd32(hw, NBL_DMUX_ETHX_TX_BYTE_H_REG(eth_port_id)); + stats->eth_obytes = (value_high << 32) + value_low; +} + +static void nbl_mailbox_resp_get_pmd_stats(struct nbl_hw *hw, void *data, u32 datalen) +{ + struct nbl_mailbox_info *mailbox; + struct nbl_mailbox_tx_desc *tx_desc; + struct nbl_mailbox_get_pmd_stats_arg *arg; + u16 arg_len; + u16 srcid; + u8 eth_port_id; + struct nbl_pmd_stats *stats; + unsigned int req_msg_type; + + tx_desc = data; + + arg_len = (u16)sizeof(*arg); + if (arg_len > NBL_MAILBOX_TX_DESC_EMBEDDED_DATA_LEN) { + if (tx_desc->buf_len != arg_len) { + pr_err("Get pmd stats mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_pmd_stats_arg *)(tx_desc + 1); + } else { + if (tx_desc->data_len != arg_len) { + pr_err("Get pmd stats mailbox message has wrong argument size\n"); + return; + } + arg = (struct nbl_mailbox_get_pmd_stats_arg *)tx_desc->data; + } + + eth_port_id = arg->eth_port_id; + srcid = tx_desc->srcid; + mailbox = &hw->mailbox; + req_msg_type = tx_desc->msg_type; + stats = kzalloc(sizeof(*stats), GFP_ATOMIC); + if (!stats) { + nbl_mailbox_send_ack_msg(hw, mailbox, srcid, -ENOMEM, req_msg_type); + return; + } + + nbl_af_get_pmd_vsi_stats(hw, srcid, eth_port_id, stats); + nbl_mailbox_send_ack_msg_with_data(hw, mailbox, srcid, 0, req_msg_type, + stats, sizeof(*stats)); + + kfree(stats); +} + +static void nbl_mailbox_advance_rx_ring(struct nbl_hw *hw, struct nbl_mailbox_ring *rxq) +{ + struct nbl_mailbox_info *mailbox = &hw->mailbox; + struct nbl_mailbox_rx_desc *rx_desc; + struct nbl_mailbox_buf *rx_buf; + u16 next_to_use; + + next_to_use = rxq->next_to_use; + rx_desc = NBL_MAILBOX_RX_DESC(rxq, next_to_use); + rx_buf = NBL_MAILBOX_RX_BUF(rxq, next_to_use); + + rx_desc->flags = NBL_MAILBOX_RX_DESC_AVAIL; + rx_desc->buf_addr = rx_buf->pa; + rx_desc->buf_len = mailbox->rxq_buf_size; + + /* Make sure descriptor has been written */ + wmb(); + rxq->next_to_use++; + if (rxq->next_to_use == mailbox->num_rxq_entries) + rxq->next_to_use =
0; + rxq->tail_ptr++; + nbl_mailbox_update_rxq_tail_ptr(hw, rxq->tail_ptr); +} + +#define NBL_FUNC_ARR_ENTRY(type, func)[type] = func +static nbl_mailbox_msg_handler nbl_mailbox_handlers[NBL_MAILBOX_TYPE_MAX] = { + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_ACK, nbl_mailbox_recv_ack_msg), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_MSIX_MAP_TABLE, nbl_mailbox_resp_cfg_msix_map_table), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_DESTROY_MSIX_MAP_TABLE, + nbl_mailbox_resp_destroy_msix_map_table), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_ENABLE_MAILBOX_IRQ, nbl_mailbox_resp_enable_mailbox_irq), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_DISABLE_MAILBOX_IRQ, nbl_mailbox_resp_disable_mailbox_irq), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_VSI_ID, nbl_mailbox_resp_get_vsi_id), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_REGISTER_VF_BAR_INFO, nbl_mailbox_resp_register_vf_bar_info), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_VF_BAR_BASE_ADDR, nbl_mailbox_resp_get_vf_bar_base_addr), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_QID_MAP, nbl_mailbox_resp_cfg_qid_map), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CLEAR_QID_MAP, nbl_mailbox_resp_clear_qid_map), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_PROMISC, nbl_mailbox_resp_cfg_promisc), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_INGRESS_ETH_PORT_TABLE, + nbl_mailbox_resp_cfg_ingress_eth_port_table), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_SRC_VSI_TABLE, nbl_mailbox_resp_cfg_src_vsi_table), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_DEST_VSI_TABLE, nbl_mailbox_resp_cfg_dest_vsi_table), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_TX_RING, nbl_mailbox_resp_cfg_tx_ring), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_RX_RING, nbl_mailbox_resp_cfg_rx_ring), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_QUEUE_MAP, nbl_mailbox_resp_cfg_queue_map), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CONTROL_QUEUE, nbl_mailbox_resp_control_queue), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_RESET_TX_QUEUE, nbl_mailbox_resp_reset_tx_queue), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_RESET_RX_QUEUE, nbl_mailbox_resp_reset_rx_queue), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_WAIT_RX_QUEUE_RESET_DONE, + nbl_mailbox_resp_wait_rx_queue_reset_done), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_PORT_MAP, nbl_mailbox_resp_cfg_port_map), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_RSS_GROUP_TABLE, nbl_mailbox_resp_cfg_rss_group_table), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CFG_MSIX_IRQ, nbl_mailbox_resp_cfg_msix_irq), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CLEAR_MSIX_IRQ_CONF, nbl_mailbox_resp_clear_msix_irq_conf), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_ETH_TX_ENABLE, nbl_mailbox_resp_eth_tx_enable), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_ETH_RX_ENABLE, nbl_mailbox_resp_eth_rx_enable), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_ETH_TX_DISABLE, nbl_mailbox_resp_eth_tx_disable), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_ETH_RX_DISABLE, nbl_mailbox_resp_eth_rx_disable), +#ifdef CONFIG_PCI_IOV + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_ENTER_FORWARD_RING_MODE, + nbl_mailbox_resp_enter_forward_ring_mode), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_LEAVE_FORWARD_RING_MODE, + nbl_mailbox_resp_leave_forward_ring_mode), +#endif + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_FIRMWARE_VERSION, nbl_mailbox_resp_get_firmware_version), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_MODULE_EEPROM, nbl_mailbox_resp_get_module_eeprom), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_MODULE_INFO, nbl_mailbox_resp_get_module_info), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_EEPROM, nbl_mailbox_resp_get_eeprom), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CHECK_MODULE_INPLACE, nbl_mailbox_resp_check_module_inplace), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_RXLOS, nbl_mailbox_resp_get_rxlos), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_RESET_ETH, nbl_mailbox_resp_reset_eth), + 
NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CONFIG_MODULE_SPEED, nbl_mailbox_resp_config_module_speed), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_LINK_SPEED, nbl_mailbox_resp_get_link_speed), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_REG_TEST, nbl_mailbox_resp_reg_test), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_ETHTOOL_DUMP_REGS, + nbl_mailbox_resp_get_ethtool_dump_regs), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_BOARD_INFO, nbl_mailbox_resp_get_board_info), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_QUERY_LINK_STATUS, nbl_mailbox_resp_query_link_status), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_SET_PHY_ID, nbl_mailbox_resp_set_phy_id), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_SET_PAUSEPARAM, nbl_mailbox_resp_set_pauseparam), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_WRITE_MAC_TO_LOGIC, nbl_mailbox_resp_write_mac_to_logic), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_PAUSE_STATS, nbl_mailbox_resp_get_pause_stats), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_INIT_PKT_LEN_LIMIT, nbl_mailbox_resp_init_pkt_len_limit), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_COALESCE, nbl_mailbox_resp_get_coalesce), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_SET_COALESCE, nbl_mailbox_resp_set_coalesce), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_ETH_STATS, nbl_mailbox_resp_get_eth_stats), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CONFIGURE_MAC_ADDR, nbl_mailbox_resp_configure_mac_addr), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CLEAR_MAC_ADDR, nbl_mailbox_resp_clear_mac_addr), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_CHANGE_MAC_ADDR, nbl_mailbox_resp_change_mac_addr), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_OPERATE_VLAN_ID, nbl_mailbox_resp_operate_vlan_id), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GET_PMD_VSI_STATS, nbl_mailbox_resp_get_pmd_stats), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_HELLO_MSG, nbl_mailbox_resp_hello_msg), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_GOODBYE_MSG, nbl_mailbox_resp_goodbye_msg), + NBL_FUNC_ARR_ENTRY(NBL_MAILBOX_RESOURE_RELEASE_DONE, nbl_mailbox_resp_release_done_msg), +}; + +static void nbl_mailbox_recv_msg(struct nbl_hw *hw, void *data, u32 data_len) +{ + struct nbl_mailbox_tx_desc *tx_desc; + u16 msg_type; + + tx_desc = data; + msg_type = tx_desc->msg_type; + if (msg_type >= NBL_MAILBOX_TYPE_MAX) { + pr_err("Invalid mailbox message type %u\n", msg_type); + return; + } + nbl_mailbox_handlers[msg_type](hw, data, data_len); +} + +/* This function is only used when mailbox interrupt has + * not been setup yet. 
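+ * It drains any messages already pending in the mailbox RX ring by polling the descriptors directly.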
+ */ +static void nbl_mailbox_poll_once_rxq(struct nbl_hw *hw) +{ + struct nbl_mailbox_info *mailbox = &hw->mailbox; + struct nbl_mailbox_ring *rxq = &mailbox->rxq; + struct nbl_mailbox_rx_desc *rx_desc; + struct nbl_mailbox_buf *rx_buf; + u16 next_to_clean; + + next_to_clean = rxq->next_to_clean; + rx_desc = NBL_MAILBOX_RX_DESC(rxq, next_to_clean); + rx_buf = NBL_MAILBOX_RX_BUF(rxq, next_to_clean); + while (rx_desc->flags & NBL_MAILBOX_RX_DESC_USED) { + dma_rmb(); + nbl_mailbox_recv_msg(hw, rx_buf->va, rx_desc->buf_len); + + nbl_mailbox_advance_rx_ring(hw, rxq); + + next_to_clean++; + if (next_to_clean == mailbox->num_rxq_entries) + next_to_clean = 0; + rx_desc = NBL_MAILBOX_RX_DESC(rxq, next_to_clean); + rx_buf = NBL_MAILBOX_RX_BUF(rxq, next_to_clean); + } + rxq->next_to_clean = next_to_clean; +} + +static void nbl_clean_mailbox(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + struct nbl_mailbox_ring *rxq = &mailbox->rxq; + struct nbl_mailbox_rx_desc *rx_desc; + struct nbl_mailbox_buf *rx_buf; + u16 next_to_clean; + + next_to_clean = rxq->next_to_clean; + rx_desc = NBL_MAILBOX_RX_DESC(rxq, next_to_clean); + rx_buf = NBL_MAILBOX_RX_BUF(rxq, next_to_clean); + while (rx_desc->flags & NBL_MAILBOX_RX_DESC_USED) { + dma_rmb(); + nbl_mailbox_recv_msg(hw, rx_buf->va, rx_desc->buf_len); + + nbl_mailbox_advance_rx_ring(hw, rxq); + + next_to_clean++; + if (next_to_clean == mailbox->num_rxq_entries) + next_to_clean = 0; + rx_desc = NBL_MAILBOX_RX_DESC(rxq, next_to_clean); + rx_buf = NBL_MAILBOX_RX_BUF(rxq, next_to_clean); + } + rxq->next_to_clean = next_to_clean; +} + +void nbl_clean_mailbox_subtask(struct nbl_adapter *adapter) +{ + if (!test_and_clear_bit(NBL_MAILBOX_EVENT_PENDING, adapter->state)) + return; + + nbl_clean_mailbox(adapter); +} + +static irqreturn_t nbl_msix_clean_mailbox(int __always_unused irq, void *data) +{ + struct nbl_hw *hw = data; + struct nbl_adapter *adapter = hw->back; + + set_bit(NBL_MAILBOX_EVENT_PENDING, adapter->state); + nbl_service_task1_schedule(adapter); + return IRQ_HANDLED; +} + +int nbl_mailbox_request_irq(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_hw *hw = &adapter->hw; + struct nbl_mailbox_info *mailbox = &hw->mailbox; + u16 local_vector_id; + u32 irq_num; + int err; + + /* The first several MSIX irqs are used by the tx/rx queues, + * and the last one is used by the mailbox. + */ + local_vector_id = adapter->num_lan_msix; + /* The AF has a hidden forward queue used to process + * protocol packets. + */ + local_vector_id += is_af(hw) ? 1 : 0; + irq_num = adapter->msix_entries[local_vector_id].vector; + + snprintf(mailbox->name, sizeof(mailbox->name) - 1, "%s-%s", + dev_name(dev), "mailbox"); + + err = devm_request_irq(dev, irq_num, nbl_msix_clean_mailbox, + 0, mailbox->name, hw); + if (err) { + dev_err(dev, "Request mailbox irq handler failed\n"); + return err; + } + + return 0; +} + +void nbl_mailbox_free_irq(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_hw *hw = &adapter->hw; + u16 local_vector_id; + u32 irq_num; + + local_vector_id = adapter->num_lan_msix; + /* The AF has a hidden forward queue used to process + * protocol packets. + */ + local_vector_id += is_af(hw) ?
1 : 0; + irq_num = adapter->msix_entries[local_vector_id].vector; + + devm_free_irq(dev, irq_num, hw); +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/mailbox.h b/drivers/net/ethernet/nebula-matrix/m1600/mailbox.h new file mode 100644 index 0000000000000000000000000000000000000000..9493fc0db89ca2f227a8cdb308f2357ea5dc3463 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/mailbox.h @@ -0,0 +1,489 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#ifndef _NBL_MAILBOX_H_ +#define _NBL_MAILBOX_H_ + +#include + +#define NBL_MAILBOX_TX_DESC(tx_ring, i) \ + (&(((struct nbl_mailbox_tx_desc *)((tx_ring)->desc))[i])) +#define NBL_MAILBOX_RX_DESC(rx_ring, i) \ + (&(((struct nbl_mailbox_rx_desc *)((rx_ring)->desc))[i])) +#define NBL_MAILBOX_TX_BUF(tx_ring, i) (&(((tx_ring)->buf)[i])) +#define NBL_MAILBOX_RX_BUF(rx_ring, i) (&(((rx_ring)->buf)[i])) + +#define NBL_MAILBOX_TX_WAIT_US 100 +#define NBL_MAILBOX_TX_WAIT_TIMES 10000 +#define NBL_MAILBOX_TX_UPDATE_NOTIFY_LIMITS (NBL_MAILBOX_TX_WAIT_TIMES / 4) + +typedef void (*nbl_mailbox_msg_handler)(struct nbl_hw *, void *, u32); + +/* WARNING: please keep consistent with PMD driver */ +enum nbl_mailbox_msg_type { + NBL_MAILBOX_ACK, + NBL_MAILBOX_CFG_MSIX_MAP_TABLE, + NBL_MAILBOX_DESTROY_MSIX_MAP_TABLE, + NBL_MAILBOX_ENABLE_MAILBOX_IRQ, + NBL_MAILBOX_DISABLE_MAILBOX_IRQ, + NBL_MAILBOX_GET_VSI_ID, + NBL_MAILBOX_REGISTER_VF_BAR_INFO, + NBL_MAILBOX_GET_VF_BAR_BASE_ADDR, + NBL_MAILBOX_CFG_QID_MAP, + NBL_MAILBOX_CLEAR_QID_MAP, + NBL_MAILBOX_CFG_PROMISC, + NBL_MAILBOX_CFG_INGRESS_ETH_PORT_TABLE, + NBL_MAILBOX_CFG_SRC_VSI_TABLE, + NBL_MAILBOX_CFG_DEST_VSI_TABLE, + NBL_MAILBOX_CFG_TX_RING, + NBL_MAILBOX_CFG_RX_RING, + NBL_MAILBOX_CFG_QUEUE_MAP, + NBL_MAILBOX_CONTROL_QUEUE, + NBL_MAILBOX_RESET_TX_QUEUE, + NBL_MAILBOX_RESET_RX_QUEUE, + NBL_MAILBOX_WAIT_RX_QUEUE_RESET_DONE, + NBL_MAILBOX_CFG_PORT_MAP, + NBL_MAILBOX_CFG_RSS_GROUP_TABLE, + NBL_MAILBOX_CFG_MSIX_IRQ, + NBL_MAILBOX_CLEAR_MSIX_IRQ_CONF, + NBL_MAILBOX_ETH_TX_ENABLE, + NBL_MAILBOX_ETH_RX_ENABLE, + NBL_MAILBOX_ETH_TX_DISABLE, + NBL_MAILBOX_ETH_RX_DISABLE, + NBL_MAILBOX_ENTER_FORWARD_RING_MODE, + NBL_MAILBOX_LEAVE_FORWARD_RING_MODE, + NBL_MAILBOX_GET_FIRMWARE_VERSION, + NBL_MAILBOX_GET_MODULE_EEPROM, + NBL_MAILBOX_GET_MODULE_INFO, + NBL_MAILBOX_GET_EEPROM, + NBL_MAILBOX_CHECK_MODULE_INPLACE, + NBL_MAILBOX_GET_RXLOS, + NBL_MAILBOX_RESET_ETH, + NBL_MAILBOX_CONFIG_MODULE_SPEED, + NBL_MAILBOX_GET_LINK_SPEED, + NBL_MAILBOX_REG_TEST, + NBL_MAILBOX_GET_ETHTOOL_DUMP_REGS, + NBL_MAILBOX_GET_BOARD_INFO, + NBL_MAILBOX_QUERY_LINK_STATUS, + NBL_MAILBOX_SET_PHY_ID, + NBL_MAILBOX_SET_PAUSEPARAM, + NBL_MAILBOX_WRITE_MAC_TO_LOGIC, + NBL_MAILBOX_GET_PAUSE_STATS, + NBL_MAILBOX_INIT_PKT_LEN_LIMIT, + NBL_MAILBOX_GET_COALESCE, + NBL_MAILBOX_SET_COALESCE, + NBL_MAILBOX_GET_ETH_STATS, + NBL_MAILBOX_CONFIGURE_MAC_ADDR, + NBL_MAILBOX_CLEAR_MAC_ADDR, + NBL_MAILBOX_CHANGE_MAC_ADDR, + NBL_MAILBOX_OPERATE_VLAN_ID, + NBL_MAILBOX_GET_PMD_VSI_STATS, + NBL_MAILBOX_HELLO_MSG, /* when pf install, send this msg to af */ + NBL_MAILBOX_GOODBYE_MSG, /* when af remove, send this msg to pf , only use in pmd */ + NBL_MAILBOX_RESOURE_RELEASE_DONE, /* when pf release done, send this msg to af */ + NBL_MAILBOX_TYPE_MAX, +}; + +struct nbl_mailbox_ack_msg_ret { + unsigned int req_msg_type; + int err; +} __packed; + +struct nbl_mailbox_cfg_msix_map_table_arg { + u16 requested; +}; + +struct nbl_mailbox_dummy_arg { + int dummy; +}; + +struct 
nbl_mailbox_enable_mailbox_irq_arg { + u16 vector_id; +}; + +struct nbl_mailbox_disable_mailbox_irq_arg { + u16 local_vector_id; +}; + +struct nbl_mailbox_register_vf_bar_info_arg { + u64 vf_bar_start; + u64 vf_bar_len; +}; + +struct nbl_mailbox_cfg_qid_map_arg { + u8 num_queues; + u64 notify_addr; +}; + +struct nbl_mailbox_clear_qid_map_arg { + u64 notify_addr; +}; + +struct nbl_mailbox_cfg_promisc_arg { + u8 eth_port_id; + bool enable; +}; + +struct nbl_mailbox_cfg_ingress_eth_port_table_arg { + u8 eth_port_id; + u8 vsi_id; +}; + +struct nbl_mailbox_cfg_src_vsi_table_arg { + u8 eth_port_id; + u8 vsi_id; +}; + +struct nbl_mailbox_cfg_dest_vsi_table_arg { + u8 eth_port_id; + u8 vsi_id; +}; + +struct nbl_mailbox_cfg_tx_ring_arg { + u8 vsi_id; + u8 local_queue_id; + u16 desc_num; + dma_addr_t dma; +}; + +struct nbl_mailbox_cfg_rx_ring_arg { + u8 local_queue_id; + u16 desc_num; + u32 buf_len; + dma_addr_t dma; +}; + +struct nbl_mailbox_cfg_queue_map_arg { + bool rx; + bool enable; + bool msix_enable; + u8 local_queue_id; + u16 local_vector_id; +}; + +struct nbl_mailbox_control_queue_arg { + bool rx; + bool enable; + u8 local_queue_id; +}; + +struct nbl_mailbox_reset_tx_queue_arg { + u8 local_queue_id; +}; + +struct nbl_mailbox_reset_rx_queue_arg { + u8 local_queue_id; +}; + +struct nbl_mailbox_wait_rx_queue_reset_done_arg { + u8 local_queue_id; +}; + +struct nbl_mailbox_cfg_port_map_arg { + u8 eth_port_id; + u8 local_queue_id; +}; + +struct nbl_mailbox_cfg_rss_group_table_arg { + u8 vsi_id; + u8 rx_queue_num; +}; + +struct nbl_mailbox_cfg_msix_irq_arg { + u16 local_vector_id; +}; + +struct nbl_mailbox_clear_msix_irq_conf_arg { + u16 local_vector_id; +}; + +struct nbl_mailbox_eth_tx_enable_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_eth_tx_disable_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_eth_rx_enable_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_eth_rx_disable_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_enter_forward_ring_mode_arg { + u8 eth_port_id; + u8 vsi_id; +}; + +struct nbl_mailbox_leave_forward_ring_mode_arg { + u8 eth_port_id; + u8 vsi_id; +}; + +struct nbl_mailbox_get_module_eeprom_arg { + u8 eth_port_id; + struct ethtool_eeprom eeprom; +}; + +struct nbl_mailbox_get_module_info_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_get_eeprom_arg { + u32 offset; + u32 length; +}; + +struct nbl_mailbox_check_module_inplace_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_get_rxlos_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_reset_eth_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_config_module_speed_arg { + u8 target_speed; + u8 eth_port_id; +}; + +struct nbl_mailbox_get_link_speed_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_reg_test_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_get_ethtool_dump_regs_arg { + u32 count; +}; + +struct nbl_mailbox_get_board_info_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_query_link_status_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_set_phy_id_arg { + u8 eth_port_id; + enum ethtool_phys_id_state state; +}; + +struct nbl_mailbox_set_pause_arg { + u8 eth_port_id; + struct nbl_fc_info fc; +}; + +struct nbl_mailbox_write_mac_to_logic_arg { + u8 eth_port_id; + u8 smac[ETH_ALEN]; +}; + +struct nbl_mailbox_get_pause_stats_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_init_pkt_len_limit_arg { + u8 eth_port_id; + struct nbl_pkt_len_limit pkt_len_limit; +}; + +struct nbl_mailbox_get_coalesce_arg { + u16 local_vector_id; +}; + +struct nbl_mailbox_set_coalesce_arg { + u32 regval; + u16 local_vector_id; + u16 
num_q_vectors; +}; + +struct nbl_mailbox_get_eth_stats_arg { + u8 eth_port_id; +}; + +struct nbl_mailbox_configure_mac_addr_arg { + u8 mac_addr[ETH_ALEN]; + u8 eth_port_id; + u8 vsi_id; +}; + +struct nbl_mailbox_change_mac_addr_arg { + u8 mac_addr[ETH_ALEN]; + u8 vsi_id; +}; + +struct nbl_mailbox_operate_vlan_id_arg { + u8 vsi_id; + u16 vlan_id; + bool add; +}; + +#define NBL_PMD_MAX_QUEUE_NUM (16) + +struct nbl_pmd_stats { + u16 nb_rx_queues; + u32 pkt_drop_cnt[NBL_PMD_MAX_QUEUE_NUM]; + u32 eth_ipackets; + u64 eth_ibytes; + u32 eth_opackets; + u64 eth_obytes; + u64 ierrors; + u64 oerrors; +}; + +struct nbl_mailbox_get_pmd_stats_arg { + u8 eth_port_id; +}; + +static inline void nbl_mailbox_update_txq_tail_ptr(struct nbl_hw *hw, u16 tail_ptr) +{ + /* local_qid 0 and 1 denote rx and tx queue respectively */ + u32 local_qid = 1; + u32 value = ((u32)tail_ptr << 16) | local_qid; + + mb_wr32(hw, NBL_MAILBOX_NOTIFY_ADDR, value); +} + +static inline void nbl_mailbox_update_rxq_tail_ptr(struct nbl_hw *hw, u16 tail_ptr) +{ + /* local_qid 0 and 1 denote rx and tx queue respectively */ + u32 local_qid = 0; + u32 value = ((u32)tail_ptr << 16) | local_qid; + + mb_wr32(hw, NBL_MAILBOX_NOTIFY_ADDR, value); +} + +int nbl_setup_mailbox(struct nbl_hw *hw); +void nbl_teardown_mailbox(struct nbl_hw *hw); + +int nbl_mailbox_req_cfg_msix_map_table(struct nbl_hw *hw, u16 requested); +void nbl_mailbox_req_destroy_msix_map_table(struct nbl_hw *hw); + +void nbl_mailbox_enable_irq(struct nbl_adapter *adapter); +void nbl_mailbox_disable_irq(struct nbl_adapter *adapter); + +int nbl_mailbox_req_get_vsi_id(struct nbl_hw *hw); + +int nbl_mailbox_req_register_vf_bar_info(struct nbl_hw *hw, u64 vf_bar_start, u64 vf_bar_len); + +int nbl_mailbox_req_get_vf_bar_base_addr(struct nbl_hw *hw, u64 *base_addr); + +int nbl_mailbox_req_cfg_qid_map(struct nbl_hw *hw, u8 num_queues, u64 notify_addr); +void nbl_mailbox_req_clear_qid_map(struct nbl_hw *hw, u64 notify_addr); + +void nbl_mailbox_req_enable_promisc(struct nbl_hw *hw, u8 eth_port_id); +void nbl_mailbox_req_disable_promisc(struct nbl_hw *hw, u8 eth_port_id); + +void nbl_mailbox_req_cfg_ingress_eth_port_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); + +void nbl_mailbox_req_cfg_src_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +void nbl_mailbox_req_cfg_dest_vsi_table(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); + +void nbl_mailbox_req_cfg_tx_ring(struct nbl_hw *hw, dma_addr_t dma, u16 desc_num, + u8 vsi_id, u8 local_queue_id); +void nbl_mailbox_req_cfg_rx_ring(struct nbl_hw *hw, dma_addr_t dma, u16 desc_num, + u32 buf_len, u8 local_queue_id); + +void nbl_mailbox_req_cfg_queue_map(struct nbl_hw *hw, u8 local_queue_id, bool rx, + u16 local_vector_id, bool enable, bool msix_enable); + +void nbl_mailbox_req_control_queue(struct nbl_hw *hw, u8 local_queue_id, bool rx, bool enable); + +int nbl_mailbox_req_reset_tx_queue(struct nbl_hw *hw, u8 local_queue_id); +int nbl_mailbox_req_reset_rx_queue(struct nbl_hw *hw, u8 local_queue_id); +int nbl_mailbox_req_wait_rx_queue_reset_done(struct nbl_hw *hw, u8 local_queue_id); + +void nbl_mailbox_req_cfg_port_map(struct nbl_hw *hw, u8 eth_port_id, u8 tx_queue_num); + +void nbl_mailbox_req_cfg_rss_group_table(struct nbl_hw *hw, u8 vsi_id, u8 rx_queue_num); + +void nbl_mailbox_req_cfg_msix_irq(struct nbl_hw *hw, u16 local_vector_id); +void nbl_mailbox_req_clear_msix_irq_conf(struct nbl_hw *hw, u16 local_vector_id); + +void nbl_mailbox_req_eth_tx_enable(struct nbl_adapter *adapter, u8 eth_port_id); +void 
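/* nbl_mailbox_update_txq_tail_ptr()/nbl_mailbox_update_rxq_tail_ptr() above
 * encode the mailbox doorbell as value = ((u32)tail_ptr << 16) | local_qid,
 * with local_qid 0 for the mailbox RX queue and 1 for the TX queue. For
 * example, advancing the mailbox TX tail pointer to 4 ends up as
 *
 *	nbl_mailbox_update_txq_tail_ptr(hw, 4);
 *	   -> mb_wr32(hw, NBL_MAILBOX_NOTIFY_ADDR, (4u << 16) | 1);  // 0x00040001
 */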
nbl_mailbox_req_eth_rx_enable(struct nbl_adapter *adapter, u8 eth_port_id); +void nbl_mailbox_req_eth_tx_disable(struct nbl_adapter *adapter, u8 eth_port_id); +void nbl_mailbox_req_eth_rx_disable(struct nbl_adapter *adapter, u8 eth_port_id); + +#ifdef CONFIG_PCI_IOV +void nbl_mailbox_req_enter_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +void nbl_mailbox_req_leave_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +#endif + +u32 nbl_mailbox_req_get_firmware_version(struct nbl_hw *hw); +int nbl_mailbox_req_get_module_eeprom(struct nbl_hw *hw, u8 eth_port_id, + struct ethtool_eeprom *eeprom, u8 *data); +int nbl_mailbox_req_get_module_info(struct nbl_hw *hw, u8 eth_port_id, + struct ethtool_modinfo *info); + +int nbl_mailbox_req_get_eeprom(struct nbl_hw *hw, u32 offset, u32 length, u8 *bytes); + +enum NBL_MODULE_INPLACE_STATUS +nbl_mailbox_req_check_module_inplace(struct nbl_hw *hw, u8 eth_port_id); + +u32 nbl_mailbox_req_get_rxlos(struct nbl_hw *hw, u8 eth_port_id); + +void nbl_mailbox_req_reset_eth(struct nbl_hw *hw, u8 eth_port_id); + +int nbl_mailbox_req_config_module_speed(struct nbl_hw *hw, u8 target_speed, u8 eth_port_id); + +int nbl_mailbox_req_link_speed(struct nbl_hw *hw, u8 eth_port_id, u32 *speed_stat); + +u64 nbl_mailbox_req_reg_test(struct nbl_hw *hw, u8 port_id); + +int nbl_mailbox_req_get_ethtool_dump_regs(struct nbl_hw *hw, u32 *regs_buff, u32 count); + +int nbl_mailbox_req_get_board_info(struct nbl_hw *hw, u8 eth_port_id, + union nbl_board_info *board_info); + +bool nbl_mailbox_req_query_link_status(struct nbl_hw *hw, u8 eth_port_id); + +int nbl_mailbox_req_set_phy_id(struct nbl_hw *hw, u8 eth_port_id, enum ethtool_phys_id_state state); + +void nbl_mailbox_req_set_pauseparam(struct nbl_hw *hw, u8 eth_port_id, struct nbl_fc_info fc); + +void nbl_mailbox_req_write_mac_to_logic(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr); + +void nbl_mailbox_req_get_pause_stats(struct nbl_hw *hw, u8 eth_port_id, + struct ethtool_pause_stats *stats); + +void nbl_mailbox_req_init_pkt_len_limit(struct nbl_hw *hw, u8 eth_port_id, + struct nbl_pkt_len_limit pkt_len_limit); + +int nbl_mailbox_req_get_coalesce(struct nbl_hw *hw, struct ethtool_coalesce *ec, + u16 local_vector_id); +int nbl_mailbox_req_set_coalesce(struct nbl_hw *hw, u16 local_vector_id, + u16 num_q_vectors, u32 regval); + +int nbl_mailbox_req_get_eth_stats(struct nbl_hw *hw, u8 eth_port_id, + struct nbl_hw_stats *hw_stats); + +int nbl_mailbox_req_configure_mac_addr(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr, u8 vsi_id); +int nbl_mailbox_req_clear_mac_addr(struct nbl_hw *hw); + +int nbl_mailbox_req_change_mac_addr(struct nbl_hw *hw, u8 *mac_addr, u8 vsi_id); + +int nbl_mailbox_req_operate_vlan_id(struct nbl_hw *hw, u16 vlan_id, u8 vsi_id, bool add); + +void nbl_clean_mailbox_subtask(struct nbl_adapter *adapter); + +int nbl_mailbox_request_irq(struct nbl_adapter *adapter); +void nbl_mailbox_free_irq(struct nbl_adapter *adapter); + +void nbl_af_set_mailbox_bdf_for_all_func(struct nbl_hw *hw); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/main.c b/drivers/net/ethernet/nebula-matrix/m1600/main.c new file mode 100644 index 0000000000000000000000000000000000000000..a3c7d03d50bbec74c65ef3d3baa6a303e0d4d0f2 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/main.c @@ -0,0 +1,1530 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Monte Song + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "hw.h" +#include "common.h" +#include "ethtool.h" +#include "interrupt.h" +#include "txrx.h" +#include "mailbox.h" +#include "hwmon.h" +#include "macvlan.h" +#include "sriov.h" + +/* Workqueue 1 handles mailbox work and other work items that conflict with + * those in workqueue 2, while workqueue 2 handles the link status + * monitoring task. Two separate workqueues are used because work items such + * as link status monitoring wait for mailbox work to complete; queueing + * them all on one workqueue would lead to deadlock. + */ +#define NBL_X4_WQ1 "nbl_X4_wq1" +#define NBL_X4_WQ2 "nbl_X4_wq2" +static struct workqueue_struct *nbl_wq1; +static struct workqueue_struct *nbl_wq2; + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Network interface message level setting"); + +static u32 ring_num = NBL_DEFAULT_RING_NUM; +module_param(ring_num, uint, 0444); +MODULE_PARM_DESC(ring_num, "Netdev ring num setting, with a maximum value of 16"); + +static u32 vf_ring_num = NBL_VF_DEFAULT_RING_NUM; +module_param(vf_ring_num, uint, 0444); +MODULE_PARM_DESC(vf_ring_num, "Netdev ring num setting of VF, with a maximum value of 16"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + +void nbl_af_write_mac_to_logic(struct nbl_hw *hw, u8 eth_port_id, u8 *mac_addr) +{ + struct nbl_ped_port_smac ped_port_smac = { 0 }; + u32 *smac = (u32 *)&ped_port_smac; + + memcpy(ped_port_smac.smac, mac_addr, ETH_ALEN); + wr32(hw, NBL_PED_PORT_SMAC_REG_L(eth_port_id), *smac); + wr32(hw, NBL_PED_PORT_SMAC_REG_H(eth_port_id), *(smac + 1)); +} + +static void nbl_mac_addr_random_init(u8 *mac_addr) +{ + mac_addr[0] = 0x68; + mac_addr[1] = 0xbe; + mac_addr[2] = 0x49; + mac_addr[3] = 0x10; + + get_random_bytes(&mac_addr[4], ETH_ALEN - 4); +} + +int nbl_af_get_board_info(struct nbl_hw *hw, u8 eth_port_id, union nbl_board_info *board_info) +{ + u8 *addr = (u8 *)board_info; + u32 i; + int ret; + + for (i = 0; i < sizeof(union nbl_board_info); i++, addr++) { + ret = nbl_read_eeprom_byte(hw, i, addr); + if (ret < 0) + return ret; + } + + return ret; +} + +static bool nbl_check_board_info(u8 *addr, u8 *magic, u32 len, u32 crc) +{ + u32 checksum = 0; + u32 i; + + if (strncmp((char *)magic, NBL_MAC_X4_MAGIC, NBL_MAC_MAGIC_LEN)) + return false; + + for (i = 0; i < (len - sizeof(u32)); i++) { + checksum += *addr; + addr++; + } + + return checksum == crc; +} + +void nbl_write_mac_to_logic(struct nbl_hw *hw, u8 *mac_addr) +{ + if (is_af(hw)) + nbl_af_write_mac_to_logic(hw, hw->eth_port_id, mac_addr); + else + nbl_mailbox_req_write_mac_to_logic(hw, hw->eth_port_id, mac_addr); +} + +static void nbl_mac_addr_init(struct nbl_adapter *adapter, u8 *mac_addr) +{ + union nbl_board_info board_info = { 0 }; + u8 eth_port_id = adapter->hw.eth_port_id; + struct nbl_hw *hw = &adapter->hw; + int err; + + if (is_vf(hw)) { + nbl_mac_addr_random_init(mac_addr); + return; + } + + if (is_af(hw)) + err = nbl_af_get_board_info(hw, eth_port_id, &board_info); + else + err = nbl_mailbox_req_get_board_info(hw, eth_port_id, &board_info); + + if (err || !nbl_check_board_info((u8 *)&board_info, board_info.magic, + sizeof(union nbl_board_info), board_info.crc)) { + nbl_mac_addr_random_init(mac_addr); + } else { + if (eth_port_id == 0) + memcpy(mac_addr, board_info.mac1, ETH_ALEN); + else if (eth_port_id == 1) + memcpy(mac_addr, 
board_info.mac2, ETH_ALEN); + else if (eth_port_id == 2) + memcpy(mac_addr, board_info.mac3, ETH_ALEN); + else if (eth_port_id == 3) + memcpy(mac_addr, board_info.mac4, ETH_ALEN); + } + + /* write mac addr to logic for sending pause frame */ + nbl_write_mac_to_logic(hw, mac_addr); +} + +void nbl_service_task1_schedule(struct nbl_adapter *adapter) +{ + queue_work(nbl_wq1, &adapter->serv_task1); +} + +void nbl_service_task2_schedule(struct nbl_adapter *adapter) +{ + if (test_bit(NBL_MAILBOX_READY, adapter->state)) + queue_work(nbl_wq2, &adapter->serv_task2); +} + +void nbl_service_task_schedule(struct nbl_adapter *adapter) +{ + queue_work(nbl_wq1, &adapter->serv_task1); + + if (test_bit(NBL_MAILBOX_READY, adapter->state)) + queue_work(nbl_wq2, &adapter->serv_task2); +} + +static void nbl_service_timer(struct timer_list *t) +{ + struct nbl_adapter *adapter = from_timer(adapter, t, serv_timer); + + mod_timer(&adapter->serv_timer, round_jiffies(adapter->serv_timer_period + jiffies)); + nbl_service_task_schedule(adapter); +} + +enum NBL_MODULE_INPLACE_STATUS nbl_af_check_module_inplace(struct nbl_hw *hw, u8 eth_port_id) +{ + /* low means module inplace */ + if (!(rd32(hw, NBL_LSP_SFP_MOD_REG(eth_port_id)) & BIT(0))) + return NBL_MODULE_INPLACE; + + return NBL_MODULE_NOT_INPLACE; +} + +static inline enum NBL_MODULE_INPLACE_STATUS +nbl_check_module_inplace(struct nbl_hw *hw, u8 eth_port_id) +{ + if (is_af(hw)) + return nbl_af_check_module_inplace(hw, eth_port_id); + else + return nbl_mailbox_req_check_module_inplace(hw, eth_port_id); +} + +static int nbl_get_module_eeprom_by_offset(struct nbl_hw *hw, u8 eth_port_id, + unsigned int offset, u8 *data, unsigned int len) +{ + struct ethtool_eeprom eeprom; + + eeprom.offset = offset; + eeprom.len = len; + + if (is_af(hw)) + return nbl_af_get_module_eeprom(hw, eth_port_id, &eeprom, data); + else + return nbl_mailbox_req_get_module_eeprom(hw, eth_port_id, &eeprom, data); +} + +static bool nbl_check_module_identifier(struct nbl_hw *hw, u8 eth_port_id) +{ + u8 identifier; + int ret; + + ret = nbl_get_module_eeprom_by_offset(hw, eth_port_id, SFF_8472_IDENTIFIER, + &identifier, sizeof(identifier)); + if (ret) { + pr_err("Read SFF_8472_IDENTIFIER register failed, eth_port: %d, ret: %d\n", + eth_port_id, ret); + return 0; + } + + return (identifier == SFF_IDENTIFIER_SFP); +} + +static int nbl_get_module_bitrate(struct nbl_hw *hw, u8 eth_port_id) +{ + int ret; + int bit_rate; + u8 br_nom; + u8 br_max; + + ret = nbl_get_module_eeprom_by_offset(hw, eth_port_id, SFF_8472_SIGNALING_RATE, + &br_nom, sizeof(br_nom)); + if (ret) { + pr_err("Read SFF_8472_SIGNALING_RATE register failed, eth_port: %d\n", + eth_port_id); + return ret; + } + + ret = nbl_get_module_eeprom_by_offset(hw, eth_port_id, SFF_8472_SIGNALING_RATE_MAX, + &br_max, sizeof(br_max)); + if (ret) { + pr_err("Read SFF_8472_SIGNALING_RATE_MAX register failed, eth_port: %d\n", + eth_port_id); + return ret; + } + + /* sff-8472 section 5.6 */ + if (br_nom == 0xFF) + bit_rate = (u32)br_max * 250; + else if (br_nom == 0) + bit_rate = 0; + else + bit_rate = (u32)br_nom * 100; + + return bit_rate; +} + +static int nbl_query_module_speed(struct nbl_hw *hw, u8 eth_port_id, + bool *support_10g, bool *support_1g) +{ + int ret; + u8 capa_10g; + u8 capa_1g; + u8 cable_tech; + int bit_rate; + + ret = nbl_get_module_eeprom_by_offset(hw, eth_port_id, SFF_8472_10GB_CAPABILITY, + &capa_10g, sizeof(capa_10g)); + if (ret) { + pr_err("Read SFF_8472_10GB_CAPABILITY register failed, eth_port: %d\n", + eth_port_id); + return ret; 
+ } + + /* check for support of 10G capability */ + if (capa_10g & BIT(SFF_8472_10G_SR_BIT) || capa_10g & BIT(SFF_8472_10G_LR_BIT) || + capa_10g & BIT(SFF_8472_10G_LRM_BIT) || capa_10g & BIT(SFF_8472_10G_ER_BIT)) + *support_10g = true; + + ret = nbl_get_module_eeprom_by_offset(hw, eth_port_id, SFF_8472_1GB_CAPABILITY, + &capa_1g, sizeof(capa_1g)); + if (ret) { + pr_err("Read SFF_8472_1GB_CAPABILITY register failed, eth_port: %d\n", + eth_port_id); + return ret; + } + + /* check for support of 1G capability */ + if (capa_1g & BIT(SFF_8472_1G_SX_BIT) || capa_1g & BIT(SFF_8472_1G_LX_BIT) || + capa_1g & BIT(SFF_8472_1G_CX_BIT) || capa_1g & BIT(SFF_8472_1G_T_BIT)) + *support_1g = true; + + bit_rate = nbl_get_module_bitrate(hw, eth_port_id); + if (bit_rate < 0) { + pr_warn("Eth module %d read bit_rate failed\n", eth_port_id); + return bit_rate; + } + + switch (bit_rate / 1000) { + case 10: + *support_10g = true; + break; + case 1: + *support_1g = true; + break; + default: + pr_warn("Read module bit rate not 10G or 1G, eth port: %d\n", eth_port_id); + break; + } + + if (!(*support_10g) && !(*support_1g)) { + /* if it is a passive cable, set to 10G */ + ret = nbl_get_module_eeprom_by_offset(hw, eth_port_id, SFF_8472_CABLE_TECHNOLOGY, + &cable_tech, sizeof(cable_tech)); + if (ret) { + pr_err("Read SFF_8472_CABLE_TECHNOLOGY register failed, eth_port: %d\n", + eth_port_id); + return ret; + } + + if (cable_tech & SFF_PASSIVE_CABLE) + *support_10g = true; + } + + return 0; +} + +int nbl_af_config_module_speed(struct nbl_hw *hw, u8 target_speed, u8 eth_port_id) +{ + int speed_sel; + u32 timeout = 100; + enum nbl_eth_speed_mode current_speed; + struct nbl_loopback_mode loopback_mode; + struct nbl_eth_rx_stat eth_rx_stat; + + if (target_speed == NBL_MODULE_SPEED_1G) + speed_sel = NBL_ETH_SPEED_MODE_1G; + else + speed_sel = NBL_ETH_SPEED_MODE_10G; + + rd32_for_each(hw, NBL_ETH_LOOPBACK_MODE_REG(eth_port_id), + (u32 *)&loopback_mode, sizeof(loopback_mode)); + current_speed = loopback_mode.speed_stat; + + if (speed_sel == current_speed) + return current_speed; + + /* configure register to switch speed */ + loopback_mode.speed_sel = speed_sel; + wr32_for_each(hw, NBL_ETH_LOOPBACK_MODE_REG(eth_port_id), + (u32 *)&loopback_mode, sizeof(loopback_mode)); + + /* reset eth after changing speed */ + wr32(hw, NBL_ETH_RESET_REG(eth_port_id), 0x1E); + wr32(hw, NBL_ETH_RESET_REG(eth_port_id), 0x0); + + /* make sure register loopback_mode.speed_sel is configured */ + mb(); + + /* wait for the serdes speed switch to finish */ + while (timeout) { + rd32_for_each(hw, NBL_ETH_RX_STAT_REG(eth_port_id), + (u32 *)&eth_rx_stat, sizeof(eth_rx_stat)); + + if (eth_rx_stat.switching == 0) + break; + + usleep_range(100, 200); + timeout--; + } + if (timeout == 0) { + pr_warn("Wait switching serdes speed timeout, eth port: %d\n", eth_port_id); + return -ETIMEDOUT; + } + + /* check whether the speed switch succeeded */ + rd32_for_each(hw, NBL_ETH_LOOPBACK_MODE_REG(eth_port_id), + (u32 *)&loopback_mode, sizeof(loopback_mode)); + if (loopback_mode.speed_stat != speed_sel) { + pr_err("Module speed set failed, eth port: %d\n", eth_port_id); + return -ERANGE; + } + + return speed_sel; +} + +static void nbl_config_module_speed(struct nbl_hw *hw, u8 target_speed) +{ + int speed_stat; + u8 eth_port_id = hw->eth_port_id; + + if (is_af(hw)) + speed_stat = nbl_af_config_module_speed(hw, target_speed, eth_port_id); + else + speed_stat = nbl_mailbox_req_config_module_speed(hw, target_speed, eth_port_id); + + if (speed_stat == NBL_ETH_SPEED_MODE_10G) { + pr_info("Eth port %d speed mode: 
10G", eth_port_id); + __clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, hw->advertising); + __clear_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, hw->advertising); + } else if (speed_stat == NBL_ETH_SPEED_MODE_1G) { + pr_info("Eth port %d speed mode: 1G", eth_port_id); + __clear_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, hw->advertising); + __clear_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, hw->advertising); + __clear_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, hw->advertising); + } else { + pr_err("Speed set failed with err: %d, eth_port: %d\n", speed_stat, eth_port_id); + } +} + +void nbl_set_module_speed(struct nbl_hw *hw, u8 target_speed) +{ + u8 module_support = hw->module_support_speed; + + /* target_speed only use when support both 1G and 10G */ + switch (module_support) { + case (NBL_MODULE_SPEED_10G | NBL_MODULE_SPEED_1G): + nbl_config_module_speed(hw, target_speed); + break; + case NBL_MODULE_SPEED_1G: + nbl_config_module_speed(hw, NBL_MODULE_SPEED_1G); + break; + case NBL_MODULE_SPEED_10G: + default: + nbl_config_module_speed(hw, NBL_MODULE_SPEED_10G); + break; + } +} + +void nbl_check_and_set_module_info(struct nbl_hw *hw) +{ + int ret; + enum NBL_MODULE_INPLACE_STATUS inplace; + bool support_10g = false; + bool support_1g = false; + u8 eth_port_id = hw->eth_port_id; + + inplace = nbl_check_module_inplace(hw, eth_port_id); + + hw->module_inplace = inplace; + + if (inplace == NBL_MODULE_NOT_INPLACE) { + hw->module_support_speed = 0; + return; + } + if (!nbl_check_module_identifier(hw, eth_port_id)) { + pr_warn("Module identifier check failed, eth port: %d\n", eth_port_id); + return; + } + + ret = nbl_query_module_speed(hw, eth_port_id, &support_10g, &support_1g); + if (ret) { + pr_warn("Get module speed failed with ret: %d, eth port: %d\n", + ret, eth_port_id); + return; + } + + if (support_10g) + hw->module_support_speed |= NBL_MODULE_SPEED_10G; + + if (support_1g) + hw->module_support_speed |= NBL_MODULE_SPEED_1G; + + nbl_set_module_speed(hw, hw->module_support_speed); +} + +static void nbl_check_module_subtask(struct nbl_hw *hw) +{ + enum NBL_MODULE_INPLACE_STATUS inplace; + + inplace = nbl_check_module_inplace(hw, hw->eth_port_id); + if (inplace == hw->module_inplace) + return; + else if (inplace == NBL_MODULE_INPLACE) + pr_info("Eth port %d module plugged in\n", hw->eth_port_id); + else + pr_info("Eth port %d module pulled out\n", hw->eth_port_id); + + nbl_check_and_set_module_info(hw); +} + +u32 nbl_af_get_rxlos(struct nbl_hw *hw, u8 eth_port_id) +{ + return rd32(hw, NBL_LSP_SFP_RXLOS_REG(eth_port_id)); +} + +static u32 nbl_get_rxlos(struct nbl_hw *hw) +{ + if (is_af(hw)) + return nbl_af_get_rxlos(hw, hw->eth_port_id); + else + return nbl_mailbox_req_get_rxlos(hw, hw->eth_port_id); +} + +static bool nbl_eth_status_correct(struct nbl_hw *hw) +{ + u32 rxlos; + + if (nbl_query_link_status(hw)) + return true; + + rxlos = nbl_get_rxlos(hw); + return (bool)(rxlos & BIT(0)); +} + +void nbl_af_reset_eth(struct nbl_hw *hw, u8 eth_port_id) +{ + wr32(hw, NBL_ETH_RESET_REG(eth_port_id), 0x1E); + wr32(hw, NBL_ETH_RESET_REG(eth_port_id), 0x00); +} + +static void nbl_reset_eth(struct nbl_hw *hw, u8 eth_port_id) +{ + if (is_af(hw)) + 
nbl_af_reset_eth(hw, eth_port_id); + else + nbl_mailbox_req_reset_eth(hw, eth_port_id); +} + +static void nbl_eth_self_healing_subtask(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id = hw->eth_port_id; + struct nbl_healing_var *healing_var = &adapter->healing_var; + + if (is_vf(hw)) + return; + + if (healing_var->status_chk_timer >= 5) { + healing_var->status_chk_timer = 0; + if (!nbl_eth_status_correct(hw)) { + healing_var->bad_code_increase = 0; + nbl_reset_eth(hw, eth_port_id); + adapter->stats.err_status_reset++; + return; + } + } + healing_var->status_chk_timer++; + + if (healing_var->bad_code_increase == 0) { + healing_var->former_bad_code = adapter->stats.rx_bad_code; + healing_var->bad_code_increase++; + return; + } + + if (healing_var->former_bad_code == adapter->stats.rx_bad_code) { + healing_var->bad_code_increase = 0; + } else { + healing_var->former_bad_code = adapter->stats.rx_bad_code; + healing_var->bad_code_increase++; + } + + if (healing_var->bad_code_increase > 3) { + healing_var->bad_code_increase = 0; + nbl_reset_eth(hw, eth_port_id); + adapter->stats.bad_code_reset++; + } +} + +static void nbl_service_task1(struct work_struct *work) +{ + struct nbl_adapter *adapter = container_of(work, struct nbl_adapter, serv_task1); + + nbl_clean_mailbox_subtask(adapter); +} + +static void nbl_service_task2(struct work_struct *work) +{ + struct nbl_adapter *adapter = container_of(work, struct nbl_adapter, serv_task2); + + nbl_reset_subtask(adapter); + nbl_query_link_status_subtask(adapter); + nbl_update_stats_subtask(adapter); + nbl_check_module_subtask(&adapter->hw); + nbl_eth_self_healing_subtask(adapter); +} + +static void nbl_adapter_init(struct nbl_adapter *adapter, unsigned int req_ring_num) +{ + adapter->num_txq = (u8)req_ring_num; + adapter->num_rxq = (u8)req_ring_num; + + adapter->tx_desc_num = NBL_DEFAULT_TX_DESC_NUM; + adapter->rx_desc_num = NBL_DEFAULT_RX_DESC_NUM; + + timer_setup(&adapter->serv_timer, nbl_service_timer, 0); + adapter->serv_timer_period = HZ; + INIT_WORK(&adapter->serv_task1, nbl_service_task1); + INIT_WORK(&adapter->serv_task2, nbl_service_task2); +} + +static void nbl_config_netdev(struct net_device *netdev) +{ + netdev_features_t csum_features; +#ifdef NBL_TSO + netdev_features_t tso_features; +#endif + + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features = netdev->features; +#ifdef NBL_TSO + tso_features = NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4; +#endif + csum_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_IPV6_CSUM; + + netdev->features |= csum_features; + netdev->hw_features |= csum_features; +} + +static void nbl_start_service_task(struct nbl_adapter *adapter) +{ + mod_timer(&adapter->serv_timer, round_jiffies(jiffies + adapter->serv_timer_period)); +} + +static void nbl_stop_service_task(struct nbl_adapter *adapter) +{ + del_timer_sync(&adapter->serv_timer); + + cancel_work_sync(&adapter->serv_task2); + cancel_work_sync(&adapter->serv_task1); +} + +static void nbl_up_complete(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + bool link_up; + + nbl_enable_all_napis(adapter); + + nbl_configure_msix_irqs(adapter); + + nbl_start_all_tx_rings(adapter); + nbl_start_all_rx_rings(adapter); + + nbl_eth_tx_enable(adapter); + nbl_eth_rx_enable(adapter); + + /* WARNING: Cannot call netif_carrier_on before + * ndo_start_xmit netdev ops is implemented. 
+ * Otherwise, the whole system will crash without + * any dmesg information. + */ + netif_tx_start_all_queues(netdev); + link_up = nbl_query_link_status(hw); + if (link_up) + netif_carrier_on(netdev); + + clear_bit(NBL_DOWN, adapter->state); +} + +int nbl_open(struct net_device *netdev) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + int err; + + netif_carrier_off(netdev); + + err = nbl_setup_rings(adapter); + if (err) + return err; + + nbl_hw_config_rings(adapter); + + nbl_alloc_all_rx_bufs(adapter); + + err = nbl_request_irq(adapter); + if (err) + goto request_irq_err; + + err = netif_set_real_num_tx_queues(netdev, adapter->num_txq); + if (err) + goto set_queue_num_err; + err = netif_set_real_num_rx_queues(netdev, adapter->num_rxq); + if (err) + goto set_queue_num_err; + + nbl_up_complete(adapter); + + return 0; + +set_queue_num_err: + nbl_free_irq(adapter); +request_irq_err: + nbl_free_all_rx_bufs(adapter); + nbl_teardown_rings(adapter); + + return err; +} + +static void nbl_down(struct nbl_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + set_bit(NBL_DOWN, adapter->state); + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + nbl_eth_rx_disable(adapter); + + nbl_stop_all_tx_rings(adapter); + nbl_stop_all_rx_rings(adapter); + + nbl_clear_msix_irqs_conf(adapter); + + nbl_disable_all_napis(adapter); + + nbl_eth_tx_disable(adapter); +} + +int nbl_stop(struct net_device *netdev) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + + nbl_down(adapter); + + nbl_free_irq(adapter); + + nbl_free_all_tx_bufs(adapter); + nbl_free_all_rx_bufs(adapter); + + nbl_teardown_rings(adapter); + + return 0; +} + +void nbl_do_reset(struct nbl_adapter *adapter) +{ + while (test_and_set_bit(NBL_RESETTING, adapter->state)) + usleep_range(1000, 2000); + + nbl_down(adapter); + nbl_free_all_tx_bufs(adapter); + nbl_free_all_rx_bufs(adapter); + + nbl_alloc_all_rx_bufs(adapter); + nbl_up_complete(adapter); + + clear_bit(NBL_RESETTING, adapter->state); +} + +static int nbl_change_mtu(struct net_device *netdev, int new_mtu) +{ + netdev->mtu = new_mtu; + return 0; +} + +static void nbl_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_ring *ring; + u64 bytes; + u64 packets; + unsigned int start; + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rxq; i++) { + ring = READ_ONCE(adapter->rx_rings[i]); + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_txq; i++) { + ring = READ_ONCE(adapter->tx_rings[i]); + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + + mutex_lock(&adapter->stats.lock); + stats->multicast = adapter->stats.rx_multicast; + stats->rx_errors = adapter->stats.rx_error_packets; + stats->tx_errors = adapter->stats.tx_error_packets; + stats->rx_length_errors = adapter->stats.rx_oversize + adapter->stats.rx_undersize; + stats->rx_crc_errors = adapter->stats.rx_bad_fcs; + stats->rx_frame_errors = adapter->stats.rx_frame_err; + stats->rx_dropped = 0; + stats->tx_dropped = 0; + 
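/* The per-ring packet/byte totals above are sampled with the
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair so that the
 * 64-bit counters are read consistently, while the aggregated hardware
 * counters copied into *stats here are serialized by adapter->stats.lock.
 */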
mutex_unlock(&adapter->stats.lock); +} + +static void nbl_init_link_setting(struct nbl_hw *hw) +{ + __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, hw->advertising); + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, hw->supported); + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, hw->advertising); + + nbl_check_and_set_module_info(hw); +} + +void nbl_af_set_pauseparam(struct nbl_hw *hw, u8 eth_port_id, struct nbl_fc_info fc) +{ + u32 rx_pause_reg_value; + u32 tx_pause_reg_value; + + rx_pause_reg_value = rd32(hw, NBL_PA_PAUSE_RX_EN); + rx_pause_reg_value &= ~(1 << (eth_port_id << 1)); + rx_pause_reg_value |= fc.rx_pause << (eth_port_id << 1); + wr32(hw, NBL_PA_PAUSE_RX_EN, rx_pause_reg_value); + + tx_pause_reg_value = rd32(hw, NBL_QM_PORT_TX_PAUSE_EN); + tx_pause_reg_value &= ~(1 << eth_port_id); + tx_pause_reg_value |= fc.tx_pause << eth_port_id; + wr32(hw, NBL_QM_PORT_TX_PAUSE_EN, tx_pause_reg_value); +} + +static void nbl_init_pauseparam(struct nbl_hw *hw) +{ + if (is_vf(hw)) + return; + + hw->fc.rx_pause = 1; + hw->fc.tx_pause = 1; + if (is_af(hw)) + nbl_af_set_pauseparam(hw, hw->eth_port_id, hw->fc); + else + nbl_mailbox_req_set_pauseparam(hw, hw->eth_port_id, hw->fc); +} + +static int nbl_set_mac_address(struct net_device *netdev, void *p) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + u8 vsi_id; + int err; + + vsi_id = hw->vsi_id; + if (vsi_id >= NBL_MAX_PF_FUNC) { + pr_err("It is not allowed to change mac address of VF\n"); + return -EOPNOTSUPP; + } + + if (!is_valid_ether_addr(addr->sa_data)) { + pr_err("We can not change to invalid mac address %pM\n", addr->sa_data); + return -EADDRNOTAVAIL; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + pr_info("We are already using mac address %pM\n", addr->sa_data); + return 0; + } + + err = nbl_change_mac_addr(hw, addr->sa_data); + if (err) { + pr_err("Failed to change mac address to %pM with error %d\n", addr->sa_data, err); + return err; + } + + ether_addr_copy(netdev->dev_addr, addr->sa_data); + + nbl_write_mac_to_logic(hw, (u8 *)addr->sa_data); + + return 0; +} + +static void nbl_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(NBL_DOWN, adapter->state)) { + set_bit(NBL_RESET_REQUESTED, adapter->state); + + nbl_service_task2_schedule(adapter); + } +} + +static int nbl_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, + u16 vid) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + if (vid >= VLAN_N_VID) + return -EINVAL; + + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + return nbl_add_vlan_id(hw, vid); +} + +static int nbl_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, + u16 vid) 
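/* Mirrors nbl_vlan_rx_add_vid() above: the default VLAN is never filtered,
 * so only non-default VLAN IDs are removed from the hardware filter via
 * nbl_delete_vlan_id().
 */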
+{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + if (vid >= VLAN_N_VID) + return -EINVAL; + + if (vid == NBL_DEFAULT_VLAN_ID) + return 0; + + return nbl_delete_vlan_id(hw, vid); +} + +static void nbl_set_rx_mode(struct net_device *netdev) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + if (netdev->flags & IFF_PROMISC) + nbl_enable_promisc(hw); + else + nbl_disable_promisc(hw); +} + +static netdev_features_t nbl_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + u32 l4_len, tot_len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + __be16 protocol, frag_off; + u8 l4_proto; + int ret; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < NBL_TX_MIN_GSO_SIZE)) + features &= ~NETIF_F_GSO_MASK; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + protocol = vlan_get_protocol(skb); + + if (skb->encapsulation) { + if (protocol == htons(ETH_P_IP)) { + l4_proto = ip.v4->protocol; + /* Only support IPV4 with no opt */ + if (ip.v4->ihl > 5) + goto out_rm_features; + } else if (protocol == htons(ETH_P_IPV6)) { + /* Now skip ipv6 with opt */ + if (ip.v6->nexthdr != NEXTHDR_NONE) + goto out_rm_features; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + if (ret < 0) + goto out_rm_features; + } else { + goto out_rm_features; + } + + switch (l4_proto) { + case IPPROTO_UDP: + case IPPROTO_GRE: + break; + default: + goto out_rm_features; + } + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + if (ip.v4->version == 4) + protocol = htons(ETH_P_IP); + if (ip.v6->version == 6) + protocol = htons(ETH_P_IPV6); + } + + if (protocol == htons(ETH_P_IP)) { + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + /* Skip inner pkt ipv6 with opt */ + if (ip.v6->nexthdr != NEXTHDR_NONE) + goto out_rm_features; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + goto out_rm_features; + } + + switch (l4_proto) { + case IPPROTO_TCP: + l4_len = (l4.tcp->doff) << 2; + break; + case IPPROTO_UDP: + l4_len = sizeof(struct udphdr); + break; + default: + goto out_rm_features; + } + + tot_len = l4.hdr - skb->data; + tot_len += l4_len; + + if (tot_len > NBL_TX_MAX_OFFLOAD_HEADER_LEN) + goto out_rm_features; + + return features; + +out_rm_features: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +static const struct net_device_ops nbl_netdev_ops = { + .ndo_open = nbl_open, + .ndo_stop = nbl_stop, + .ndo_start_xmit = nbl_start_xmit, + .ndo_get_stats64 = nbl_get_stats64, + .ndo_change_mtu = nbl_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = nbl_set_mac_address, + .ndo_tx_timeout = nbl_tx_timeout, + .ndo_vlan_rx_add_vid = nbl_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = nbl_vlan_rx_kill_vid, + .ndo_set_rx_mode = nbl_set_rx_mode, + .ndo_features_check = nbl_features_check, +}; + +static const struct pci_device_id nbl_id_table[] = { + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_X4_PF) }, + { PCI_DEVICE(NBL_VENDOR_ID, NBL_DEVICE_ID_X4_VF) }, + /* required as 
sentinel */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, nbl_id_table); + +static int nbl_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *id) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct nbl_adapter *adapter; + struct nbl_hw *hw; + u8 mac_addr[ETH_ALEN]; + int bar_mask; + int pci_using_dac; + u8 function_id; + enum nbl_func_type func_type; + unsigned int req_ring_num; + int err; + bool golden; + + err = pci_enable_device(pdev); + if (err) + return err; + + bar_mask = BIT(NBL_X4_MEMORY_BAR) | BIT(NBL_X4_MAILBOX_BAR); + err = pci_request_selected_regions(pdev, bar_mask, NBL_X4_DRIVER_NAME); + if (err) { + dev_err(dev, "Request memory bar and mailbox bar failed, err = %d\n", err); + goto request_bar_region_err; + } + + pci_using_dac = 1; + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(dev, "Configure DMA 64 bit mask failed, err = %d\n", err); + pci_using_dac = 0; + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + } + if (err) { + dev_err(dev, "Configure DMA 32 bit mask failed, err = %d\n", err); + goto configure_dma_err; + } + + pci_set_master(pdev); + + pci_enable_pcie_error_reporting(pdev); + pci_save_state(pdev); + + function_id = PCI_FUNC(pdev->devfn); + if (pdev->device == NBL_DEVICE_ID_X4_PF && !function_id) + func_type = NBL_X4_AF; + else if (pdev->device == NBL_DEVICE_ID_X4_PF) + func_type = NBL_X4_PF; + else + func_type = NBL_X4_VF; + + if (func_type == NBL_X4_VF) + req_ring_num = vf_ring_num; + else + req_ring_num = ring_num; + + if (req_ring_num > NBL_MAX_RING_NUM) + req_ring_num = NBL_MAX_RING_NUM; + /* We can allocate 16 tx/rx queues for each function + * at most, but for AF we must reserve a tx/rx queue + * pair which is a software forward queue used in + * SRIOV case. 
+ */ + if (func_type == NBL_X4_AF && req_ring_num > NBL_MAX_RING_NUM - 1) + req_ring_num = NBL_MAX_RING_NUM - 1; + if (req_ring_num > num_online_cpus()) + req_ring_num = num_online_cpus(); + if (!req_ring_num) { + if (func_type == NBL_X4_VF) + req_ring_num = NBL_VF_DEFAULT_RING_NUM; + else + req_ring_num = NBL_DEFAULT_RING_NUM; + } + + netdev = alloc_etherdev_mqs(sizeof(struct nbl_adapter), req_ring_num, req_ring_num); + if (!netdev) { + pr_err("Allocate net device failed\n"); + err = -ENOMEM; + goto alloc_netdev_err; + } + + nbl_config_netdev(netdev); + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + adapter = netdev_priv(netdev); + adapter->pdev = pdev; + adapter->netdev = netdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + mutex_init(&adapter->stats.lock); + hw = &adapter->hw; + hw->back = adapter; + hw->function = function_id; + hw->devid = PCI_SLOT(pdev->devfn); + hw->bus = pdev->bus->number; + hw->func_type = func_type; + + set_bit(NBL_DOWN, adapter->state); + + hw->hw_addr = pci_ioremap_bar(pdev, NBL_X4_MEMORY_BAR); + if (!hw->hw_addr) { + dev_err(dev, "Memory bar ioremap failed\n"); + err = -EIO; + goto ioremap_err; + } + + hw->mailbox_bar_hw_addr = pci_ioremap_bar(pdev, NBL_X4_MAILBOX_BAR); + if (!hw->mailbox_bar_hw_addr) { + dev_err(dev, "Mailbox bar ioremap failed\n"); + err = -EIO; + goto mailbox_ioremap_err; + } + + hw->msix_bar_hw_addr = pci_ioremap_bar(pdev, NBL_X4_MSIX_BAR); + if (!hw->msix_bar_hw_addr) { + dev_err(dev, "MSIX bar ioremap failed\n"); + err = -EIO; + goto msix_ioremap_err; + } + + nbl_adapter_init(adapter, req_ring_num); + + if (is_af(hw)) { + nbl_firmware_init(hw); + + nbl_af_configure_fc_cplh_up_th(hw); + + nbl_af_configure_captured_packets(hw); + + nbl_af_set_mailbox_bdf_for_all_func(hw); + + err = nbl_af_res_mng_init(hw); + if (err) + goto res_mng_init_err; + } + + golden = nbl_check_golden_version(hw); + if (golden) { + dev_info(dev, "The firmware is golden version, please use regular version\n"); + err = -EINVAL; + goto golden_version_err; + } + + err = nbl_setup_mailbox(hw); + if (err) + goto golden_version_err; + + /* NOTICE: AF must be probed successfully first */ + err = nbl_configure_msix_map(hw); + if (err) + goto config_msix_map_err; + + err = nbl_init_interrupt_scheme(adapter); + if (err) + goto init_intr_err; + + nbl_start_service_task(adapter); + + err = nbl_mailbox_request_irq(adapter); + if (err) + goto mailbox_req_irq_err; + + nbl_mailbox_enable_irq(adapter); + + set_bit(NBL_MAILBOX_READY, adapter->state); + + err = nbl_get_vsi_id(hw); + if (err) + goto get_vsi_id_err; + + if (hw->vsi_id < NBL_MAX_PF_FUNC) + hw->eth_port_id = hw->vsi_id; + else + hw->eth_port_id = (hw->vsi_id - NBL_MAX_PF_FUNC) / NBL_MAX_VF_PER_PF; + + err = nbl_register_vf_bar_info(hw); + if (err) + goto get_vsi_id_err; + + err = nbl_configure_notify_addr(hw); + if (err) + goto get_vsi_id_err; + + err = nbl_alloc_q_vectors(adapter); + if (err) + goto alloc_q_vectors_err; + + err = nbl_alloc_rings(adapter); + if (err) + goto alloc_rings_err; + + nbl_map_rings_to_vectors(adapter); + + nbl_datapath_init(hw); + + netdev->netdev_ops = &nbl_netdev_ops; + nbl_set_ethtool_ops(netdev); + + netdev->watchdog_timeo = 5 * HZ; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->max_mtu = NBL_MAX_MTU; + netdev->min_mtu = ETH_MIN_MTU; + + nbl_init_pkt_len_limit(hw); + + nbl_mac_addr_init(adapter, mac_addr); + if (!is_valid_ether_addr(mac_addr)) { + pr_err("MAC address %02X:%02X:%02X:%02X:%02X:%02X is invalid\n", + 
mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], + mac_addr[5]); + } + ether_addr_copy(netdev->dev_addr, mac_addr); + err = nbl_configure_mac_addr(hw, mac_addr); + if (err) + pr_err("Configure mac address into hardware device failed with error %d\n", err); + + err = register_netdev(netdev); + if (err) { + pr_err("Register net device failed\n"); + goto register_netdev_err; + } + netif_carrier_off(netdev); + + err = nbl_activate_af_forward_queue(adapter); + if (err) { + pr_err("Start AF forward queue failed with error %d\n", err); + goto activate_forward_queue_err; + } + + err = nbl_hwmon_init(adapter); + if (err) { + pr_err("Unable to register hwmon device with error %d\n", err); + goto register_hwmon_err; + } + + nbl_debugfs_hw_init(hw); + + nbl_init_pauseparam(hw); + + nbl_init_link_setting(hw); + + nbl_init_hw_stats(hw); + + return 0; + +register_hwmon_err: + nbl_deactivate_af_forward_queue(adapter); +activate_forward_queue_err: + unregister_netdev(netdev); + err = nbl_clear_mac_addr(hw); + if (err) + pr_err("Failed to clear mac address when error occurs\n"); +register_netdev_err: + nbl_free_rings(adapter); +alloc_rings_err: + nbl_free_q_vectors(adapter); +alloc_q_vectors_err: + nbl_clear_notify_addr(hw); +get_vsi_id_err: + clear_bit(NBL_MAILBOX_READY, adapter->state); + cancel_work_sync(&adapter->serv_task2); + + nbl_mailbox_disable_irq(adapter); + nbl_mailbox_free_irq(adapter); +mailbox_req_irq_err: + nbl_stop_service_task(adapter); + nbl_fini_interrupt_scheme(adapter); +init_intr_err: + nbl_destroy_msix_map(hw); +config_msix_map_err: + nbl_teardown_mailbox(hw); +golden_version_err: + if (is_af(hw)) + nbl_af_free_res(hw); +res_mng_init_err: + iounmap(hw->msix_bar_hw_addr); +msix_ioremap_err: + iounmap(hw->mailbox_bar_hw_addr); +mailbox_ioremap_err: + iounmap(hw->hw_addr); +ioremap_err: + free_netdev(netdev); +alloc_netdev_err: + pci_disable_pcie_error_reporting(pdev); + pci_clear_master(pdev); +configure_dma_err: + pci_release_selected_regions(pdev, bar_mask); +request_bar_region_err: + pci_disable_device(pdev); + return err; +} + +static void nbl_wait_other_functions_removed(struct nbl_hw *hw) +{ + struct nbl_af_res_info *af_res; + struct nbl_func_res *func_res; + bool all_removed; + int i; + + af_res = hw->af_res; + all_removed = true; + /* Do not consider AF itself */ + for (i = 1; i < NBL_MAX_FUNC; i++) { + func_res = af_res->res_record[i]; + if (func_res->txrx_queues) { + all_removed = false; + pr_warn("Please remove all other PF/VFs before remove AF\n"); + break; + } + } + + while (!all_removed) { + cpu_relax(); + ssleep(1); + + all_removed = true; + for (i = 1; i < NBL_MAX_FUNC; i++) { + func_res = af_res->res_record[i]; + if (func_res->txrx_queues) { + all_removed = false; + break; + } + } + } +} + +static void nbl_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + int bar_mask; + int err; + + if (is_af(hw)) + nbl_wait_other_functions_removed(hw); + + /* NOTICE: cancel work to prevent update statistics + * work from referencing tx/rx ring pointer + * after it is freed by nbl_remove already. 
+ */ + cancel_work_sync(&adapter->serv_task2); + + bar_mask = BIT(NBL_X4_MEMORY_BAR) | BIT(NBL_X4_MAILBOX_BAR); + + nbl_debugfs_hw_exit(hw); + + nbl_hwmon_fini(adapter); + + nbl_deactivate_af_forward_queue(adapter); + + unregister_netdev(netdev); + + err = nbl_clear_mac_addr(hw); + if (err) + pr_err("Failed to clear mac address when remove module\n"); + + nbl_free_rings(adapter); + + nbl_free_q_vectors(adapter); + + nbl_clear_notify_addr(hw); + + clear_bit(NBL_MAILBOX_READY, adapter->state); + cancel_work_sync(&adapter->serv_task2); + + /* NOTICE: AF must be removed last */ + nbl_mailbox_disable_irq(adapter); + nbl_mailbox_free_irq(adapter); + + nbl_stop_service_task(adapter); + + nbl_fini_interrupt_scheme(adapter); + + nbl_destroy_msix_map(hw); + + nbl_teardown_mailbox(hw); + + if (is_af(hw)) { + nbl_af_clear_captured_packets_conf(hw); + nbl_af_free_res(hw); + } + + iounmap(hw->msix_bar_hw_addr); + iounmap(hw->mailbox_bar_hw_addr); + iounmap(hw->hw_addr); + + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + pci_clear_master(pdev); + pci_release_selected_regions(pdev, bar_mask); + pci_disable_device(pdev); +} + +static struct pci_driver nbl_driver = { + .name = NBL_X4_DRIVER_NAME, + .id_table = nbl_id_table, + .probe = nbl_probe, + .remove = nbl_remove, + .sriov_configure = nbl_sriov_configure, +}; + +static int __init nbl_module_init(void) +{ + int status; + + nbl_debugfs_init(); + + status = -ENOMEM; + nbl_wq1 = create_singlethread_workqueue(NBL_X4_WQ1); + if (!nbl_wq1) { + pr_err("Failed to create workqueue for %s\n", NBL_X4_WQ1); + return status; + } + + nbl_wq2 = create_singlethread_workqueue(NBL_X4_WQ2); + if (!nbl_wq2) { + pr_err("Failed to create workqueue for %s\n", NBL_X4_WQ2); + goto create_wq2_err; + } + + status = pci_register_driver(&nbl_driver); + if (status) { + pr_err("Failed to register PCI driver, err = %d\n", status); + goto register_driver_err; + } + + return 0; + +register_driver_err: + destroy_workqueue(nbl_wq2); +create_wq2_err: + destroy_workqueue(nbl_wq1); + + return status; +} + +static void __exit nbl_module_exit(void) +{ + pci_unregister_driver(&nbl_driver); + destroy_workqueue(nbl_wq2); + destroy_workqueue(nbl_wq1); + nbl_debugfs_exit(); + pr_info("nbl module unloaded\n"); +} + +module_init(nbl_module_init); +module_exit(nbl_module_exit); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(NBL_X4_DRIVER_VERSION); diff --git a/drivers/net/ethernet/nebula-matrix/m1600/sriov.c b/drivers/net/ethernet/nebula-matrix/m1600/sriov.c new file mode 100644 index 0000000000000000000000000000000000000000..0a8f8f28e7a599c5c765059e4ad8f5f7db68acee --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/sriov.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. 
+ * Author: Monte Song + */ + +#include +#include + +#include +#include +#include + +#include "common.h" +#include "mailbox.h" +#include "sriov.h" + +#ifdef CONFIG_PCI_IOV +void nbl_af_enter_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + u8 forward_ring_index = af_res->forward_ring_index; + struct nbl_ingress_eth_port_fwd port_fwd_config; + struct nbl_src_vsi_port src_vsi_port_config; + + rd32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id), + (u32 *)&port_fwd_config, sizeof(port_fwd_config)); + port_fwd_config.forward_queue_id_en = 1; + port_fwd_config.forward_queue_id = forward_ring_index; + wr32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id), + (u32 *)&port_fwd_config, sizeof(port_fwd_config)); + + rd32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id), + (u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config)); + src_vsi_port_config.mac_lut_en = 1; + src_vsi_port_config.forward_queue_id_en = 1; + src_vsi_port_config.forward_queue_id = forward_ring_index; + wr32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id), + (u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config)); +} + +void nbl_af_leave_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id) +{ + struct nbl_ingress_eth_port_fwd port_fwd_config; + struct nbl_src_vsi_port src_vsi_port_config; + + rd32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id), + (u32 *)&port_fwd_config, sizeof(port_fwd_config)); + port_fwd_config.forward_queue_id_en = 0; + port_fwd_config.forward_queue_id = 0; + wr32_for_each(hw, NBL_PRO_INGRESS_ETH_PORT_FWD_REG_ARR(eth_port_id), + (u32 *)&port_fwd_config, sizeof(port_fwd_config)); + + rd32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id), + (u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config)); + src_vsi_port_config.mac_lut_en = 0; + src_vsi_port_config.forward_queue_id_en = 0; + src_vsi_port_config.forward_queue_id = 0; + wr32_for_each(hw, NBL_PRO_SRC_VSI_PORT_REG_ARR(vsi_id), + (u32 *)&src_vsi_port_config, sizeof(src_vsi_port_config)); +} + +static void nbl_enter_forward_ring_mode(struct nbl_hw *hw) +{ + u8 eth_port_id; + u8 vsi_id; + + eth_port_id = hw->eth_port_id; + vsi_id = hw->vsi_id; + if (is_af(hw)) + nbl_af_enter_forward_ring_mode(hw, eth_port_id, vsi_id); + else + nbl_mailbox_req_enter_forward_ring_mode(hw, eth_port_id, vsi_id); +} + +static void nbl_leave_forward_ring_mode(struct nbl_hw *hw) +{ + u8 eth_port_id; + u8 vsi_id; + + eth_port_id = hw->eth_port_id; + vsi_id = hw->vsi_id; + if (is_af(hw)) + nbl_af_leave_forward_ring_mode(hw, eth_port_id, vsi_id); + else + nbl_mailbox_req_leave_forward_ring_mode(hw, eth_port_id, vsi_id); +} +#endif + +static int nbl_sriov_disable(struct pci_dev *pdev) +{ +#ifdef CONFIG_PCI_IOV + struct net_device *netdev = pci_get_drvdata(pdev); + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + + if (pci_vfs_assigned(pdev)) { + pr_warn("Unloading driver while VFs are assigned\n"); + return -EPERM; + } + + nbl_leave_forward_ring_mode(hw); + + pci_disable_sriov(pdev); +#endif + return 0; +} + +static int nbl_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct net_device *netdev = pci_get_drvdata(pdev); + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_hw *hw = &adapter->hw; + int existing_vfs = pci_num_vf(pdev); + int err; + + if (existing_vfs) { + pr_err("VFs is created already\n"); + return -EINVAL; + } + + nbl_enter_forward_ring_mode(hw); + + err = 
pci_enable_sriov(pdev, num_vfs); + if (err) { + pr_warn("Failed to enable SR-IOV with error %d\n", err); + return err; + } + + return num_vfs; +#else + return 0; +#endif +} + +int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs == 0) + return nbl_sriov_disable(pdev); + else + return nbl_sriov_enable(pdev, num_vfs); +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/sriov.h b/drivers/net/ethernet/nebula-matrix/m1600/sriov.h new file mode 100644 index 0000000000000000000000000000000000000000..447aba660f769576e34b0f7561f1674be507a88a --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/sriov.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#ifndef _NBL_SRIOV_H_ +#define _NBL_SRIOV_H_ + +#ifdef CONFIG_PCI_IOV +void nbl_af_enter_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +void nbl_af_leave_forward_ring_mode(struct nbl_hw *hw, u8 eth_port_id, u8 vsi_id); +#endif + +int nbl_sriov_configure(struct pci_dev *pdev, int num_vfs); + +#endif diff --git a/drivers/net/ethernet/nebula-matrix/m1600/txrx.c b/drivers/net/ethernet/nebula-matrix/m1600/txrx.c new file mode 100644 index 0000000000000000000000000000000000000000..cdc6bbc6f6d51fbc98b9389f979845b1bfcb13de --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/txrx.c @@ -0,0 +1,3103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "interrupt.h" +#include "mailbox.h" +#include "txrx.h" + +static int nbl_alloc_q_vector(struct nbl_adapter *adapter, u16 q_vector_id) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_q_vector *q_vector; + + q_vector = devm_kzalloc(dev, sizeof(struct nbl_q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->adapter = adapter; + q_vector->q_vector_id = q_vector_id; + q_vector->global_vector_id = adapter->hw.vsi_id * + adapter->num_q_vectors + q_vector_id; + + netif_napi_add(adapter->netdev, &q_vector->napi, nbl_napi_poll, NAPI_POLL_WEIGHT); + + adapter->q_vectors[q_vector_id] = q_vector; + + return 0; +} + +static void nbl_free_q_vector(struct nbl_adapter *adapter, u16 q_vector_id) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_q_vector *q_vector; + + q_vector = adapter->q_vectors[q_vector_id]; + if (!q_vector) { + pr_warn("Try to free queue vector %u which is not allocated", q_vector_id); + return; + } + + netif_napi_del(&q_vector->napi); + + devm_kfree(dev, q_vector); + adapter->q_vectors[q_vector_id] = NULL; +} + +int nbl_alloc_q_vectors(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_hw *hw = &adapter->hw; + u16 q_vector_num; + u16 q_vector_id; + int err; + + q_vector_num = adapter->num_q_vectors; + /* AF has an additional forward queue */ + q_vector_num += is_af(hw) ? 
1 : 0; + adapter->q_vectors = devm_kcalloc(dev, q_vector_num, sizeof(*adapter->q_vectors), + GFP_KERNEL); + if (!adapter->q_vectors) + return -ENOMEM; + + for (q_vector_id = 0; q_vector_id < q_vector_num; q_vector_id++) { + err = nbl_alloc_q_vector(adapter, q_vector_id); + if (err) { + pr_err("Failed to allocate memory for queue vector %d\n", q_vector_id); + goto err_out; + } + } + + return 0; + +err_out: + while (q_vector_id--) + nbl_free_q_vector(adapter, q_vector_id); + devm_kfree(dev, adapter->q_vectors); + adapter->num_q_vectors = 0; + return err; +} + +void nbl_free_q_vectors(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct device *dev = nbl_adapter_to_dev(adapter); + u16 q_vector_num; + u16 q_vector_id; + + q_vector_num = adapter->num_q_vectors; + /* AF has an additional forward queue */ + q_vector_num += is_af(hw) ? 1 : 0; + for (q_vector_id = 0; q_vector_id < q_vector_num; q_vector_id++) + nbl_free_q_vector(adapter, q_vector_id); + devm_kfree(dev, adapter->q_vectors); + adapter->q_vectors = NULL; + adapter->num_q_vectors = 0; +} + +static int nbl_alloc_tx_rings(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *ring; + struct device *dev; + u8 __iomem *notify_addr; + u8 ring_count; + u8 ring_index; + + ring_count = adapter->num_txq; + /* AF has an additional forward queue */ + ring_count += is_af(hw) ? 1 : 0; + dev = nbl_adapter_to_dev(adapter); + + if (adapter->tx_rings) { + pr_err("Try to allocate tx_rings which already exists\n"); + return -EINVAL; + } + + adapter->tx_rings = devm_kcalloc(dev, ring_count, sizeof(*adapter->tx_rings), + GFP_KERNEL); + if (!adapter->tx_rings) + return -ENOMEM; + + if (is_af(hw)) + notify_addr = hw->hw_addr + NBL_PCOMPLETER_AF_NOTIFY_REG; + else + notify_addr = hw->hw_addr; + + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = adapter->tx_rings[ring_index]; + ring = devm_kzalloc(dev, sizeof(struct nbl_ring), GFP_KERNEL); + if (!ring) { + pr_err("Allocate the %xth tx ring failed\n", ring_index); + goto alloc_tx_ring_failed; + } + + ring->queue_index = ring_index; + ring->dev = dev; + ring->netdev = adapter->netdev; + ring->desc_num = adapter->tx_desc_num; + ring->local_qid = ring_index * 2 + 1; + ring->notify_addr = notify_addr; + WRITE_ONCE(adapter->tx_rings[ring_index], ring); + } + + return 0; + +alloc_tx_ring_failed: + while (ring_index--) + devm_kfree(dev, adapter->tx_rings[ring_index]); + devm_kfree(dev, adapter->tx_rings); + adapter->tx_rings = NULL; + return -ENOMEM; +} + +static void nbl_free_tx_rings(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *ring; + u8 ring_count; + u8 ring_index; + + ring_count = adapter->num_txq; + /* AF has an additional forward queue */ + ring_count += is_af(hw) ? 
1 : 0; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = adapter->tx_rings[ring_index]; + devm_kfree(dev, ring); + } + devm_kfree(dev, adapter->tx_rings); + adapter->tx_rings = NULL; +} + +static int nbl_alloc_rx_rings(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *ring; + struct device *dev; + u8 __iomem *notify_addr; + u8 ring_count; + u8 ring_index; +#ifdef RHEL_RELEASE +#if (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 7) < RHEL_RELEASE_CODE) + struct dma_attrs attrs = { 0 }; + + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif +#endif + + ring_count = adapter->num_rxq; + /* AF has an additional forward queue */ + ring_count += is_af(hw) ? 1 : 0; + dev = nbl_adapter_to_dev(adapter); + + if (adapter->rx_rings) { + pr_err("Try to allocate rx_rings which already exists\n"); + return -EINVAL; + } + + adapter->rx_rings = devm_kcalloc(dev, ring_count, sizeof(*adapter->rx_rings), + GFP_KERNEL); + if (!adapter->rx_rings) + return -ENOMEM; + + if (is_af(hw)) + notify_addr = hw->hw_addr + NBL_PCOMPLETER_AF_NOTIFY_REG; + else + notify_addr = hw->hw_addr; + + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = adapter->rx_rings[ring_index]; + ring = devm_kzalloc(dev, sizeof(struct nbl_ring), GFP_KERNEL); + if (!ring) { + pr_err("Allocate the %xth rx ring failed\n", ring_index); + goto alloc_rx_ring_failed; + } + + ring->queue_index = ring_index; + ring->dev = dev; + ring->netdev = adapter->netdev; + ring->desc_num = adapter->rx_desc_num; + ring->local_qid = 2 * ring_index; + ring->notify_addr = notify_addr; +#ifdef RHEL_RELEASE +#if (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 7) < RHEL_RELEASE_CODE) + memcpy(&ring->rx_buf_attrs, &attrs, sizeof(attrs)); +#endif +#endif + ring->buf_len = NBL_RX_BUF_LEN; + WRITE_ONCE(adapter->rx_rings[ring_index], ring); + } + + return 0; + +alloc_rx_ring_failed: + while (ring_index--) + devm_kfree(dev, adapter->rx_rings[ring_index]); + devm_kfree(dev, adapter->rx_rings); + adapter->rx_rings = NULL; + return -ENOMEM; +} + +static void nbl_free_rx_rings(struct nbl_adapter *adapter) +{ + struct device *dev = nbl_adapter_to_dev(adapter); + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *ring; + u8 ring_count; + u8 ring_index; + + ring_count = adapter->num_rxq; + /* AF has an additional forward queue */ + ring_count += is_af(hw) ? 
1 : 0; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + ring = adapter->rx_rings[ring_index]; + devm_kfree(dev, ring); + } + devm_kfree(dev, adapter->rx_rings); + adapter->rx_rings = NULL; +} + +int nbl_alloc_rings(struct nbl_adapter *adapter) +{ + int err = 0; + + err = nbl_alloc_tx_rings(adapter); + if (err) + return err; + + err = nbl_alloc_rx_rings(adapter); + if (err) + goto alloc_rx_rings_err; + + return 0; + +alloc_rx_rings_err: + nbl_free_tx_rings(adapter); + return err; +} + +void nbl_free_rings(struct nbl_adapter *adapter) +{ + nbl_free_tx_rings(adapter); + nbl_free_rx_rings(adapter); +} + +void nbl_map_rings_to_vectors(struct nbl_adapter *adapter) +{ + u16 tx_rings_rem; + u16 rx_rings_rem; + u16 q_vector_num; + u16 q_vector_id; + + tx_rings_rem = adapter->num_txq; + rx_rings_rem = adapter->num_rxq; + q_vector_num = adapter->num_q_vectors; + + for (q_vector_id = 0; q_vector_id < q_vector_num; q_vector_id++) { + struct nbl_q_vector *q_vector = adapter->q_vectors[q_vector_id]; + u16 tx_rings_per_vector; + u16 rx_rings_per_vector; + u16 ring_base; + u32 ring_end; + u16 ring_id; + struct nbl_ring *ring; + + tx_rings_per_vector = DIV_ROUND_UP(tx_rings_rem, q_vector_num - q_vector_id); + q_vector->num_ring_tx = tx_rings_per_vector; + q_vector->tx_ring = NULL; + ring_base = adapter->num_txq - tx_rings_rem; + ring_end = ring_base + tx_rings_per_vector; + + for (ring_id = ring_base; ring_id < ring_end; ring_id++) { + ring = adapter->tx_rings[ring_id]; + ring->next = q_vector->tx_ring; + ring->q_vector = q_vector; + q_vector->tx_ring = ring; + } + tx_rings_rem = tx_rings_rem - tx_rings_per_vector; + + rx_rings_per_vector = DIV_ROUND_UP(rx_rings_rem, q_vector_num - q_vector_id); + q_vector->num_ring_rx = rx_rings_per_vector; + q_vector->rx_ring = NULL; + ring_base = adapter->num_rxq - rx_rings_rem; + ring_end = ring_base + rx_rings_per_vector; + + for (ring_id = ring_base; ring_id < ring_end; ring_id++) { + ring = adapter->rx_rings[ring_id]; + ring->next = q_vector->rx_ring; + ring->q_vector = q_vector; + q_vector->rx_ring = ring; + } + rx_rings_rem = rx_rings_rem - rx_rings_per_vector; + } +} + +static int nbl_setup_tx_ring(struct nbl_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + + if (tx_ring->tx_bufs) { + pr_err("Try to setup a TX ring with buffer management array already allocated\n"); + return -EINVAL; + } + + tx_ring->tx_bufs = devm_kcalloc(dev, tx_ring->desc_num, sizeof(*tx_ring->tx_bufs), + GFP_KERNEL); + if (!tx_ring->tx_bufs) + return -ENOMEM; + + tx_ring->size = ALIGN(tx_ring->desc_num * sizeof(struct nbl_tx_desc), PAGE_SIZE); + tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL | __GFP_ZERO); + + if (!tx_ring->desc) { + pr_err("Allocate %u bytes descriptor DMA memory for TX queue %u failed\n", + tx_ring->size, tx_ring->queue_index); + goto alloc_dma_err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->next_to_alloc = 0; + tx_ring->tail_ptr = 0; + + return 0; + +alloc_dma_err: + devm_kfree(dev, tx_ring->tx_bufs); + tx_ring->tx_bufs = NULL; + tx_ring->size = 0; + return -ENOMEM; +} + +static void nbl_teardown_tx_ring(struct nbl_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + + devm_kfree(dev, tx_ring->tx_bufs); + tx_ring->tx_bufs = NULL; + + dmam_free_coherent(dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + tx_ring->dma = (dma_addr_t)NULL; + tx_ring->size = 0; +} + +static int nbl_setup_tx_rings(struct nbl_adapter *adapter) +{ + struct nbl_ring *tx_ring; + 
u8 ring_count; + u8 ring_index; + int err; + + ring_count = adapter->num_txq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + tx_ring = adapter->tx_rings[ring_index]; + WARN_ON(!tx_ring); + + err = nbl_setup_tx_ring(tx_ring); + if (err) + goto err; + } + + return 0; + +err: + while (ring_index--) { + tx_ring = adapter->tx_rings[ring_index]; + nbl_teardown_tx_ring(tx_ring); + } + return err; +} + +static void nbl_teardown_tx_rings(struct nbl_adapter *adapter) +{ + struct nbl_ring *tx_ring; + u8 ring_count; + u8 ring_index; + + ring_count = adapter->num_txq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + tx_ring = adapter->tx_rings[ring_index]; + WARN_ON(!tx_ring); + + nbl_teardown_tx_ring(tx_ring); + } +} + +static int nbl_setup_rx_ring(struct nbl_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + + if (rx_ring->rx_bufs) { + pr_err("Try to setup a TX ring with buffer management array already allocated\n"); + return -EINVAL; + } + + rx_ring->rx_bufs = devm_kcalloc(dev, rx_ring->desc_num, sizeof(*rx_ring->rx_bufs), + GFP_KERNEL); + if (!rx_ring->rx_bufs) + return -ENOMEM; + + rx_ring->size = ALIGN(rx_ring->desc_num * sizeof(struct nbl_rx_desc), PAGE_SIZE); + rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL | __GFP_ZERO); + + if (!rx_ring->desc) { + pr_err("Allocate %u bytes descriptor DMA memory for TX queue %u failed\n", + rx_ring->size, rx_ring->queue_index); + goto alloc_dma_err; + } + + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_alloc = 0; + rx_ring->tail_ptr = 0; + + return 0; + +alloc_dma_err: + devm_kfree(dev, rx_ring->rx_bufs); + rx_ring->rx_bufs = NULL; + rx_ring->size = 0; + return -ENOMEM; +} + +static void nbl_teardown_rx_ring(struct nbl_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + + devm_kfree(dev, rx_ring->rx_bufs); + rx_ring->rx_bufs = NULL; + + dmam_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + rx_ring->dma = (dma_addr_t)NULL; + rx_ring->size = 0; +} + +static int nbl_setup_rx_rings(struct nbl_adapter *adapter) +{ + struct nbl_ring *rx_ring; + u8 ring_count; + u8 ring_index; + int err; + + ring_count = adapter->num_rxq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + rx_ring = adapter->rx_rings[ring_index]; + WARN_ON(!rx_ring); + + err = nbl_setup_rx_ring(rx_ring); + if (err) + goto err; + } + + return 0; + +err: + while (ring_index--) { + rx_ring = adapter->rx_rings[ring_index]; + nbl_teardown_rx_ring(rx_ring); + } + return err; +} + +static void nbl_teardown_rx_rings(struct nbl_adapter *adapter) +{ + struct nbl_ring *rx_ring; + u8 ring_count; + u8 ring_index; + + ring_count = adapter->num_rxq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + rx_ring = adapter->rx_rings[ring_index]; + WARN_ON(!rx_ring); + + nbl_teardown_rx_ring(rx_ring); + } +} + +int nbl_setup_rings(struct nbl_adapter *adapter) +{ + int err; + + err = nbl_setup_tx_rings(adapter); + if (err) + return err; + + err = nbl_setup_rx_rings(adapter); + if (err) + goto setup_rx_rings_err; + + return 0; + +setup_rx_rings_err: + nbl_teardown_tx_rings(adapter); + return err; +} + +void nbl_teardown_rings(struct nbl_adapter *adapter) +{ + nbl_teardown_tx_rings(adapter); + nbl_teardown_rx_rings(adapter); +} + +static int nbl_wait_tx_queue_idle(struct nbl_hw *hw, u8 global_queue_id) +{ + u8 index; + u8 offset; + u32 bitmap; + u16 i; + + index = global_queue_id / BITS_PER_DWORD; + offset = global_queue_id % BITS_PER_DWORD; + i 
= 0; + + bitmap = rd32(hw, NBL_DSCH_NOTIFY_BITMAP_ARR(index)); + bitmap |= rd32(hw, NBL_DSCH_FLY_BITMAP_ARR(index)); + while (bitmap & (1 << offset)) { + i++; + if (i == 2000) { + pr_warn("Wait too long for tx queue %u to be idle\n", global_queue_id); + return -EBUSY; + } + + udelay(5); + + bitmap = rd32(hw, NBL_DSCH_NOTIFY_BITMAP_ARR(index)); + bitmap |= rd32(hw, NBL_DSCH_FLY_BITMAP_ARR(index)); + } + + return 0; +} + +void nbl_af_hw_config_tx_ring(struct nbl_hw *hw, u16 func_id, dma_addr_t dma, + u16 desc_num, u8 vsi_id, u8 local_queue_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct tx_queue_info info = { 0 }; + u8 global_queue_id; + + if (!func_res || local_queue_id >= func_res->num_txrx_queues) + return; + + global_queue_id = func_res->txrx_queues[local_queue_id]; + + info.base_addr_l = (u32)(dma & 0xFFFFFFFF); + info.base_addr_h = (u32)(dma >> 32); + info.log2_size = ilog2(desc_num); + /* use the same vsi id for src vsi and dest vsi */ + info.src_vsi_idx = vsi_id; + info.priority = 7; + info.enable = 0; + + wr32_for_each(hw, NBL_DVN_QUEUE_INFO_ARR(global_queue_id), + (u32 *)&info, sizeof(info) - 4); +} + +static void nbl_hw_config_tx_ring(struct nbl_ring *tx_ring) +{ + struct nbl_adapter *adapter = netdev_priv(tx_ring->netdev); + struct nbl_hw *hw = &adapter->hw; + dma_addr_t dma = tx_ring->dma; + u16 desc_num = tx_ring->desc_num; + u8 vsi_id = hw->vsi_id; + u8 local_queue_id = tx_ring->queue_index; + + if (is_af(hw)) + nbl_af_hw_config_tx_ring(hw, 0, dma, desc_num, vsi_id, local_queue_id); + else + nbl_mailbox_req_cfg_tx_ring(hw, dma, desc_num, vsi_id, local_queue_id); +} + +static int nbl_wait_rx_queue_idle(struct nbl_hw *hw, u8 global_queue_id) +{ + u32 value; + u8 offset; + u8 rem; + u16 i; + + i = 0; + + offset = global_queue_id / BITS_PER_DWORD; + rem = global_queue_id % BITS_PER_DWORD; + value = rd32(hw, NBL_UVN_QUEUE_STATE_REG_ARR(offset)); + while (value & (1 << rem)) { + i++; + if (i == 2000) { + pr_warn("Wait too long for rx queue %u to be idle\n", global_queue_id); + return -EBUSY; + } + + udelay(5); + value = rd32(hw, NBL_UVN_QUEUE_STATE_REG_ARR(offset)); + } + + return 0; +} + +static int nbl_wait_rx_queue_reset_usable(struct nbl_hw *hw) +{ + struct nbl_rx_queue_reset queue_reset; + u16 i; + + i = 0; + rd32_for_each(hw, NBL_UVN_QUEUE_RESET_REG, (u32 *)&queue_reset, + sizeof(queue_reset)); + while (unlikely(queue_reset.valid)) { + i++; + if (i == 2000) { + pr_warn("Wait too long for rx queue reset to be usable\n"); + return -EBUSY; + } + + udelay(5); + rd32_for_each(hw, NBL_UVN_QUEUE_RESET_REG, (u32 *)&queue_reset, + sizeof(queue_reset)); + } + + return 0; +} + +void nbl_af_hw_config_rx_ring(struct nbl_hw *hw, u16 func_id, dma_addr_t dma, + u16 desc_num, u32 buf_len, u8 local_queue_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct rx_queue_info info = { 0 }; + u8 global_queue_id; + + if (!func_res || local_queue_id >= func_res->num_txrx_queues) + return; + + global_queue_id = func_res->txrx_queues[local_queue_id]; + + info.base_addr_l = (u32)(dma & 0xFFFFFFFF); + info.base_addr_h = (u32)(dma >> 32); + info.log2_size = ilog2(desc_num); + info.buf_length_pow = ilog2(buf_len / 2048); + info.enable = 0; + + /* There is no need to write whole rx_queue_info structure + * for head_ptr and tail_ptr are read only. 
+ */ + wr32_for_each(hw, NBL_UVN_QUEUE_INFO_ARR(global_queue_id), + (u32 *)&info, sizeof(info) - 4); +} + +static void nbl_hw_config_rx_ring(struct nbl_ring *rx_ring) +{ + struct nbl_adapter *adapter = netdev_priv(rx_ring->netdev); + struct nbl_hw *hw = &adapter->hw; + dma_addr_t dma = rx_ring->dma; + u16 desc_num = rx_ring->desc_num; + u32 buf_len = rx_ring->buf_len; + u8 local_queue_id = rx_ring->queue_index; + + if (is_af(hw)) + nbl_af_hw_config_rx_ring(hw, 0, dma, desc_num, buf_len, local_queue_id); + else + nbl_mailbox_req_cfg_rx_ring(hw, dma, desc_num, buf_len, local_queue_id); +} + +static void nbl_hw_config_tx_rings(struct nbl_adapter *adapter) +{ + struct nbl_ring *tx_ring; + u8 ring_count; + u8 ring_index; + + ring_count = adapter->num_txq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + tx_ring = adapter->tx_rings[ring_index]; + + nbl_hw_config_tx_ring(tx_ring); + } +} + +static void nbl_hw_config_rx_rings(struct nbl_adapter *adapter) +{ + struct nbl_ring *rx_ring; + u8 ring_count; + u8 ring_index; + + ring_count = adapter->num_rxq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + rx_ring = adapter->rx_rings[ring_index]; + + nbl_hw_config_rx_ring(rx_ring); + } +} + +void nbl_hw_config_rings(struct nbl_adapter *adapter) +{ + nbl_hw_config_tx_rings(adapter); + nbl_hw_config_rx_rings(adapter); +} + +static bool nbl_alloc_mapped_page(struct nbl_ring *rx_ring, + struct nbl_rx_buf *rx_buf) +{ + struct page *page = rx_buf->page; + dma_addr_t dma; + + if (likely(page)) + return true; + + page = dev_alloc_pages(nbl_rx_page_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + +#ifdef RHEL_RELEASE +#if (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 7) < RHEL_RELEASE_CODE) + dma = dma_map_page_attrs(rx_ring->dev, page, 0, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, &rx_ring->rx_buf_attrs); +#else + dma = dma_map_page_attrs(rx_ring->dev, page, 0, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); +#endif +#else + dma = dma_map_page_attrs(rx_ring->dev, page, 0, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); +#endif + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, nbl_rx_page_order(rx_ring)); + rx_ring->rx_stats.rx_dma_err++; + return false; + } + + rx_buf->dma = dma; + rx_buf->page = page; + rx_buf->page_offset = 0; + + return true; +} + +static bool nbl_alloc_rx_bufs(struct nbl_ring *rx_ring, u16 count) +{ + u32 buf_len; + u16 next_to_use; + u16 head; + struct nbl_rx_desc *rx_desc; + struct nbl_rx_buf *rx_buf; + + if (unlikely(!count)) { + pr_warn("Try to allocate zero buffer for RX ring %u\n", + rx_ring->queue_index); + return true; + } + + buf_len = rx_ring->buf_len; + next_to_use = rx_ring->next_to_use; + + head = next_to_use; + rx_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_buf = NBL_RX_BUF(rx_ring, next_to_use); + do { + if (!nbl_alloc_mapped_page(rx_ring, rx_buf)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, rx_buf->page_offset, + buf_len, DMA_FROM_DEVICE); + + rx_desc->buffer_addr = cpu_to_le64(rx_buf->dma + rx_buf->page_offset); + rx_desc->dd = 0; + + rx_desc++; + rx_buf++; + next_to_use++; + rx_ring->tail_ptr++; + if (next_to_use == rx_ring->desc_num) { + next_to_use = 0; + rx_desc = NBL_RX_DESC(rx_ring, next_to_use); + rx_buf = NBL_RX_BUF(rx_ring, next_to_use); + } + + count--; + } while (count); + + if (next_to_use != head) { + /* Make 
sure descriptor has been written */ + wmb(); + rx_ring->next_to_use = next_to_use; + rx_ring->next_to_alloc = next_to_use; + + nbl_update_tail_ptr(rx_ring->notify_addr, rx_ring->local_qid, rx_ring->tail_ptr); + } + + return !count; +} + +void nbl_alloc_all_rx_bufs(struct nbl_adapter *adapter) +{ + struct nbl_ring *rx_ring; + u16 ring_count; + u16 ring_index; + u16 desc_count; + + ring_count = adapter->num_rxq; + for (ring_index = 0; ring_index < adapter->num_rxq; ring_index++) { + rx_ring = adapter->rx_rings[ring_index]; + desc_count = nbl_unused_desc_count(rx_ring); + if (unlikely(!nbl_alloc_rx_bufs(rx_ring, desc_count))) { + pr_warn("Allocate RX bufs for ring %u failed with desc count %u\n", + ring_index, desc_count); + } + } +} + +void nbl_af_configure_queue_map(struct nbl_hw *hw, u16 func_id, u8 local_queue_id, + bool rx, u16 local_vector_id, bool enable, + bool msix_enable) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + u16 global_queue_id; + u16 txrx_queue_id; + u16 global_vector_id; + struct nbl_queue_map queue_map; + u8 bus; + u8 devid; + u8 function; + + if (!func_res) + return; + + if (msix_enable) { + WARN_ON(local_vector_id >= func_res->num_interrupts); + global_vector_id = func_res->interrupts[local_vector_id]; + } + + WARN_ON(local_queue_id >= func_res->num_txrx_queues); + global_queue_id = func_res->txrx_queues[local_queue_id]; + if (rx) + txrx_queue_id = 2 * global_queue_id; + else + txrx_queue_id = 2 * global_queue_id + 1; + + nbl_af_compute_bdf(hw, func_id, &bus, &devid, &function); + + memset(&queue_map, 0, sizeof(queue_map)); + queue_map.function = function; + queue_map.devid = devid; + queue_map.bus = bus; + + if (enable) { + if (msix_enable) { + queue_map.msix_idx = global_vector_id; + queue_map.msix_idx_valid = 1; + } + queue_map.valid = 1; + } else { + queue_map.msix_idx_valid = 0; + queue_map.valid = 0; + } + + wr32_for_each(hw, NBL_PADPT_QUEUE_MAP_REG_ARR(txrx_queue_id), + (u32 *)&queue_map, sizeof(queue_map)); +} + +static void nbl_configure_queue_map(struct nbl_hw *hw, u8 local_queue_id, bool rx, + u16 local_vector_id, bool enable) +{ + if (is_af(hw)) + nbl_af_configure_queue_map(hw, 0, local_queue_id, rx, local_vector_id, + enable, true); + else + nbl_mailbox_req_cfg_queue_map(hw, local_queue_id, rx, local_vector_id, + enable, true); +} + +static void nbl_af_control_tx_queue(struct nbl_hw *hw, u8 global_queue_id, bool enable) +{ + struct tx_queue_info info; + + /* No need to read head and tail pointer */ + rd32_for_each(hw, NBL_DVN_QUEUE_INFO_ARR(global_queue_id), + (u32 *)&info, sizeof(info) - 4); + if (enable) + info.enable = 1; + else + info.enable = 0; + wr32_for_each(hw, NBL_DVN_QUEUE_INFO_ARR(global_queue_id), + (u32 *)&info, sizeof(info) - 4); +} + +static void nbl_af_control_rx_queue(struct nbl_hw *hw, u8 global_queue_id, bool enable) +{ + struct rx_queue_info info; + + /* No need to read head and tail pointer */ + rd32_for_each(hw, NBL_UVN_QUEUE_INFO_ARR(global_queue_id), + (u32 *)&info, sizeof(info) - 4); + if (enable) + info.enable = 1; + else + info.enable = 0; + wr32_for_each(hw, NBL_UVN_QUEUE_INFO_ARR(global_queue_id), + (u32 *)&info, sizeof(info) - 4); +} + +void nbl_af_control_queue(struct nbl_hw *hw, u16 func_id, u8 local_queue_id, bool rx, bool enable) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + u8 global_queue_id; + + WARN_ON(!func_res); + WARN_ON(local_queue_id >= func_res->num_txrx_queues); + 
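+ /* The hardware is addressed by device-global queue ids; translate the caller's per-function queue index through the mapping recorded in the function's resource record at queue allocation time. */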
global_queue_id = func_res->txrx_queues[local_queue_id]; + if (rx) + nbl_af_control_rx_queue(hw, global_queue_id, enable); + else + nbl_af_control_tx_queue(hw, global_queue_id, enable); +} + +static void nbl_control_queue(struct nbl_hw *hw, u8 local_queue_id, bool rx, bool enable) +{ + if (is_af(hw)) + nbl_af_control_queue(hw, 0, local_queue_id, rx, enable); + else + nbl_mailbox_req_control_queue(hw, local_queue_id, rx, enable); +} + +static inline void nbl_enable_tx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + nbl_control_queue(hw, local_queue_id, false, true); +} + +static inline void nbl_disable_tx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + nbl_control_queue(hw, local_queue_id, false, false); +} + +static inline void nbl_enable_rx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + nbl_control_queue(hw, local_queue_id, true, true); +} + +static inline void nbl_disable_rx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + nbl_control_queue(hw, local_queue_id, true, false); +} + +int nbl_af_reset_tx_queue(struct nbl_hw *hw, u16 func_id, u8 local_queue_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_queue_reset queue_reset = { 0 }; + u8 global_queue_id; + int err; + + WARN_ON(!func_res); + WARN_ON(local_queue_id >= func_res->num_txrx_queues); + global_queue_id = func_res->txrx_queues[local_queue_id]; + + err = nbl_wait_tx_queue_idle(hw, global_queue_id); + if (err) + return err; + + queue_reset.queue_rst_id = global_queue_id; + wr32_for_each(hw, NBL_DVN_QUEUE_RESET_REG, (u32 *)&queue_reset, sizeof(queue_reset)); + + /* clear tx queue statistics manually */ + wr32_zero_for_each(hw, NBL_DVN_QUEUE_STAT_REG_ARR(global_queue_id), + sizeof(struct nbl_tx_queue_stat)); + + return 0; +} + +static int nbl_reset_tx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + int err; + + if (is_af(hw)) + err = nbl_af_reset_tx_queue(hw, 0, local_queue_id); + else + err = nbl_mailbox_req_reset_tx_queue(hw, local_queue_id); + + return err; +} + +int nbl_af_reset_rx_queue(struct nbl_hw *hw, u16 func_id, u8 local_queue_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_rx_queue_reset queue_reset = { 0 }; + u8 global_queue_id; + int err; + + WARN_ON(!func_res); + WARN_ON(local_queue_id >= func_res->num_txrx_queues); + global_queue_id = func_res->txrx_queues[local_queue_id]; + + err = nbl_wait_rx_queue_idle(hw, global_queue_id); + if (err) + return err; + + err = nbl_wait_rx_queue_reset_usable(hw); + if (err) + return err; + + queue_reset.queue_rst_id = global_queue_id; + queue_reset.valid = 1; + wr32_for_each(hw, NBL_UVN_QUEUE_RESET_REG, (u32 *)&queue_reset, + sizeof(queue_reset)); + + return 0; +} + +static int nbl_reset_rx_queue(struct nbl_hw *hw, u8 local_queue_id) +{ + int err; + + if (is_af(hw)) + err = nbl_af_reset_rx_queue(hw, 0, local_queue_id); + else + err = nbl_mailbox_req_reset_rx_queue(hw, local_queue_id); + + return err; +} + +int nbl_af_wait_rx_queue_reset_done(struct nbl_hw *hw, u16 func_id, u8 local_queue_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_rx_queue_reset queue_reset = { 0 }; + u8 global_queue_id; + u16 i; + + WARN_ON(!func_res); + WARN_ON(local_queue_id >= func_res->num_txrx_queues); + global_queue_id = func_res->txrx_queues[local_queue_id]; + + i = 0; + rd32_for_each(hw, NBL_UVN_QUEUE_RESET_REG, (u32 *)&queue_reset, + sizeof(queue_reset)); + 
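+ /* Poll the reset register until hardware clears the valid bit for this queue; each retry waits 5us and we give up after 2000 tries (roughly 10ms). */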
while ((queue_reset.queue_rst_id == global_queue_id) && queue_reset.valid) { + i++; + if (i == 2000) { + pr_warn("Wait too long for rx queue %u reset to be done\n", + global_queue_id); + return -ETIMEDOUT; + } + + udelay(5); + rd32_for_each(hw, NBL_UVN_QUEUE_RESET_REG, (u32 *)&queue_reset, + sizeof(queue_reset)); + } + + return 0; +} + +static int nbl_wait_rx_queue_reset_done(struct nbl_hw *hw, u8 local_queue_id) +{ + int err; + + if (is_af(hw)) + err = nbl_af_wait_rx_queue_reset_done(hw, 0, local_queue_id); + else + err = nbl_mailbox_req_wait_rx_queue_reset_done(hw, local_queue_id); + + return err; +} + +void nbl_af_configure_port_map(struct nbl_hw *hw, u16 func_id, u8 eth_port_id, u8 local_queue_id) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_port_map port_map; + u8 global_queue_id; + + if (!func_res || local_queue_id >= func_res->num_txrx_queues) { + pr_alert("Cannot configure port map: invalid function resource or queue id\n"); + return; + } + memset(&port_map, 0, sizeof(port_map)); + port_map.port_id = eth_port_id; + global_queue_id = func_res->txrx_queues[local_queue_id]; + wr32_for_each(hw, NBL_DSCH_PORT_MAP_REG_ARR(global_queue_id), + (u32 *)&port_map, sizeof(port_map)); +} + +static void nbl_configure_port_map(struct nbl_hw *hw, u8 eth_port_id, u8 local_queue_id) +{ + if (is_af(hw)) + nbl_af_configure_port_map(hw, 0, eth_port_id, local_queue_id); + else + nbl_mailbox_req_cfg_port_map(hw, eth_port_id, local_queue_id); +} + +void nbl_af_configure_rss_group_table(struct nbl_hw *hw, u16 func_id, u8 vsi_id, u8 rx_queue_num) +{ + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[func_id]; + struct nbl_rss_entry rss_entry; + int i; + u8 local_id; + + WARN_ON(!func_res); + WARN_ON(rx_queue_num > func_res->num_txrx_queues); + memset(&rss_entry, 0, sizeof(rss_entry)); + for (i = 0; i < RSS_ENTRIES_PER_VSI; i++) { + local_id = i % rx_queue_num; + rss_entry.rx_queue_id = func_res->txrx_queues[local_id]; + wr32_for_each(hw, NBL_PRO_RSS_GROUP_REG_ARR(vsi_id, i), + (u32 *)&rss_entry, sizeof(rss_entry)); + } +} + +static void nbl_configure_rss_group_table(struct nbl_hw *hw, u8 vsi_id, u8 rx_queue_num) +{ + if (is_af(hw)) + nbl_af_configure_rss_group_table(hw, 0, vsi_id, rx_queue_num); + else + nbl_mailbox_req_cfg_rss_group_table(hw, vsi_id, rx_queue_num); +} + +void nbl_start_all_tx_rings(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *tx_ring; + struct nbl_q_vector *q_vector; + u16 local_vector_id; + u8 ring_index; + u8 ring_count; + + ring_count = adapter->num_txq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + tx_ring = adapter->tx_rings[ring_index]; + q_vector = tx_ring->q_vector; + local_vector_id = q_vector->q_vector_id; + nbl_configure_port_map(hw, hw->eth_port_id, ring_index); + nbl_configure_queue_map(hw, ring_index, false, local_vector_id, true); + nbl_enable_tx_queue(hw, ring_index); + } +} + +void nbl_start_all_rx_rings(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *rx_ring; + struct nbl_q_vector *q_vector; + u16 local_vector_id; + u8 ring_index; + u8 ring_count; + + ring_count = adapter->num_rxq; + + nbl_configure_rss_group_table(hw, hw->vsi_id, ring_count); + + for (ring_index = 0; ring_index < ring_count; ring_index++) { + rx_ring = adapter->rx_rings[ring_index]; + q_vector = rx_ring->q_vector; + local_vector_id = q_vector->q_vector_id; + nbl_configure_queue_map(hw, 
ring_index, true, local_vector_id, true); + nbl_enable_rx_queue(hw, ring_index); + } +} + +void nbl_stop_all_tx_rings(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 ring_index; + u8 ring_count; + int err; + + ring_count = adapter->num_txq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + nbl_disable_tx_queue(hw, ring_index); + usleep_range(3000, 6000); + nbl_configure_queue_map(hw, ring_index, false, 0, false); + err = nbl_reset_tx_queue(hw, ring_index); + if (unlikely(err)) + pr_err("Reset tx queue %hhu failed with error %d\n", ring_index, err); + usleep_range(2000, 4000); + } +} + +void nbl_stop_all_rx_rings(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 ring_index; + u8 ring_count; + int err; + + ring_count = adapter->num_rxq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + nbl_disable_rx_queue(hw, ring_index); + usleep_range(3000, 6000); + nbl_configure_queue_map(hw, ring_index, true, 0, false); + err = nbl_reset_rx_queue(hw, ring_index); + if (unlikely(err)) { + pr_err("Reset rx queue %u failed with error %d\n", ring_index, err); + continue; + } + usleep_range(2000, 4000); + err = nbl_wait_rx_queue_reset_done(hw, ring_index); + if (unlikely(err)) + pr_err("Wait rx queue %hhu reset done failed with error %d\n", + ring_index, err); + } +} + +void nbl_af_eth_tx_enable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_tx_ctrl tx_ctrl; + + if (atomic_inc_return(&af_res->eth_port_tx_refcount[eth_port_id]) == 1) { + rd32_for_each(hw, NBL_ETH_TX_CTRL_REG(eth_port_id), + (u32 *)&tx_ctrl, sizeof(tx_ctrl)); + tx_ctrl.tx_ipg_value = 0x8; + tx_ctrl.tx_enable = 1; + wr32_for_each(hw, NBL_ETH_TX_CTRL_REG(eth_port_id), + (u32 *)&tx_ctrl, sizeof(tx_ctrl)); + } +} + +void nbl_eth_tx_enable(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id; + + eth_port_id = hw->eth_port_id; + if (is_af(hw)) + nbl_af_eth_tx_enable(adapter, eth_port_id); + else + nbl_mailbox_req_eth_tx_enable(adapter, eth_port_id); +} + +void nbl_af_eth_tx_disable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_tx_ctrl tx_ctrl; + + if (!atomic_dec_return(&af_res->eth_port_tx_refcount[eth_port_id])) { + rd32_for_each(hw, NBL_ETH_TX_CTRL_REG(eth_port_id), + (u32 *)&tx_ctrl, sizeof(tx_ctrl)); + tx_ctrl.tx_enable = 0; + wr32_for_each(hw, NBL_ETH_TX_CTRL_REG(eth_port_id), + (u32 *)&tx_ctrl, sizeof(tx_ctrl)); + } +} + +void nbl_eth_tx_disable(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id; + + eth_port_id = hw->eth_port_id; + if (is_af(hw)) + nbl_af_eth_tx_disable(adapter, eth_port_id); + else + nbl_mailbox_req_eth_tx_disable(adapter, eth_port_id); +} + +void nbl_af_eth_rx_enable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_rx_ctrl rx_ctrl; + + if (atomic_inc_return(&af_res->eth_port_rx_refcount[eth_port_id]) == 1) { + rd32_for_each(hw, NBL_ETH_RX_CTRL_REG(eth_port_id), + (u32 *)&rx_ctrl, sizeof(rx_ctrl)); + rx_ctrl.rx_enable = 1; + wr32_for_each(hw, NBL_ETH_RX_CTRL_REG(eth_port_id), + (u32 *)&rx_ctrl, sizeof(rx_ctrl)); + } +} + +void nbl_eth_rx_enable(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id; + + eth_port_id = hw->eth_port_id; + if (is_af(hw)) + 
nbl_af_eth_rx_enable(adapter, eth_port_id); + else + nbl_mailbox_req_eth_rx_enable(adapter, eth_port_id); +} + +void nbl_af_eth_rx_disable(struct nbl_adapter *adapter, u8 eth_port_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_rx_ctrl rx_ctrl; + + if (!atomic_dec_return(&af_res->eth_port_rx_refcount[eth_port_id])) { + rd32_for_each(hw, NBL_ETH_RX_CTRL_REG(eth_port_id), + (u32 *)&rx_ctrl, sizeof(rx_ctrl)); + rx_ctrl.rx_enable = 0; + wr32_for_each(hw, NBL_ETH_RX_CTRL_REG(eth_port_id), + (u32 *)&rx_ctrl, sizeof(rx_ctrl)); + } +} + +void nbl_eth_rx_disable(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 eth_port_id; + + eth_port_id = hw->eth_port_id; + if (is_af(hw)) + nbl_af_eth_rx_disable(adapter, eth_port_id); + else + nbl_mailbox_req_eth_rx_disable(adapter, eth_port_id); +} + +static inline unsigned int nbl_txd_use_count(unsigned int size) +{ + return DIV_ROUND_UP(size, NBL_TXD_DATALEN_MAX); +} + +static unsigned int nbl_xmit_desc_count(struct sk_buff *skb) +{ + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int size; + unsigned int count; + + size = skb_headlen(skb); + count = 0; + for (;;) { + count += nbl_txd_use_count(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +static int __nbl_maybe_stop_tx(struct nbl_ring *tx_ring, unsigned int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Memory barrier before checking head and tail */ + smp_mb(); + + if (likely(nbl_unused_desc_count(tx_ring) < size)) + return -EBUSY; + + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static inline int nbl_maybe_stop_tx(struct nbl_ring *tx_ring, unsigned int size) +{ + if (likely(nbl_unused_desc_count(tx_ring) >= size)) + return 0; + + return __nbl_maybe_stop_tx(tx_ring, size); +} + +static void nbl_unmap_and_free_tx_resource(struct nbl_ring *ring, + struct nbl_tx_buf *tx_buf, + int napi_budget) +{ + if (tx_buf->skb) { + napi_consume_skb(tx_buf->skb, napi_budget); + if (dma_unmap_len(tx_buf, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + } + + tx_buf->next_to_watch = NULL; + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); +} + +static int nbl_tx_tso(struct nbl_tx_desc *tx_desc, struct nbl_ring *tx_ring, + struct sk_buff *skb, bool *tso) +{ +#ifdef NBL_TSO + struct nbl_tso_desc *desc = tx_desc; + int err; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + __be16 protocol, frag_off; + u8 l3_start_offset, l4_proto, mac_len, ip_len, l4_len; + u8 iipt, eipt = 0, eip_len = 0, l4_tunt = 0, l4_tun_len = 0, l4_type = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + mac_len = ip.hdr - skb->data; + + protocol = vlan_get_protocol(skb); + + if (skb->encapsulation) { + if (protocol == htons(ETH_P_IP)) { + eipt = NBL_EXT_IPV4; + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + eipt = 
NBL_EXT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + err = ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + if (err < 0) + return err; + } else { + return -EIO; + } + + switch (l4_proto) { + case IPPROTO_UDP: + case IPPROTO_GRE: + l4_tunt = NBL_TUN_NVGRE; + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + l4_tunt = NBL_TUN_VXLAN; + l4.hdr = skb_inner_network_header(skb); + break; + default: + skb_checksum_help(skb); + return 0; + } + + eip_len = l4.hdr - ip.hdr; + + ip.hdr = skb_inner_network_header(skb); + /* todo */ + l4_tun_len = ip.hdr - l4.hdr; + + l4.hdr = skb_inner_transport_header(skb); + if (ip.v4->version == 4) + protocol = htons(ETH_P_IP); + + if (ip.v6->version == 6) + protocol = htons(ETH_P_IPV6); + } + + l3_start_offset = ip.hdr - skb->data; + + if (protocol == htons(ETH_P_IP)) { + iipt = NBL_INNER_IPV4; + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + iipt = NBL_INNER_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + return -EIO; + } + + ip_len = l4.hdr - ip.hdr; + switch (l4_proto) { + case IPPROTO_TCP: + l4_len = l4.tcp->doff >> 2; + l4_type = NBL_TCP_TYPE; + break; + case IPPROTO_UDP: + l4_len = sizeof(struct udphdr); + l4_type = NBL_UDP_TYPE; + break; + default: + skb_checksum_help(skb); + return 0; + } + + desc->mss = skb_shinfo(skb)->gso_size; + desc->dd = 0; + desc->l3_checksum = 1; + desc->l4_checksum = 1; + desc->l3_start_offset = l3_start_offset; + desc->dtype = NBL_TSO_DESC; + desc->mac_len = mac_len >> 1; + desc->ip_len = ip_len >> 2; + desc->l4_len = l4_len >> 2; + desc->iipt = iipt; + desc->eipt = eipt; + desc->eip_len = eip_len >> 2; + desc->l4_tunt = l4_tunt; + desc->l4_tun_len = l4_tun_len >> 1; + desc->l4_type = l4_type; + *tso = true; + + return 1; +#else + return 0; +#endif +} + +static int nbl_tx_csum(struct nbl_tx_desc *desc, struct nbl_ring *tx_ring, + struct sk_buff *skb) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + __be16 protocol, frag_off; + u8 l3_start_offset, l4_proto; + int ret; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + l3_start_offset = ip.hdr - skb->data; + + protocol = vlan_get_protocol(skb); + + if (skb->encapsulation) { + if (protocol == htons(ETH_P_IP)) { + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + if (ret < 0) + return ret; + } else { + return -EIO; + } + + switch (l4_proto) { + case IPPROTO_UDP: + case IPPROTO_GRE: + break; + default: + skb_checksum_help(skb); + return 0; + } + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + if (ip.v4->version == 4) + protocol = htons(ETH_P_IP); + + if (ip.v6->version == 6) + protocol = htons(ETH_P_IPV6); + } + + if (protocol == htons(ETH_P_IP)) { + l4_proto = ip.v4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + return -EIO; + } + + switch (l4_proto) { + case IPPROTO_TCP: 
+ case IPPROTO_UDP: + break; + default: + skb_checksum_help(skb); + return 0; + } + + desc->dd = 0; + desc->l3_checksum = 1; + desc->l4_checksum = 1; + desc->l3_start_offset = l3_start_offset; + tx_ring->tx_stats.tx_csum_pkts++; + + return 1; +} + +static int nbl_tx_map(struct sk_buff *skb, struct nbl_ring *tx_ring, + const struct nbl_adapter *adapter) +{ + struct nbl_tx_buf *first_buf; + struct nbl_tx_desc *first_desc; + struct nbl_tx_buf *tx_buf; + struct nbl_tx_desc *tx_desc; + unsigned int data_len; + unsigned int size; + dma_addr_t dma; + const skb_frag_t *frag; + u16 i; + int ret; + bool tso = false; + + i = tx_ring->next_to_use; + first_buf = NBL_TX_BUF(tx_ring, i); + first_buf->skb = skb; + first_buf->bytes = skb->len; + first_buf->pkts = 1; + + first_desc = NBL_TX_DESC(tx_ring, i); + first_desc->pkt_len = skb->len; + first_desc->sop = 1; + first_desc->fwd = NBL_FWD_NORMAL; + + ret = nbl_tx_tso(first_desc, tx_ring, skb, &tso); + if (ret > 0) { + i++; + first_desc++; + tx_ring->tail_ptr++; + if (unlikely(i == tx_ring->desc_num)) { + first_desc = NBL_TX_DESC(tx_ring, 0); + i = 0; + } + first_buf = NBL_TX_BUF(tx_ring, i); + first_buf->skb = skb; + first_buf->bytes = skb->len; + first_buf->pkts = 1; + + first_desc->pkt_len = skb->len; + first_desc->sop = 1; + first_desc->fwd = NBL_FWD_NORMAL; + } else if (ret < 0) { + dev_kfree_skb_any(skb); + first_buf->skb = NULL; + return NETDEV_TX_OK; + } + + if (!tso) { + ret = nbl_tx_csum(first_desc, tx_ring, skb); + if (unlikely(ret < 0)) { + dev_kfree_skb_any(skb); + first_buf->skb = NULL; + return NETDEV_TX_OK; + } + } + + tx_buf = first_buf; + tx_desc = first_desc; + + data_len = skb->data_len; + size = skb_headlen(skb); + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { + dev_warn(tx_ring->dev, "Allocate DMA to transmit skb failed\n"); + goto dma_error; + } + + tx_buf->dma = dma; + tx_buf->len = size; + + tx_desc->dtype = NBL_DATA_DESC; + tx_desc->buffer_addr = dma; + tx_desc->dd = 0; + + while (unlikely(size > NBL_TXD_DATALEN_MAX)) { + tx_desc->data_len = NBL_TXD_DATALEN_MAX; + + dma += NBL_TXD_DATALEN_MAX; + size -= NBL_TXD_DATALEN_MAX; + + i++; + tx_desc++; + tx_ring->tail_ptr++; + if (unlikely(i == tx_ring->desc_num)) { + tx_desc = NBL_TX_DESC(tx_ring, 0); + i = 0; + } + + tx_desc->buffer_addr = dma; + tx_desc->dd = 0; + } + + tx_desc->data_len = size; + + if (likely(!data_len)) + break; + + i++; + tx_desc++; + tx_ring->tail_ptr++; + if (unlikely(i == tx_ring->desc_num)) { + tx_desc = NBL_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + tx_buf = NBL_TX_BUF(tx_ring, i); + } + + tx_desc->eop = 1; + /* Memory barrier before write tail ptr */ + wmb(); + + i++; + tx_ring->tail_ptr++; + if (unlikely(i == tx_ring->desc_num)) + i = 0; + first_buf->next_to_watch = tx_desc; + tx_ring->next_to_use = i; + + skb_tx_timestamp(skb); + + nbl_update_tail_ptr(tx_ring->notify_addr, tx_ring->local_qid, tx_ring->tail_ptr); + + return NETDEV_TX_OK; + +dma_error: + tx_ring->tx_stats.tx_dma_err++; + for (;;) { + tx_buf = NBL_TX_BUF(tx_ring, i); + nbl_unmap_and_free_tx_resource(tx_ring, tx_buf, 0); + if (tx_buf == first_buf) + break; + if (unlikely(!i)) + i = tx_ring->desc_num; + i--; + tx_ring->tail_ptr--; + } + first_desc->sop = 0; + first_desc->l3_checksum = 0; + first_desc->l4_checksum = 0; + + return NETDEV_TX_OK; +} + 
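+/* + * Note on the transmit path above (the concrete numbers below are only an + * illustrative assumption, not taken from hardware documentation): + * nbl_xmit_desc_count() charges DIV_ROUND_UP(len, NBL_TXD_DATALEN_MAX) + * descriptors for the linear area and for each page fragment, so if + * NBL_TXD_DATALEN_MAX were 4096, an skb with an 8000-byte linear area and + * one 2000-byte fragment would need 2 + 1 = 3 data descriptors. + * nbl_tx_map() then optionally emits one TSO descriptor, marks the first + * data descriptor with sop and the last with eop, clears dd on every + * descriptor so the device can set it on completion, and finally rings the + * doorbell through nbl_update_tail_ptr(). + */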
+static netdev_tx_t nbl_xmit_frame_ring(struct sk_buff *skb, struct nbl_ring *tx_ring, + const struct nbl_adapter *adapter) +{ + unsigned int count; + + count = nbl_xmit_desc_count(skb); + if (unlikely(count > 8)) { + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + count = nbl_xmit_desc_count(skb); + tx_ring->tx_stats.tx_linearize++; + } + + if (unlikely(nbl_maybe_stop_tx(tx_ring, count))) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + return nbl_tx_map(skb, tx_ring, adapter); +} + +netdev_tx_t nbl_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct nbl_adapter *adapter = netdev_priv(netdev); + struct nbl_ring *tx_ring; + + tx_ring = adapter->tx_rings[skb_get_queue_mapping(skb)]; + + return nbl_xmit_frame_ring(skb, tx_ring, adapter); +} + +static inline int nbl_tx_desc_used(struct nbl_tx_desc *tx_desc) +{ + return tx_desc->dd; +} + +static inline int nbl_rx_desc_used(struct nbl_rx_desc *rx_desc) +{ + return rx_desc->dd; +} + +bool nbl_clean_tx_irq(struct nbl_ring *tx_ring, int napi_budget) +{ + struct nbl_tx_buf *tx_buf; + struct nbl_tx_desc *tx_desc; + unsigned int budget = NBL_DEFAULT_IRQ_WORK; + unsigned int total_tx_pkts = 0; + unsigned int total_tx_bytes = 0; + s16 i = tx_ring->next_to_clean; + + tx_buf = NBL_TX_BUF(tx_ring, i); + tx_desc = NBL_TX_DESC(tx_ring, i); + i -= tx_ring->desc_num; + do { + struct nbl_tx_desc *end_desc = tx_buf->next_to_watch; + + if (!end_desc) + break; + + /* prevent any other reads prior to end_desc */ + smp_rmb(); + + if (!nbl_tx_desc_used(tx_desc)) + break; + + total_tx_bytes += tx_buf->bytes; + total_tx_pkts += tx_buf->pkts; + while (true) { + nbl_unmap_and_free_tx_resource(tx_ring, tx_buf, napi_budget); + tx_desc->sop = 0; + tx_desc->eop = 0; + tx_desc->l3_checksum = 0; + tx_desc->l4_checksum = 0; + if (tx_desc == end_desc) + break; + i++; + tx_buf++; + tx_desc++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buf = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + } + } + + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buf = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + } + + prefetch(tx_desc); + + budget--; + } while (likely(budget)); + + i += tx_ring->desc_num; + + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_tx_bytes; + tx_ring->stats.packets += total_tx_pkts; + u64_stats_update_end(&tx_ring->syncp); + +#define TX_WAKE_THRESHOLD (MAX_DESC_NEEDED_PER_PKT * 2) + if (unlikely(total_tx_pkts && netif_carrier_ok(tx_ring->netdev) && + (nbl_unused_desc_count(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index)) + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + } + + return !!budget; +} + +static void nbl_rx_csum(struct nbl_ring *rx_ring, struct sk_buff *skb, + struct nbl_rx_desc *rx_desc) +{ + /* Init with no checksum in device */ + skb->ip_summed = CHECKSUM_NONE; + + if (!(rx_ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (rx_desc->checksum_status == NBL_RX_CSUM_ERR) + return; + + rx_ring->rx_stats.rx_csum_pkts++; + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static struct sk_buff *nbl_construct_skb(struct nbl_ring *rx_ring, + struct nbl_rx_buf *rx_buf, + struct nbl_rx_desc *rx_desc, + u16 data_len, bool *add_to_skb) +{ + unsigned int truesize; + const char *va; + struct sk_buff *skb; + struct page *page; + +#if (PAGE_SIZE < NBL_PAGE_SIZE_THRESH) + truesize = NBL_RX_PAGE_SIZE(rx_ring) / 2; +#else + truesize = rx_ring->buf_len; +#endif + + skb = napi_alloc_skb(&rx_ring->q_vector->napi, NBL_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, rx_buf->page_offset, + data_len, DMA_FROM_DEVICE); + page = rx_buf->page; + nbl_rx_csum(rx_ring, skb, rx_desc); + if (data_len <= NBL_RX_HDR_SIZE) { + va = page_address(page) + rx_buf->page_offset; + memcpy(__skb_put(skb, data_len), va, ALIGN(data_len, sizeof(long))); + *add_to_skb = false; + } else { + skb_add_rx_frag(skb, 0, page, rx_buf->page_offset, data_len, truesize); +#if (PAGE_SIZE < NBL_PAGE_SIZE_THRESH) + rx_buf->page_offset ^= truesize; +#else + rx_buf->page_offset += truesize; +#endif + } + + return skb; +} + +static bool nbl_page_is_reusable(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static void nbl_add_rx_frag(struct nbl_ring *rx_ring, struct nbl_rx_buf *rx_buf, + struct sk_buff *skb, u16 data_len) +{ + unsigned int truesize; + struct page *page; + +#if (PAGE_SIZE < NBL_PAGE_SIZE_THRESH) + truesize = NBL_RX_PAGE_SIZE(rx_ring) / 2; +#else + truesize = rx_ring->buf_len; +#endif + + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, rx_buf->page_offset, + data_len, DMA_FROM_DEVICE); + page = rx_buf->page; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buf->page_offset, data_len, truesize); +#if (PAGE_SIZE < NBL_PAGE_SIZE_THRESH) + rx_buf->page_offset ^= truesize; +#else + rx_buf->page_offset += truesize; +#endif +} + +static bool nbl_can_reuse_rx_page(struct nbl_ring *rx_ring, struct nbl_rx_buf *rx_buf, + bool add_to_skb) +{ + struct page *page = rx_buf->page; +#if (PAGE_SIZE >= NBL_PAGE_SIZE_THRESH) + unsigned int last_offset; + + last_offset = NBL_RX_PAGE_SIZE(rx_ring) - rx_ring->buf_len; +#endif + + if (!nbl_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < NBL_PAGE_SIZE_THRESH) + if (unlikely(page_count(page) != 1)) + return false; + + /* Since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unnecessary locked operation + */ + if (add_to_skb) +#ifdef RHEL_RELEASE +#if (RHEL_RELEASE_VERSION(8, 0) <= RHEL_RELEASE_CODE) + atomic_set(&page->_refcount, 2); +#else + set_page_count(page, 2); +#endif +#else + atomic_set(&page->_refcount, 2); +#endif + +#else + if (rx_buf->page_offset > last_offset) + return false; + + if (add_to_skb) + get_page(page); +#endif + + return true; +} + +static void nbl_reuse_rx_page(struct nbl_ring *rx_ring, struct nbl_rx_buf *old_buff) +{ + struct nbl_rx_buf *new_buff; + u16 next_to_alloc = 
rx_ring->next_to_alloc; + + new_buff = NBL_RX_BUF(rx_ring, next_to_alloc); + + next_to_alloc++; + rx_ring->next_to_alloc = (next_to_alloc < rx_ring->desc_num) ? next_to_alloc : 0; + + new_buff->page = old_buff->page; + new_buff->dma = old_buff->dma; + new_buff->page_offset = old_buff->page_offset; +} + +static void nbl_put_rx_buf(struct nbl_ring *rx_ring, struct nbl_rx_buf *rx_buf, + struct nbl_rx_desc *rx_desc, bool add_to_skb) +{ + if (nbl_can_reuse_rx_page(rx_ring, rx_buf, add_to_skb)) { + nbl_reuse_rx_page(rx_ring, rx_buf); + } else { +#ifdef RHEL_RELEASE +#if (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 7) < RHEL_RELEASE_CODE) + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, &rx_ring->rx_buf_attrs); +#else + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); +#endif +#else + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); +#endif + if (!add_to_skb) + put_page(rx_buf->page); + } + + rx_buf->page = NULL; + rx_desc->dd = 0; +} + +static void nbl_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, NBL_RX_HDR_SIZE); + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static bool nbl_cleanup_headers(struct sk_buff *skb) +{ + if (!skb_headlen(skb)) + nbl_pull_tail(skb); + + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void nbl_process_skb_fields(struct nbl_ring *rx_ring, struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, dev); +} + +int nbl_clean_rx_irq(struct nbl_ring *rx_ring, int budget) +{ + struct nbl_q_vector *q_vector = rx_ring->q_vector; + struct nbl_rx_desc *rx_desc; + struct nbl_rx_buf *rx_buf; + struct sk_buff *skb; + unsigned int total_rx_pkts; + unsigned int total_rx_bytes; + u16 cleaned_count; + u16 data_len; + u16 buf_len; + u16 sync_len; + bool add_to_skb; + u16 i; + + cleaned_count = nbl_unused_desc_count(rx_ring); + if (cleaned_count >= NBL_RX_BUF_WRITE) { + nbl_alloc_rx_bufs(rx_ring, cleaned_count); + cleaned_count = 0; + } + + skb = NULL; + total_rx_pkts = 0; + total_rx_bytes = 0; + buf_len = (u16)rx_ring->buf_len; + + i = rx_ring->next_to_clean; + rx_desc = NBL_RX_DESC(rx_ring, i); + rx_buf = NBL_RX_BUF(rx_ring, i); + if (!nbl_rx_desc_used(rx_desc)) + return total_rx_pkts; + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + data_len = rx_desc->data_len; + + while (likely(total_rx_pkts < budget)) { + sync_len = (data_len > buf_len) ? 
buf_len : data_len; + add_to_skb = true; + if (!skb) + skb = nbl_construct_skb(rx_ring, rx_buf, rx_desc, sync_len, &add_to_skb); + else + nbl_add_rx_frag(rx_ring, rx_buf, skb, sync_len); + + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_skb_failed++; + break; + } + + nbl_put_rx_buf(rx_ring, rx_buf, rx_desc, add_to_skb); + + cleaned_count++; + i++; + rx_buf++; + rx_desc++; + if (i == rx_ring->desc_num) { + i = 0; + rx_buf = NBL_RX_BUF(rx_ring, 0); + rx_desc = NBL_RX_DESC(rx_ring, 0); + } + data_len -= sync_len; + if (data_len) + continue; + + if (likely(!nbl_cleanup_headers(skb))) { + total_rx_bytes += skb->len; + nbl_process_skb_fields(rx_ring, skb); + napi_gro_receive(&q_vector->napi, skb); + total_rx_pkts++; + } + + skb = NULL; + + if (!nbl_rx_desc_used(rx_desc)) + break; + + dma_rmb(); + data_len = rx_desc->data_len; + } + + if (cleaned_count) + nbl_alloc_rx_bufs(rx_ring, cleaned_count); + + rx_ring->next_to_clean = i; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_pkts; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + + return total_rx_pkts; +} + +static void nbl_free_tx_ring_bufs(struct nbl_ring *tx_ring) +{ + struct nbl_tx_buf *tx_buf; + u16 i; + + i = tx_ring->next_to_clean; + tx_buf = NBL_TX_BUF(tx_ring, i); + while (i != tx_ring->next_to_use) { + nbl_unmap_and_free_tx_resource(tx_ring, tx_buf, 0); + i++; + tx_buf++; + if (i == tx_ring->desc_num) { + i = 0; + tx_buf = NBL_TX_BUF(tx_ring, i); + } + } + + tx_ring->next_to_clean = 0; + tx_ring->next_to_use = 0; + tx_ring->tail_ptr = 0; +} + +void nbl_free_all_tx_bufs(struct nbl_adapter *adapter) +{ + struct nbl_ring *tx_ring; + u16 ring_count; + u16 ring_index; + + ring_count = adapter->num_txq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + tx_ring = adapter->tx_rings[ring_index]; + nbl_free_tx_ring_bufs(tx_ring); + } +} + +static void nbl_unmap_and_free_rx_resource(struct nbl_ring *rx_ring, struct nbl_rx_buf *rx_buf) +{ + u32 buf_len = rx_ring->buf_len; + + /* Invalidate cache lines that may have been written by device to avoid + * memory corruption. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, rx_buf->page_offset, + buf_len, DMA_FROM_DEVICE); +#ifdef RHEL_RELEASE +#if (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 7) < RHEL_RELEASE_CODE) + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, &rx_ring->rx_buf_attrs); +#else + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); +#endif +#else + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, NBL_RX_PAGE_SIZE(rx_ring), + DMA_FROM_DEVICE, NBL_RX_DMA_ATTR); +#endif + put_page(rx_buf->page); + rx_buf->page = NULL; +} + +static void nbl_free_rx_ring_bufs(struct nbl_ring *rx_ring) +{ + struct nbl_rx_buf *rx_buf; + u16 i; + + i = rx_ring->next_to_clean; + rx_buf = NBL_RX_BUF(rx_ring, i); + while (i != rx_ring->next_to_alloc) { + nbl_unmap_and_free_rx_resource(rx_ring, rx_buf); + i++; + rx_buf++; + if (i == rx_ring->desc_num) { + i = 0; + rx_buf = NBL_RX_BUF(rx_ring, i); + } + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->next_to_alloc = 0; + rx_ring->tail_ptr = 0; +} + +void nbl_free_all_rx_bufs(struct nbl_adapter *adapter) +{ + struct nbl_ring *rx_ring; + u16 ring_count; + u16 ring_index; + + ring_count = adapter->num_txq; + for (ring_index = 0; ring_index < ring_count; ring_index++) { + rx_ring = adapter->rx_rings[ring_index]; + nbl_free_rx_ring_bufs(rx_ring); + } +} + +static void nbl_af_forward_ring_tx_map(struct nbl_adapter *adapter, struct nbl_ring *tx_ring, + struct sk_buff *skb, unsigned int dport, + unsigned int dport_id) +{ + struct nbl_tx_buf *first_buf; + struct nbl_tx_desc *first_desc; + struct nbl_tx_buf *tx_buf; + struct nbl_tx_desc *tx_desc; + unsigned int data_len; + unsigned int size; + dma_addr_t dma; + const skb_frag_t *frag; + u16 i; + + i = tx_ring->next_to_use; + first_buf = NBL_TX_BUF(tx_ring, i); + first_buf->skb = skb; + first_buf->bytes = skb->len; + first_buf->pkts = 1; + + first_desc = NBL_TX_DESC(tx_ring, i); + first_desc->pkt_len = skb->len; + first_desc->sop = 1; + first_desc->fwd = NBL_FWD_CPU; + first_desc->dport = dport; + first_desc->dport_id = dport_id; + + tx_buf = first_buf; + tx_desc = first_desc; + + data_len = skb->data_len; + size = skb_headlen(skb); + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { + dev_warn(tx_ring->dev, "AF forward ring allocate DMA to transmit skb failed\n"); + goto dma_error; + } + + tx_buf->dma = dma; + tx_buf->len = size; + + tx_desc->buffer_addr = dma; + tx_desc->dd = 0; + while (unlikely(size > NBL_TXD_DATALEN_MAX)) { + tx_desc->data_len = NBL_TXD_DATALEN_MAX; + + dma += NBL_TXD_DATALEN_MAX; + size -= NBL_TXD_DATALEN_MAX; + + i++; + tx_desc++; + tx_ring->tail_ptr++; + if (unlikely(i == tx_ring->desc_num)) { + tx_desc = NBL_TX_DESC(tx_ring, 0); + i = 0; + } + + tx_desc->buffer_addr = dma; + tx_desc->dd = 0; + } + + tx_desc->data_len = size; + + if (likely(!data_len)) + break; + + i++; + tx_desc++; + tx_ring->tail_ptr++; + if (unlikely(i == tx_ring->desc_num)) { + tx_desc = NBL_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + tx_buf = NBL_TX_BUF(tx_ring, i); + } + + tx_desc->eop = 1; + /* Make sure descriptor has been written before write tail_ptr */ + wmb(); + + i++; + tx_ring->tail_ptr++; + if (unlikely(i == 
tx_ring->desc_num)) + i = 0; + first_buf->next_to_watch = tx_desc; + tx_ring->next_to_use = i; + + skb_tx_timestamp(skb); + + nbl_update_tail_ptr(tx_ring->notify_addr, tx_ring->local_qid, tx_ring->tail_ptr); + + return; + +dma_error: + for (;;) { + tx_buf = NBL_TX_BUF(tx_ring, i); + nbl_unmap_and_free_tx_resource(tx_ring, tx_buf, 0); + if (tx_buf == first_buf) + break; + if (unlikely(!i)) + i = tx_ring->desc_num; + i--; + tx_ring->tail_ptr--; + } + first_desc->sop = 0; +} + +static void nbl_af_forward_ring_xmit_frame(struct nbl_adapter *adapter, struct nbl_ring *tx_ring, + struct sk_buff *skb, unsigned int dport, + unsigned int dport_id) +{ + unsigned int count; + + count = nbl_xmit_desc_count(skb); + if (unlikely(count > 8)) { + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return; + } + count = nbl_xmit_desc_count(skb); + tx_ring->tx_stats.tx_linearize++; + } + + if (unlikely(nbl_unused_desc_count(tx_ring) < count)) { + tx_ring->tx_stats.tx_busy++; + dev_kfree_skb_any(skb); + return; + } + + nbl_af_forward_ring_tx_map(adapter, tx_ring, skb, dport, dport_id); +} + +static void nbl_af_software_forward_eth_captured_packet(struct nbl_adapter *adapter, + struct sk_buff *skb, + unsigned int sport_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_af_res_info *af_res = hw->af_res; + struct sk_buff *new_skb; + u8 txq_index; + struct nbl_ring *tx_ring; + struct nbl_func_res *func_res; + unsigned int vf_vsi_id_start; + unsigned int vf_vsi_id_end; + unsigned int vsi_id; + + if (sport_id >= NBL_MAX_PF_FUNC) { + pr_err("Receive captured packet from invalid ETH port id %u\n", sport_id); + return; + } + + txq_index = adapter->num_txq; + tx_ring = adapter->tx_rings[txq_index]; + /* Forward captured packet to PF */ + func_res = af_res->res_record[sport_id]; + if (unlikely(!func_res)) { + pr_err("Receive captured packet from ETH port, but there is no corresponding PF\n"); + } else { + new_skb = skb_copy(skb, GFP_ATOMIC); + if (unlikely(!new_skb)) + pr_warn("There is no memory to copy captured packet for PF\n"); + else + nbl_af_forward_ring_xmit_frame(adapter, tx_ring, new_skb, + NBL_TXD_DPORT_HOST, sport_id); + } + + /* Forward captured packet to VFs */ + vf_vsi_id_start = NBL_MAX_PF_FUNC + sport_id * NBL_MAX_VF_PER_PF; + vf_vsi_id_end = vf_vsi_id_start + NBL_MAX_VF_PER_PF; + for (vsi_id = vf_vsi_id_start; vsi_id < vf_vsi_id_end; vsi_id++) { + func_res = af_res->res_record[vsi_id]; + if (!func_res) + break; + new_skb = skb_copy(skb, GFP_ATOMIC); + if (unlikely(!new_skb)) + pr_warn("There is no memory to copy captured packet for VF\n"); + else + nbl_af_forward_ring_xmit_frame(adapter, tx_ring, new_skb, + NBL_TXD_DPORT_HOST, vsi_id); + } +} + +static void nbl_af_software_forward_host_captured_packet(struct nbl_adapter *adapter, + struct sk_buff *skb, + unsigned int sport_id) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_af_res_info *af_res = hw->af_res; + struct sk_buff *new_skb; + u8 txq_index; + struct nbl_ring *tx_ring; + struct nbl_func_res *func_res; + unsigned int pf_vsi_id; + unsigned int vf_vsi_id_start; + unsigned int vf_vsi_id_end; + unsigned int vsi_id; + + if (sport_id >= NBL_MAX_FUNC) { + pr_err("Receive captured packet from invalid vsi port id %u\n", sport_id); + return; + } + + if (sport_id >= NBL_MAX_PF_FUNC) + pf_vsi_id = (sport_id - NBL_MAX_PF_FUNC) / NBL_MAX_VF_PER_PF; + else + pf_vsi_id = sport_id; + + txq_index = adapter->num_txq; + tx_ring = adapter->tx_rings[txq_index]; + /* Forward captured packet to ETH port */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if 
(unlikely(!new_skb)) + pr_warn("There is no memory to copy captured packet for ETH port\n"); + else + nbl_af_forward_ring_xmit_frame(adapter, tx_ring, new_skb, + NBL_TXD_DPORT_ETH, pf_vsi_id); + + /* Forward captured packet to PF */ + func_res = af_res->res_record[pf_vsi_id]; + if (unlikely(!func_res)) { + pr_err("Receive captured packet from host port, but there is no corresponding PF\n"); + } else if (pf_vsi_id != sport_id) { + new_skb = skb_copy(skb, GFP_ATOMIC); + if (unlikely(!new_skb)) + pr_warn("There is no memory to copy captured packet for PF\n"); + else + nbl_af_forward_ring_xmit_frame(adapter, tx_ring, new_skb, + NBL_TXD_DPORT_HOST, pf_vsi_id); + } + + /* Forward captured packet to VFs */ + vf_vsi_id_start = NBL_MAX_PF_FUNC + pf_vsi_id * NBL_MAX_VF_PER_PF; + vf_vsi_id_end = vf_vsi_id_start + NBL_MAX_VF_PER_PF; + for (vsi_id = vf_vsi_id_start; vsi_id < vf_vsi_id_end; vsi_id++) { + if (vsi_id == sport_id) + continue; + + func_res = af_res->res_record[vsi_id]; + if (!func_res) + break; + new_skb = skb_copy(skb, GFP_ATOMIC); + if (unlikely(!new_skb)) + pr_warn("There is no memory to copy captured packet for VF\n"); + else + nbl_af_forward_ring_xmit_frame(adapter, tx_ring, new_skb, + NBL_TXD_DPORT_HOST, vsi_id); + } +} + +static void nbl_af_software_forward_captured_packet(struct nbl_adapter *adapter, + struct sk_buff *skb, + unsigned int sport_type, + unsigned int sport_id) +{ + if (sport_type == NBL_RXD_SPORT_ETH) + nbl_af_software_forward_eth_captured_packet(adapter, skb, sport_id); + else + nbl_af_software_forward_host_captured_packet(adapter, skb, sport_id); + + kfree_skb(skb); +} + +int nbl_af_clean_forward_ring_rx_irq(struct nbl_ring *rx_ring, int budget) +{ + struct nbl_q_vector *q_vector = rx_ring->q_vector; + struct nbl_adapter *adapter = q_vector->adapter; + struct nbl_rx_desc *rx_desc; + struct nbl_rx_buf *rx_buf; + struct sk_buff *skb; + unsigned int total_rx_pkts; + unsigned int total_rx_bytes; + u16 cleaned_count; + u16 data_len; + u16 buf_len; + unsigned int fwd_mode; + unsigned int sport_type; + unsigned int sport_id; + u16 sync_len; + bool add_to_skb; + u16 i; + + cleaned_count = nbl_unused_desc_count(rx_ring); + if (cleaned_count >= NBL_RX_BUF_WRITE) { + nbl_alloc_rx_bufs(rx_ring, cleaned_count); + cleaned_count = 0; + } + + skb = NULL; + total_rx_pkts = 0; + total_rx_bytes = 0; + buf_len = (u16)rx_ring->buf_len; + + i = rx_ring->next_to_clean; + rx_desc = NBL_RX_DESC(rx_ring, i); + rx_buf = NBL_RX_BUF(rx_ring, i); + if (!nbl_rx_desc_used(rx_desc)) + return total_rx_pkts; + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + data_len = rx_desc->data_len; + fwd_mode = rx_desc->fwd; + sport_type = rx_desc->sport; + sport_id = rx_desc->sport_id; + + while (likely(total_rx_pkts < budget)) { + sync_len = (data_len > buf_len) ? 
buf_len : data_len; + add_to_skb = true; + if (!skb) + skb = nbl_construct_skb(rx_ring, rx_buf, rx_desc, sync_len, &add_to_skb); + else + nbl_add_rx_frag(rx_ring, rx_buf, skb, sync_len); + + if (unlikely(!skb)) { + pr_warn("Failed to allocate skb for RX packet\n"); + break; + } + + nbl_put_rx_buf(rx_ring, rx_buf, rx_desc, add_to_skb); + + cleaned_count++; + i++; + rx_buf++; + rx_desc++; + if (i == rx_ring->desc_num) { + i = 0; + rx_buf = NBL_RX_BUF(rx_ring, 0); + rx_desc = NBL_RX_DESC(rx_ring, 0); + } + data_len -= sync_len; + if (data_len) + continue; + + if (likely(!nbl_cleanup_headers(skb))) { + if (unlikely(fwd_mode != NBL_RXD_FWD_CPU)) { + pr_err("AF forward ring received non-captured packet\n"); + kfree_skb(skb); + } else { + total_rx_bytes += skb->len; + nbl_af_software_forward_captured_packet(adapter, skb, + sport_type, sport_id); + total_rx_pkts++; + } + } + + skb = NULL; + + if (!nbl_rx_desc_used(rx_desc)) + break; + + dma_rmb(); + data_len = rx_desc->data_len; + fwd_mode = rx_desc->fwd; + sport_type = rx_desc->sport; + sport_id = rx_desc->sport_id; + } + + if (cleaned_count) + nbl_alloc_rx_bufs(rx_ring, cleaned_count); + + rx_ring->next_to_clean = i; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_pkts; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + + return total_rx_pkts; +} + +bool nbl_af_clean_forward_ring_tx_irq(struct nbl_ring *tx_ring, int napi_budget) +{ + struct nbl_tx_buf *tx_buf; + struct nbl_tx_desc *tx_desc; + unsigned int budget = NBL_DEFAULT_IRQ_WORK; + unsigned int total_tx_pkts = 0; + unsigned int total_tx_bytes = 0; + s16 i = tx_ring->next_to_clean; + + tx_buf = NBL_TX_BUF(tx_ring, i); + tx_desc = NBL_TX_DESC(tx_ring, i); + i -= tx_ring->desc_num; + do { + struct nbl_tx_desc *end_desc = tx_buf->next_to_watch; + + if (!end_desc) + break; + + /* ensure end_desc is read and checked first */ + smp_rmb(); + + if (!nbl_tx_desc_used(tx_desc)) + break; + + total_tx_bytes += tx_buf->bytes; + total_tx_pkts += tx_buf->pkts; + while (true) { + nbl_unmap_and_free_tx_resource(tx_ring, tx_buf, napi_budget); + tx_desc->sop = 0; + tx_desc->eop = 0; + if (tx_desc == end_desc) + break; + i++; + tx_buf++; + tx_desc++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buf = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + } + } + + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->desc_num; + tx_buf = NBL_TX_BUF(tx_ring, 0); + tx_desc = NBL_TX_DESC(tx_ring, 0); + } + + prefetch(tx_desc); + + budget--; + } while (likely(budget)); + + i += tx_ring->desc_num; + + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_tx_bytes; + tx_ring->stats.packets += total_tx_pkts; + u64_stats_update_end(&tx_ring->syncp); + + return !!budget; +} + +static void nbl_af_forward_ring_q_vector_fixup(struct nbl_adapter *adapter) +{ + struct nbl_q_vector *q_vector; + u16 q_vector_id; + + q_vector_id = adapter->num_q_vectors; + q_vector = adapter->q_vectors[q_vector_id]; + netif_napi_del(&q_vector->napi); + netif_napi_add(adapter->netdev, &q_vector->napi, + nbl_af_forward_ring_napi_poll, NAPI_POLL_WEIGHT); +} + +static void nbl_af_map_forward_ring_to_vector(struct nbl_adapter *adapter) +{ + struct nbl_q_vector *q_vector; + struct nbl_ring *ring; + u16 txq_index = adapter->num_txq; + u16 rxq_index = adapter->num_rxq; + u16 q_vector_id = adapter->num_q_vectors; + + q_vector = adapter->q_vectors[q_vector_id]; + + q_vector->num_ring_tx = 1; + q_vector->tx_ring = 
NULL; + ring = adapter->tx_rings[txq_index]; + ring->next = q_vector->tx_ring; + ring->q_vector = q_vector; + q_vector->tx_ring = ring; + + q_vector->num_ring_rx = 1; + q_vector->rx_ring = NULL; + ring = adapter->rx_rings[rxq_index]; + ring->next = q_vector->rx_ring; + ring->q_vector = q_vector; + q_vector->rx_ring = ring; +} + +static int nbl_af_setup_forward_tx_ring(struct nbl_adapter *adapter) +{ + u16 txq_index = adapter->num_txq; + struct nbl_ring *tx_ring; + + tx_ring = adapter->tx_rings[txq_index]; + + return nbl_setup_tx_ring(tx_ring); +} + +static void nbl_af_teardown_forward_tx_ring(struct nbl_adapter *adapter) +{ + u16 txq_index = adapter->num_txq; + struct nbl_ring *tx_ring; + + tx_ring = adapter->tx_rings[txq_index]; + + nbl_teardown_tx_ring(tx_ring); +} + +static int nbl_af_setup_forward_rx_ring(struct nbl_adapter *adapter) +{ + u16 rxq_index = adapter->num_rxq; + struct nbl_ring *rx_ring; + + rx_ring = adapter->rx_rings[rxq_index]; + + return nbl_setup_rx_ring(rx_ring); +} + +static void nbl_af_teardown_forward_rx_ring(struct nbl_adapter *adapter) +{ + u16 rxq_index = adapter->num_rxq; + struct nbl_ring *rx_ring; + + rx_ring = adapter->rx_rings[rxq_index]; + + nbl_teardown_rx_ring(rx_ring); +} + +static int nbl_af_setup_forward_ring(struct nbl_adapter *adapter) +{ + int err; + + err = nbl_af_setup_forward_tx_ring(adapter); + if (err) { + pr_err("Setup AF forward tx ring failed with error %d\n", err); + return err; + } + + err = nbl_af_setup_forward_rx_ring(adapter); + if (err) { + pr_err("Setup AF forward rx ring failed with error %d\n", err); + goto setup_forward_rx_ring_err; + } + + return 0; + +setup_forward_rx_ring_err: + nbl_af_teardown_forward_tx_ring(adapter); + return err; +} + +static void nbl_af_teardown_forward_ring(struct nbl_adapter *adapter) +{ + nbl_af_teardown_forward_tx_ring(adapter); + nbl_af_teardown_forward_rx_ring(adapter); +} + +static void nbl_af_hw_config_forward_tx_ring(struct nbl_adapter *adapter) +{ + u16 txq_index = adapter->num_txq; + struct nbl_ring *tx_ring; + + tx_ring = adapter->tx_rings[txq_index]; + + nbl_hw_config_tx_ring(tx_ring); +} + +static void nbl_af_hw_config_forward_rx_ring(struct nbl_adapter *adapter) +{ + u16 rxq_index = adapter->num_rxq; + struct nbl_ring *rx_ring; + + rx_ring = adapter->rx_rings[rxq_index]; + + nbl_hw_config_rx_ring(rx_ring); +} + +static void nbl_af_hw_config_forward_ring(struct nbl_adapter *adapter) +{ + nbl_af_hw_config_forward_tx_ring(adapter); + nbl_af_hw_config_forward_rx_ring(adapter); +} + +static void nbl_af_forward_ring_alloc_all_rx_bufs(struct nbl_adapter *adapter) +{ + u16 rxq_index = adapter->num_rxq; + struct nbl_ring *rx_ring; + u16 desc_count; + + rx_ring = adapter->rx_rings[rxq_index]; + desc_count = nbl_unused_desc_count(rx_ring); + if (unlikely(!nbl_alloc_rx_bufs(rx_ring, desc_count))) + pr_warn("Allocate RX bufs for AF forward ring failed\n"); +} + +static void nbl_af_forward_ring_free_all_rx_bufs(struct nbl_adapter *adapter) +{ + u16 rxq_index = adapter->num_rxq; + struct nbl_ring *rx_ring; + + rx_ring = adapter->rx_rings[rxq_index]; + nbl_free_rx_ring_bufs(rx_ring); +} + +static void nbl_af_forward_ring_free_all_tx_bufs(struct nbl_adapter *adapter) +{ + u16 txq_index = adapter->num_txq; + struct nbl_ring *tx_ring; + + tx_ring = adapter->tx_rings[txq_index]; + nbl_free_tx_ring_bufs(tx_ring); +} + +static void nbl_af_start_forward_tx_ring(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *tx_ring; + struct nbl_q_vector *q_vector; + u16 local_vector_id; + 
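+ /* Descriptive note: the AF forward ring is not one of the regular data queues; it occupies the extra slot at local index adapter->num_txq (== adapter->num_rxq here) and is serviced by the additional q_vector wired up in nbl_af_map_forward_ring_to_vector(). */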
u8 ring_index; + + ring_index = adapter->num_txq; + tx_ring = adapter->tx_rings[ring_index]; + q_vector = tx_ring->q_vector; + local_vector_id = q_vector->q_vector_id; + nbl_configure_port_map(hw, hw->eth_port_id, ring_index); + nbl_configure_queue_map(hw, ring_index, false, local_vector_id, true); + nbl_enable_tx_queue(hw, ring_index); +} + +static void nbl_af_start_forward_rx_ring(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_ring *rx_ring; + struct nbl_q_vector *q_vector; + u16 local_vector_id; + u8 ring_index; + + ring_index = adapter->num_txq; + rx_ring = adapter->rx_rings[ring_index]; + q_vector = rx_ring->q_vector; + local_vector_id = q_vector->q_vector_id; + nbl_configure_queue_map(hw, ring_index, true, local_vector_id, true); + nbl_enable_rx_queue(hw, ring_index); +} + +static void nbl_af_stop_forward_tx_ring(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 ring_index; + int err; + + ring_index = adapter->num_txq; + nbl_disable_tx_queue(hw, ring_index); + usleep_range(3000, 6000); + nbl_configure_queue_map(hw, ring_index, false, 0, false); + err = nbl_reset_tx_queue(hw, ring_index); + if (unlikely(err)) + pr_err("Reset AF forward tx queue %hhu failed with error %d\n", ring_index, err); + usleep_range(2000, 4000); +} + +static void nbl_af_stop_forward_rx_ring(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + u8 ring_index; + int err; + + ring_index = adapter->num_txq; + nbl_disable_rx_queue(hw, ring_index); + usleep_range(3000, 6000); + nbl_configure_queue_map(hw, ring_index, true, 0, false); + err = nbl_reset_rx_queue(hw, ring_index); + if (unlikely(err)) { + pr_err("Reset AF forward rx queue %u failed with error %d\n", ring_index, err); + return; + } + usleep_range(2000, 4000); + err = nbl_wait_rx_queue_reset_done(hw, ring_index); + if (unlikely(err)) + pr_err("Wait AF forward rx queue %hhu reset done failed with error %d\n", + ring_index, err); +} + +static void nbl_af_register_forward_ring(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + struct nbl_af_res_info *af_res = hw->af_res; + struct nbl_func_res *func_res = af_res->res_record[0]; + u8 local_ring_index; + u8 global_ring_index; + + local_ring_index = adapter->num_rxq; + global_ring_index = func_res->txrx_queues[local_ring_index]; + af_res->forward_ring_index = global_ring_index; +} + +int nbl_activate_af_forward_queue(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + int err; + + if (!is_af(hw)) + return 0; + + nbl_af_forward_ring_q_vector_fixup(adapter); + + nbl_af_map_forward_ring_to_vector(adapter); + + err = nbl_af_setup_forward_ring(adapter); + if (err) + return err; + + nbl_af_hw_config_forward_ring(adapter); + + nbl_af_forward_ring_alloc_all_rx_bufs(adapter); + + err = nbl_af_forward_ring_request_irq(adapter); + if (err) { + pr_err("AF forward ring requests irq failed with error %d\n", err); + goto forward_ring_request_irq_err; + } + + nbl_af_start_forward_tx_ring(adapter); + nbl_af_start_forward_rx_ring(adapter); + + nbl_af_enable_forward_ring_napi(adapter); + + nbl_af_configure_forward_ring_irq(adapter); + + nbl_af_register_forward_ring(adapter); + + return 0; + +forward_ring_request_irq_err: + nbl_af_forward_ring_free_all_rx_bufs(adapter); + nbl_af_teardown_forward_ring(adapter); + return err; +} + +void nbl_deactivate_af_forward_queue(struct nbl_adapter *adapter) +{ + struct nbl_hw *hw = &adapter->hw; + + if (!is_af(hw)) + return; + + nbl_af_clear_forward_ring_irq_conf(adapter); + + 
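+ /* Descriptive note: tear-down mirrors nbl_activate_af_forward_queue() in roughly reverse order - quiesce NAPI, stop the forward TX/RX rings, release their IRQ, drop any buffers still held on the rings, then free the descriptor rings themselves. */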
nbl_af_disable_forward_ring_napi(adapter); + + nbl_af_stop_forward_tx_ring(adapter); + nbl_af_stop_forward_rx_ring(adapter); + + nbl_af_forward_ring_free_irq(adapter); + + nbl_af_forward_ring_free_all_tx_bufs(adapter); + nbl_af_forward_ring_free_all_rx_bufs(adapter); + + nbl_af_teardown_forward_ring(adapter); +} diff --git a/drivers/net/ethernet/nebula-matrix/m1600/txrx.h b/drivers/net/ethernet/nebula-matrix/m1600/txrx.h new file mode 100644 index 0000000000000000000000000000000000000000..eb27131b3fd9bbe0c585446d229f90c45d463f92 --- /dev/null +++ b/drivers/net/ethernet/nebula-matrix/m1600/txrx.h @@ -0,0 +1,351 @@ +/* SPDX-License-Identifier: GPL-2.0*/ +/* + * Copyright (c) 2022 nebula-matrix Limited. + * Author: Monte Song + */ + +#ifndef _NBL_TXRX_H_ +#define _NBL_TXRX_H_ + +#include +#include + +#define NBL_DEFAULT_TX_DESC_NUM 512 +#define NBL_DEFAULT_RX_DESC_NUM 512 +#define NBL_MAX_TX_DESC_NUM 4096 +#define NBL_MAX_RX_DESC_NUM 4096 +#define NBL_MIN_TX_DESC_NUM 32 +#define NBL_MIN_RX_DESC_NUM 32 + +#define NBL_PAGE_SIZE_THRESH 8192 + +#define NBL_DEFAULT_RING_NUM (4) +#define NBL_VF_DEFAULT_RING_NUM (1) +#define NBL_MAX_RING_NUM (16) + +#define NBL_RX_BUF_LEN 2048 +#define NBL_RX_BUF_256 256 +#define NBL_RX_HDR_SIZE NBL_RX_BUF_256 +#define NBL_RX_BUF_WRITE 16 + +#define MAX_DESC_NEEDED_PER_PKT 8 + +#define NBL_DEFAULT_IRQ_WORK 128 + +#define NBL_TX_MIN_GSO_SIZE 256 +#define NBL_TX_MAX_OFFLOAD_HEADER_LEN 256 + +enum nbl_rx_checksum_status { + NBL_RX_CSUM_ERR, + NBL_RX_CSUM_OK, +}; + +enum nbl_inner_ip_type { + NBL_INNER_NONE, + NBL_INNER_IPV6, + NBL_INNER_IPV4, + NBL_INNER_RSV, +}; + +enum nbl_external_ip_type { + NBL_EXT_NONE, + NBL_EXT_IPV6, + NBL_EXT_IPV4, + NBL_EXT_RSV, +}; + +enum nbl_tx_desc_type { + NBL_DATA_DESC, + NBL_TSO_DESC, +}; + +enum nbl_tunnel_type { + NBL_TUN_NONE, + NBL_TUN_VXLAN, + NBL_TUN_NVGRE, + NBL_TUN_RSV, +}; + +enum nbl_l4_type { + NBL_FRAGMENTED_TYPE, + NBL_TCP_TYPE, + NBL_UDP_TYPE, +}; + +/* Note: keep sizeof(nbl_tso_desc) equal sizeof(nbl_tx_desc) */ +struct nbl_tso_desc { + u32 mss:14; + u32 rsv0:2; + u32 pkt_len:16; + u32 dd:1; + u32 eop:1; + u32 sop:1; + u32 fwd:2; + u32 dport:1; + u32 dport_id:7; + u32 l3_checksum:1; + u32 l4_checksum:1; + u32 rss_lag:1; + u32 l3_start_offset:6; + u32 rsv1:8; + u32 dtype:2; + u32 mac_len:4; + u32 ip_len:5; + u32 l4_len:4; + u32 iipt:2; + u32 eipt:2; + u32 eip_len:5; + u32 l4_tunt:3; + u32 l4_tun_len:5; + u32 l4_type:2; + u32 rsv2; +}; + +struct nbl_tx_desc { + u32 data_len:16; + u32 pkt_len:16; + u32 dd:1; + u32 eop:1; + u32 sop:1; + u32 fwd:2; + u32 dport:1; + u32 dport_id:7; + u32 l3_checksum:1; + u32 l4_checksum:1; + u32 rss_lag:1; + u32 l3_start_offset:6; + u32 rsv1:8; + u32 dtype:2; + u64 buffer_addr; +}; + +enum nbl_txd_fwd_type { + NBL_FWD_DROP, + NBL_FWD_NORMAL, + NBL_FWD_RSV, + NBL_FWD_CPU, +}; + +enum nbl_txd_dst_port_type { + NBL_TXD_DPORT_ETH, + NBL_TXD_DPORT_HOST, +}; + +#define NBL_TXD_DATALEN_BITS 14 +#define NBL_TXD_DATALEN_MAX ((1 << NBL_TXD_DATALEN_BITS) - 1) + +struct nbl_rx_desc { + u32 data_len:14; + u32 rsv0:18; + u32 dd:1; + u32 eop:1; + u32 sop:1; + u32 fwd:2; + u32 sport:1; + u32 sport_id:7; + u32 checksum_status:1; + u32 ptype:8; + u32 lag:1; + u32 lag_id:2; + u32 rsv1:7; + u64 buffer_addr; +}; + +enum nbl_rxd_fwd_type { + NBL_RXD_FWD_DROP, + NBL_RXD_FWD_NORMAL, + NBL_RXD_FWD_CPU, + NBL_RXD_FWD_RSV, +}; + +enum nbl_rxd_src_port_type { + NBL_RXD_SPORT_ETH, + NBL_RXD_SPORT_HOST, +}; + +struct nbl_tx_buf { + struct nbl_tx_desc *next_to_watch; + struct sk_buff *skb; + unsigned int bytes; + unsigned 
short pkts; + DEFINE_DMA_UNMAP_LEN(len); + DEFINE_DMA_UNMAP_ADDR(dma); +}; + +struct nbl_rx_buf { + dma_addr_t dma; + struct page *page; + u32 page_offset; +}; + +struct nbl_queue_stats { + u64 packets; + u64 bytes; +}; + +#define NBL_QUEUE_STAT_ENTRIES (2) + +struct nbl_tx_queue_stats { + u64 tx_busy; + u64 tx_linearize; + u64 tx_csum_pkts; + u64 tx_dma_err; +}; + +struct nbl_rx_queue_stats { + u64 rx_csum_pkts; + u64 alloc_page_failed; + u64 rx_dma_err; + u64 alloc_skb_failed; +}; + +struct nbl_ring { + struct nbl_ring *next; + struct nbl_q_vector *q_vector; + struct device *dev; + struct net_device *netdev; + void *desc; + u8 __iomem *notify_addr; + union { + struct nbl_tx_buf *tx_bufs; + struct nbl_rx_buf *rx_bufs; + }; + + u8 queue_index; + u8 local_qid; + + u16 desc_num; + + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + u16 tail_ptr; + +#ifdef RHEL_RELEASE +#if (RHEL_RELEASE_VERSION(8, 0) > RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 7) < RHEL_RELEASE_CODE) + struct dma_attrs rx_buf_attrs; +#endif +#endif + u32 buf_len; + + unsigned int size; + dma_addr_t dma; + + struct nbl_queue_stats stats; + struct u64_stats_sync syncp; + + union { + struct nbl_tx_queue_stats tx_stats; + struct nbl_rx_queue_stats rx_stats; + }; +}; + +struct nbl_q_vector { + int q_vector_id; + int global_vector_id; + struct nbl_adapter *adapter; + struct napi_struct napi; + u32 num_ring_rx; + u32 num_ring_tx; + struct nbl_ring *tx_ring; + struct nbl_ring *rx_ring; + + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + + /* "-TxRx-nn" use 8 characters */ + char name[IFNAMSIZ + 8]; +}; + +static inline u16 nbl_unused_desc_count(struct nbl_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->desc_num) + ntc - ntu - 1; +} + +#define NBL_TX_DESC(tx_ring, i) (&(((struct nbl_tx_desc *)((tx_ring)->desc))[i])) +#define NBL_RX_DESC(rx_ring, i) (&(((struct nbl_rx_desc *)((rx_ring)->desc))[i])) +#define NBL_TX_BUF(tx_ring, i) (&(((tx_ring)->tx_bufs)[i])) +#define NBL_RX_BUF(rx_ring, i) (&(((rx_ring)->rx_bufs)[i])) + +static inline u32 nbl_rx_page_order(struct nbl_ring *rx_ring) +{ +#if (PAGE_SIZE < NBL_PAGE_SIZE_THRESH) + if (rx_ring->buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define NBL_RX_PAGE_SIZE(rx_ring) (PAGE_SIZE << nbl_rx_page_order(rx_ring)) +#define NBL_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +static inline void nbl_update_tail_ptr(u8 __iomem *notify_addr, u16 local_qid, u16 tail_ptr) +{ + writel((((u32)tail_ptr << 16) | (u32)local_qid), notify_addr); +} + +int nbl_alloc_q_vectors(struct nbl_adapter *adapter); +void nbl_free_q_vectors(struct nbl_adapter *adapter); + +int nbl_alloc_rings(struct nbl_adapter *adapter); +void nbl_free_rings(struct nbl_adapter *adapter); + +void nbl_map_rings_to_vectors(struct nbl_adapter *adapter); + +int nbl_setup_rings(struct nbl_adapter *adapter); +void nbl_teardown_rings(struct nbl_adapter *adapter); + +void nbl_af_hw_config_tx_ring(struct nbl_hw *hw, u16 func_id, dma_addr_t dma, + u16 desc_num, u8 vsi_id, u8 local_queue_id); +void nbl_af_hw_config_rx_ring(struct nbl_hw *hw, u16 func_id, dma_addr_t dma, + u16 desc_num, u32 buf_len, u8 local_queue_id); +void nbl_hw_config_rings(struct nbl_adapter *adapter); + +void nbl_alloc_all_rx_bufs(struct nbl_adapter *adapter); + +void nbl_af_configure_queue_map(struct nbl_hw *hw, u16 func_id, u8 local_queue_id, + bool rx, u16 local_vector_id, bool enable, + bool msix_enable); + +void nbl_af_control_queue(struct nbl_hw *hw, u16 func_id, u8 local_queue_id, bool rx, bool enable); +int nbl_af_reset_tx_queue(struct nbl_hw *hw, u16 func_id, u8 local_queue_id); +int nbl_af_reset_rx_queue(struct nbl_hw *hw, u16 func_id, u8 local_queue_id); +int nbl_af_wait_rx_queue_reset_done(struct nbl_hw *hw, u16 func_id, u8 local_queue_id); + +void nbl_af_configure_port_map(struct nbl_hw *hw, u16 func_id, u8 eth_port_id, u8 tx_queue_num); +void nbl_af_configure_rss_group_table(struct nbl_hw *hw, u16 func_id, u8 vsi_id, u8 rx_queue_num); + +void nbl_start_all_tx_rings(struct nbl_adapter *adapter); +void nbl_start_all_rx_rings(struct nbl_adapter *adapter); +void nbl_stop_all_tx_rings(struct nbl_adapter *adapter); +void nbl_stop_all_rx_rings(struct nbl_adapter *adapter); + +void nbl_af_eth_tx_enable(struct nbl_adapter *adapter, u8 eth_port_id); +void nbl_af_eth_tx_disable(struct nbl_adapter *adapter, u8 eth_port_id); +void nbl_af_eth_rx_enable(struct nbl_adapter *adapter, u8 eth_port_id); +void nbl_af_eth_rx_disable(struct nbl_adapter *adapter, u8 eth_port_id); + +void nbl_eth_tx_enable(struct nbl_adapter *adapter); +void nbl_eth_tx_disable(struct nbl_adapter *adapter); +void nbl_eth_rx_enable(struct nbl_adapter *adapter); +void nbl_eth_rx_disable(struct nbl_adapter *adapter); + +netdev_tx_t nbl_start_xmit(struct sk_buff *skb, struct net_device *netdev); + +bool nbl_clean_tx_irq(struct nbl_ring *tx_ring, int napi_budget); +int nbl_clean_rx_irq(struct nbl_ring *rx_ring, int budget); + +void nbl_free_all_rx_bufs(struct nbl_adapter *adapter); +void nbl_free_all_tx_bufs(struct nbl_adapter *adapter); + +int nbl_af_clean_forward_ring_rx_irq(struct nbl_ring *rx_ring, int budget); +bool nbl_af_clean_forward_ring_tx_irq(struct nbl_ring *tx_ring, int napi_budget); + +int 
nbl_activate_af_forward_queue(struct nbl_adapter *adapter); +void nbl_deactivate_af_forward_queue(struct nbl_adapter *adapter); + +#endif diff --git a/openEuler/MAINTAINERS b/openEuler/MAINTAINERS index a5913e881d5f7557c904c16f9a34894358fd1626..120fdd11acd099af4e1bcb89f9cdeb9b7ad1030d 100644 --- a/openEuler/MAINTAINERS +++ b/openEuler/MAINTAINERS @@ -97,6 +97,12 @@ F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt F: include/uapi/rdma/hns-abi.h F: drivers/infiniband/hw/hns/ +NEBULA-MATRIX Ethernet Controller DRIVERS +M: Yi Chen +S: Maintained +F: Documentation/networking/device_drivers/ethernet/nebula-matrix/ +F: drivers/net/ethernet/nebula-matrix/ + HISILICON ROH DRIVER M: Ke Chen S: Maintained